author		John Johansen <john.johansen@canonical.com>	2019-09-14 03:34:06 -0700
committer	John Johansen <john.johansen@canonical.com>	2019-11-22 16:41:08 -0800
commit		341c1fda5e17156619fb71acfc7082b2669b4b72
tree		e5fbbd464835c19aec5fa16ce4bd325a4667fa05 /security/apparmor/mount.c
parent		apparmor: reduce rcu_read_lock scope for aa_file_perm mediation
apparmor: make it so work buffers can be allocated from atomic context
In some situations AppArmor needs to be able to use its work buffers from atomic context. Add the ability for callers to indicate that they are in atomic context, and hold a set of work buffers in reserve for atomic callers, to reduce the chance that a large work-buffer allocation will have to be done in atomic context.

Fixes: df323337e507 ("apparmor: Use a memory pool instead per-CPU caches")
Signed-off-by: John Johansen <john.johansen@canonical.com>
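The hunks in this diff only thread the new in_atomic flag through the aa_get_buffer() callers in mount.c, all of which may sleep and therefore pass false. The pool that actually honours the flag lives outside this file and is not shown here. The sketch below is only an illustration of how a reserve-aware aa_get_buffer()/aa_put_buffer() pair could look; the pool fields (aa_global_buffers, buffer_count, reserve_count), the union aa_buffer layout, and the AA_WORK_BUF_SIZE constant are assumptions made for the example, not taken from this patch.

/*
 * Illustrative sketch only -- not part of this patch.  The real pool and
 * reservation logic live outside mount.c; names below are assumptions.
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define AA_WORK_BUF_SIZE 8192		/* stand-in for the real buffer size */

union aa_buffer {
	struct list_head list;		/* linkage while parked in the pool */
	char buffer[1];			/* start of the work buffer when handed out */
};

static LIST_HEAD(aa_global_buffers);
static DEFINE_SPINLOCK(aa_buffers_lock);
static int buffer_count;		/* buffers currently parked in the pool */
static int reserve_count;		/* buffers held back for atomic callers */

char *aa_get_buffer(bool in_atomic)
{
	union aa_buffer *aa_buf;
	gfp_t flags = GFP_KERNEL;

	spin_lock(&aa_buffers_lock);
	/* sleepable callers must not drain the reserve kept for atomic ones */
	if (buffer_count > reserve_count ||
	    (in_atomic && !list_empty(&aa_global_buffers))) {
		aa_buf = list_first_entry(&aa_global_buffers,
					  union aa_buffer, list);
		list_del(&aa_buf->list);
		buffer_count--;
		spin_unlock(&aa_buffers_lock);
		return aa_buf->buffer;
	}
	if (in_atomic) {
		/* reserve was too small: grow it and fall back to GFP_ATOMIC */
		reserve_count++;
		flags = GFP_ATOMIC;
	}
	spin_unlock(&aa_buffers_lock);

	aa_buf = kmalloc(AA_WORK_BUF_SIZE, flags);
	if (!aa_buf)
		return NULL;
	return aa_buf->buffer;
}

void aa_put_buffer(char *buf)
{
	union aa_buffer *aa_buf;

	if (!buf)
		return;
	aa_buf = container_of(buf, union aa_buffer, buffer[0]);

	spin_lock(&aa_buffers_lock);
	list_add(&aa_buf->list, &aa_global_buffers);
	buffer_count++;
	spin_unlock(&aa_buffers_lock);
}

Under these assumptions, a sleepable path such as aa_remount() below calls aa_get_buffer(false); a caller holding a spinlock or running under RCU would pass true and, at worst, fall back to a GFP_ATOMIC allocation instead of sleeping.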
Diffstat (limited to 'security/apparmor/mount.c')
-rw-r--r--	security/apparmor/mount.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index 2dbccb021663..b06f5cba8fc4 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -412,7 +412,7 @@ int aa_remount(struct aa_label *label, const struct path *path,
binary = path->dentry->d_sb->s_type->fs_flags & FS_BINARY_MOUNTDATA;
- buffer = aa_get_buffer();
+ buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
@@ -443,8 +443,8 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
if (error)
return error;
- buffer = aa_get_buffer();
- old_buffer = aa_get_buffer();
+ buffer = aa_get_buffer(false);
+ old_buffer = aa_get_buffer(false);
error = -ENOMEM;
if (!buffer || !old_buffer)
goto out;
@@ -474,7 +474,7 @@ int aa_mount_change_type(struct aa_label *label, const struct path *path,
flags &= (MS_REC | MS_SILENT | MS_SHARED | MS_PRIVATE | MS_SLAVE |
MS_UNBINDABLE);
- buffer = aa_get_buffer();
+ buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
@@ -503,8 +503,8 @@ int aa_move_mount(struct aa_label *label, const struct path *path,
if (error)
return error;
- buffer = aa_get_buffer();
- old_buffer = aa_get_buffer();
+ buffer = aa_get_buffer(false);
+ old_buffer = aa_get_buffer(false);
error = -ENOMEM;
if (!buffer || !old_buffer)
goto out;
@@ -554,13 +554,13 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
}
}
- buffer = aa_get_buffer();
+ buffer = aa_get_buffer(false);
if (!buffer) {
error = -ENOMEM;
goto out;
}
if (dev_path) {
- dev_buffer = aa_get_buffer();
+ dev_buffer = aa_get_buffer(false);
if (!dev_buffer) {
error = -ENOMEM;
goto out;
@@ -624,7 +624,7 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
AA_BUG(!label);
AA_BUG(!mnt);
- buffer = aa_get_buffer();
+ buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
@@ -703,8 +703,8 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
AA_BUG(!old_path);
AA_BUG(!new_path);
- old_buffer = aa_get_buffer();
- new_buffer = aa_get_buffer();
+ old_buffer = aa_get_buffer(false);
+ new_buffer = aa_get_buffer(false);
error = -ENOMEM;
if (!old_buffer || !new_buffer)
goto out;