author     Linus Torvalds <torvalds@linux-foundation.org>  2022-08-03 13:50:22 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-08-03 13:50:22 -0700
commit     5264406cdb66c7003eb3edf53c9773b1b20611b9 (patch)
tree       e94f76f64a0b3b45dcb9f9bec85cce2ba78e1221  /fs/file_table.c
parent     Merge tag 'pull-work.dcache' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs (diff)
parent     first_iovec_segment(): just return address (diff)
download   linux-dev-5264406cdb66c7003eb3edf53c9773b1b20611b9.tar.xz
           linux-dev-5264406cdb66c7003eb3edf53c9773b1b20611b9.zip
Merge tag 'pull-work.iov_iter-base' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs iov_iter updates from Al Viro:
 "Part 1 - isolated cleanups and optimizations.

  One of the goals is to reduce the overhead of using ->read_iter() and
  ->write_iter() instead of ->read()/->write().

  new_sync_{read,write}() has a surprising amount of overhead, in
  particular inside iocb_flags(). That's why the beginning of the series
  is in this pile; it's not directly iov_iter-related, but it's a part
  of the same work..."

* tag 'pull-work.iov_iter-base' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  first_iovec_segment(): just return address
  iov_iter: massage calling conventions for first_{iovec,bvec}_segment()
  iov_iter: first_{iovec,bvec}_segment() - simplify a bit
  iov_iter: lift dealing with maxpages out of first_{iovec,bvec}_segment()
  iov_iter_get_pages{,_alloc}(): cap the maxsize with MAX_RW_COUNT
  iov_iter_bvec_advance(): don't bother with bvec_iter
  copy_page_{to,from}_iter(): switch iovec variants to generic
  keep iocb_flags() result cached in struct file
  iocb: delay evaluation of IS_SYNC(...) until we want to check IOCB_DSYNC
  struct file: use anonymous union member for rcuhead and llist
  btrfs: use IOMAP_DIO_NOSYNC
  teach iomap_dio_rw() to suppress dsync
  No need of likely/unlikely on calls of check_copy_size()
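The single fs/file_table.c change below comes from two of those commits: "struct file: use anonymous union member for rcuhead and llist" (the f_u.fu_* -> f_* renames) and "keep iocb_flags() result cached in struct file" (the new f_iocb_flags assignment in alloc_file()). As a rough illustration of the second idea - compute the per-I/O flag word once at open time and reuse it on every read/write rather than rederiving it each time - here is a small stand-alone C sketch; every name in it is invented for the demo and it is not kernel code:

/*
 * User-space illustration of caching a derived flag word at "open" time
 * instead of recomputing it on every I/O.  Only the pattern mirrors the
 * series; demo_* names are made up for this example.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_IOCB_APPEND	0x1
#define DEMO_IOCB_DIRECT	0x2

struct demo_file {
	bool		o_append;
	bool		o_direct;
	unsigned int	f_iocb_flags;	/* cached flag word */
};

/* Analogous to iocb_flags(): cheap here, but costly if done per I/O. */
static unsigned int demo_iocb_flags(const struct demo_file *f)
{
	unsigned int res = 0;

	if (f->o_append)
		res |= DEMO_IOCB_APPEND;
	if (f->o_direct)
		res |= DEMO_IOCB_DIRECT;
	return res;
}

static void demo_open(struct demo_file *f, bool append, bool direct)
{
	f->o_append = append;
	f->o_direct = direct;
	f->f_iocb_flags = demo_iocb_flags(f);	/* compute once, at open time */
}

static void demo_read(const struct demo_file *f)
{
	/* Hot path: just read the cached word, no recomputation. */
	printf("read with iocb flags 0x%x\n", f->f_iocb_flags);
}

int main(void)
{
	struct demo_file f;

	demo_open(&f, true, false);
	demo_read(&f);
	demo_read(&f);
	return 0;
}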
Diffstat (limited to 'fs/file_table.c')
-rw-r--r--  fs/file_table.c  17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/fs/file_table.c b/fs/file_table.c
index 5727a63a7b67..99c6796c9f28 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -45,7 +45,7 @@ static struct percpu_counter nr_files __cacheline_aligned_in_smp;
static void file_free_rcu(struct rcu_head *head)
{
- struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
+ struct file *f = container_of(head, struct file, f_rcuhead);
put_cred(f->f_cred);
kmem_cache_free(filp_cachep, f);
@@ -56,7 +56,7 @@ static inline void file_free(struct file *f)
security_file_free(f);
if (!(f->f_mode & FMODE_NOACCOUNT))
percpu_counter_dec(&nr_files);
- call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
+ call_rcu(&f->f_rcuhead, file_free_rcu);
}
/*
@@ -142,7 +142,7 @@ static struct file *__alloc_file(int flags, const struct cred *cred)
f->f_cred = get_cred(cred);
error = security_file_alloc(f);
if (unlikely(error)) {
- file_free_rcu(&f->f_u.fu_rcuhead);
+ file_free_rcu(&f->f_rcuhead);
return ERR_PTR(error);
}
@@ -243,6 +243,7 @@ static struct file *alloc_file(const struct path *path, int flags,
if ((file->f_mode & FMODE_WRITE) &&
likely(fop->write || fop->write_iter))
file->f_mode |= FMODE_CAN_WRITE;
+ file->f_iocb_flags = iocb_flags(file);
file->f_mode |= FMODE_OPENED;
file->f_op = fop;
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
@@ -343,13 +344,13 @@ static void delayed_fput(struct work_struct *unused)
struct llist_node *node = llist_del_all(&delayed_fput_list);
struct file *f, *t;
- llist_for_each_entry_safe(f, t, node, f_u.fu_llist)
+ llist_for_each_entry_safe(f, t, node, f_llist)
__fput(f);
}
static void ____fput(struct callback_head *work)
{
- __fput(container_of(work, struct file, f_u.fu_rcuhead));
+ __fput(container_of(work, struct file, f_rcuhead));
}
/*
@@ -376,8 +377,8 @@ void fput(struct file *file)
struct task_struct *task = current;
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
- init_task_work(&file->f_u.fu_rcuhead, ____fput);
- if (!task_work_add(task, &file->f_u.fu_rcuhead, TWA_RESUME))
+ init_task_work(&file->f_rcuhead, ____fput);
+ if (!task_work_add(task, &file->f_rcuhead, TWA_RESUME))
return;
/*
* After this task has run exit_task_work(),
@@ -386,7 +387,7 @@ void fput(struct file *file)
*/
}
- if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
+ if (llist_add(&file->f_llist, &delayed_fput_list))
schedule_delayed_work(&delayed_fput_work, 1);
}
}
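Most of the hunks above are a mechanical rename from f_u.fu_rcuhead/f_u.fu_llist to f_rcuhead/f_llist. That rename stays mechanical because members of an anonymous union behave, for offsetof() and therefore container_of(), as if they were declared directly in struct file. Below is a stand-alone user-space sketch of that property; the demo_* types and names are invented for the example and are not the kernel definitions:

/*
 * Demonstrates that container_of() on an anonymous-union member works the
 * same as on a direct struct member, which is why file_free_rcu() above only
 * needs the member name changed.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_rcu_head { void (*func)(struct demo_rcu_head *); };
struct demo_llist_node { struct demo_llist_node *next; };

struct demo_file {
	union {		/* anonymous union: the two members share storage */
		struct demo_llist_node	f_llist;	/* delayed-put list linkage */
		struct demo_rcu_head	f_rcuhead;	/* deferred-free callback */
	};
	unsigned int f_iocb_flags;	/* flags cached at "open" time */
};

static void demo_free_rcu(struct demo_rcu_head *head)
{
	/* Same pattern as file_free_rcu() after the patch: recover the
	 * containing object from a pointer to the union member. */
	struct demo_file *f = container_of(head, struct demo_file, f_rcuhead);

	printf("freeing demo file, cached iocb flags 0x%x\n", f->f_iocb_flags);
}

int main(void)
{
	struct demo_file f = { .f_iocb_flags = 0x4 };

	/* The two union members are never live at the same time, so they can
	 * safely overlap; both sit at offset 0 of struct demo_file. */
	f.f_rcuhead.func = demo_free_rcu;
	f.f_rcuhead.func(&f.f_rcuhead);
	return 0;
}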