Diffstat (limited to 'fs/gfs2/file.c')
-rw-r--r-- | fs/gfs2/file.c | 590 |
1 files changed, 413 insertions, 177 deletions
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index cb26be6f4351..60c6fb91fb58 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -25,6 +25,7 @@
 #include <linux/dlm_plock.h>
 #include <linux/delay.h>
 #include <linux/backing-dev.h>
+#include <linux/fileattr.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -118,8 +119,8 @@ static int gfs2_readdir(struct file *file, struct dir_context *ctx)
 	return error;
 }
 
-/**
- * fsflag_gfs2flag
+/*
+ * struct fsflag_gfs2flag
  *
  * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
  * and to GFS2_DIF_JDATA for non-directories.
@@ -153,14 +154,17 @@ static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
 	return fsflags;
 }
 
-static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
+int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
 {
-	struct inode *inode = file_inode(filp);
+	struct inode *inode = d_inode(dentry);
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
 	int error;
 	u32 fsflags;
 
+	if (d_is_special(dentry))
+		return -ENOTTY;
+
 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 	error = gfs2_glock_nq(&gh);
 	if (error)
@@ -168,8 +172,7 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
 
 	fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);
 
-	if (put_user(fsflags, ptr))
-		error = -EFAULT;
+	fileattr_fill_flags(fa, fsflags);
 
 	gfs2_glock_dq(&gh);
 out_uninit:
@@ -207,39 +210,23 @@ void gfs2_set_inode_flags(struct inode *inode)
 
 /**
  * do_gfs2_set_flags - set flags on an inode
- * @filp: file pointer
+ * @inode: The inode
  * @reqflags: The flags to set
  * @mask: Indicates which flags are valid
- * @fsflags: The FS_* inode flags passed in
  *
  */
-static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask,
-			     const u32 fsflags)
+static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
 {
-	struct inode *inode = file_inode(filp);
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct buffer_head *bh;
 	struct gfs2_holder gh;
 	int error;
-	u32 new_flags, flags, oldflags;
-
-	error = mnt_want_write_file(filp);
-	if (error)
-		return error;
+	u32 new_flags, flags;
 
 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 	if (error)
-		goto out_drop_write;
-
-	oldflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);
-	error = vfs_ioc_setflags_prepare(inode, oldflags, fsflags);
-	if (error)
-		goto out;
-
-	error = -EACCES;
-	if (!inode_owner_or_capable(inode))
-		goto out;
+		return error;
 
 	error = 0;
 	flags = ip->i_diskflags;
@@ -247,16 +234,8 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask,
 	if ((new_flags ^ flags) == 0)
 		goto out;
 
-	error = -EPERM;
-	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
-		goto out;
-	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
-		goto out;
-	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
-	    !capable(CAP_LINUX_IMMUTABLE))
-		goto out;
 	if (!IS_IMMUTABLE(inode)) {
-		error = gfs2_permission(inode, MAY_WRITE);
+		error = gfs2_permission(&init_user_ns, inode, MAY_WRITE);
 		if (error)
 			goto out;
 	}
@@ -291,20 +270,22 @@ out_trans_end:
 	gfs2_trans_end(sdp);
 out:
 	gfs2_glock_dq_uninit(&gh);
-out_drop_write:
-	mnt_drop_write_file(filp);
 	return error;
 }
 
-static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
+int gfs2_fileattr_set(struct user_namespace *mnt_userns,
+		      struct dentry *dentry, struct fileattr *fa)
 {
-	struct inode *inode = file_inode(filp);
-	u32 fsflags, gfsflags = 0;
+	struct inode *inode = d_inode(dentry);
+	u32 fsflags = fa->flags, gfsflags = 0;
 	u32 mask;
 	int i;
 
-	if (get_user(fsflags, ptr))
-		return -EFAULT;
+	if (d_is_special(dentry))
+		return -ENOTTY;
+
+	if (fileattr_has_fsx(fa))
+		return -EOPNOTSUPP;
 
 	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
 		if (fsflags & fsflag_gfs2flag[i].fsflag) {
@@ -325,7 +306,7 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
 		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
 	}
 
-	return do_gfs2_set_flags(filp, gfsflags, mask, fsflags);
+	return do_gfs2_set_flags(inode, gfsflags, mask);
 }
 
 static int gfs2_getlabel(struct file *filp, char __user *label)
@@ -342,10 +323,6 @@ static int gfs2_getlabel(struct file *filp, char __user *label)
 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	switch(cmd) {
-	case FS_IOC_GETFLAGS:
-		return gfs2_get_flags(filp, (u32 __user *)arg);
-	case FS_IOC_SETFLAGS:
-		return gfs2_set_flags(filp, (u32 __user *)arg);
 	case FITRIM:
 		return gfs2_fitrim(filp, (void __user *)arg);
 	case FS_IOC_GETFSLABEL:
@@ -359,13 +336,6 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	switch(cmd) {
-	/* These are just misnamed, they actually get/put from/to user an int */
-	case FS_IOC32_GETFLAGS:
-		cmd = FS_IOC_GETFLAGS;
-		break;
-	case FS_IOC32_SETFLAGS:
-		cmd = FS_IOC_SETFLAGS;
-		break;
 	/* Keep this list in sync with gfs2_ioctl */
 	case FITRIM:
 	case FS_IOC_GETFSLABEL:
@@ -421,7 +391,7 @@ static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
 	do {
 		struct iomap iomap = { };
 
-		if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap))
+		if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap))
 			return -EIO;
 
 		if (length < iomap.length)
@@ -435,7 +405,6 @@ static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
 
 /**
  * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
- * @vma: The virtual memory area
  * @vmf: The virtual memory fault containing the page to become writable
  *
  * When the page becomes writable, we need to ensure that we have
@@ -451,26 +420,25 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 	struct gfs2_alloc_parms ap = { .aflags = 0, };
 	u64 offset = page_offset(page);
 	unsigned int data_blocks, ind_blocks, rblocks;
+	vm_fault_t ret = VM_FAULT_LOCKED;
 	struct gfs2_holder gh;
 	unsigned int length;
 	loff_t size;
-	int ret;
+	int err;
 
 	sb_start_pagefault(inode->i_sb);
 
-	ret = gfs2_rsqa_alloc(ip);
-	if (ret)
-		goto out;
-
 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
-	ret = gfs2_glock_nq(&gh);
-	if (ret)
+	err = gfs2_glock_nq(&gh);
+	if (err) {
+		ret = block_page_mkwrite_return(err);
 		goto out_uninit;
+	}
 
 	/* Check page index against inode size */
 	size = i_size_read(inode);
 	if (offset >= size) {
-		ret = -EINVAL;
+		ret = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
 
@@ -478,8 +446,8 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 	file_update_time(vmf->vma->vm_file);
 
 	/* page is wholly or partially inside EOF */
-	if (offset > size - PAGE_SIZE)
-		length = offset_in_page(size);
+	if (size - offset < PAGE_SIZE)
+		length = size - offset;
 	else
 		length = PAGE_SIZE;
 
@@ -497,24 +465,30 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 	    !gfs2_write_alloc_required(ip, offset, length)) {
 		lock_page(page);
 		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
-			ret = -EAGAIN;
+			ret = VM_FAULT_NOPAGE;
 			unlock_page(page);
 		}
 		goto out_unlock;
 	}
 
-	ret = gfs2_rindex_update(sdp);
-	if (ret)
+	err = gfs2_rindex_update(sdp);
+	if (err) {
+		ret = block_page_mkwrite_return(err);
 		goto out_unlock;
+	}
 
 	gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
 	ap.target = data_blocks + ind_blocks;
-	ret = gfs2_quota_lock_check(ip, &ap);
-	if (ret)
+	err = gfs2_quota_lock_check(ip, &ap);
+	if (err) {
+		ret = block_page_mkwrite_return(err);
 		goto out_unlock;
-	ret = gfs2_inplace_reserve(ip, &ap);
-	if (ret)
+	}
+	err = gfs2_inplace_reserve(ip, &ap);
+	if (err) {
+		ret = block_page_mkwrite_return(err);
 		goto out_quota_unlock;
+	}
 
 	rblocks = RES_DINODE + ind_blocks;
 	if (gfs2_is_jdata(ip))
@@ -523,28 +497,38 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 		rblocks += RES_STATFS + RES_QUOTA;
 		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
 	}
-	ret = gfs2_trans_begin(sdp, rblocks, 0);
-	if (ret)
+	err = gfs2_trans_begin(sdp, rblocks, 0);
+	if (err) {
+		ret = block_page_mkwrite_return(err);
 		goto out_trans_fail;
+	}
+
+	/* Unstuff, if required, and allocate backing blocks for page */
+	if (gfs2_is_stuffed(ip)) {
+		err = gfs2_unstuff_dinode(ip);
+		if (err) {
+			ret = block_page_mkwrite_return(err);
+			goto out_trans_end;
+		}
+	}
 
 	lock_page(page);
-	ret = -EAGAIN;
 	/* If truncated, we must retry the operation, we may have raced
 	 * with the glock demotion code.
 	 */
-	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
-		goto out_trans_end;
+	if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+		ret = VM_FAULT_NOPAGE;
+		goto out_page_locked;
+	}
 
-	/* Unstuff, if required, and allocate backing blocks for page */
-	ret = 0;
-	if (gfs2_is_stuffed(ip))
-		ret = gfs2_unstuff_dinode(ip, page);
-	if (ret == 0)
-		ret = gfs2_allocate_page_backing(page, length);
+	err = gfs2_allocate_page_backing(page, length);
+	if (err)
+		ret = block_page_mkwrite_return(err);
 
-out_trans_end:
-	if (ret)
+out_page_locked:
+	if (ret != VM_FAULT_LOCKED)
 		unlock_page(page);
+out_trans_end:
 	gfs2_trans_end(sdp);
 out_trans_fail:
 	gfs2_inplace_release(ip);
@@ -554,23 +538,43 @@ out_unlock:
 	gfs2_glock_dq(&gh);
 out_uninit:
 	gfs2_holder_uninit(&gh);
-	if (ret == 0) {
+	if (ret == VM_FAULT_LOCKED) {
 		set_page_dirty(page);
 		wait_for_stable_page(page);
 	}
-out:
 	sb_end_pagefault(inode->i_sb);
-	return block_page_mkwrite_return(ret);
+	return ret;
+}
+
+static vm_fault_t gfs2_fault(struct vm_fault *vmf)
+{
+	struct inode *inode = file_inode(vmf->vma->vm_file);
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_holder gh;
+	vm_fault_t ret;
+	int err;
+
+	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+	err = gfs2_glock_nq(&gh);
+	if (err) {
+		ret = block_page_mkwrite_return(err);
+		goto out_uninit;
+	}
+	ret = filemap_fault(vmf);
+	gfs2_glock_dq(&gh);
+out_uninit:
+	gfs2_holder_uninit(&gh);
+	return ret;
 }
 
 static const struct vm_operations_struct gfs2_vm_ops = {
-	.fault = filemap_fault,
+	.fault = gfs2_fault,
 	.map_pages = filemap_map_pages,
 	.page_mkwrite = gfs2_page_mkwrite,
 };
 
 /**
- * gfs2_mmap -
+ * gfs2_mmap
  * @file: The file to map
  * @vma: The VMA which described the mapping
  *
@@ -635,7 +639,17 @@ int gfs2_open_common(struct inode *inode, struct file *file)
 
 	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
 	file->private_data = fp;
+	if (file->f_mode & FMODE_WRITE) {
+		ret = gfs2_qa_get(GFS2_I(inode));
+		if (ret)
+			goto fail;
+	}
 	return 0;
+
+fail:
+	kfree(file->private_data);
+	file->private_data = NULL;
+	return ret;
 }
 
 /**
@@ -690,10 +704,11 @@ static int gfs2_release(struct inode *inode, struct file *file)
 
 	kfree(file->private_data);
 	file->private_data = NULL;
 
-	if (!(file->f_mode & FMODE_WRITE))
-		return 0;
-
-	gfs2_rsqa_delete(ip, &inode->i_writecount);
+	if (file->f_mode & FMODE_WRITE) {
+		if (gfs2_rs_active(&ip->i_res))
+			gfs2_rs_delete(ip);
+		gfs2_qa_put(ip);
+	}
 	return 0;
 }
 
@@ -723,7 +738,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 {
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
-	int sync_state = inode->i_state & I_DIRTY_ALL;
+	int sync_state = inode->i_state & I_DIRTY;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	int ret = 0, ret1 = 0;
 
@@ -736,7 +751,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 	if (!gfs2_is_jdata(ip))
 		sync_state &= ~I_DIRTY_PAGES;
 	if (datasync)
-		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);
+		sync_state &= ~I_DIRTY_SYNC;
 
 	if (sync_state) {
 		ret = sync_inode_metadata(inode, 1);
@@ -755,42 +770,118 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 	return ret ? ret : ret1;
 }
 
-static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
+static inline bool should_fault_in_pages(struct iov_iter *i,
+					 struct kiocb *iocb,
+					 size_t *prev_count,
+					 size_t *window_size)
+{
+	size_t count = iov_iter_count(i);
+	size_t size, offs;
+
+	if (!count)
+		return false;
+	if (!user_backed_iter(i))
+		return false;
+
+	size = PAGE_SIZE;
+	offs = offset_in_page(iocb->ki_pos);
+	if (*prev_count != count || !*window_size) {
+		size_t nr_dirtied;
+
+		nr_dirtied = max(current->nr_dirtied_pause -
+				 current->nr_dirtied, 8);
+		size = min_t(size_t, SZ_1M, nr_dirtied << PAGE_SHIFT);
+	}
+
+	*prev_count = count;
+	*window_size = size - offs;
+	return true;
+}
+
+static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
+				     struct gfs2_holder *gh)
 {
 	struct file *file = iocb->ki_filp;
 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
-	size_t count = iov_iter_count(to);
-	struct gfs2_holder gh;
+	size_t prev_count = 0, window_size = 0;
+	size_t read = 0;
 	ssize_t ret;
 
-	if (!count)
+	/*
+	 * In this function, we disable page faults when we're holding the
+	 * inode glock while doing I/O. If a page fault occurs, we indicate
+	 * that the inode glock may be dropped, fault in the pages manually,
+	 * and retry.
+	 *
+	 * Unlike generic_file_read_iter, for reads, iomap_dio_rw can trigger
+	 * physical as well as manual page faults, and we need to disable both
+	 * kinds.
+	 *
+	 * For direct I/O, gfs2 takes the inode glock in deferred mode. This
+	 * locking mode is compatible with other deferred holders, so multiple
+	 * processes and nodes can do direct I/O to a file at the same time.
+	 * There's no guarantee that reads or writes will be atomic. Any
+	 * coordination among readers and writers needs to happen externally.
+	 */
+
+	if (!iov_iter_count(to))
 		return 0; /* skip atime */
 
-	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
-	ret = gfs2_glock_nq(&gh);
+	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
+retry:
+	ret = gfs2_glock_nq(gh);
 	if (ret)
 		goto out_uninit;
-
+	pagefault_disable();
+	to->nofault = true;
 	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
-			   is_sync_kiocb(iocb));
-
-	gfs2_glock_dq(&gh);
+			   IOMAP_DIO_PARTIAL, NULL, read);
+	to->nofault = false;
+	pagefault_enable();
+	if (ret <= 0 && ret != -EFAULT)
+		goto out_unlock;
+	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
+	if (ret > 0)
+		read = ret;
+
+	if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
+		gfs2_glock_dq(gh);
+		window_size -= fault_in_iov_iter_writeable(to, window_size);
+		if (window_size)
+			goto retry;
+	}
+out_unlock:
+	if (gfs2_holder_queued(gh))
+		gfs2_glock_dq(gh);
 out_uninit:
-	gfs2_holder_uninit(&gh);
-	return ret;
+	gfs2_holder_uninit(gh);
+	/* User space doesn't expect partial success. */
+	if (ret < 0)
+		return ret;
+	return read;
 }
 
-static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
+				      struct gfs2_holder *gh)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	struct gfs2_inode *ip = GFS2_I(inode);
-	size_t len = iov_iter_count(from);
-	loff_t offset = iocb->ki_pos;
-	struct gfs2_holder gh;
+	size_t prev_count = 0, window_size = 0;
+	size_t written = 0;
 	ssize_t ret;
 
 	/*
+	 * In this function, we disable page faults when we're holding the
+	 * inode glock while doing I/O. If a page fault occurs, we indicate
+	 * that the inode glock may be dropped, fault in the pages manually,
+	 * and retry.
+	 *
+	 * For writes, iomap_dio_rw only triggers manual page faults, so we
+	 * don't need to disable physical ones.
+	 */
+
+	/*
 	 * Deferred lock, even if its a write, since we do no allocation on
 	 * this path. All we need to change is the atime, and this lock mode
 	 * ensures that other nodes have flushed their buffered read caches
@@ -798,36 +889,186 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 	 * unfortunately, have the option of only flushing a range like the
 	 * VFS does.
 	 */
-	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
-	ret = gfs2_glock_nq(&gh);
+	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
+retry:
+	ret = gfs2_glock_nq(gh);
 	if (ret)
 		goto out_uninit;
 
-	/* Silently fall back to buffered I/O when writing beyond EOF */
-	if (offset + len > i_size_read(&ip->i_inode))
-		goto out;
+	if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode))
+		goto out_unlock;
 
+	from->nofault = true;
 	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
-			   is_sync_kiocb(iocb));
-
-out:
-	gfs2_glock_dq(&gh);
+			   IOMAP_DIO_PARTIAL, NULL, written);
+	from->nofault = false;
+	if (ret <= 0) {
+		if (ret == -ENOTBLK)
+			ret = 0;
+		if (ret != -EFAULT)
+			goto out_unlock;
+	}
+	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
+	if (ret > 0)
+		written = ret;
+
+	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+		gfs2_glock_dq(gh);
+		window_size -= fault_in_iov_iter_readable(from, window_size);
+		if (window_size)
+			goto retry;
+	}
+out_unlock:
+	if (gfs2_holder_queued(gh))
+		gfs2_glock_dq(gh);
 out_uninit:
-	gfs2_holder_uninit(&gh);
-	return ret;
+	gfs2_holder_uninit(gh);
+	/* User space doesn't expect partial success. */
+	if (ret < 0)
+		return ret;
+	return written;
 }
 
 static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+	struct gfs2_inode *ip;
+	struct gfs2_holder gh;
+	size_t prev_count = 0, window_size = 0;
+	size_t read = 0;
 	ssize_t ret;
 
-	if (iocb->ki_flags & IOCB_DIRECT) {
-		ret = gfs2_file_direct_read(iocb, to);
-		if (likely(ret != -ENOTBLK))
+	/*
+	 * In this function, we disable page faults when we're holding the
+	 * inode glock while doing I/O. If a page fault occurs, we indicate
+	 * that the inode glock may be dropped, fault in the pages manually,
+	 * and retry.
+	 */
+
+	if (iocb->ki_flags & IOCB_DIRECT)
+		return gfs2_file_direct_read(iocb, to, &gh);
+
+	pagefault_disable();
+	iocb->ki_flags |= IOCB_NOIO;
+	ret = generic_file_read_iter(iocb, to);
+	iocb->ki_flags &= ~IOCB_NOIO;
+	pagefault_enable();
+	if (ret >= 0) {
+		if (!iov_iter_count(to))
+			return ret;
+		read = ret;
+	} else if (ret != -EFAULT) {
+		if (ret != -EAGAIN)
+			return ret;
+		if (iocb->ki_flags & IOCB_NOWAIT)
 			return ret;
-		iocb->ki_flags &= ~IOCB_DIRECT;
 	}
-	return generic_file_read_iter(iocb, to);
+	ip = GFS2_I(iocb->ki_filp->f_mapping->host);
+	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+retry:
+	ret = gfs2_glock_nq(&gh);
+	if (ret)
+		goto out_uninit;
+	pagefault_disable();
+	ret = generic_file_read_iter(iocb, to);
+	pagefault_enable();
+	if (ret <= 0 && ret != -EFAULT)
+		goto out_unlock;
+	if (ret > 0)
+		read += ret;
+
+	if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
+		gfs2_glock_dq(&gh);
+		window_size -= fault_in_iov_iter_writeable(to, window_size);
+		if (window_size)
+			goto retry;
+	}
+out_unlock:
+	if (gfs2_holder_queued(&gh))
+		gfs2_glock_dq(&gh);
+out_uninit:
+	gfs2_holder_uninit(&gh);
+	return read ? read : ret;
+}
+
+static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
+					struct iov_iter *from,
+					struct gfs2_holder *gh)
+{
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct gfs2_holder *statfs_gh = NULL;
+	size_t prev_count = 0, window_size = 0;
+	size_t orig_count = iov_iter_count(from);
+	size_t written = 0;
+	ssize_t ret;
+
+	/*
+	 * In this function, we disable page faults when we're holding the
+	 * inode glock while doing I/O. If a page fault occurs, we indicate
+	 * that the inode glock may be dropped, fault in the pages manually,
+	 * and retry.
+	 */
+
+	if (inode == sdp->sd_rindex) {
+		statfs_gh = kmalloc(sizeof(*statfs_gh), GFP_NOFS);
+		if (!statfs_gh)
+			return -ENOMEM;
+	}
+
+	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
+retry:
+	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+		window_size -= fault_in_iov_iter_readable(from, window_size);
+		if (!window_size) {
+			ret = -EFAULT;
+			goto out_uninit;
+		}
+		from->count = min(from->count, window_size);
+	}
+	ret = gfs2_glock_nq(gh);
+	if (ret)
+		goto out_uninit;
+
+	if (inode == sdp->sd_rindex) {
+		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+		ret = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
+					 GL_NOCACHE, statfs_gh);
+		if (ret)
+			goto out_unlock;
+	}
+
+	current->backing_dev_info = inode_to_bdi(inode);
+	pagefault_disable();
+	ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+	pagefault_enable();
+	current->backing_dev_info = NULL;
+	if (ret > 0) {
+		iocb->ki_pos += ret;
+		written += ret;
+	}
+
+	if (inode == sdp->sd_rindex)
+		gfs2_glock_dq_uninit(statfs_gh);
+
+	if (ret <= 0 && ret != -EFAULT)
+		goto out_unlock;
+
+	from->count = orig_count - written;
+	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+		gfs2_glock_dq(gh);
+		goto retry;
+	}
+out_unlock:
+	if (gfs2_holder_queued(gh))
+		gfs2_glock_dq(gh);
out_uninit:
+	gfs2_holder_uninit(gh);
+	kfree(statfs_gh);
+	from->count = orig_count - written;
+	return written ? written : ret;
 }
 
 /**
@@ -847,17 +1088,12 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
 	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_holder gh;
 	ssize_t ret;
 
-	ret = gfs2_rsqa_alloc(ip);
-	if (ret)
-		return ret;
-
 	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
 
 	if (iocb->ki_flags & IOCB_APPEND) {
-		struct gfs2_holder gh;
-
 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 		if (ret)
 			return ret;
@@ -881,16 +1117,17 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		struct address_space *mapping = file->f_mapping;
 		ssize_t buffered, ret2;
 
-		ret = gfs2_file_direct_write(iocb, from);
+		ret = gfs2_file_direct_write(iocb, from, &gh);
 		if (ret < 0 || !iov_iter_count(from))
 			goto out_unlock;
 
 		iocb->ki_flags |= IOCB_DSYNC;
-		current->backing_dev_info = inode_to_bdi(inode);
-		buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
-		current->backing_dev_info = NULL;
-		if (unlikely(buffered <= 0))
+		buffered = gfs2_file_buffered_write(iocb, from, &gh);
+		if (unlikely(buffered <= 0)) {
+			if (!ret)
+				ret = buffered;
 			goto out_unlock;
+		}
 
 		/*
 		 * We need to ensure that the page cache pages are written to
@@ -899,7 +1136,6 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		 * the direct I/O range as we don't know if the buffered pages
 		 * made it to disk.
 		 */
-		iocb->ki_pos += buffered;
 		ret2 = generic_write_sync(iocb, buffered);
 		invalidate_mapping_pages(mapping,
 					 (iocb->ki_pos - buffered) >> PAGE_SHIFT,
@@ -907,13 +1143,9 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		if (!ret || ret2 > 0)
 			ret += ret2;
 	} else {
-		current->backing_dev_info = inode_to_bdi(inode);
-		ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
-		current->backing_dev_info = NULL;
-		if (likely(ret > 0)) {
-			iocb->ki_pos += ret;
+		ret = gfs2_file_buffered_write(iocb, from, &gh);
+		if (likely(ret > 0))
 			ret = generic_write_sync(iocb, ret);
-		}
 	}
 
 out_unlock:
@@ -937,7 +1169,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 	gfs2_trans_add_meta(ip->i_gl, dibh);
 
 	if (gfs2_is_stuffed(ip)) {
-		error = gfs2_unstuff_dinode(ip, NULL);
+		error = gfs2_unstuff_dinode(ip);
 		if (unlikely(error))
 			goto out;
 	}
@@ -945,8 +1177,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 	while (offset < end) {
 		struct iomap iomap = { };
 
-		error = gfs2_iomap_get_alloc(inode, offset, end - offset,
-					     &iomap);
+		error = gfs2_iomap_alloc(inode, offset, end - offset, &iomap);
 		if (error)
 			goto out;
 		offset = iomap.offset + iomap.length;
@@ -1066,8 +1297,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 			goto out_qunlock;
 
 		/* check if the selected rgrp limits our max_blks further */
-		if (ap.allowed && ap.allowed < max_blks)
-			max_blks = ap.allowed;
+		if (ip->i_res.rs_reserved < max_blks)
+			max_blks = ip->i_res.rs_reserved;
 
 		/* Almost done. Calculate bytes that can be written using
 		 * max_blks. We also recompute max_bytes, data_blocks and
@@ -1149,17 +1380,11 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	if (mode & FALLOC_FL_PUNCH_HOLE) {
 		ret = __gfs2_punch_hole(file, offset, len);
 	} else {
-		ret = gfs2_rsqa_alloc(ip);
-		if (ret)
-			goto out_putw;
-
 		ret = __gfs2_fallocate(file, mode, offset, len);
-
 		if (ret)
 			gfs2_rs_deltree(&ip->i_res);
 	}
 
-out_putw:
 	put_write_access(inode);
out_unlock:
 	gfs2_glock_dq(&gh);
@@ -1173,16 +1398,12 @@ static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
 					  struct file *out, loff_t *ppos,
 					  size_t len, unsigned int flags)
 {
-	int error;
-	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);
-
-	error = gfs2_rsqa_alloc(ip);
-	if (error)
-		return (ssize_t)error;
+	ssize_t ret;
 
 	gfs2_size_hint(out, *ppos, len);
 
-	return iter_file_splice_write(pipe, out, ppos, len, flags);
+	ret = iter_file_splice_write(pipe, out, ppos, len, flags);
+	return ret;
 }
 
 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
@@ -1204,9 +1425,6 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 
 	if (!(fl->fl_flags & FL_POSIX))
 		return -ENOLCK;
-	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
-		return -ENOLCK;
-
 	if (cmd == F_CANCELLK) {
 		/* Hack: */
 		cmd = F_SETLK;
@@ -1225,6 +1443,22 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 	return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
 }
 
+static void __flock_holder_uninit(struct file *file, struct gfs2_holder *fl_gh)
+{
+	struct gfs2_glock *gl = fl_gh->gh_gl;
+
+	/*
+	 * Make sure gfs2_glock_put() won't sleep under the file->f_lock
+	 * spinlock.
+	 */
+
+	gfs2_glock_hold(gl);
+	spin_lock(&file->f_lock);
+	gfs2_holder_uninit(fl_gh);
+	spin_unlock(&file->f_lock);
+	gfs2_glock_put(gl);
+}
+
 static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 {
 	struct gfs2_file *fp = file->private_data;
@@ -1237,7 +1471,9 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 	int sleeptime;
 
 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
+	flags = GL_EXACT | GL_NOPID;
+	if (!IS_SETLKW(cmd))
+		flags |= LM_FLAG_TRY_1CB;
 
 	mutex_lock(&fp->f_fl_mutex);
 
@@ -1256,19 +1492,21 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 				       &gfs2_flock_glops, CREATE, &gl);
 		if (error)
 			goto out;
+		spin_lock(&file->f_lock);
 		gfs2_holder_init(gl, state, flags, fl_gh);
+		spin_unlock(&file->f_lock);
 		gfs2_glock_put(gl);
 	}
 	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
 		error = gfs2_glock_nq(fl_gh);
 		if (error != GLR_TRYFAILED)
 			break;
-		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
-		fl_gh->gh_error = 0;
+		fl_gh->gh_flags &= ~LM_FLAG_TRY_1CB;
+		fl_gh->gh_flags |= LM_FLAG_TRY;
 		msleep(sleeptime);
 	}
 	if (error) {
-		gfs2_holder_uninit(fl_gh);
+		__flock_holder_uninit(file, fl_gh);
 		if (error == GLR_TRYFAILED)
 			error = -EAGAIN;
 	} else {
@@ -1290,7 +1528,7 @@ static void do_unflock(struct file *file, struct file_lock *fl)
 	locks_lock_file_wait(file, fl);
 	if (gfs2_holder_initialized(fl_gh)) {
 		gfs2_glock_dq(fl_gh);
-		gfs2_holder_uninit(fl_gh);
+		__flock_holder_uninit(file, fl_gh);
 	}
 	mutex_unlock(&fp->f_fl_mutex);
 }
@@ -1308,8 +1546,6 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 {
 	if (!(fl->fl_flags & FL_FLOCK))
 		return -ENOLCK;
-	if (fl->fl_type & LOCK_MAND)
-		return -EOPNOTSUPP;
 
 	if (fl->fl_type == F_UNLCK) {
 		do_unflock(file, fl);
@@ -1323,7 +1559,7 @@ const struct file_operations gfs2_file_fops = {
 	.llseek		= gfs2_llseek,
 	.read_iter	= gfs2_file_read_iter,
 	.write_iter	= gfs2_file_write_iter,
-	.iopoll		= iomap_dio_iopoll,
+	.iopoll		= iocb_bio_iopoll,
 	.unlocked_ioctl	= gfs2_ioctl,
 	.compat_ioctl	= gfs2_compat_ioctl,
 	.mmap		= gfs2_mmap,
@@ -1356,7 +1592,7 @@ const struct file_operations gfs2_file_fops_nolock = {
 	.llseek		= gfs2_llseek,
 	.read_iter	= gfs2_file_read_iter,
 	.write_iter	= gfs2_file_write_iter,
-	.iopoll		= iomap_dio_iopoll,
+	.iopoll		= iocb_bio_iopoll,
 	.unlocked_ioctl	= gfs2_ioctl,
 	.compat_ioctl	= gfs2_compat_ioctl,
 	.mmap		= gfs2_mmap,
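
The new read and write paths in this diff all share one retry shape, described in the comments they add: do the I/O with page faults disabled while the inode glock is held, and if the user buffer could not be accessed, drop the glock, fault a window of pages in manually, and take the lock again. Condensed to a sketch (simplified from gfs2_file_read_iter above; the partial-read bookkeeping and most error paths are omitted, so this is illustrative rather than the verbatim function):

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
retry:
	ret = gfs2_glock_nq(&gh);		/* take the inode glock */
	if (ret)
		goto out_uninit;
	pagefault_disable();			/* no page faults under the glock */
	ret = generic_file_read_iter(iocb, to);
	pagefault_enable();
	if (ret == -EFAULT &&
	    should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(&gh);		/* drop the glock, */
		window_size -= fault_in_iov_iter_writeable(to, window_size);
		if (window_size)		/* fault the window in, */
			goto retry;		/* and try again */
	}
	if (gfs2_holder_queued(&gh))
		gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);

should_fault_in_pages() bounds the window by the task's dirty-throttling allowance (at least 8 pages, capped at SZ_1M), so a huge iovec is faulted in piecemeal instead of all at once while other holders wait for the glock.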