Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/acl.c | 8
-rw-r--r--  fs/9p/v9fs.h | 3
-rw-r--r--  fs/9p/v9fs_vfs.h | 3
-rw-r--r--  fs/9p/vfs_inode.c | 57
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 39
-rw-r--r--  fs/9p/xattr.c | 1
-rw-r--r--  fs/Kconfig | 6
-rw-r--r--  fs/Kconfig.binfmt | 4
-rw-r--r--  fs/Makefile | 1
-rw-r--r--  fs/adfs/adfs.h | 3
-rw-r--r--  fs/adfs/inode.c | 5
-rw-r--r--  fs/affs/affs.h | 24
-rw-r--r--  fs/affs/inode.c | 7
-rw-r--r--  fs/affs/namei.c | 19
-rw-r--r--  fs/afs/dir.c | 35
-rw-r--r--  fs/afs/file.c | 1
-rw-r--r--  fs/afs/fs_operation.c | 7
-rw-r--r--  fs/afs/inode.c | 10
-rw-r--r--  fs/afs/internal.h | 8
-rw-r--r--  fs/afs/mntpt.c | 1
-rw-r--r--  fs/afs/security.c | 3
-rw-r--r--  fs/afs/xattr.c | 33
-rw-r--r--  fs/anon_inodes.c | 157
-rw-r--r--  fs/attr.c | 126
-rw-r--r--  fs/autofs/root.c | 17
-rw-r--r--  fs/bad_inode.c | 36
-rw-r--r--  fs/bfs/dir.c | 12
-rw-r--r--  fs/binfmt_elf.c | 26
-rw-r--r--  fs/binfmt_elf_fdpic.c | 27
-rw-r--r--  fs/binfmt_misc.c | 33
-rw-r--r--  fs/block_dev.c | 45
-rw-r--r--  fs/btrfs/Makefile | 19
-rw-r--r--  fs/btrfs/acl.c | 6
-rw-r--r--  fs/btrfs/backref.c | 17
-rw-r--r--  fs/btrfs/backref.h | 9
-rw-r--r--  fs/btrfs/block-group.c | 211
-rw-r--r--  fs/btrfs/block-group.h | 28
-rw-r--r--  fs/btrfs/btrfs_inode.h | 3
-rw-r--r--  fs/btrfs/check-integrity.c | 10
-rw-r--r--  fs/btrfs/compression.c | 78
-rw-r--r--  fs/btrfs/ctree.c | 11
-rw-r--r--  fs/btrfs/ctree.h | 27
-rw-r--r--  fs/btrfs/delalloc-space.c | 29
-rw-r--r--  fs/btrfs/delayed-inode.c | 7
-rw-r--r--  fs/btrfs/delayed-ref.c | 79
-rw-r--r--  fs/btrfs/delayed-ref.h | 28
-rw-r--r--  fs/btrfs/dev-replace.c | 186
-rw-r--r--  fs/btrfs/dev-replace.h | 3
-rw-r--r--  fs/btrfs/discard.c | 6
-rw-r--r--  fs/btrfs/disk-io.c | 183
-rw-r--r--  fs/btrfs/disk-io.h | 6
-rw-r--r--  fs/btrfs/extent-tree.c | 382
-rw-r--r--  fs/btrfs/extent_io.c | 845
-rw-r--r--  fs/btrfs/extent_io.h | 17
-rw-r--r--  fs/btrfs/extent_map.c | 18
-rw-r--r--  fs/btrfs/file-item.c | 22
-rw-r--r--  fs/btrfs/file.c | 72
-rw-r--r--  fs/btrfs/free-space-cache.c | 142
-rw-r--r--  fs/btrfs/free-space-cache.h | 2
-rw-r--r--  fs/btrfs/inode.c | 465
-rw-r--r--  fs/btrfs/ioctl.c | 75
-rw-r--r--  fs/btrfs/lzo.c | 4
-rw-r--r--  fs/btrfs/ordered-data.c | 224
-rw-r--r--  fs/btrfs/ordered-data.h | 57
-rw-r--r--  fs/btrfs/qgroup.c | 8
-rw-r--r--  fs/btrfs/qgroup.h | 2
-rw-r--r--  fs/btrfs/raid56.c | 41
-rw-r--r--  fs/btrfs/reada.c | 35
-rw-r--r--  fs/btrfs/ref-verify.c | 47
-rw-r--r--  fs/btrfs/reflink.c | 29
-rw-r--r--  fs/btrfs/relocation.c | 99
-rw-r--r--  fs/btrfs/scrub.c | 156
-rw-r--r--  fs/btrfs/send.c | 38
-rw-r--r--  fs/btrfs/space-info.c | 365
-rw-r--r--  fs/btrfs/space-info.h | 25
-rw-r--r--  fs/btrfs/subpage.c | 278
-rw-r--r--  fs/btrfs/subpage.h | 91
-rw-r--r--  fs/btrfs/super.c | 12
-rw-r--r--  fs/btrfs/sysfs.c | 2
-rw-r--r--  fs/btrfs/tests/btrfs-tests.c | 2
-rw-r--r--  fs/btrfs/tests/extent-map-tests.c | 2
-rw-r--r--  fs/btrfs/transaction.c | 152
-rw-r--r--  fs/btrfs/transaction.h | 5
-rw-r--r--  fs/btrfs/tree-checker.c | 16
-rw-r--r--  fs/btrfs/tree-log.c | 285
-rw-r--r--  fs/btrfs/volumes.c | 366
-rw-r--r--  fs/btrfs/volumes.h | 8
-rw-r--r--  fs/btrfs/xattr.c | 33
-rw-r--r--  fs/btrfs/zlib.c | 5
-rw-r--r--  fs/btrfs/zoned.c | 877
-rw-r--r--  fs/btrfs/zoned.h | 157
-rw-r--r--  fs/btrfs/zstd.c | 6
-rw-r--r--  fs/buffer.c | 7
-rw-r--r--  fs/cachefiles/interface.c | 4
-rw-r--r--  fs/cachefiles/namei.c | 21
-rw-r--r--  fs/cachefiles/xattr.c | 29
-rw-r--r--  fs/ceph/acl.c | 6
-rw-r--r--  fs/ceph/addr.c | 2
-rw-r--r--  fs/ceph/caps.c | 70
-rw-r--r--  fs/ceph/dir.c | 23
-rw-r--r--  fs/ceph/inode.c | 79
-rw-r--r--  fs/ceph/snap.c | 10
-rw-r--r--  fs/ceph/super.h | 52
-rw-r--r--  fs/ceph/xattr.c | 1
-rw-r--r--  fs/cifs/cifs_debug.c | 127
-rw-r--r--  fs/cifs/cifs_swn.c | 4
-rw-r--r--  fs/cifs/cifsacl.c | 382
-rw-r--r--  fs/cifs/cifsacl.h | 4
-rw-r--r--  fs/cifs/cifsencrypt.c | 6
-rw-r--r--  fs/cifs/cifsfs.c | 22
-rw-r--r--  fs/cifs/cifsfs.h | 27
-rw-r--r--  fs/cifs/cifsglob.h | 28
-rw-r--r--  fs/cifs/cifsproto.h | 2
-rw-r--r--  fs/cifs/cifssmb.c | 6
-rw-r--r--  fs/cifs/connect.c | 315
-rw-r--r--  fs/cifs/dfs_cache.c | 33
-rw-r--r--  fs/cifs/dir.c | 8
-rw-r--r--  fs/cifs/file.c | 2
-rw-r--r--  fs/cifs/fs_context.c | 81
-rw-r--r--  fs/cifs/fs_context.h | 6
-rw-r--r--  fs/cifs/inode.c | 59
-rw-r--r--  fs/cifs/link.c | 3
-rw-r--r--  fs/cifs/sess.c | 3
-rw-r--r--  fs/cifs/smb2inode.c | 1
-rw-r--r--  fs/cifs/smb2misc.c | 10
-rw-r--r--  fs/cifs/smb2ops.c | 119
-rw-r--r--  fs/cifs/smb2pdu.c | 28
-rw-r--r--  fs/cifs/smb2proto.h | 3
-rw-r--r--  fs/cifs/trace.h | 36
-rw-r--r--  fs/cifs/transport.c | 72
-rw-r--r--  fs/cifs/xattr.c | 1
-rw-r--r--  fs/coda/coda_linux.h | 8
-rw-r--r--  fs/coda/dir.c | 18
-rw-r--r--  fs/coda/inode.c | 9
-rw-r--r--  fs/coda/pioctl.c | 6
-rw-r--r--  fs/compat_binfmt_elf.c | 7
-rw-r--r--  fs/configfs/configfs_internal.h | 6
-rw-r--r--  fs/configfs/dir.c | 3
-rw-r--r--  fs/configfs/file.c | 6
-rw-r--r--  fs/configfs/inode.c | 5
-rw-r--r--  fs/configfs/symlink.c | 6
-rw-r--r--  fs/coredump.c | 14
-rw-r--r--  fs/cramfs/inode.c | 18
-rw-r--r--  fs/crypto/bio.c | 6
-rw-r--r--  fs/crypto/policy.c | 2
-rw-r--r--  fs/dax.c | 5
-rw-r--r--  fs/dcache.c | 92
-rw-r--r--  fs/dcookies.c | 356
-rw-r--r--  fs/debugfs/inode.c | 14
-rw-r--r--  fs/direct-io.c | 10
-rw-r--r--  fs/ecryptfs/crypto.c | 4
-rw-r--r--  fs/ecryptfs/inode.c | 84
-rw-r--r--  fs/ecryptfs/main.c | 6
-rw-r--r--  fs/ecryptfs/mmap.c | 4
-rw-r--r--  fs/efivarfs/file.c | 2
-rw-r--r--  fs/efivarfs/inode.c | 4
-rw-r--r--  fs/erofs/data.c | 30
-rw-r--r--  fs/erofs/inode.c | 7
-rw-r--r--  fs/erofs/internal.h | 5
-rw-r--r--  fs/erofs/namei.c | 4
-rw-r--r--  fs/erofs/super.c | 4
-rw-r--r--  fs/erofs/xattr.c | 10
-rw-r--r--  fs/erofs/zdata.c | 2
-rw-r--r--  fs/erofs/zmap.c | 10
-rw-r--r--  fs/eventpoll.c | 4
-rw-r--r--  fs/exec.c | 20
-rw-r--r--  fs/exfat/balloc.c | 4
-rw-r--r--  fs/exfat/exfat_fs.h | 10
-rw-r--r--  fs/exfat/exfat_raw.h | 4
-rw-r--r--  fs/exfat/fatent.c | 43
-rw-r--r--  fs/exfat/file.c | 16
-rw-r--r--  fs/exfat/namei.c | 14
-rw-r--r--  fs/exfat/super.c | 31
-rw-r--r--  fs/ext2/acl.c | 6
-rw-r--r--  fs/ext2/acl.h | 3
-rw-r--r--  fs/ext2/ext2.h | 5
-rw-r--r--  fs/ext2/ialloc.c | 2
-rw-r--r--  fs/ext2/inode.c | 15
-rw-r--r--  fs/ext2/ioctl.c | 6
-rw-r--r--  fs/ext2/namei.c | 22
-rw-r--r--  fs/ext2/xattr_security.c | 1
-rw-r--r--  fs/ext2/xattr_trusted.c | 1
-rw-r--r--  fs/ext2/xattr_user.c | 1
-rw-r--r--  fs/ext4/.kunitconfig | 3
-rw-r--r--  fs/ext4/Kconfig | 3
-rw-r--r--  fs/ext4/acl.c | 5
-rw-r--r--  fs/ext4/acl.h | 3
-rw-r--r--  fs/ext4/balloc.c | 38
-rw-r--r--  fs/ext4/ext4.h | 25
-rw-r--r--  fs/ext4/extents.c | 16
-rw-r--r--  fs/ext4/fast_commit.c | 42
-rw-r--r--  fs/ext4/file.c | 5
-rw-r--r--  fs/ext4/fsync.c | 2
-rw-r--r--  fs/ext4/ialloc.c | 9
-rw-r--r--  fs/ext4/inode.c | 59
-rw-r--r--  fs/ext4/ioctl.c | 27
-rw-r--r--  fs/ext4/mballoc.c | 11
-rw-r--r--  fs/ext4/namei.c | 144
-rw-r--r--  fs/ext4/page-io.c | 2
-rw-r--r--  fs/ext4/readpage.c | 3
-rw-r--r--  fs/ext4/super.c | 23
-rw-r--r--  fs/ext4/sysfs.c | 7
-rw-r--r--  fs/ext4/verity.c | 89
-rw-r--r--  fs/ext4/xattr.c | 6
-rw-r--r--  fs/ext4/xattr_hurd.c | 1
-rw-r--r--  fs/ext4/xattr_security.c | 1
-rw-r--r--  fs/ext4/xattr_trusted.c | 1
-rw-r--r--  fs/ext4/xattr_user.c | 1
-rw-r--r--  fs/f2fs/Kconfig | 20
-rw-r--r--  fs/f2fs/Makefile | 1
-rw-r--r--  fs/f2fs/acl.c | 26
-rw-r--r--  fs/f2fs/acl.h | 3
-rw-r--r--  fs/f2fs/checkpoint.c | 179
-rw-r--r--  fs/f2fs/compress.c | 195
-rw-r--r--  fs/f2fs/data.c | 447
-rw-r--r--  fs/f2fs/debug.c | 12
-rw-r--r--  fs/f2fs/f2fs.h | 113
-rw-r--r--  fs/f2fs/file.c | 100
-rw-r--r--  fs/f2fs/gc.c | 8
-rw-r--r--  fs/f2fs/inline.c | 4
-rw-r--r--  fs/f2fs/namei.c | 31
-rw-r--r--  fs/f2fs/node.c | 6
-rw-r--r--  fs/f2fs/segment.c | 21
-rw-r--r--  fs/f2fs/segment.h | 8
-rw-r--r--  fs/f2fs/super.c | 206
-rw-r--r--  fs/f2fs/sysfs.c | 132
-rw-r--r--  fs/f2fs/trace.c | 165
-rw-r--r--  fs/f2fs/trace.h | 43
-rw-r--r--  fs/f2fs/xattr.c | 27
-rw-r--r--  fs/fat/fat.h | 6
-rw-r--r--  fs/fat/file.c | 26
-rw-r--r--  fs/fat/misc.c | 23
-rw-r--r--  fs/fat/namei_msdos.c | 12
-rw-r--r--  fs/fat/namei_vfat.c | 15
-rw-r--r--  fs/fcntl.c | 22
-rw-r--r--  fs/fhandle.c | 2
-rw-r--r--  fs/file.c | 36
-rw-r--r--  fs/fs-writeback.c | 116
-rw-r--r--  fs/fuse/acl.c | 3
-rw-r--r--  fs/fuse/dev.c | 32
-rw-r--r--  fs/fuse/dir.c | 46
-rw-r--r--  fs/fuse/fuse_i.h | 5
-rw-r--r--  fs/fuse/virtio_fs.c | 9
-rw-r--r--  fs/fuse/xattr.c | 2
-rw-r--r--  fs/gfs2/acl.c | 5
-rw-r--r--  fs/gfs2/acl.h | 3
-rw-r--r--  fs/gfs2/bmap.c | 10
-rw-r--r--  fs/gfs2/file.c | 23
-rw-r--r--  fs/gfs2/glock.c | 22
-rw-r--r--  fs/gfs2/glock.h | 6
-rw-r--r--  fs/gfs2/glops.c | 38
-rw-r--r--  fs/gfs2/incore.h | 54
-rw-r--r--  fs/gfs2/inode.c | 74
-rw-r--r--  fs/gfs2/inode.h | 3
-rw-r--r--  fs/gfs2/lock_dlm.c | 8
-rw-r--r--  fs/gfs2/log.c | 531
-rw-r--r--  fs/gfs2/log.h | 20
-rw-r--r--  fs/gfs2/lops.c | 28
-rw-r--r--  fs/gfs2/lops.h | 23
-rw-r--r--  fs/gfs2/main.c | 4
-rw-r--r--  fs/gfs2/ops_fstype.c | 75
-rw-r--r--  fs/gfs2/recovery.c | 14
-rw-r--r--  fs/gfs2/rgrp.c | 442
-rw-r--r--  fs/gfs2/rgrp.h | 6
-rw-r--r--  fs/gfs2/super.c | 85
-rw-r--r--  fs/gfs2/super.h | 10
-rw-r--r--  fs/gfs2/trace_gfs2.h | 37
-rw-r--r--  fs/gfs2/trans.c | 104
-rw-r--r--  fs/gfs2/trans.h | 5
-rw-r--r--  fs/gfs2/util.c | 72
-rw-r--r--  fs/gfs2/util.h | 3
-rw-r--r--  fs/gfs2/xattr.c | 55
-rw-r--r--  fs/hfs/attr.c | 1
-rw-r--r--  fs/hfs/dir.c | 13
-rw-r--r--  fs/hfs/hfs_fs.h | 3
-rw-r--r--  fs/hfs/inode.c | 8
-rw-r--r--  fs/hfsplus/dir.c | 22
-rw-r--r--  fs/hfsplus/hfsplus_fs.h | 5
-rw-r--r--  fs/hfsplus/inode.c | 18
-rw-r--r--  fs/hfsplus/ioctl.c | 2
-rw-r--r--  fs/hfsplus/super.c | 2
-rw-r--r--  fs/hfsplus/xattr.c | 1
-rw-r--r--  fs/hfsplus/xattr_security.c | 1
-rw-r--r--  fs/hfsplus/xattr_trusted.c | 1
-rw-r--r--  fs/hfsplus/xattr_user.c | 1
-rw-r--r--  fs/hostfs/hostfs_kern.c | 39
-rw-r--r--  fs/hpfs/hpfs_fn.h | 2
-rw-r--r--  fs/hpfs/inode.c | 7
-rw-r--r--  fs/hpfs/namei.c | 20
-rw-r--r--  fs/hugetlbfs/inode.c | 107
-rw-r--r--  fs/init.c | 24
-rw-r--r--  fs/inode.c | 87
-rw-r--r--  fs/internal.h | 12
-rw-r--r--  fs/io-wq.c | 656
-rw-r--r--  fs/io-wq.h | 40
-rw-r--r--  fs/io_uring.c | 4784
-rw-r--r--  fs/iomap/buffered-io.c | 15
-rw-r--r--  fs/iomap/direct-io.c | 78
-rw-r--r--  fs/iomap/seek.c | 125
-rw-r--r--  fs/iomap/swapfile.c | 10
-rw-r--r--  fs/isofs/dir.c | 1
-rw-r--r--  fs/isofs/inode.c | 9
-rw-r--r--  fs/isofs/namei.c | 1
-rw-r--r--  fs/jbd2/checkpoint.c | 2
-rw-r--r--  fs/jbd2/commit.c | 4
-rw-r--r--  fs/jbd2/recovery.c | 2
-rw-r--r--  fs/jffs2/acl.c | 6
-rw-r--r--  fs/jffs2/acl.h | 3
-rw-r--r--  fs/jffs2/compr_rtime.c | 3
-rw-r--r--  fs/jffs2/dir.c | 33
-rw-r--r--  fs/jffs2/fs.c | 7
-rw-r--r--  fs/jffs2/os-linux.h | 2
-rw-r--r--  fs/jffs2/security.c | 1
-rw-r--r--  fs/jffs2/summary.c | 3
-rw-r--r--  fs/jffs2/xattr_trusted.c | 1
-rw-r--r--  fs/jffs2/xattr_user.c | 1
-rw-r--r--  fs/jfs/acl.c | 5
-rw-r--r--  fs/jfs/file.c | 9
-rw-r--r--  fs/jfs/ioctl.c | 2
-rw-r--r--  fs/jfs/jfs_acl.h | 3
-rw-r--r--  fs/jfs/jfs_dmap.c | 2
-rw-r--r--  fs/jfs/jfs_filsys.h | 1
-rw-r--r--  fs/jfs/jfs_inode.c | 2
-rw-r--r--  fs/jfs/jfs_inode.h | 2
-rw-r--r--  fs/jfs/jfs_mount.c | 10
-rw-r--r--  fs/jfs/jfs_txnmgr.c | 35
-rw-r--r--  fs/jfs/namei.c | 21
-rw-r--r--  fs/jfs/super.c | 1
-rw-r--r--  fs/jfs/xattr.c | 2
-rw-r--r--  fs/kernfs/dir.c | 6
-rw-r--r--  fs/kernfs/inode.c | 19
-rw-r--r--  fs/kernfs/kernfs-internal.h | 9
-rw-r--r--  fs/libfs.c | 43
-rw-r--r--  fs/lockd/svc4proc.c | 24
-rw-r--r--  fs/lockd/svcproc.c | 24
-rw-r--r--  fs/locks.c | 3
-rw-r--r--  fs/minix/bitmap.c | 2
-rw-r--r--  fs/minix/file.c | 7
-rw-r--r--  fs/minix/inode.c | 6
-rw-r--r--  fs/minix/minix.h | 3
-rw-r--r--  fs/minix/namei.c | 24
-rw-r--r--  fs/mount.h | 10
-rw-r--r--  fs/mpage.c | 6
-rw-r--r--  fs/namei.c | 600
-rw-r--r--  fs/namespace.c | 537
-rw-r--r--  fs/nfs/Kconfig | 2
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c | 11
-rw-r--r--  fs/nfs/callback_xdr.c | 2
-rw-r--r--  fs/nfs/dir.c | 83
-rw-r--r--  fs/nfs/export.c | 18
-rw-r--r--  fs/nfs/file.c | 27
-rw-r--r--  fs/nfs/fs_context.c | 35
-rw-r--r--  fs/nfs/fscache.c | 4
-rw-r--r--  fs/nfs/inode.c | 127
-rw-r--r--  fs/nfs/internal.h | 17
-rw-r--r--  fs/nfs/namespace.c | 15
-rw-r--r--  fs/nfs/nfs3_fs.h | 3
-rw-r--r--  fs/nfs/nfs3acl.c | 4
-rw-r--r--  fs/nfs/nfs3xdr.c | 3
-rw-r--r--  fs/nfs/nfs42proc.c | 12
-rw-r--r--  fs/nfs/nfs4client.c | 1
-rw-r--r--  fs/nfs/nfs4file.c | 4
-rw-r--r--  fs/nfs/nfs4proc.c | 57
-rw-r--r--  fs/nfs/nfs4state.c | 1
-rw-r--r--  fs/nfs/pnfs.c | 2
-rw-r--r--  fs/nfs/read.c | 204
-rw-r--r--  fs/nfs/super.c | 19
-rw-r--r--  fs/nfs/unlink.c | 6
-rw-r--r--  fs/nfs/write.c | 45
-rw-r--r--  fs/nfs_common/Makefile | 2
-rw-r--r--  fs/nfs_common/nfs_ssc.c | 2
-rw-r--r--  fs/nfs_common/nfsacl.c | 52
-rw-r--r--  fs/nfsd/Kconfig | 2
-rw-r--r--  fs/nfsd/blocklayout.c | 2
-rw-r--r--  fs/nfsd/export.c | 80
-rw-r--r--  fs/nfsd/export.h | 15
-rw-r--r--  fs/nfsd/filecache.c | 2
-rw-r--r--  fs/nfsd/netns.h | 23
-rw-r--r--  fs/nfsd/nfs2acl.c | 73
-rw-r--r--  fs/nfsd/nfs3acl.c | 51
-rw-r--r--  fs/nfsd/nfs3proc.c | 93
-rw-r--r--  fs/nfsd/nfs3xdr.c | 582
-rw-r--r--  fs/nfsd/nfs4acl.c | 5
-rw-r--r--  fs/nfsd/nfs4callback.c | 1
-rw-r--r--  fs/nfsd/nfs4proc.c | 14
-rw-r--r--  fs/nfsd/nfs4recover.c | 6
-rw-r--r--  fs/nfsd/nfs4state.c | 179
-rw-r--r--  fs/nfsd/nfscache.c | 52
-rw-r--r--  fs/nfsd/nfsctl.c | 22
-rw-r--r--  fs/nfsd/nfsd.h | 2
-rw-r--r--  fs/nfsd/nfsfh.c | 7
-rw-r--r--  fs/nfsd/nfsfh.h | 5
-rw-r--r--  fs/nfsd/nfsproc.c | 94
-rw-r--r--  fs/nfsd/nfssvc.c | 34
-rw-r--r--  fs/nfsd/nfsxdr.c | 350
-rw-r--r--  fs/nfsd/state.h | 3
-rw-r--r--  fs/nfsd/stats.c | 114
-rw-r--r--  fs/nfsd/stats.h | 96
-rw-r--r--  fs/nfsd/vfs.c | 54
-rw-r--r--  fs/nfsd/xdr.h | 12
-rw-r--r--  fs/nfsd/xdr3.h | 20
-rw-r--r--  fs/nilfs2/inode.c | 14
-rw-r--r--  fs/nilfs2/ioctl.c | 2
-rw-r--r--  fs/nilfs2/namei.c | 19
-rw-r--r--  fs/nilfs2/nilfs.h | 6
-rw-r--r--  fs/nilfs2/segbuf.c | 6
-rw-r--r--  fs/nilfs2/the_nilfs.h | 2
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 4
-rw-r--r--  fs/notify/group.c | 25
-rw-r--r--  fs/notify/inotify/inotify_user.c | 6
-rw-r--r--  fs/ntfs/inode.c | 12
-rw-r--r--  fs/ntfs/inode.h | 3
-rw-r--r--  fs/ntfs/layout.h | 4
-rw-r--r--  fs/ocfs2/acl.c | 6
-rw-r--r--  fs/ocfs2/acl.h | 3
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h | 4
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c | 17
-rw-r--r--  fs/ocfs2/file.c | 20
-rw-r--r--  fs/ocfs2/file.h | 11
-rw-r--r--  fs/ocfs2/ioctl.c | 2
-rw-r--r--  fs/ocfs2/namei.c | 21
-rw-r--r--  fs/ocfs2/refcounttree.c | 6
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/ocfs2/xattr.c | 3
-rw-r--r--  fs/omfs/dir.c | 13
-rw-r--r--  fs/omfs/file.c | 7
-rw-r--r--  fs/omfs/inode.c | 2
-rw-r--r--  fs/open.c | 41
-rw-r--r--  fs/orangefs/acl.c | 6
-rw-r--r--  fs/orangefs/file.c | 5
-rw-r--r--  fs/orangefs/inode.c | 20
-rw-r--r--  fs/orangefs/namei.c | 12
-rw-r--r--  fs/orangefs/orangefs-kernel.h | 13
-rw-r--r--  fs/orangefs/xattr.c | 1
-rw-r--r--  fs/overlayfs/copy_up.c | 22
-rw-r--r--  fs/overlayfs/dir.c | 31
-rw-r--r--  fs/overlayfs/file.c | 6
-rw-r--r--  fs/overlayfs/inode.c | 27
-rw-r--r--  fs/overlayfs/overlayfs.h | 45
-rw-r--r--  fs/overlayfs/super.c | 21
-rw-r--r--  fs/overlayfs/util.c | 4
-rw-r--r--  fs/pipe.c | 2
-rw-r--r--  fs/pnode.h | 2
-rw-r--r--  fs/posix_acl.c | 103
-rw-r--r--  fs/proc/base.c | 47
-rw-r--r--  fs/proc/fd.c | 5
-rw-r--r--  fs/proc/fd.h | 3
-rw-r--r--  fs/proc/generic.c | 12
-rw-r--r--  fs/proc/internal.h | 6
-rw-r--r--  fs/proc/meminfo.c | 10
-rw-r--r--  fs/proc/proc_net.c | 5
-rw-r--r--  fs/proc/proc_sysctl.c | 19
-rw-r--r--  fs/proc/root.c | 5
-rw-r--r--  fs/proc/self.c | 7
-rw-r--r--  fs/proc/task_mmu.c | 11
-rw-r--r--  fs/proc/vmcore.c | 7
-rw-r--r--  fs/proc_namespace.c | 3
-rw-r--r--  fs/pstore/inode.c | 2
-rw-r--r--  fs/pstore/platform.c | 4
-rw-r--r--  fs/pstore/ram_core.c | 2
-rw-r--r--  fs/pstore/zone.c | 2
-rw-r--r--  fs/quota/quota_v2.c | 11
-rw-r--r--  fs/ramfs/file-nommu.c | 9
-rw-r--r--  fs/ramfs/inode.c | 31
-rw-r--r--  fs/read_write.c | 19
-rw-r--r--  fs/reiserfs/acl.h | 3
-rw-r--r--  fs/reiserfs/file.c | 2
-rw-r--r--  fs/reiserfs/inode.c | 7
-rw-r--r--  fs/reiserfs/ioctl.c | 4
-rw-r--r--  fs/reiserfs/namei.c | 21
-rw-r--r--  fs/reiserfs/reiserfs.h | 3
-rw-r--r--  fs/reiserfs/xattr.c | 13
-rw-r--r--  fs/reiserfs/xattr.h | 3
-rw-r--r--  fs/reiserfs/xattr_acl.c | 8
-rw-r--r--  fs/reiserfs/xattr_security.c | 3
-rw-r--r--  fs/reiserfs/xattr_trusted.c | 3
-rw-r--r--  fs/reiserfs/xattr_user.c | 3
-rw-r--r--  fs/remap_range.c | 7
-rw-r--r--  fs/select.c | 10
-rw-r--r--  fs/seq_file.c | 5
-rw-r--r--  fs/splice.c | 53
-rw-r--r--  fs/squashfs/block.c | 2
-rw-r--r--  fs/stat.c | 26
-rw-r--r--  fs/statfs.c | 5
-rw-r--r--  fs/super.c | 15
-rw-r--r--  fs/sysfs/file.c | 11
-rw-r--r--  fs/sysv/file.c | 7
-rw-r--r--  fs/sysv/ialloc.c | 2
-rw-r--r--  fs/sysv/itree.c | 6
-rw-r--r--  fs/sysv/namei.c | 21
-rw-r--r--  fs/sysv/sysv.h | 3
-rw-r--r--  fs/tracefs/inode.c | 4
-rw-r--r--  fs/ubifs/auth.c | 2
-rw-r--r--  fs/ubifs/dir.c | 30
-rw-r--r--  fs/ubifs/file.c | 5
-rw-r--r--  fs/ubifs/ioctl.c | 2
-rw-r--r--  fs/ubifs/journal.c | 2
-rw-r--r--  fs/ubifs/replay.c | 4
-rw-r--r--  fs/ubifs/super.c | 4
-rw-r--r--  fs/ubifs/ubifs.h | 5
-rw-r--r--  fs/ubifs/xattr.c | 3
-rw-r--r--  fs/udf/file.c | 9
-rw-r--r--  fs/udf/ialloc.c | 2
-rw-r--r--  fs/udf/inode.c | 9
-rw-r--r--  fs/udf/namei.c | 24
-rw-r--r--  fs/udf/super.c | 9
-rw-r--r--  fs/udf/symlink.c | 7
-rw-r--r--  fs/ufs/ialloc.c | 2
-rw-r--r--  fs/ufs/inode.c | 7
-rw-r--r--  fs/ufs/namei.c | 19
-rw-r--r--  fs/ufs/ufs.h | 3
-rw-r--r--  fs/userfaultfd.c | 19
-rw-r--r--  fs/utimes.c | 3
-rw-r--r--  fs/vboxsf/dir.c | 12
-rw-r--r--  fs/vboxsf/utils.c | 9
-rw-r--r--  fs/vboxsf/vfsmod.h | 8
-rw-r--r--  fs/verity/Makefile | 1
-rw-r--r--  fs/verity/enable.c | 2
-rw-r--r--  fs/verity/fsverity_private.h | 13
-rw-r--r--  fs/verity/open.c | 133
-rw-r--r--  fs/verity/read_metadata.c | 195
-rw-r--r--  fs/verity/signature.c | 20
-rw-r--r--  fs/xattr.c | 139
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c | 50
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 22
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 315
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c | 45
-rw-r--r--  fs/xfs/libxfs/xfs_dir2.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_sf.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_errortag.h | 6
-rw-r--r--  fs/xfs/libxfs/xfs_fs.h | 1
-rw-r--r--  fs/xfs/libxfs/xfs_inode_fork.c | 27
-rw-r--r--  fs/xfs/libxfs/xfs_inode_fork.h | 63
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c | 2
-rw-r--r--  fs/xfs/scrub/common.c | 4
-rw-r--r--  fs/xfs/xfs_acl.c | 5
-rw-r--r--  fs/xfs/xfs_acl.h | 3
-rw-r--r--  fs/xfs/xfs_aops.c | 17
-rw-r--r--  fs/xfs/xfs_bio_io.c | 2
-rw-r--r--  fs/xfs/xfs_bmap_item.c | 10
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 81
-rw-r--r--  fs/xfs/xfs_buf.c | 34
-rw-r--r--  fs/xfs/xfs_buf.h | 11
-rw-r--r--  fs/xfs/xfs_dquot.c | 47
-rw-r--r--  fs/xfs/xfs_error.c | 6
-rw-r--r--  fs/xfs/xfs_extent_busy.c | 14
-rw-r--r--  fs/xfs/xfs_file.c | 443
-rw-r--r--  fs/xfs/xfs_fsops.c | 32
-rw-r--r--  fs/xfs/xfs_fsops.h | 4
-rw-r--r--  fs/xfs/xfs_globals.c | 7
-rw-r--r--  fs/xfs/xfs_icache.c | 438
-rw-r--r--  fs/xfs/xfs_icache.h | 24
-rw-r--r--  fs/xfs/xfs_inode.c | 170
-rw-r--r--  fs/xfs/xfs_inode.h | 16
-rw-r--r--  fs/xfs/xfs_ioctl.c | 104
-rw-r--r--  fs/xfs/xfs_ioctl32.c | 13
-rw-r--r--  fs/xfs/xfs_iomap.c | 82
-rw-r--r--  fs/xfs/xfs_iops.c | 129
-rw-r--r--  fs/xfs/xfs_iops.h | 3
-rw-r--r--  fs/xfs/xfs_itable.c | 23
-rw-r--r--  fs/xfs/xfs_itable.h | 1
-rw-r--r--  fs/xfs/xfs_iwalk.c | 5
-rw-r--r--  fs/xfs/xfs_linux.h | 3
-rw-r--r--  fs/xfs/xfs_log.c | 142
-rw-r--r--  fs/xfs/xfs_log.h | 4
-rw-r--r--  fs/xfs/xfs_mount.c | 133
-rw-r--r--  fs/xfs/xfs_mount.h | 10
-rw-r--r--  fs/xfs/xfs_mru_cache.c | 2
-rw-r--r--  fs/xfs/xfs_pwork.c | 25
-rw-r--r--  fs/xfs/xfs_pwork.h | 4
-rw-r--r--  fs/xfs/xfs_qm.c | 119
-rw-r--r--  fs/xfs/xfs_quota.h | 49
-rw-r--r--  fs/xfs/xfs_reflink.c | 103
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 5
-rw-r--r--  fs/xfs/xfs_super.c | 86
-rw-r--r--  fs/xfs/xfs_super.h | 6
-rw-r--r--  fs/xfs/xfs_symlink.c | 23
-rw-r--r--  fs/xfs/xfs_symlink.h | 5
-rw-r--r--  fs/xfs/xfs_sysctl.c | 40
-rw-r--r--  fs/xfs/xfs_sysctl.h | 3
-rw-r--r--  fs/xfs/xfs_trace.c | 1
-rw-r--r--  fs/xfs/xfs_trace.h | 72
-rw-r--r--  fs/xfs/xfs_trans.c | 222
-rw-r--r--  fs/xfs/xfs_trans.h | 43
-rw-r--r--  fs/xfs/xfs_trans_dquot.c | 71
-rw-r--r--  fs/xfs/xfs_xattr.c | 7
-rw-r--r--  fs/zonefs/Makefile | 2
-rw-r--r--  fs/zonefs/super.c | 139
-rw-r--r--  fs/zonefs/trace.h | 104
592 files changed, 18752 insertions, 11265 deletions
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 6261719f6f2a..bb1b286c49ae 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -239,6 +239,7 @@ static int v9fs_xattr_get_acl(const struct xattr_handler *handler,
}
static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -258,7 +259,7 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EPERM;
if (value) {
/* update the cached acl value */
@@ -279,7 +280,8 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
struct iattr iattr = { 0 };
struct posix_acl *old_acl = acl;
- retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
+ retval = posix_acl_update_mode(&init_user_ns, inode,
+ &iattr.ia_mode, &acl);
if (retval)
goto err_out;
if (!acl) {
@@ -297,7 +299,7 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
* What is the following setxattr update the
* mode ?
*/
- v9fs_vfs_setattr_dotl(dentry, &iattr);
+ v9fs_vfs_setattr_dotl(&init_user_ns, dentry, &iattr);
}
break;
case ACL_TYPE_DEFAULT:
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 7b763776306e..4ca56c5dd637 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -135,7 +135,8 @@ extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d);
extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d);
-extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+extern int v9fs_vfs_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses,
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index fd2a2b040250..d44ade76966a 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -59,7 +59,8 @@ void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
int v9fs_uflags2omode(int uflags, int extended);
void v9fs_blank_wstat(struct p9_wstat *wstat);
-int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *);
+int v9fs_vfs_setattr_dotl(struct user_namespace *, struct dentry *,
+ struct iattr *);
int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
int datasync);
int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode);
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 4a937fac1acb..8d97f0b45e9c 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -251,7 +251,7 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
{
int err = 0;
- inode_init_owner(inode, NULL, mode);
+ inode_init_owner(&init_user_ns,inode, NULL, mode);
inode->i_blocks = 0;
inode->i_rdev = rdev;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
@@ -676,8 +676,8 @@ error:
*/
static int
-v9fs_vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+v9fs_vfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
u32 perm = unixmode2p9mode(v9ses, mode);
@@ -702,7 +702,8 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
*
*/
-static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int v9fs_vfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int err;
u32 perm;
@@ -907,9 +908,9 @@ int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
*/
int
-v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+v9fs_vfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
int retval;
struct inode *old_inode;
@@ -1016,8 +1017,8 @@ done:
*/
static int
-v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+v9fs_vfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
struct v9fs_session_info *v9ses;
@@ -1027,7 +1028,7 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- generic_fillattr(d_inode(dentry), stat);
+ generic_fillattr(&init_user_ns, d_inode(dentry), stat);
return 0;
}
fid = v9fs_fid_lookup(dentry);
@@ -1040,7 +1041,7 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
return PTR_ERR(st);
v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
- generic_fillattr(d_inode(dentry), stat);
+ generic_fillattr(&init_user_ns, d_inode(dentry), stat);
p9stat_free(st);
kfree(st);
@@ -1054,7 +1055,8 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
*
*/
-static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
+static int v9fs_vfs_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *iattr)
{
int retval, use_dentry = 0;
struct v9fs_session_info *v9ses;
@@ -1062,7 +1064,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
struct p9_wstat wstat;
p9_debug(P9_DEBUG_VFS, "\n");
- retval = setattr_prepare(dentry, iattr);
+ retval = setattr_prepare(&init_user_ns, dentry, iattr);
if (retval)
return retval;
@@ -1118,7 +1120,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
v9fs_invalidate_inode_attr(d_inode(dentry));
- setattr_copy(d_inode(dentry), iattr);
+ setattr_copy(&init_user_ns, d_inode(dentry), iattr);
mark_inode_dirty(d_inode(dentry));
return 0;
}
@@ -1137,9 +1139,6 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
struct super_block *sb, unsigned int flags)
{
umode_t mode;
- char ext[32];
- char tag_name[14];
- unsigned int i_nlink;
struct v9fs_session_info *v9ses = sb->s_fs_info;
struct v9fs_inode *v9inode = V9FS_I(inode);
@@ -1157,18 +1156,18 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
inode->i_gid = stat->n_gid;
}
if ((S_ISREG(inode->i_mode)) || (S_ISDIR(inode->i_mode))) {
- if (v9fs_proto_dotu(v9ses) && (stat->extension[0] != '\0')) {
+ if (v9fs_proto_dotu(v9ses)) {
+ unsigned int i_nlink;
/*
- * Hadlink support got added later to
- * to the .u extension. So there can be
- * server out there that doesn't support
- * this even with .u extension. So check
- * for non NULL stat->extension
+ * Hadlink support got added later to the .u extension.
+ * So there can be a server out there that doesn't
+ * support this even with .u extension. That would
+ * just leave us with stat->extension being an empty
+ * string, though.
*/
- strlcpy(ext, stat->extension, sizeof(ext));
/* HARDLINKCOUNT %u */
- sscanf(ext, "%13s %u", tag_name, &i_nlink);
- if (!strncmp(tag_name, "HARDLINKCOUNT", 13))
+ if (sscanf(stat->extension,
+ " HARDLINKCOUNT %u", &i_nlink) == 1)
set_nlink(inode, i_nlink);
}
}
@@ -1295,7 +1294,8 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
*/
static int
-v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+v9fs_vfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
p9_debug(P9_DEBUG_VFS, " %lu,%pd,%s\n",
dir->i_ino, dentry, symname);
@@ -1348,7 +1348,8 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
*/
static int
-v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
+v9fs_vfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
int retval;
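
The v9fs_stat2inode() hunk above drops the fixed-size ext[]/tag_name[] buffers and parses the 9p ".u" extension string with a single sscanf() against the literal " HARDLINKCOUNT %u". A minimal, stand-alone sketch of that parsing behaviour (the sample extension strings are illustrative, not taken from a real server):

#include <stdio.h>

/*
 * Same parsing rule as the new hunk: accept optional leading whitespace,
 * the literal "HARDLINKCOUNT", then an unsigned integer.  Returns 1 only
 * when the count was actually present in the extension string.
 */
static int parse_nlink(const char *extension, unsigned int *nlink)
{
	return sscanf(extension, " HARDLINKCOUNT %u", nlink) == 1;
}

int main(void)
{
	unsigned int n;

	if (parse_nlink("HARDLINKCOUNT 3", &n))
		printf("nlink = %u\n", n);	/* prints 3 */
	if (!parse_nlink("", &n))
		printf("no hard link count advertised\n");
	return 0;
}

Because sscanf() only reports a match when both the literal and the %u conversion succeed, an empty extension string from an older server is simply ignored, which is the case the comment in the hunk warns about.
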
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 823c2eb5f1bf..1dc7af046615 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -33,8 +33,8 @@
#include "acl.h"
static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
- dev_t rdev);
+v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t omode, dev_t rdev);
/**
* v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
@@ -218,10 +218,10 @@ int v9fs_open_to_dotl_flags(int flags)
*/
static int
-v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
- bool excl)
+v9fs_vfs_create_dotl(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t omode, bool excl)
{
- return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
+ return v9fs_vfs_mknod_dotl(mnt_userns, dir, dentry, omode, 0);
}
static int
@@ -367,8 +367,9 @@ err_clunk_old_fid:
*
*/
-static int v9fs_vfs_mkdir_dotl(struct inode *dir,
- struct dentry *dentry, umode_t omode)
+static int v9fs_vfs_mkdir_dotl(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
+ umode_t omode)
{
int err;
struct v9fs_session_info *v9ses;
@@ -457,8 +458,9 @@ error:
}
static int
-v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+v9fs_vfs_getattr_dotl(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
struct v9fs_session_info *v9ses;
@@ -468,7 +470,7 @@ v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
- generic_fillattr(d_inode(dentry), stat);
+ generic_fillattr(&init_user_ns, d_inode(dentry), stat);
return 0;
}
fid = v9fs_fid_lookup(dentry);
@@ -485,7 +487,7 @@ v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
return PTR_ERR(st);
v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
- generic_fillattr(d_inode(dentry), stat);
+ generic_fillattr(&init_user_ns, d_inode(dentry), stat);
/* Change block size to what the server returned */
stat->blksize = st->st_blksize;
@@ -540,7 +542,8 @@ static int v9fs_mapped_iattr_valid(int iattr_valid)
*
*/
-int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
+int v9fs_vfs_setattr_dotl(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *iattr)
{
int retval, use_dentry = 0;
struct p9_fid *fid = NULL;
@@ -549,7 +552,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
p9_debug(P9_DEBUG_VFS, "\n");
- retval = setattr_prepare(dentry, iattr);
+ retval = setattr_prepare(&init_user_ns, dentry, iattr);
if (retval)
return retval;
@@ -590,7 +593,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
truncate_setsize(inode, iattr->ia_size);
v9fs_invalidate_inode_attr(inode);
- setattr_copy(inode, iattr);
+ setattr_copy(&init_user_ns, inode, iattr);
mark_inode_dirty(inode);
if (iattr->ia_valid & ATTR_MODE) {
/* We also want to update ACL when we update mode bits */
@@ -684,8 +687,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
}
static int
-v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
- const char *symname)
+v9fs_vfs_symlink_dotl(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
int err;
kgid_t gid;
@@ -824,8 +827,8 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
*
*/
static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
- dev_t rdev)
+v9fs_vfs_mknod_dotl(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t omode, dev_t rdev)
{
int err;
kgid_t gid;
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 87217dd0433e..ee331845e2c7 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -157,6 +157,7 @@ static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
}
static int v9fs_xattr_handler_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/Kconfig b/fs/Kconfig
index da524c4d7b7e..a55bda4233bb 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -203,7 +203,7 @@ config TMPFS_XATTR
config TMPFS_INODE64
bool "Use 64-bit ino_t by default in tmpfs"
- depends on TMPFS && 64BIT && !(S390 || ALPHA)
+ depends on TMPFS && 64BIT
default n
help
tmpfs has historically used only inode numbers as wide as an unsigned
@@ -333,6 +333,10 @@ config NFS_COMMON
depends on NFSD || NFS_FS || LOCKD
default y
+config NFS_V4_2_SSC_HELPER
+ tristate
+ default y if NFS_V4=y || NFS_FS=y
+
source "net/sunrpc/Kconfig"
source "fs/ceph/Kconfig"
source "fs/cifs/Kconfig"
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 885da6d983b4..c6f1c8c1934e 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -29,7 +29,7 @@ config BINFMT_ELF
latest version).
config COMPAT_BINFMT_ELF
- bool
+ def_bool y
depends on COMPAT && BINFMT_ELF
select ELFCORE
@@ -45,7 +45,7 @@ config ARCH_USE_GNU_PROPERTY
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y if !BINFMT_ELF
- depends on (ARM || (SUPERH && !MMU) || C6X)
+ depends on (ARM || (SUPERH && !MMU))
select ELFCORE
help
ELF FDPIC binaries are based on ELF, but allow the individual load
diff --git a/fs/Makefile b/fs/Makefile
index 999d1a23f036..3215fe205256 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -64,7 +64,6 @@ obj-$(CONFIG_SYSFS) += sysfs/
obj-$(CONFIG_CONFIGFS_FS) += configfs/
obj-y += devpts/
-obj-$(CONFIG_PROFILING) += dcookies.o
obj-$(CONFIG_DLM) += dlm/
# Do not add any filesystems before this line
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 699c4fa8b78b..06b7c92343ad 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -144,7 +144,8 @@ struct adfs_discmap {
/* Inode stuff */
struct inode *adfs_iget(struct super_block *sb, struct object_info *obj);
int adfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-int adfs_notify_change(struct dentry *dentry, struct iattr *attr);
+int adfs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr);
/* map.c */
int adfs_map_lookup(struct super_block *sb, u32 frag_id, unsigned int offset);
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 32620f4a7623..fb7ee026d101 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -292,14 +292,15 @@ out:
* later.
*/
int
-adfs_notify_change(struct dentry *dentry, struct iattr *attr)
+adfs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
unsigned int ia_valid = attr->ia_valid;
int error;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
/*
* we can't change the UID or GID of any file -
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index a755bef7c4c7..bfa89e131ead 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -167,27 +167,33 @@ extern const struct export_operations affs_export_ops;
extern int affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len);
extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int);
extern int affs_unlink(struct inode *dir, struct dentry *dentry);
-extern int affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool);
-extern int affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
+extern int affs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool);
+extern int affs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode);
extern int affs_rmdir(struct inode *dir, struct dentry *dentry);
extern int affs_link(struct dentry *olddentry, struct inode *dir,
struct dentry *dentry);
-extern int affs_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname);
-extern int affs_rename2(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags);
+extern int affs_symlink(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
+ const char *symname);
+extern int affs_rename2(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags);
/* inode.c */
extern struct inode *affs_new_inode(struct inode *dir);
-extern int affs_notify_change(struct dentry *dentry, struct iattr *attr);
+extern int affs_notify_change(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr);
extern void affs_evict_inode(struct inode *inode);
extern struct inode *affs_iget(struct super_block *sb,
unsigned long ino);
extern int affs_write_inode(struct inode *inode,
struct writeback_control *wbc);
-extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type);
+extern int affs_add_entry(struct inode *dir, struct inode *inode,
+ struct dentry *dentry, s32 type);
/* file.c */
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 044412110b52..2352a75bd9d6 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -216,14 +216,15 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc)
}
int
-affs_notify_change(struct dentry *dentry, struct iattr *attr)
+affs_notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error;
pr_debug("notify_change(%lu,0x%x)\n", inode->i_ino, attr->ia_valid);
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
goto out;
@@ -249,7 +250,7 @@ affs_notify_change(struct dentry *dentry, struct iattr *attr)
affs_truncate(inode);
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
if (attr->ia_valid & ATTR_MODE)
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 41c5749f4db7..bcab18956b4f 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -242,7 +242,8 @@ affs_unlink(struct inode *dir, struct dentry *dentry)
}
int
-affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
+affs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
@@ -273,7 +274,8 @@ affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
}
int
-affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+affs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
int error;
@@ -311,7 +313,8 @@ affs_rmdir(struct inode *dir, struct dentry *dentry)
}
int
-affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+affs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
struct super_block *sb = dir->i_sb;
struct buffer_head *bh;
@@ -460,8 +463,10 @@ affs_xrename(struct inode *old_dir, struct dentry *old_dentry,
return -EIO;
bh_new = affs_bread(sb, d_inode(new_dentry)->i_ino);
- if (!bh_new)
+ if (!bh_new) {
+ affs_brelse(bh_old);
return -EIO;
+ }
/* Remove old header from its parent directory. */
affs_lock_dir(old_dir);
@@ -498,9 +503,9 @@ done:
return retval;
}
-int affs_rename2(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+int affs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 7bd659ad959e..17548c1faf02 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -28,18 +28,19 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name, int
loff_t fpos, u64 ino, unsigned dtype);
static int afs_lookup_filldir(struct dir_context *ctx, const char *name, int nlen,
loff_t fpos, u64 ino, unsigned dtype);
-static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl);
-static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
+static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl);
+static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode);
static int afs_rmdir(struct inode *dir, struct dentry *dentry);
static int afs_unlink(struct inode *dir, struct dentry *dentry);
static int afs_link(struct dentry *from, struct inode *dir,
struct dentry *dentry);
-static int afs_symlink(struct inode *dir, struct dentry *dentry,
- const char *content);
-static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags);
+static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *content);
+static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags);
static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags);
static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
@@ -69,7 +70,6 @@ const struct inode_operations afs_dir_inode_operations = {
.permission = afs_permission,
.getattr = afs_getattr,
.setattr = afs_setattr,
- .listxattr = afs_listxattr,
};
const struct address_space_operations afs_dir_aops = {
@@ -1325,7 +1325,8 @@ static const struct afs_operation_ops afs_mkdir_operation = {
/*
* create a directory on an AFS filesystem
*/
-static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
@@ -1619,8 +1620,8 @@ static const struct afs_operation_ops afs_create_operation = {
/*
* create a regular file on an AFS filesystem
*/
-static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
@@ -1741,8 +1742,8 @@ static const struct afs_operation_ops afs_symlink_operation = {
/*
* create a symlink in an AFS filesystem
*/
-static int afs_symlink(struct inode *dir, struct dentry *dentry,
- const char *content)
+static int afs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *content)
{
struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
@@ -1876,9 +1877,9 @@ static const struct afs_operation_ops afs_rename_operation = {
/*
* rename a file in an AFS filesystem and/or move it between directories
*/
-static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct afs_operation *op;
struct afs_vnode *orig_dvnode, *new_dvnode, *vnode;
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 85f5adf21aa0..960b64268623 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -43,7 +43,6 @@ const struct inode_operations afs_file_inode_operations = {
.getattr = afs_getattr,
.setattr = afs_setattr,
.permission = afs_permission,
- .listxattr = afs_listxattr,
};
const struct address_space_operations afs_fs_aops = {
diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
index 97cab12b0a6c..71c58723763d 100644
--- a/fs/afs/fs_operation.c
+++ b/fs/afs/fs_operation.c
@@ -181,10 +181,13 @@ void afs_wait_for_operation(struct afs_operation *op)
if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
op->ops->issue_yfs_rpc)
op->ops->issue_yfs_rpc(op);
- else
+ else if (op->ops->issue_afs_rpc)
op->ops->issue_afs_rpc(op);
+ else
+ op->ac.error = -ENOTSUPP;
- op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
+ if (op->call)
+ op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
}
switch (op->error) {
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index b0d7b892090d..12be88716e4c 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -27,7 +27,6 @@
static const struct inode_operations afs_symlink_inode_operations = {
.get_link = page_get_link,
- .listxattr = afs_listxattr,
};
static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode)
@@ -734,8 +733,8 @@ error_unlock:
/*
* read the attributes of an inode
*/
-int afs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
+int afs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct afs_vnode *vnode = AFS_FS_I(inode);
@@ -745,7 +744,7 @@ int afs_getattr(const struct path *path, struct kstat *stat,
do {
read_seqbegin_or_lock(&vnode->cb_lock, &seq);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) &&
stat->nlink > 0)
stat->nlink -= 1;
@@ -857,7 +856,8 @@ static const struct afs_operation_ops afs_setattr_operation = {
/*
* set the attributes of an inode
*/
-int afs_setattr(struct dentry *dentry, struct iattr *attr)
+int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 0d150a29e39e..1627b1872812 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -1149,8 +1149,9 @@ extern struct inode *afs_iget(struct afs_operation *, struct afs_vnode_param *);
extern struct inode *afs_root_iget(struct super_block *, struct key *);
extern bool afs_check_validity(struct afs_vnode *);
extern int afs_validate(struct afs_vnode *, struct key *);
-extern int afs_getattr(const struct path *, struct kstat *, u32, unsigned int);
-extern int afs_setattr(struct dentry *, struct iattr *);
+extern int afs_getattr(struct user_namespace *mnt_userns, const struct path *,
+ struct kstat *, u32, unsigned int);
+extern int afs_setattr(struct user_namespace *mnt_userns, struct dentry *, struct iattr *);
extern void afs_evict_inode(struct inode *);
extern int afs_drop_inode(struct inode *);
@@ -1361,7 +1362,7 @@ extern void afs_zap_permits(struct rcu_head *);
extern struct key *afs_request_key(struct afs_cell *);
extern struct key *afs_request_key_rcu(struct afs_cell *);
extern int afs_check_permit(struct afs_vnode *, struct key *, afs_access_t *);
-extern int afs_permission(struct inode *, int);
+extern int afs_permission(struct user_namespace *, struct inode *, int);
extern void __exit afs_clean_up_permit_cache(void);
/*
@@ -1508,7 +1509,6 @@ extern int afs_launder_page(struct page *);
* xattr.c
*/
extern const struct xattr_handler *afs_xattr_handlers[];
-extern ssize_t afs_listxattr(struct dentry *, char *, size_t);
/*
* yfsclient.c
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 052dab2f5c03..bbb2c210d139 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -32,7 +32,6 @@ const struct inode_operations afs_mntpt_inode_operations = {
.lookup = afs_mntpt_lookup,
.readlink = page_readlink,
.getattr = afs_getattr,
- .listxattr = afs_listxattr,
};
const struct inode_operations afs_autocell_inode_operations = {
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 9cf3102f370c..3c7a8fc4f93f 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -396,7 +396,8 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key,
* - AFS ACLs are attached to directories only, and a file is controlled by its
* parent directory's ACL
*/
-int afs_permission(struct inode *inode, int mask)
+int afs_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask)
{
struct afs_vnode *vnode = AFS_FS_I(inode);
afs_access_t access;
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index 95c573dcda11..7751b0b3f81d 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -11,29 +11,6 @@
#include <linux/xattr.h>
#include "internal.h"
-static const char afs_xattr_list[] =
- "afs.acl\0"
- "afs.cell\0"
- "afs.fid\0"
- "afs.volume\0"
- "afs.yfs.acl\0"
- "afs.yfs.acl_inherited\0"
- "afs.yfs.acl_num_cleaned\0"
- "afs.yfs.vol_acl";
-
-/*
- * Retrieve a list of the supported xattrs.
- */
-ssize_t afs_listxattr(struct dentry *dentry, char *buffer, size_t size)
-{
- if (size == 0)
- return sizeof(afs_xattr_list);
- if (size < sizeof(afs_xattr_list))
- return -ERANGE;
- memcpy(buffer, afs_xattr_list, sizeof(afs_xattr_list));
- return sizeof(afs_xattr_list);
-}
-
/*
* Deal with the result of a successful fetch ACL operation.
*/
@@ -120,6 +97,7 @@ static const struct afs_operation_ops afs_store_acl_operation = {
* Set a file's AFS3 ACL.
*/
static int afs_xattr_set_acl(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry,
struct inode *inode, const char *name,
const void *buffer, size_t size, int flags)
@@ -230,6 +208,8 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
else
ret = -ERANGE;
}
+ } else if (ret == -ENOTSUPP) {
+ ret = -ENODATA;
}
error_yacl:
@@ -248,12 +228,14 @@ static const struct afs_operation_ops yfs_store_opaque_acl2_operation = {
* Set a file's YFS ACL.
*/
static int afs_xattr_set_yfs(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry,
struct inode *inode, const char *name,
const void *buffer, size_t size, int flags)
{
struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(inode);
+ int ret;
if (flags == XATTR_CREATE ||
strcmp(name, "acl") != 0)
@@ -268,7 +250,10 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler,
return afs_put_operation(op);
op->ops = &yfs_store_opaque_acl2_operation;
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ if (ret == -ENOTSUPP)
+ ret = -ENODATA;
+ return ret;
}
static const struct xattr_handler afs_xattr_yfs_handler = {
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 89714308c25b..a280156138ed 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -55,61 +55,79 @@ static struct file_system_type anon_inode_fs_type = {
.kill_sb = kill_anon_super,
};
-/**
- * anon_inode_getfile - creates a new file instance by hooking it up to an
- * anonymous inode, and a dentry that describe the "class"
- * of the file
- *
- * @name: [in] name of the "class" of the new file
- * @fops: [in] file operations for the new file
- * @priv: [in] private data for the new file (will be file's private_data)
- * @flags: [in] flags
- *
- * Creates a new file by hooking it on a single inode. This is useful for files
- * that do not need to have a full-fledged inode in order to operate correctly.
- * All the files created with anon_inode_getfile() will share a single inode,
- * hence saving memory and avoiding code duplication for the file/inode/dentry
- * setup. Returns the newly created file* or an error pointer.
- */
-struct file *anon_inode_getfile(const char *name,
- const struct file_operations *fops,
- void *priv, int flags)
+static struct inode *anon_inode_make_secure_inode(
+ const char *name,
+ const struct inode *context_inode)
{
- struct file *file;
+ struct inode *inode;
+ const struct qstr qname = QSTR_INIT(name, strlen(name));
+ int error;
+
+ inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
+ if (IS_ERR(inode))
+ return inode;
+ inode->i_flags &= ~S_PRIVATE;
+ error = security_inode_init_security_anon(inode, &qname, context_inode);
+ if (error) {
+ iput(inode);
+ return ERR_PTR(error);
+ }
+ return inode;
+}
- if (IS_ERR(anon_inode_inode))
- return ERR_PTR(-ENODEV);
+static struct file *__anon_inode_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode,
+ bool secure)
+{
+ struct inode *inode;
+ struct file *file;
if (fops->owner && !try_module_get(fops->owner))
return ERR_PTR(-ENOENT);
- /*
- * We know the anon_inode inode count is always greater than zero,
- * so ihold() is safe.
- */
- ihold(anon_inode_inode);
- file = alloc_file_pseudo(anon_inode_inode, anon_inode_mnt, name,
+ if (secure) {
+ inode = anon_inode_make_secure_inode(name, context_inode);
+ if (IS_ERR(inode)) {
+ file = ERR_CAST(inode);
+ goto err;
+ }
+ } else {
+ inode = anon_inode_inode;
+ if (IS_ERR(inode)) {
+ file = ERR_PTR(-ENODEV);
+ goto err;
+ }
+ /*
+ * We know the anon_inode inode count is always
+ * greater than zero, so ihold() is safe.
+ */
+ ihold(inode);
+ }
+
+ file = alloc_file_pseudo(inode, anon_inode_mnt, name,
flags & (O_ACCMODE | O_NONBLOCK), fops);
if (IS_ERR(file))
- goto err;
+ goto err_iput;
- file->f_mapping = anon_inode_inode->i_mapping;
+ file->f_mapping = inode->i_mapping;
file->private_data = priv;
return file;
+err_iput:
+ iput(inode);
err:
- iput(anon_inode_inode);
module_put(fops->owner);
return file;
}
-EXPORT_SYMBOL_GPL(anon_inode_getfile);
/**
- * anon_inode_getfd - creates a new file instance by hooking it up to an
- * anonymous inode, and a dentry that describe the "class"
- * of the file
+ * anon_inode_getfile - creates a new file instance by hooking it up to an
+ * anonymous inode, and a dentry that describe the "class"
+ * of the file
*
* @name: [in] name of the "class" of the new file
* @fops: [in] file operations for the new file
@@ -118,12 +136,23 @@ EXPORT_SYMBOL_GPL(anon_inode_getfile);
*
* Creates a new file by hooking it on a single inode. This is useful for files
* that do not need to have a full-fledged inode in order to operate correctly.
- * All the files created with anon_inode_getfd() will share a single inode,
+ * All the files created with anon_inode_getfile() will share a single inode,
* hence saving memory and avoiding code duplication for the file/inode/dentry
- * setup. Returns new descriptor or an error code.
+ * setup. Returns the newly created file* or an error pointer.
*/
-int anon_inode_getfd(const char *name, const struct file_operations *fops,
- void *priv, int flags)
+struct file *anon_inode_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags)
+{
+ return __anon_inode_getfile(name, fops, priv, flags, NULL, false);
+}
+EXPORT_SYMBOL_GPL(anon_inode_getfile);
+
+static int __anon_inode_getfd(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode,
+ bool secure)
{
int error, fd;
struct file *file;
@@ -133,7 +162,8 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops,
return error;
fd = error;
- file = anon_inode_getfile(name, fops, priv, flags);
+ file = __anon_inode_getfile(name, fops, priv, flags, context_inode,
+ secure);
if (IS_ERR(file)) {
error = PTR_ERR(file);
goto err_put_unused_fd;
@@ -146,8 +176,55 @@ err_put_unused_fd:
put_unused_fd(fd);
return error;
}
+
+/**
+ * anon_inode_getfd - creates a new file instance by hooking it up to
+ * an anonymous inode and a dentry that describe
+ * the "class" of the file
+ *
+ * @name: [in] name of the "class" of the new file
+ * @fops: [in] file operations for the new file
+ * @priv: [in] private data for the new file (will be file's private_data)
+ * @flags: [in] flags
+ *
+ * Creates a new file by hooking it on a single inode. This is
+ * useful for files that do not need to have a full-fledged inode in
+ * order to operate correctly. All the files created with
+ * anon_inode_getfd() will use the same singleton inode, reducing
+ * memory use and avoiding code duplication for the file/inode/dentry
+ * setup. Returns a newly created file descriptor or an error code.
+ */
+int anon_inode_getfd(const char *name, const struct file_operations *fops,
+ void *priv, int flags)
+{
+ return __anon_inode_getfd(name, fops, priv, flags, NULL, false);
+}
EXPORT_SYMBOL_GPL(anon_inode_getfd);
+/**
+ * anon_inode_getfd_secure - Like anon_inode_getfd(), but creates a new
+ * !S_PRIVATE anon inode rather than reuse the singleton anon inode, and calls
+ * the inode_init_security_anon() LSM hook. This allows the inode to have its
+ * own security context and for a LSM to reject creation of the inode.
+ *
+ * @name: [in] name of the "class" of the new file
+ * @fops: [in] file operations for the new file
+ * @priv: [in] private data for the new file (will be file's private_data)
+ * @flags: [in] flags
+ * @context_inode:
+ * [in] the logical relationship with the new inode (optional)
+ *
+ * The LSM may use @context_inode in inode_init_security_anon(), but a
+ * reference to it is not held.
+ */
+int anon_inode_getfd_secure(const char *name, const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode)
+{
+ return __anon_inode_getfd(name, fops, priv, flags, context_inode, true);
+}
+EXPORT_SYMBOL_GPL(anon_inode_getfd_secure);
+
static int __init anon_inode_init(void)
{
anon_inode_mnt = kern_mount(&anon_inode_fs_type);
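
The anon_inodes.c changes above refactor anon_inode_getfile()/anon_inode_getfd() around a common __anon_inode_getfile() helper and export a new anon_inode_getfd_secure(), which allocates a private, non-S_PRIVATE inode and runs the inode_init_security_anon() LSM hook instead of reusing the shared singleton inode. A hedged, kernel-style sketch of a caller (the my_ctx names are placeholders for illustration and are not part of this diff; fs/userfaultfd.c in the diffstat is one real adopter, but its hunk is not shown here):

#include <linux/anon_inodes.h>
#include <linux/fs.h>

/*
 * Sketch only (kernel context, not buildable outside a kernel tree).
 * Hand userspace an fd whose backing inode gets its own security label
 * via the inode_init_security_anon() LSM hook.  Passing NULL as the
 * context_inode means the LSM sees no related inode.
 */
static int my_ctx_install_fd(const struct file_operations *my_ctx_fops,
			     void *my_ctx)
{
	return anon_inode_getfd_secure("[my-ctx]", my_ctx_fops, my_ctx,
				       O_RDWR | O_CLOEXEC, NULL);
}

Unlike anon_inode_getfd(), a failure inside the LSM hook propagates back to the caller as an error rather than silently falling back to the shared inode.
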
diff --git a/fs/attr.c b/fs/attr.c
index b4bbdbd4c8ca..87ef39db1c34 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -18,27 +18,55 @@
#include <linux/evm.h>
#include <linux/ima.h>
-static bool chown_ok(const struct inode *inode, kuid_t uid)
+/**
+ * chown_ok - verify permissions to chown inode
+ * @mnt_userns: user namespace of the mount @inode was found from
+ * @inode: inode to check permissions on
+ * @uid: uid to chown @inode to
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode simply passs init_user_ns.
+ */
+static bool chown_ok(struct user_namespace *mnt_userns,
+ const struct inode *inode,
+ kuid_t uid)
{
- if (uid_eq(current_fsuid(), inode->i_uid) &&
- uid_eq(uid, inode->i_uid))
+ kuid_t kuid = i_uid_into_mnt(mnt_userns, inode);
+ if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, kuid))
return true;
- if (capable_wrt_inode_uidgid(inode, CAP_CHOWN))
+ if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
return true;
- if (uid_eq(inode->i_uid, INVALID_UID) &&
+ if (uid_eq(kuid, INVALID_UID) &&
ns_capable(inode->i_sb->s_user_ns, CAP_CHOWN))
return true;
return false;
}
-static bool chgrp_ok(const struct inode *inode, kgid_t gid)
+/**
+ * chgrp_ok - verify permissions to chgrp inode
+ * @mnt_userns: user namespace of the mount @inode was found from
+ * @inode: inode to check permissions on
+ * @gid: gid to chown @inode to
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode, simply pass init_user_ns.
+ */
+static bool chgrp_ok(struct user_namespace *mnt_userns,
+ const struct inode *inode, kgid_t gid)
{
- if (uid_eq(current_fsuid(), inode->i_uid) &&
- (in_group_p(gid) || gid_eq(gid, inode->i_gid)))
+ kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
+ if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)) &&
+ (in_group_p(gid) || gid_eq(gid, kgid)))
return true;
- if (capable_wrt_inode_uidgid(inode, CAP_CHOWN))
+ if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
return true;
- if (gid_eq(inode->i_gid, INVALID_GID) &&
+ if (gid_eq(kgid, INVALID_GID) &&
ns_capable(inode->i_sb->s_user_ns, CAP_CHOWN))
return true;
return false;
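For readers new to idmapped mounts, a conceptual sketch of the ownership test both helpers now perform; it only uses the calls visible above (i_uid_into_mnt(), current_fsuid(), uid_eq()) and is not itself part of the patch.

    #include <linux/fs.h>
    #include <linux/cred.h>

    static bool owner_matches(struct user_namespace *mnt_userns,
                              const struct inode *inode)
    {
            /* Map the raw inode owner through the mount's user namespace first. */
            kuid_t kuid = i_uid_into_mnt(mnt_userns, inode);

            return uid_eq(current_fsuid(), kuid);
    }

On a non-idmapped mount mnt_userns is init_user_ns, the mapping is the identity, and the check degenerates to the old uid_eq(current_fsuid(), inode->i_uid).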
@@ -46,6 +74,7 @@ static bool chgrp_ok(const struct inode *inode, kgid_t gid)
/**
* setattr_prepare - check if attribute changes to a dentry are allowed
+ * @mnt_userns: user namespace of the mount the inode was found from
* @dentry: dentry to check
* @attr: attributes to change
*
@@ -55,10 +84,17 @@ static bool chgrp_ok(const struct inode *inode, kgid_t gid)
* SGID bit from mode if user is not allowed to set it. Also file capabilities
* and IMA extended attributes are cleared if ATTR_KILL_PRIV is set.
*
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode, simply pass init_user_ns.
+ *
* Should be called as the first thing in ->setattr implementations,
* possibly after taking additional locks.
*/
-int setattr_prepare(struct dentry *dentry, struct iattr *attr)
+int setattr_prepare(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
unsigned int ia_valid = attr->ia_valid;
@@ -78,27 +114,27 @@ int setattr_prepare(struct dentry *dentry, struct iattr *attr)
goto kill_priv;
/* Make sure a caller can chown. */
- if ((ia_valid & ATTR_UID) && !chown_ok(inode, attr->ia_uid))
+ if ((ia_valid & ATTR_UID) && !chown_ok(mnt_userns, inode, attr->ia_uid))
return -EPERM;
/* Make sure caller can chgrp. */
- if ((ia_valid & ATTR_GID) && !chgrp_ok(inode, attr->ia_gid))
+ if ((ia_valid & ATTR_GID) && !chgrp_ok(mnt_userns, inode, attr->ia_gid))
return -EPERM;
/* Make sure a caller can chmod. */
if (ia_valid & ATTR_MODE) {
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(mnt_userns, inode))
return -EPERM;
/* Also check the setgid bit! */
- if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
- inode->i_gid) &&
- !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
+ i_gid_into_mnt(mnt_userns, inode)) &&
+ !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
attr->ia_mode &= ~S_ISGID;
}
/* Check for setting the inode time. */
if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) {
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(mnt_userns, inode))
return -EPERM;
}
@@ -107,7 +143,7 @@ kill_priv:
if (ia_valid & ATTR_KILL_PRIV) {
int error;
- error = security_inode_killpriv(dentry);
+ error = security_inode_killpriv(mnt_userns, dentry);
if (error)
return error;
}
@@ -162,20 +198,33 @@ EXPORT_SYMBOL(inode_newsize_ok);
/**
* setattr_copy - copy simple metadata updates into the generic inode
+ * @mnt_userns: user namespace of the mount the inode was found from
* @inode: the inode to be updated
* @attr: the new attributes
*
* setattr_copy must be called with i_mutex held.
*
* setattr_copy updates the inode's metadata with that specified
- * in attr. Noticeably missing is inode size update, which is more complex
+ * in attr on idmapped mounts. If file ownership is changed setattr_copy
+ * doesn't map ia_uid and ia_gid. It will assume the caller has already
+ * provided the intended values. Necessary permission checks to determine
+ * whether or not the S_ISGID property needs to be removed are performed with
+ * the correct idmapped mount permission helpers.
+ * Noticeably missing is inode size update, which is more complex
* as it requires pagecache updates.
*
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode, simply pass init_user_ns.
+ *
* The inode is not marked as dirty after this operation. The rationale is
* that for "simple" filesystems, the struct inode is the inode storage.
* The caller is free to mark the inode dirty afterwards if needed.
*/
-void setattr_copy(struct inode *inode, const struct iattr *attr)
+void setattr_copy(struct user_namespace *mnt_userns, struct inode *inode,
+ const struct iattr *attr)
{
unsigned int ia_valid = attr->ia_valid;
@@ -191,9 +240,9 @@ void setattr_copy(struct inode *inode, const struct iattr *attr)
inode->i_ctime = attr->ia_ctime;
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
-
- if (!in_group_p(inode->i_gid) &&
- !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
+ if (!in_group_p(kgid) &&
+ !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
mode &= ~S_ISGID;
inode->i_mode = mode;
}
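Putting the two helpers together, a sketch of how a simple in-core filesystem's ->setattr is expected to look after this change; myfs_setattr is a hypothetical name, but the call sequence mirrors setattr_prepare()/setattr_copy() as updated above.

    #include <linux/fs.h>

    static int myfs_setattr(struct user_namespace *mnt_userns,
                            struct dentry *dentry, struct iattr *attr)
    {
            struct inode *inode = d_inode(dentry);
            int error;

            /* Permission checks are done against the mount's user namespace. */
            error = setattr_prepare(mnt_userns, dentry, attr);
            if (error)
                    return error;

            setattr_copy(mnt_userns, inode, attr);
            mark_inode_dirty(inode);
            return 0;
    }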
@@ -202,6 +251,7 @@ EXPORT_SYMBOL(setattr_copy);
/**
* notify_change - modify attributes of a filesytem object
+ * @mnt_userns: user namespace of the mount the inode was found from
* @dentry: object affected
* @attr: new attributes
* @delegated_inode: returns inode, if the inode is delegated
@@ -214,13 +264,23 @@ EXPORT_SYMBOL(setattr_copy);
* retry. Because breaking a delegation may take a long time, the
* caller should drop the i_mutex before doing so.
*
+ * If file ownership is changed notify_change() doesn't map ia_uid and
+ * ia_gid. It will assume the caller has already provided the intended values.
+ *
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported. Also, passing NULL is fine for callers holding
* the file open for write, as there can be no conflicting delegation in
* that case.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode, simply pass init_user_ns.
*/
-int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **delegated_inode)
+int notify_change(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr, struct inode **delegated_inode)
{
struct inode *inode = dentry->d_inode;
umode_t mode = inode->i_mode;
@@ -243,8 +303,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
if (IS_IMMUTABLE(inode))
return -EPERM;
- if (!inode_owner_or_capable(inode)) {
- error = inode_permission(inode, MAY_WRITE);
+ if (!inode_owner_or_capable(mnt_userns, inode)) {
+ error = inode_permission(mnt_userns, inode, MAY_WRITE);
if (error)
return error;
}
@@ -320,9 +380,11 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
/* Don't allow modifications of files with invalid uids or
* gids unless those uids & gids are being made valid.
*/
- if (!(ia_valid & ATTR_UID) && !uid_valid(inode->i_uid))
+ if (!(ia_valid & ATTR_UID) &&
+ !uid_valid(i_uid_into_mnt(mnt_userns, inode)))
return -EOVERFLOW;
- if (!(ia_valid & ATTR_GID) && !gid_valid(inode->i_gid))
+ if (!(ia_valid & ATTR_GID) &&
+ !gid_valid(i_gid_into_mnt(mnt_userns, inode)))
return -EOVERFLOW;
error = security_inode_setattr(dentry, attr);
@@ -333,13 +395,13 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
return error;
if (inode->i_op->setattr)
- error = inode->i_op->setattr(dentry, attr);
+ error = inode->i_op->setattr(mnt_userns, dentry, attr);
else
- error = simple_setattr(dentry, attr);
+ error = simple_setattr(mnt_userns, dentry, attr);
if (!error) {
fsnotify_change(dentry, ia_valid);
- ima_inode_post_setattr(dentry);
+ ima_inode_post_setattr(mnt_userns, dentry);
evm_inode_post_setattr(dentry, ia_valid);
}
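On the caller side, a hedged sketch of how a path-based chmod might invoke the reworked notify_change(); mnt_user_ns() is assumed to be the accessor for the vfsmount's user namespace added by the same series, and delegation breaking and the LSM path hooks are deliberately omitted to keep the sketch short.

    #include <linux/fs.h>
    #include <linux/mount.h>

    static int chmod_path_sketch(const struct path *path, umode_t mode)
    {
            struct user_namespace *mnt_userns = mnt_user_ns(path->mnt);
            struct inode *inode = d_inode(path->dentry);
            struct iattr newattrs = {
                    .ia_valid = ATTR_MODE | ATTR_CTIME,
                    .ia_mode  = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO),
            };
            int error;

            inode_lock(inode);
            error = notify_change(mnt_userns, path->dentry, &newattrs, NULL);
            inode_unlock(inode);
            return error;
    }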
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 5aaa1732bf1e..91fe4548c256 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -10,10 +10,12 @@
#include "autofs_i.h"
-static int autofs_dir_symlink(struct inode *, struct dentry *, const char *);
+static int autofs_dir_symlink(struct user_namespace *, struct inode *,
+ struct dentry *, const char *);
static int autofs_dir_unlink(struct inode *, struct dentry *);
static int autofs_dir_rmdir(struct inode *, struct dentry *);
-static int autofs_dir_mkdir(struct inode *, struct dentry *, umode_t);
+static int autofs_dir_mkdir(struct user_namespace *, struct inode *,
+ struct dentry *, umode_t);
static long autofs_root_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long autofs_root_compat_ioctl(struct file *,
@@ -524,9 +526,9 @@ static struct dentry *autofs_lookup(struct inode *dir,
return NULL;
}
-static int autofs_dir_symlink(struct inode *dir,
- struct dentry *dentry,
- const char *symname)
+static int autofs_dir_symlink(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
+ const char *symname)
{
struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
struct autofs_info *ino = autofs_dentry_ino(dentry);
@@ -715,8 +717,9 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
return 0;
}
-static int autofs_dir_mkdir(struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static int autofs_dir_mkdir(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode)
{
struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
struct autofs_info *ino = autofs_dentry_ino(dentry);
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 54f0ce444272..48e16144c1f7 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -27,8 +27,9 @@ static const struct file_operations bad_file_ops =
.open = bad_file_open,
};
-static int bad_inode_create (struct inode *dir, struct dentry *dentry,
- umode_t mode, bool excl)
+static int bad_inode_create(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool excl)
{
return -EIO;
}
@@ -50,14 +51,15 @@ static int bad_inode_unlink(struct inode *dir, struct dentry *dentry)
return -EIO;
}
-static int bad_inode_symlink (struct inode *dir, struct dentry *dentry,
- const char *symname)
+static int bad_inode_symlink(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
+ const char *symname)
{
return -EIO;
}
-static int bad_inode_mkdir(struct inode *dir, struct dentry *dentry,
- umode_t mode)
+static int bad_inode_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
return -EIO;
}
@@ -67,13 +69,14 @@ static int bad_inode_rmdir (struct inode *dir, struct dentry *dentry)
return -EIO;
}
-static int bad_inode_mknod (struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
+static int bad_inode_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
return -EIO;
}
-static int bad_inode_rename2(struct inode *old_dir, struct dentry *old_dentry,
+static int bad_inode_rename2(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
@@ -86,18 +89,21 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
return -EIO;
}
-static int bad_inode_permission(struct inode *inode, int mask)
+static int bad_inode_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
return -EIO;
}
-static int bad_inode_getattr(const struct path *path, struct kstat *stat,
+static int bad_inode_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
return -EIO;
}
-static int bad_inode_setattr(struct dentry *direntry, struct iattr *attrs)
+static int bad_inode_setattr(struct user_namespace *mnt_userns,
+ struct dentry *direntry, struct iattr *attrs)
{
return -EIO;
}
@@ -140,13 +146,15 @@ static int bad_inode_atomic_open(struct inode *inode, struct dentry *dentry,
return -EIO;
}
-static int bad_inode_tmpfile(struct inode *inode, struct dentry *dentry,
+static int bad_inode_tmpfile(struct user_namespace *mnt_userns,
+ struct inode *inode, struct dentry *dentry,
umode_t mode)
{
return -EIO;
}
-static int bad_inode_set_acl(struct inode *inode, struct posix_acl *acl,
+static int bad_inode_set_acl(struct user_namespace *mnt_userns,
+ struct inode *inode, struct posix_acl *acl,
int type)
{
return -EIO;
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index d8dfe3a0cb39..34d4f68f786b 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -75,8 +75,8 @@ const struct file_operations bfs_dir_operations = {
.llseek = generic_file_llseek,
};
-static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int bfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
int err;
struct inode *inode;
@@ -96,7 +96,7 @@ static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
}
set_bit(ino, info->si_imap);
info->si_freei--;
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
inode->i_blocks = 0;
inode->i_op = &bfs_file_inops;
@@ -199,9 +199,9 @@ out_brelse:
return error;
}
-static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int bfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct inode *old_inode, *new_inode;
struct buffer_head *old_bh, *new_bh;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 950bc177238a..b12ba98ae9f5 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -186,6 +186,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
unsigned char k_rand_bytes[16];
int items;
elf_addr_t *elf_info;
+ elf_addr_t flags = 0;
int ei_index;
const struct cred *cred = current_cred();
struct vm_area_struct *vma;
@@ -260,7 +261,9 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
NEW_AUX_ENT(AT_BASE, interp_load_addr);
- NEW_AUX_ENT(AT_FLAGS, 0);
+ if (bprm->interp_flags & BINPRM_FLAGS_PRESERVE_ARGV0)
+ flags |= AT_FLAGS_PRESERVE_ARGV0;
+ NEW_AUX_ENT(AT_FLAGS, flags);
NEW_AUX_ENT(AT_ENTRY, e_entry);
NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
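From userspace, an interpreter that wants to honour the new bit can test it via the auxiliary vector; the snippet below is illustrative only, and assumes AT_FLAGS_PRESERVE_ARGV0 is bit 0 as defined in the uapi binfmts header introduced with this change.

    #include <elf.h>
    #include <stdbool.h>
    #include <sys/auxv.h>

    #ifndef AT_FLAGS_PRESERVE_ARGV0
    #define AT_FLAGS_PRESERVE_ARGV0 (1 << 0)   /* assumed uapi value */
    #endif

    static bool argv0_preserved(void)
    {
            return getauxval(AT_FLAGS) & AT_FLAGS_PRESERVE_ARGV0;
    }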
@@ -1495,7 +1498,7 @@ static void fill_note(struct memelfnote *note, const char *name, int type,
* fill up all the fields in prstatus from the given task struct, except
* registers which need to be filled up separately.
*/
-static void fill_prstatus(struct elf_prstatus *prstatus,
+static void fill_prstatus(struct elf_prstatus_common *prstatus,
struct task_struct *p, long signr)
{
prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
@@ -1717,11 +1720,11 @@ static void do_thread_regset_writeback(struct task_struct *task,
}
#ifndef PRSTATUS_SIZE
-#define PRSTATUS_SIZE(S, R) sizeof(S)
+#define PRSTATUS_SIZE sizeof(struct elf_prstatus)
#endif
#ifndef SET_PR_FPVALID
-#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
+#define SET_PR_FPVALID(S) ((S)->pr_fpvalid = 1)
#endif
static int fill_thread_core_info(struct elf_thread_core_info *t,
@@ -1729,7 +1732,6 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
long signr, size_t *total)
{
unsigned int i;
- int regset0_size;
/*
* NT_PRSTATUS is the one special case, because the regset data
@@ -1737,14 +1739,12 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
 * than being the whole note contents. We fill the rest in here.
* We assume that regset 0 is NT_PRSTATUS.
*/
- fill_prstatus(&t->prstatus, t->task, signr);
- regset0_size = regset_get(t->task, &view->regsets[0],
+ fill_prstatus(&t->prstatus.common, t->task, signr);
+ regset_get(t->task, &view->regsets[0],
sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
- if (regset0_size < 0)
- return 0;
fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
- PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
+ PRSTATUS_SIZE, &t->prstatus);
*total += notesize(&t->notes[0]);
do_thread_regset_writeback(t->task, &view->regsets[0]);
@@ -1772,7 +1772,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
continue;
if (is_fpreg)
- SET_PR_FPVALID(&t->prstatus, 1, regset0_size);
+ SET_PR_FPVALID(&t->prstatus);
fill_note(&t->notes[i], is_fpreg ? "CORE" : "LINUX",
note_type, ret, data);
@@ -1961,7 +1961,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
struct task_struct *p = t->thread;
t->num_notes = 0;
- fill_prstatus(&t->prstatus, p, signr);
+ fill_prstatus(&t->prstatus.common, p, signr);
elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
@@ -2040,7 +2040,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
}
/* now collect the dump for the current */
memset(info->prstatus, 0, sizeof(*info->prstatus));
- fill_prstatus(info->prstatus, current, siginfo->si_signo);
+ fill_prstatus(&info->prstatus->common, current, siginfo->si_signo);
elf_core_copy_regs(&info->prstatus->pr_reg, regs);
/* Set up header */
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index be4062b8ba75..3cfd6cd46f26 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -506,6 +506,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
char __user *u_platform, *u_base_platform, *p;
int loop;
int nr; /* reset for each csp adjustment */
+ unsigned long flags = 0;
#ifdef CONFIG_MMU
/* In some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
@@ -648,7 +649,9 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
NEW_AUX_ENT(AT_PHNUM, exec_params->hdr.e_phnum);
NEW_AUX_ENT(AT_BASE, interp_params->elfhdr_addr);
- NEW_AUX_ENT(AT_FLAGS, 0);
+ if (bprm->interp_flags & BINPRM_FLAGS_PRESERVE_ARGV0)
+ flags |= AT_FLAGS_PRESERVE_ARGV0;
+ NEW_AUX_ENT(AT_FLAGS, flags);
NEW_AUX_ENT(AT_ENTRY, exec_params->entry_addr);
NEW_AUX_ENT(AT_UID, (elf_addr_t) from_kuid_munged(cred->user_ns, cred->uid));
NEW_AUX_ENT(AT_EUID, (elf_addr_t) from_kuid_munged(cred->user_ns, cred->euid));
@@ -1191,18 +1194,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
struct elf_prstatus_fdpic
{
- struct elf_siginfo pr_info; /* Info associated with signal */
- short pr_cursig; /* Current signal */
- unsigned long pr_sigpend; /* Set of pending signals */
- unsigned long pr_sighold; /* Set of held signals */
- pid_t pr_pid;
- pid_t pr_ppid;
- pid_t pr_pgrp;
- pid_t pr_sid;
- struct __kernel_old_timeval pr_utime; /* User time */
- struct __kernel_old_timeval pr_stime; /* System time */
- struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
- struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
+ struct elf_prstatus_common common;
elf_gregset_t pr_reg; /* GP registers */
/* When using FDPIC, the loadmap addresses need to be communicated
* to GDB in order for GDB to do the necessary relocations. The
@@ -1301,7 +1293,7 @@ static inline void fill_note(struct memelfnote *note, const char *name, int type
* fill up all the fields in prstatus from the given task struct, except
* registers which need to be filled up separately.
*/
-static void fill_prstatus(struct elf_prstatus_fdpic *prstatus,
+static void fill_prstatus(struct elf_prstatus_common *prstatus,
struct task_struct *p, long signr)
{
prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
@@ -1332,9 +1324,6 @@ static void fill_prstatus(struct elf_prstatus_fdpic *prstatus,
}
prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
-
- prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
- prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
@@ -1405,7 +1394,9 @@ static struct elf_thread_status *elf_dump_thread_status(long signr, struct task_
if (!t)
return t;
- fill_prstatus(&t->prstatus, p, signr);
+ fill_prstatus(&t->prstatus.common, p, signr);
+ t->prstatus.pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
+ t->prstatus.pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
regset_get(p, &view->regsets[0],
sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 3880a82da1dc..e1eae7ea823a 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -153,7 +153,9 @@ static int load_misc_binary(struct linux_binprm *bprm)
if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
goto ret;
- if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
+ if (fmt->flags & MISC_FMT_PRESERVE_ARGV0) {
+ bprm->interp_flags |= BINPRM_FLAGS_PRESERVE_ARGV0;
+ } else {
retval = remove_arg_zero(bprm);
if (retval)
goto ret;
@@ -647,12 +649,24 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
struct super_block *sb = file_inode(file)->i_sb;
struct dentry *root = sb->s_root, *dentry;
int err = 0;
+ struct file *f = NULL;
e = create_entry(buffer, count);
if (IS_ERR(e))
return PTR_ERR(e);
+ if (e->flags & MISC_FMT_OPEN_FILE) {
+ f = open_exec(e->interpreter);
+ if (IS_ERR(f)) {
+ pr_notice("register: failed to install interpreter file %s\n",
+ e->interpreter);
+ kfree(e);
+ return PTR_ERR(f);
+ }
+ e->interp_file = f;
+ }
+
inode_lock(d_inode(root));
dentry = lookup_one_len(e->name, root, strlen(e->name));
err = PTR_ERR(dentry);
@@ -676,21 +690,6 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
goto out2;
}
- if (e->flags & MISC_FMT_OPEN_FILE) {
- struct file *f;
-
- f = open_exec(e->interpreter);
- if (IS_ERR(f)) {
- err = PTR_ERR(f);
- pr_notice("register: failed to install interpreter file %s\n", e->interpreter);
- simple_release_fs(&bm_mnt, &entry_count);
- iput(inode);
- inode = NULL;
- goto out2;
- }
- e->interp_file = f;
- }
-
e->dentry = dget(dentry);
inode->i_private = e;
inode->i_fop = &bm_entry_operations;
@@ -707,6 +706,8 @@ out:
inode_unlock(d_inode(root));
if (err) {
+ if (f)
+ filp_close(f, NULL);
kfree(e);
return err;
}
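For completeness, a hedged userspace sketch of registering an entry that opts into argv[0] preservation; the 'P' flag character, the format name, magic and interpreter path here are all assumptions for illustration, not taken from this diff.

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Registration string format: :name:type:offset:magic:mask:interpreter:flags */
    static int register_myfmt(void)
    {
            static const char rule[] = ":myfmt:M::MYFT::/usr/local/bin/my-interp:P";
            int fd = open("/proc/sys/fs/binfmt_misc/register", O_WRONLY);
            int ret = -1;

            if (fd < 0)
                    return -1;
            if (write(fd, rule, strlen(rule)) == (ssize_t)strlen(rule))
                    ret = 0;
            close(fd);
            return ret;
    }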
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 235b5042672e..92ed7d5df677 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -118,15 +118,23 @@ int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
if (!(mode & FMODE_EXCL)) {
int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
if (err)
- return err;
+ goto invalidate;
}
truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, truncate_bdev_range);
return 0;
+
+invalidate:
+ /*
+ * Someone else has the handle exclusively open. Try invalidating instead.
+ * The 'end' argument is inclusive so the rounding is safe.
+ */
+ return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
+ lstart >> PAGE_SHIFT,
+ lend >> PAGE_SHIFT);
}
-EXPORT_SYMBOL(truncate_bdev_range);
static void set_init_blocksize(struct block_device *bdev)
{
@@ -222,7 +230,7 @@ static void blkdev_bio_end_io_simple(struct bio *bio)
static ssize_t
__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
- int nr_pages)
+ unsigned int nr_pages)
{
struct file *file = iocb->ki_filp;
struct block_device *bdev = I_BDEV(bdev_file_inode(file));
@@ -356,8 +364,8 @@ static void blkdev_bio_end_io(struct bio *bio)
}
}
-static ssize_t
-__blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
+static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ unsigned int nr_pages)
{
struct file *file = iocb->ki_filp;
struct inode *inode = bdev_file_inode(file);
@@ -424,7 +432,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
dio->size += bio->bi_iter.bi_size;
pos += bio->bi_iter.bi_size;
- nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
+ nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
if (!nr_pages) {
bool polled = false;
@@ -487,15 +495,16 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
- int nr_pages;
+ unsigned int nr_pages;
- nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES + 1);
- if (!nr_pages)
+ if (!iov_iter_count(iter))
return 0;
- if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
+
+ nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
+ if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_VECS)
return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
- return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
+ return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}
static __init int blkdev_init(void)
@@ -688,7 +697,7 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
* i_mutex and doing so causes performance issues with concurrent
* O_SYNC writers to a block device.
*/
- error = blkdev_issue_flush(bdev, GFP_KERNEL);
+ error = blkdev_issue_flush(bdev);
if (error == -EOPNOTSUPP)
error = 0;
@@ -1270,7 +1279,7 @@ rescan:
return ret;
}
/*
- * Only exported for for loop and dasd for historic reasons. Don't use in new
+ * Only exported for loop and dasd for historic reasons. Don't use in new
* code!
*/
EXPORT_SYMBOL_GPL(bdev_disk_changed);
@@ -1808,13 +1817,11 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
return error;
/*
- * Invalidate again; if someone wandered in and dirtied a page,
- * the caller will be given -EBUSY. The third argument is
- * inclusive, so the rounding here is safe.
+ * Invalidate the page cache again; if someone wandered in and dirtied
+ * a page, we just discard it - userspace has no way of knowing whether
+ * the write happened before or after the discard completed...
*/
- return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
- start >> PAGE_SHIFT,
- end >> PAGE_SHIFT);
+ return truncate_bdev_range(bdev, file->f_mode, start, end);
}
const struct file_operations def_blk_fops = {
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 9f1b1a88e317..b634c42115ea 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -1,5 +1,21 @@
# SPDX-License-Identifier: GPL-2.0
+# Subset of W=1 warnings
+subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter
+subdir-ccflags-y += -Wmissing-declarations
+subdir-ccflags-y += -Wmissing-format-attribute
+subdir-ccflags-y += -Wmissing-prototypes
+subdir-ccflags-y += -Wold-style-definition
+subdir-ccflags-y += -Wmissing-include-dirs
+subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
+subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
+subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
+subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
+# The following turn off the warnings enabled by -Wextra
+subdir-ccflags-y += -Wno-missing-field-initializers
+subdir-ccflags-y += -Wno-sign-compare
+subdir-ccflags-y += -Wno-type-limits
+
obj-$(CONFIG_BTRFS_FS) := btrfs.o
btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
@@ -11,7 +27,8 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
- block-rsv.o delalloc-space.o block-group.o discard.o reflink.o
+ block-rsv.o delalloc-space.o block-group.o discard.o reflink.o \
+ subpage.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index a0af1b952c4d..d95eb5c8cb37 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -107,13 +107,15 @@ out:
return ret;
}
-int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int btrfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
int ret;
umode_t old_mode = inode->i_mode;
if (type == ACL_TYPE_ACCESS && acl) {
- ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ ret = posix_acl_update_mode(&init_user_ns, inode,
+ &inode->i_mode, &acl);
if (ret)
return ret;
}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 9cadacf3ec27..f47c1528eb9a 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1501,7 +1501,13 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
}
/**
- * btrfs_check_shared - tell us whether an extent is shared
+ * Check if an extent is shared or not
+ *
+ * @root: root inode belongs to
+ * @inum: inode number of the inode whose extent we are checking
+ * @bytenr: logical bytenr of the extent we are checking
+ * @roots: list of roots this extent is shared among
+ * @tmp: temporary list used for iteration
*
* btrfs_check_shared uses the backref walking code but will short
* circuit as soon as it finds a root or inode that doesn't match the
@@ -2541,13 +2547,6 @@ void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
list_del(&edge->list[UPPER]);
btrfs_backref_free_edge(cache, edge);
- if (RB_EMPTY_NODE(&upper->rb_node)) {
- BUG_ON(!list_empty(&node->upper));
- btrfs_backref_drop_node(cache, node);
- node = upper;
- node->lowest = 1;
- continue;
- }
/*
* Add the node to leaf node list if no other child block
* cached.
@@ -2624,7 +2623,7 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
/* Only reloc backref cache cares about a specific root */
if (cache->is_reloc) {
root = find_reloc_root(cache->fs_info, cur->bytenr);
- if (WARN_ON(!root))
+ if (!root)
return -ENOENT;
cur->root = root;
} else {
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index ff705cc564a9..17abde7f794c 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -296,6 +296,9 @@ static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
struct btrfs_backref_node *node)
{
if (node) {
+ ASSERT(list_empty(&node->list));
+ ASSERT(list_empty(&node->lower));
+ ASSERT(node->eb == NULL);
cache->nr_nodes--;
btrfs_put_root(node->root);
kfree(node);
@@ -340,11 +343,11 @@ static inline void btrfs_backref_drop_node_buffer(
static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
struct btrfs_backref_node *node)
{
- BUG_ON(!list_empty(&node->upper));
+ ASSERT(list_empty(&node->upper));
btrfs_backref_drop_node_buffer(node);
- list_del(&node->list);
- list_del(&node->lower);
+ list_del_init(&node->list);
+ list_del_init(&node->lower);
if (!RB_EMPTY_NODE(&node->rb_node))
rb_erase(&node->rb_node, &tree->rb_root);
btrfs_backref_free_node(tree, node);
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 48ebc106a606..744b99ddc28c 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -15,6 +15,7 @@
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
+#include "zoned.h"
/*
* Return target flags in extended format or 0 if restripe for this chunk_type
@@ -724,6 +725,10 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
struct btrfs_caching_control *caching_ctl = NULL;
int ret = 0;
+ /* Allocator for zoned filesystems does not use the cache at all */
+ if (btrfs_is_zoned(fs_info))
+ return 0;
+
caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
if (!caching_ctl)
return -ENOMEM;
@@ -896,6 +901,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
btrfs_return_cluster_to_free_space(block_group, cluster);
spin_unlock(&cluster->refill_lock);
+ btrfs_clear_treelog_bg(block_group);
+
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
@@ -1008,12 +1015,17 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
WARN_ON(block_group->space_info->total_bytes
< block_group->length);
WARN_ON(block_group->space_info->bytes_readonly
- < block_group->length);
+ < block_group->length - block_group->zone_unusable);
+ WARN_ON(block_group->space_info->bytes_zone_unusable
+ < block_group->zone_unusable);
WARN_ON(block_group->space_info->disk_total
< block_group->length * factor);
}
block_group->space_info->total_bytes -= block_group->length;
- block_group->space_info->bytes_readonly -= block_group->length;
+ block_group->space_info->bytes_readonly -=
+ (block_group->length - block_group->zone_unusable);
+ block_group->space_info->bytes_zone_unusable -=
+ block_group->zone_unusable;
block_group->space_info->disk_total -= block_group->length * factor;
spin_unlock(&block_group->space_info->lock);
@@ -1150,6 +1162,11 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
spin_lock(&sinfo->lock);
spin_lock(&cache->lock);
+ if (cache->swap_extents) {
+ ret = -ETXTBSY;
+ goto out;
+ }
+
if (cache->ro) {
cache->ro++;
ret = 0;
@@ -1157,7 +1174,7 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
}
num_bytes = cache->length - cache->reserved - cache->pinned -
- cache->bytes_super - cache->used;
+ cache->bytes_super - cache->zone_unusable - cache->used;
/*
* Data never overcommits, even in mixed mode, so do just the straight
@@ -1188,6 +1205,12 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
if (!ret) {
sinfo->bytes_readonly += num_bytes;
+ if (btrfs_is_zoned(cache->fs_info)) {
+ /* Migrate zone_unusable bytes to readonly */
+ sinfo->bytes_readonly += cache->zone_unusable;
+ sinfo->bytes_zone_unusable -= cache->zone_unusable;
+ cache->zone_unusable = 0;
+ }
cache->ro++;
list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
}
@@ -1262,6 +1285,13 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
return;
+ /*
+ * Long running balances can keep us blocked here for eternity, so
+ * simply skip deletion if we're unable to get the mutex.
+ */
+ if (!mutex_trylock(&fs_info->delete_unused_bgs_mutex))
+ return;
+
spin_lock(&fs_info->unused_bgs_lock);
while (!list_empty(&fs_info->unused_bgs)) {
int trimming;
@@ -1281,8 +1311,6 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
- mutex_lock(&fs_info->delete_unused_bgs_mutex);
-
/* Don't want to race with allocators so take the groups_sem */
down_write(&space_info->groups_sem);
@@ -1371,9 +1399,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
btrfs_space_info_update_bytes_pinned(fs_info, space_info,
-block_group->pinned);
space_info->bytes_readonly += block_group->pinned;
- percpu_counter_add_batch(&space_info->total_bytes_pinned,
- -block_group->pinned,
- BTRFS_TOTAL_BYTES_PINNED_BATCH);
+ __btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
block_group->pinned = 0;
spin_unlock(&block_group->lock);
@@ -1389,8 +1415,12 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
goto flip_async;
- /* DISCARD can flip during remount */
- trimming = btrfs_test_opt(fs_info, DISCARD_SYNC);
+ /*
+ * DISCARD can flip during remount. On zoned filesystems, we
+ * need to reset sequential-required zones.
+ */
+ trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) ||
+ btrfs_is_zoned(fs_info);
/* Implicit trim during transaction commit. */
if (trimming)
@@ -1428,11 +1458,11 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
end_trans:
btrfs_end_transaction(trans);
next:
- mutex_unlock(&fs_info->delete_unused_bgs_mutex);
btrfs_put_block_group(block_group);
spin_lock(&fs_info->unused_bgs_lock);
}
spin_unlock(&fs_info->unused_bgs_lock);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
return;
flip_async:
@@ -1561,8 +1591,11 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
}
/**
- * btrfs_rmap_block - Map a physical disk address to a list of logical addresses
+ * Map a physical disk address to a list of logical addresses
+ *
+ * @fs_info: the filesystem
* @chunk_start: logical address of block group
+ * @bdev: physical device to resolve, can be NULL to indicate any device
* @physical: physical address to map to logical addresses
* @logical: return array of logical addresses which map to @physical
* @naddrs: length of @logical
@@ -1572,9 +1605,9 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
* Used primarily to exclude those portions of a block group that contain super
* block copies.
*/
-EXPORT_FOR_TESTS
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
- u64 physical, u64 **logical, int *naddrs, int *stripe_len)
+ struct block_device *bdev, u64 physical, u64 **logical,
+ int *naddrs, int *stripe_len)
{
struct extent_map *em;
struct map_lookup *map;
@@ -1592,6 +1625,7 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
map = em->map_lookup;
data_stripe_length = em->orig_block_len;
io_stripe_size = map->stripe_len;
+ chunk_start = em->start;
/* For RAID5/6 adjust to a full IO stripe length */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
@@ -1606,14 +1640,18 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
for (i = 0; i < map->num_stripes; i++) {
bool already_inserted = false;
u64 stripe_nr;
+ u64 offset;
int j;
if (!in_range(physical, map->stripes[i].physical,
data_stripe_length))
continue;
+ if (bdev && map->stripes[i].dev->bdev != bdev)
+ continue;
+
stripe_nr = physical - map->stripes[i].physical;
- stripe_nr = div64_u64(stripe_nr, map->stripe_len);
+ stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset);
if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
stripe_nr = stripe_nr * map->num_stripes + i;
@@ -1627,7 +1665,7 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
* instead of map->stripe_len
*/
- bytenr = chunk_start + stripe_nr * io_stripe_size;
+ bytenr = chunk_start + stripe_nr * io_stripe_size + offset;
/* Ensure we don't add duplicate addresses */
for (j = 0; j < nr; j++) {
@@ -1669,7 +1707,7 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- ret = btrfs_rmap_block(fs_info, cache->start,
+ ret = btrfs_rmap_block(fs_info, cache->start, NULL,
bytenr, &logical, &nr, &stripe_len);
if (ret)
return ret;
@@ -1805,24 +1843,8 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
return ret;
}
-static void read_block_group_item(struct btrfs_block_group *cache,
- struct btrfs_path *path,
- const struct btrfs_key *key)
-{
- struct extent_buffer *leaf = path->nodes[0];
- struct btrfs_block_group_item bgi;
- int slot = path->slots[0];
-
- cache->length = key->offset;
-
- read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
- sizeof(bgi));
- cache->used = btrfs_stack_block_group_used(&bgi);
- cache->flags = btrfs_stack_block_group_flags(&bgi);
-}
-
static int read_one_block_group(struct btrfs_fs_info *info,
- struct btrfs_path *path,
+ struct btrfs_block_group_item *bgi,
const struct btrfs_key *key,
int need_clear)
{
@@ -1837,7 +1859,9 @@ static int read_one_block_group(struct btrfs_fs_info *info,
if (!cache)
return -ENOMEM;
- read_block_group_item(cache, path, key);
+ cache->length = key->offset;
+ cache->used = btrfs_stack_block_group_used(bgi);
+ cache->flags = btrfs_stack_block_group_flags(bgi);
set_free_space_tree_thresholds(cache);
@@ -1864,6 +1888,13 @@ static int read_one_block_group(struct btrfs_fs_info *info,
goto error;
}
+ ret = btrfs_load_block_group_zone_info(cache, false);
+ if (ret) {
+ btrfs_err(info, "zoned: failed to load zone info of bg %llu",
+ cache->start);
+ goto error;
+ }
+
/*
* We need to exclude the super stripes now so that the space info has
* super bytes accounted for, otherwise we'll think we have more space
@@ -1877,12 +1908,20 @@ static int read_one_block_group(struct btrfs_fs_info *info,
}
/*
- * Check for two cases, either we are full, and therefore don't need
- * to bother with the caching work since we won't find any space, or we
- * are empty, and we can just add all the space in and be done with it.
- * This saves us _a_lot_ of time, particularly in the full case.
+ * For a zoned filesystem, space after the allocation offset is the only
+ * free space for a block group. So, we don't need any caching work.
+ * btrfs_calc_zone_unusable() will set the amount of free space and
+ * zone_unusable space.
+ *
+ * For a regular filesystem, check for two cases: either we are full, and
+ * therefore don't need to bother with the caching work since we won't
+ * find any space, or we are empty, and we can just add all the space
+ * in and be done with it. This saves us _a_lot_ of time, particularly
+ * in the full case.
*/
- if (cache->length == cache->used) {
+ if (btrfs_is_zoned(info)) {
+ btrfs_calc_zone_unusable(cache);
+ } else if (cache->length == cache->used) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
btrfs_free_excluded_extents(cache);
@@ -1901,7 +1940,8 @@ static int read_one_block_group(struct btrfs_fs_info *info,
}
trace_btrfs_add_block_group(info, cache, 0);
btrfs_update_space_info(info, cache->flags, cache->length,
- cache->used, cache->bytes_super, &space_info);
+ cache->used, cache->bytes_super,
+ cache->zone_unusable, &space_info);
cache->space_info = space_info;
@@ -1957,7 +1997,7 @@ static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
break;
}
btrfs_update_space_info(fs_info, bg->flags, em->len, em->len,
- 0, &space_info);
+ 0, 0, &space_info);
bg->space_info = space_info;
link_block_group(bg);
@@ -1996,19 +2036,29 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
need_clear = 1;
while (1) {
+ struct btrfs_block_group_item bgi;
+ struct extent_buffer *leaf;
+ int slot;
+
ret = find_first_block_group(info, path, &key);
if (ret > 0)
break;
if (ret != 0)
goto error;
- btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- ret = read_one_block_group(info, path, &key, need_clear);
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+
+ read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bgi));
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ btrfs_release_path(path);
+ ret = read_one_block_group(info, &bgi, &key, need_clear);
if (ret < 0)
goto error;
key.objectid += key.offset;
key.offset = 0;
- btrfs_release_path(path);
}
btrfs_release_path(path);
@@ -2140,6 +2190,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
cache->cached = BTRFS_CACHE_FINISHED;
if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
cache->needs_free_space = 1;
+
+ ret = btrfs_load_block_group_zone_info(cache, true);
+ if (ret) {
+ btrfs_put_block_group(cache);
+ return ret;
+ }
+
ret = exclude_super_stripes(cache);
if (ret) {
/* We may have excluded something, so call this just in case */
@@ -2181,7 +2238,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
*/
trace_btrfs_add_block_group(fs_info, cache, 1);
btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
- cache->bytes_super, &cache->space_info);
+ cache->bytes_super, 0, &cache->space_info);
btrfs_update_global_block_rsv(fs_info);
link_block_group(cache);
@@ -2255,7 +2312,7 @@ again:
}
ret = inc_block_group_ro(cache, 0);
- if (!do_chunk_alloc)
+ if (!do_chunk_alloc || ret == -ETXTBSY)
goto unlock_out;
if (!ret)
goto out;
@@ -2264,6 +2321,8 @@ again:
if (ret < 0)
goto out;
ret = inc_block_group_ro(cache, 0);
+ if (ret == -ETXTBSY)
+ goto unlock_out;
out:
if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
@@ -2289,8 +2348,15 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
spin_lock(&cache->lock);
if (!--cache->ro) {
num_bytes = cache->length - cache->reserved -
- cache->pinned - cache->bytes_super - cache->used;
+ cache->pinned - cache->bytes_super -
+ cache->zone_unusable - cache->used;
sinfo->bytes_readonly -= num_bytes;
+ if (btrfs_is_zoned(cache->fs_info)) {
+ /* Migrate zone_unusable bytes back */
+ cache->zone_unusable = cache->alloc_offset - cache->used;
+ sinfo->bytes_zone_unusable += cache->zone_unusable;
+ sinfo->bytes_readonly -= cache->zone_unusable;
+ }
list_del_init(&cache->ro_list);
}
spin_unlock(&cache->lock);
@@ -2564,8 +2630,10 @@ again:
if (!path) {
path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
}
/*
@@ -2659,16 +2727,14 @@ again:
btrfs_put_block_group(cache);
if (drop_reserve)
btrfs_delayed_refs_rsv_release(fs_info, 1);
-
- if (ret)
- break;
-
/*
* Avoid blocking other tasks for too long. It might even save
* us from writing caches for block groups that are going to be
* removed.
*/
mutex_unlock(&trans->transaction->cache_write_mutex);
+ if (ret)
+ goto out;
mutex_lock(&trans->transaction->cache_write_mutex);
}
mutex_unlock(&trans->transaction->cache_write_mutex);
@@ -2692,7 +2758,12 @@ again:
goto again;
}
spin_unlock(&cur_trans->dirty_bgs_lock);
- } else if (ret < 0) {
+ }
+out:
+ if (ret < 0) {
+ spin_lock(&cur_trans->dirty_bgs_lock);
+ list_splice_init(&dirty, &cur_trans->dirty_bgs);
+ spin_unlock(&cur_trans->dirty_bgs_lock);
btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
}
@@ -2896,10 +2967,8 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- percpu_counter_add_batch(
- &cache->space_info->total_bytes_pinned,
- num_bytes,
- BTRFS_TOTAL_BYTES_PINNED_BATCH);
+ __btrfs_mod_total_bytes_pinned(cache->space_info,
+ num_bytes);
set_extent_dirty(&trans->transaction->pinned_extents,
bytenr, bytenr + num_bytes - 1,
GFP_NOFS | __GFP_NOFAIL);
@@ -3344,6 +3413,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
ASSERT(list_empty(&block_group->io_list));
ASSERT(list_empty(&block_group->bg_list));
ASSERT(refcount_read(&block_group->refs) == 1);
+ ASSERT(block_group->swap_extents == 0);
btrfs_put_block_group(block_group);
spin_lock(&info->block_group_cache_lock);
@@ -3410,3 +3480,26 @@ void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
__btrfs_remove_free_space_cache(block_group->free_space_ctl);
}
}
+
+bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
+{
+ bool ret = true;
+
+ spin_lock(&bg->lock);
+ if (bg->ro)
+ ret = false;
+ else
+ bg->swap_extents++;
+ spin_unlock(&bg->lock);
+
+ return ret;
+}
+
+void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
+{
+ spin_lock(&bg->lock);
+ ASSERT(!bg->ro);
+ ASSERT(bg->swap_extents >= amount);
+ bg->swap_extents -= amount;
+ spin_unlock(&bg->lock);
+}
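A short sketch of the intended pairing for the two new helpers; the wrapper names and the caller that tracks bg_extent_count are hypothetical, only btrfs_inc_block_group_swap_extents() and btrfs_dec_block_group_swap_extents() come from the code above.

    #include <linux/errno.h>

    static int pin_one_swap_extent(struct btrfs_block_group *bg, int *bg_extent_count)
    {
            if (!btrfs_inc_block_group_swap_extents(bg))
                    return -ETXTBSY;        /* group is (or is becoming) read-only */
            (*bg_extent_count)++;
            return 0;
    }

    static void release_swap_extents(struct btrfs_block_group *bg, int bg_extent_count)
    {
            if (bg_extent_count)
                    btrfs_dec_block_group_swap_extents(bg, bg_extent_count);
    }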
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 8f74a96074f7..3ecc3372a5ce 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -95,6 +95,8 @@ struct btrfs_block_group {
unsigned int iref:1;
unsigned int has_caching_ctl:1;
unsigned int removed:1;
+ unsigned int to_copy:1;
+ unsigned int relocating_repair:1;
int disk_cache_state;
@@ -181,8 +183,25 @@ struct btrfs_block_group {
*/
int needs_free_space;
+ /* Flag indicating this block group is placed on a sequential zone */
+ bool seq_zone;
+
+ /*
+ * Number of extents in this block group used for swap files.
+ * All accesses protected by the spinlock 'lock'.
+ */
+ int swap_extents;
+
/* Record locked full stripes for RAID5/6 block group */
struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
+
+ /*
+ * Allocation offset for the block group to implement sequential
+ * allocation. This is used only on a zoned filesystem.
+ */
+ u64 alloc_offset;
+ u64 zone_unusable;
+ u64 meta_write_pointer;
};
static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
@@ -270,6 +289,9 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
struct btrfs_caching_control *caching_ctl);
+int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
+ struct block_device *bdev, u64 physical, u64 **logical,
+ int *naddrs, int *stripe_len);
static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
@@ -296,9 +318,7 @@ static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
- u64 physical, u64 **logical, int *naddrs, int *stripe_len);
-#endif
+bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
+void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);
#endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index d9bf53d9ff90..28e202e89660 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -325,7 +325,8 @@ struct btrfs_dio_private {
struct inode *inode;
u64 logical_offset;
u64 disk_bytenr;
- u64 bytes;
+ /* Used for bio::bi_size */
+ u32 bytes;
/*
* References to this structure. There is one reference per in-flight
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 6ff44e53814c..113cb85c1fd4 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -2674,7 +2674,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
mutex_lock(&btrfsic_mutex);
/* since btrfsic_submit_bio() is also called before
* btrfsic_mount(), this might return NULL */
- dev_state = btrfsic_dev_state_lookup(bio_dev(bio) + bio->bi_partno);
+ dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev);
if (NULL != dev_state &&
(bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
unsigned int i = 0;
@@ -2690,9 +2690,9 @@ static void __btrfsic_submit_bio(struct bio *bio)
bio_is_patched = 0;
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
- pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_disk=%p)\n",
+ pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
bio_op(bio), bio->bi_opf, segs,
- bio->bi_iter.bi_sector, dev_bytenr, bio->bi_disk);
+ bio->bi_iter.bi_sector, dev_bytenr, bio->bi_bdev);
mapped_datav = kmalloc_array(segs,
sizeof(*mapped_datav), GFP_NOFS);
@@ -2721,8 +2721,8 @@ static void __btrfsic_submit_bio(struct bio *bio)
} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
- pr_info("submit_bio(rw=%d,0x%x FLUSH, disk=%p)\n",
- bio_op(bio), bio->bi_opf, bio->bi_disk);
+ pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
+ bio_op(bio), bio->bi_opf, bio->bi_bdev);
if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
if ((dev_state->state->print_mask &
(BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 5ae3fa0386b7..3f4c832abfed 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -141,6 +141,7 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
struct btrfs_fs_info *fs_info = inode->root->fs_info;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
const u32 csum_size = fs_info->csum_size;
+ const u32 sectorsize = fs_info->sectorsize;
struct page *page;
unsigned long i;
char *kaddr;
@@ -154,22 +155,34 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
shash->tfm = fs_info->csum_shash;
for (i = 0; i < cb->nr_pages; i++) {
+ u32 pg_offset;
+ u32 bytes_left = PAGE_SIZE;
page = cb->compressed_pages[i];
- kaddr = kmap_atomic(page);
- crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);
- kunmap_atomic(kaddr);
-
- if (memcmp(&csum, cb_sum, csum_size)) {
- btrfs_print_data_csum_error(inode, disk_start,
- csum, cb_sum, cb->mirror_num);
- if (btrfs_io_bio(bio)->device)
- btrfs_dev_stat_inc_and_print(
- btrfs_io_bio(bio)->device,
- BTRFS_DEV_STAT_CORRUPTION_ERRS);
- return -EIO;
+ /* Determine the remaining bytes inside the page first */
+ if (i == cb->nr_pages - 1)
+ bytes_left = cb->compressed_len - i * PAGE_SIZE;
+
+ /* Hash through the page sector by sector */
+ for (pg_offset = 0; pg_offset < bytes_left;
+ pg_offset += sectorsize) {
+ kaddr = kmap_atomic(page);
+ crypto_shash_digest(shash, kaddr + pg_offset,
+ sectorsize, csum);
+ kunmap_atomic(kaddr);
+
+ if (memcmp(&csum, cb_sum, csum_size) != 0) {
+ btrfs_print_data_csum_error(inode, disk_start,
+ csum, cb_sum, cb->mirror_num);
+ if (btrfs_io_bio(bio)->device)
+ btrfs_dev_stat_inc_and_print(
+ btrfs_io_bio(bio)->device,
+ BTRFS_DEV_STAT_CORRUPTION_ERRS);
+ return -EIO;
+ }
+ cb_sum += csum_size;
+ disk_start += sectorsize;
}
- cb_sum += csum_size;
}
return 0;
}
@@ -542,13 +555,19 @@ static noinline int add_ra_bio_pages(struct inode *inode,
goto next;
}
- end = last_offset + PAGE_SIZE - 1;
/*
* at this point, we have a locked page in the page cache
* for these bytes in the file. But, we have to make
* sure they map to this compressed extent on disk.
*/
- set_page_extent_mapped(page);
+ ret = set_page_extent_mapped(page);
+ if (ret < 0) {
+ unlock_page(page);
+ put_page(page);
+ break;
+ }
+
+ end = last_offset + PAGE_SIZE - 1;
lock_extent(tree, last_offset, end);
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, last_offset,
@@ -634,7 +653,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree,
page_offset(bio_first_page_all(bio)),
- PAGE_SIZE);
+ fs_info->sectorsize);
read_unlock(&em_tree->lock);
if (!em)
return BLK_STS_IOERR;
@@ -692,19 +711,30 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
refcount_set(&cb->pending_bios, 1);
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+ u32 pg_len = PAGE_SIZE;
int submit = 0;
+ /*
+ * To handle subpage case, we need to make sure the bio only
+ * covers the range we need.
+ *
+ * If we're at the last page, truncate the length to only cover
+ * the remaining part.
+ */
+ if (pg_index == nr_pages - 1)
+ pg_len = min_t(u32, PAGE_SIZE,
+ compressed_len - pg_index * PAGE_SIZE);
+
page = cb->compressed_pages[pg_index];
page->mapping = inode->i_mapping;
page->index = em_start >> PAGE_SHIFT;
if (comp_bio->bi_iter.bi_size)
- submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
+ submit = btrfs_bio_fits_in_stripe(page, pg_len,
comp_bio, 0);
page->mapping = NULL;
- if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
- PAGE_SIZE) {
+ if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
unsigned int nr_sectors;
ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
@@ -737,9 +767,9 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
- bio_add_page(comp_bio, page, PAGE_SIZE, 0);
+ bio_add_page(comp_bio, page, pg_len, 0);
}
- cur_disk_byte += PAGE_SIZE;
+ cur_disk_byte += pg_len;
}
ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
@@ -1231,7 +1261,6 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
unsigned long prev_start_byte;
unsigned long working_bytes = total_out - buf_start;
unsigned long bytes;
- char *kaddr;
struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
/*
@@ -1262,9 +1291,8 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
PAGE_SIZE - (buf_offset % PAGE_SIZE));
bytes = min(bytes, working_bytes);
- kaddr = kmap_atomic(bvec.bv_page);
- memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
- kunmap_atomic(kaddr);
+ memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
+ bytes);
flush_dcache_page(bvec.bv_page);
buf_offset += bytes;
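memcpy_to_page() replaces the open-coded kmap_atomic()/memcpy()/kunmap_atomic() sequence removed above. Its behaviour is roughly the following (a sketch of the highmem helper, paraphrased rather than copied):

/* Roughly what memcpy_to_page() does; see include/linux/highmem.h. */
static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);	/* short-lived local mapping */

	memcpy(to + offset, from, len);
	kunmap_local(to);
}

The helper does not flush the data cache, which is why the caller keeps its explicit flush_dcache_page() call.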
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index cc89b63d65a4..34b929bd5c1a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -221,9 +221,12 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
ret = btrfs_inc_ref(trans, root, cow, 1);
else
ret = btrfs_inc_ref(trans, root, cow, 0);
-
- if (ret)
+ if (ret) {
+ btrfs_tree_unlock(cow);
+ free_extent_buffer(cow);
+ btrfs_abort_transaction(trans, ret);
return ret;
+ }
btrfs_mark_buffer_dirty(cow);
*cow_ret = cow;
@@ -1362,7 +1365,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
"failed to read tree block %llu from get_old_root",
logical);
} else {
+ btrfs_tree_read_lock(old);
eb = btrfs_clone_extent_buffer(old);
+ btrfs_tree_read_unlock(old);
free_extent_buffer(old);
}
} else if (old_root) {
@@ -1494,6 +1499,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
return ret;
}
+ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
/*
* helper function for defrag to decide if two blocks pointed to by a
@@ -2821,6 +2827,7 @@ done:
btrfs_release_path(p);
return ret;
}
+ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
/*
* Like btrfs_search_slot, this looks for a key in the given tree. It uses the
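The two ALLOW_ERROR_INJECTION() lines added above whitelist btrfs_cow_block() and btrfs_search_slot() for the kernel's error-injection framework, so test harnesses can force them to return an errno without a real failure. Conceptually the macro only records the function in a dedicated ELF section; its shape is approximately the following (reconstructed from memory, not a verbatim copy of asm-generic/error-injection.h):

struct error_injection_entry {
	unsigned long addr;	/* address of the whitelisted function */
	int etype;		/* what it may be forced to return, e.g. an errno */
};

#define ALLOW_ERROR_INJECTION(fname, _etype)				\
	static struct error_injection_entry __used			\
	__section("_error_injection_whitelist") _eil_addr_##fname = {	\
		.addr = (unsigned long)&fname,				\
		.etype = EI_ETYPE_##_etype,				\
	}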
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4debdbdde2ab..9ae776ab3967 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -298,7 +298,8 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
- BTRFS_FEATURE_INCOMPAT_RAID1C34)
+ BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
+ BTRFS_FEATURE_INCOMPAT_ZONED)
#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
@@ -523,6 +524,11 @@ struct btrfs_swapfile_pin {
* points to a struct btrfs_device.
*/
bool is_block_group;
+ /*
+ * Only used when 'is_block_group' is true and it is the number of
+ * extents used by a swapfile for this block group ('ptr' field).
+ */
+ int bg_extent_count;
};
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
@@ -797,7 +803,7 @@ struct btrfs_fs_info {
/* used to keep from writing metadata until there is a nice batch */
struct percpu_counter dirty_metadata_bytes;
struct percpu_counter delalloc_bytes;
- struct percpu_counter dio_bytes;
+ struct percpu_counter ordered_bytes;
s32 dirty_metadata_batch;
s32 delalloc_batch;
@@ -933,6 +939,7 @@ struct btrfs_fs_info {
/* Used to reclaim the metadata space in the background. */
struct work_struct async_reclaim_work;
struct work_struct async_data_reclaim_work;
+ struct work_struct preempt_reclaim_work;
spinlock_t unused_bgs_lock;
struct list_head unused_bgs;
@@ -974,6 +981,9 @@ struct btrfs_fs_info {
/* Max size to emit ZONE_APPEND write command */
u64 max_zone_append_size;
+ struct mutex zoned_meta_io_lock;
+ spinlock_t treelog_bg_lock;
+ u64 treelog_bg;
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
spinlock_t ref_verify_lock;
@@ -1104,7 +1114,7 @@ struct btrfs_root {
u32 type;
- u64 highest_objectid;
+ u64 free_objectid;
struct btrfs_key defrag_progress;
struct btrfs_key defrag_max;
@@ -2740,6 +2750,7 @@ enum btrfs_flush_state {
ALLOC_CHUNK_FORCE = 8,
RUN_DELAYED_IPUTS = 9,
COMMIT_TRANS = 10,
+ FORCE_COMMIT_TRANS = 11,
};
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
@@ -3100,15 +3111,14 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u32 min_type);
int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
bool in_reclaim_context);
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
unsigned int extra_bits,
struct extent_state **cached_state);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root,
- struct btrfs_root *parent_root,
- u64 new_dirid);
+ struct btrfs_root *parent_root);
void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
unsigned *bits);
void btrfs_clear_delalloc_extent(struct inode *inode,
@@ -3119,6 +3129,8 @@ void btrfs_split_delalloc_extent(struct inode *inode,
struct extent_state *orig, u64 split);
int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
unsigned long bio_flags);
+bool btrfs_bio_fits_in_ordered_extent(struct page *page, struct bio *bio,
+ unsigned int size);
void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end);
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
@@ -3628,7 +3640,8 @@ static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag)
/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
-int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+int btrfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
int btrfs_init_acl(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir);
#else
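Most hunks in this series branch on btrfs_is_zoned(), which is not part of this excerpt. It presumably lives in zoned.h and simply tests the zone size cached in fs_info; a sketch under that assumption:

/* Sketch of the predicate used throughout the series (assumed to be in zoned.h). */
static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	/* fs_info->zoned caches the zone size; non-zero means a zoned filesystem. */
	return fs_info->zoned != 0;
}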
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index bacee09b7bfd..56642ca7af10 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -191,12 +191,14 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
}
/**
- * btrfs_inode_rsv_release - release any excessive reservation.
- * @inode - the inode we need to release from.
- * @qgroup_free - free or convert qgroup meta.
- * Unlike normal operation, qgroup meta reservation needs to know if we are
- * freeing qgroup reservation or just converting it into per-trans. Normally
- * @qgroup_free is true for error handling, and false for normal release.
+ * Release any excessive reservation
+ *
+ * @inode: the inode we need to release from
+ * @qgroup_free: free or convert qgroup meta. Unlike normal operation, qgroup
+ * meta reservation needs to know if we are freeing qgroup
+ * reservation or just converting it into per-trans. Normally
+ * @qgroup_free is true for error handling, and false for normal
+ * release.
*
* This is the same as btrfs_block_rsv_release, except that it handles the
* tracepoint for the reservation.
@@ -361,7 +363,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
}
/**
- * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
+ * Release a metadata reservation for an inode
+ *
* @inode: the inode to release the reservation for.
* @num_bytes: the number of bytes we are releasing.
* @qgroup_free: free qgroup reservation or convert it to per-trans reservation
@@ -455,11 +458,13 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
}
/**
- * btrfs_delalloc_release_space - release data and metadata space for delalloc
- * @inode: inode we're releasing space for
- * @start: start position of the space already reserved
- * @len: the len of the space already reserved
- * @release_bytes: the len of the space we consumed or didn't use
+ * Release data and metadata space for delalloc
+ *
+ * @inode: inode we're releasing space for
+ * @reserved: list of changed/reserved ranges
+ * @start: start position of the space already reserved
+ * @len: length of the space already reserved
+ * @qgroup_free: should qgroup reserved-space also be freed
*
* This function will release the metadata space that was not used and will
* decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 70c0340d839c..bf25401c9768 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -627,7 +627,8 @@ static int btrfs_delayed_inode_reserve_metadata(
*/
if (!src_rsv || (!trans->bytes_reserved &&
src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
- ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
+ ret = btrfs_qgroup_reserve_meta(root, num_bytes,
+ BTRFS_QGROUP_RSV_META_PREALLOC, true);
if (ret < 0)
return ret;
ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
@@ -649,7 +650,7 @@ static int btrfs_delayed_inode_reserve_metadata(
btrfs_ino(inode),
num_bytes, 1);
} else {
- btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
+ btrfs_qgroup_free_meta_prealloc(root, num_bytes);
}
return ret;
}
@@ -1154,7 +1155,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
delayed_root = fs_info->delayed_root;
curr_node = btrfs_first_delayed_node(delayed_root);
- while (curr_node && (!count || (count && nr--))) {
+ while (curr_node && (!count || nr--)) {
ret = __btrfs_commit_inode_delayed_items(trans, path,
curr_node);
if (ret) {
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 353cc2994d10..63be7d01a9a3 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -69,9 +69,10 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
}
/**
- * btrfs_delayed_refs_rsv_release - release a ref head's reservation.
- * @fs_info - the fs_info for our fs.
- * @nr - the number of items to drop.
+ * Release a ref head's reservation
+ *
+ * @fs_info: the filesystem
+ * @nr: number of items to drop
*
* This drops the delayed ref head's count from the delayed refs rsv and frees
* any excess reservation we had.
@@ -114,10 +115,11 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
}
/**
- * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv.
- * @fs_info - the fs info for our fs.
- * @src - the source block rsv to transfer from.
- * @num_bytes - the number of bytes to transfer.
+ * Transfer bytes to our delayed refs rsv
+ *
+ * @fs_info: the filesystem
+ * @src: source block rsv to transfer from
+ * @num_bytes: number of bytes to transfer
*
* This transfers up to the num_bytes amount from the src rsv to the
* delayed_refs_rsv. Any extra bytes are returned to the space info.
@@ -162,9 +164,10 @@ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
}
/**
- * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage.
- * @fs_info - the fs_info for our fs.
- * @flush - control how we can flush for this reservation.
+ * Refill based on our delayed refs usage
+ *
+ * @fs_info: the filesystem
+ * @flush: control how we can flush for this reservation.
*
* This will refill the delayed block_rsv up to 1 items size worth of space and
* will return -ENOSPC if we can't make the reservation.
@@ -648,12 +651,12 @@ inserted:
*/
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *existing,
- struct btrfs_delayed_ref_head *update,
- int *old_ref_mod_ret)
+ struct btrfs_delayed_ref_head *update)
{
struct btrfs_delayed_ref_root *delayed_refs =
&trans->transaction->delayed_refs;
struct btrfs_fs_info *fs_info = trans->fs_info;
+ u64 flags = btrfs_ref_head_to_space_flags(existing);
int old_ref_mod;
BUG_ON(existing->is_data != update->is_data);
@@ -701,8 +704,6 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
* currently, for refs we just added we know we're a-ok.
*/
old_ref_mod = existing->total_ref_mod;
- if (old_ref_mod_ret)
- *old_ref_mod_ret = old_ref_mod;
existing->ref_mod += update->ref_mod;
existing->total_ref_mod += update->ref_mod;
@@ -724,6 +725,27 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
trans->delayed_ref_updates += csum_leaves;
}
}
+
+ /*
+ * This handles the following conditions:
+ *
+ * 1. We had a ref mod of 0 or more and went negative, indicating that
+ * we may be freeing space, so add our space to the
+ * total_bytes_pinned counter.
+ * 2. We were negative and went to 0 or positive, so we can no longer
+ * say the space would be pinned; subtract our bytes from the
+ * total_bytes_pinned counter.
+ * 3. We are now at 0 and have ->must_insert_reserved set, which means
+ * this was a new allocation that we then dropped, so we must add
+ * our space to the total_bytes_pinned counter.
+ */
+ if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
+ btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
+ else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
+ btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
+ else if (existing->total_ref_mod == 0 && existing->must_insert_reserved)
+ btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
+
spin_unlock(&existing->lock);
}
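A concrete example may help with the three conditions above. Take a head ref for a 16 KiB data extent:

/*
 * Worked example, num_bytes = 16384:
 *   (1) total_ref_mod goes +1 -> -1: the extent may be freed soon,
 *       so total_bytes_pinned += 16384.
 *   (2) total_ref_mod goes -1 -> 0: the free no longer happens,
 *       so total_bytes_pinned -= 16384.
 *   (3) total_ref_mod is 0 with must_insert_reserved set: a freshly
 *       allocated extent was dropped again, so total_bytes_pinned += 16384.
 */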
@@ -798,8 +820,7 @@ static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
- int action, int *qrecord_inserted_ret,
- int *old_ref_mod, int *new_ref_mod)
+ int action, int *qrecord_inserted_ret)
{
struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -821,8 +842,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
if (existing) {
- update_existing_head_ref(trans, existing, head_ref,
- old_ref_mod);
+ update_existing_head_ref(trans, existing, head_ref);
/*
* we've updated the existing ref, free the newly
* allocated ref
@@ -830,14 +850,17 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
head_ref = existing;
} else {
- if (old_ref_mod)
- *old_ref_mod = 0;
+ u64 flags = btrfs_ref_head_to_space_flags(head_ref);
+
if (head_ref->is_data && head_ref->ref_mod < 0) {
delayed_refs->pending_csums += head_ref->num_bytes;
trans->delayed_ref_updates +=
btrfs_csum_bytes_to_leaves(trans->fs_info,
head_ref->num_bytes);
}
+ if (head_ref->ref_mod < 0)
+ btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
+ head_ref->num_bytes);
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
atomic_inc(&delayed_refs->num_entries);
@@ -845,8 +868,6 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
}
if (qrecord_inserted_ret)
*qrecord_inserted_ret = qrecord_inserted;
- if (new_ref_mod)
- *new_ref_mod = head_ref->total_ref_mod;
return head_ref;
}
@@ -909,8 +930,7 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
*/
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
- struct btrfs_delayed_extent_op *extent_op,
- int *old_ref_mod, int *new_ref_mod)
+ struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_tree_ref *ref;
@@ -977,8 +997,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
* the spin lock
*/
head_ref = add_delayed_ref_head(trans, head_ref, record,
- action, &qrecord_inserted,
- old_ref_mod, new_ref_mod);
+ action, &qrecord_inserted);
ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
@@ -1006,8 +1025,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
*/
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
- u64 reserved, int *old_ref_mod,
- int *new_ref_mod)
+ u64 reserved)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_data_ref *ref;
@@ -1073,8 +1091,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
* the spin lock
*/
head_ref = add_delayed_ref_head(trans, head_ref, record,
- action, &qrecord_inserted,
- old_ref_mod, new_ref_mod);
+ action, &qrecord_inserted);
ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
@@ -1117,7 +1134,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
spin_lock(&delayed_refs->lock);
add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
- NULL, NULL, NULL);
+ NULL);
spin_unlock(&delayed_refs->lock);
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 1c977e6d45dc..e22fba272e4f 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -135,6 +135,11 @@ struct btrfs_delayed_data_ref {
u64 offset;
};
+enum btrfs_delayed_ref_flags {
+ /* Indicate that we are flushing delayed refs for the commit */
+ BTRFS_DELAYED_REFS_FLUSHING,
+};
+
struct btrfs_delayed_ref_root {
/* head ref rbtree */
struct rb_root_cached href_root;
@@ -158,12 +163,7 @@ struct btrfs_delayed_ref_root {
u64 pending_csums;
- /*
- * set when the tree is flushing before a transaction commit,
- * used by the throttling code to decide if new updates need
- * to be run right away
- */
- int flushing;
+ unsigned long flags;
u64 run_delayed_start;
@@ -326,6 +326,16 @@ static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
}
}
+static inline u64 btrfs_ref_head_to_space_flags(
+ struct btrfs_delayed_ref_head *head_ref)
+{
+ if (head_ref->is_data)
+ return BTRFS_BLOCK_GROUP_DATA;
+ else if (head_ref->is_system)
+ return BTRFS_BLOCK_GROUP_SYSTEM;
+ return BTRFS_BLOCK_GROUP_METADATA;
+}
+
static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
{
if (refcount_dec_and_test(&head->refs))
@@ -334,12 +344,10 @@ static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *hea
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
- struct btrfs_delayed_extent_op *extent_op,
- int *old_ref_mod, int *new_ref_mod);
+ struct btrfs_delayed_extent_op *extent_op);
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
- u64 reserved, int *old_ref_mod,
- int *new_ref_mod);
+ u64 reserved);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
struct btrfs_delayed_extent_op *extent_op);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 324f646d6e5e..3a9c1e046ebe 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -22,6 +22,7 @@
#include "dev-replace.h"
#include "sysfs.h"
#include "zoned.h"
+#include "block-group.h"
/*
* Device replace overview
@@ -459,6 +460,185 @@ static char* btrfs_dev_name(struct btrfs_device *device)
return rcu_str_deref(device->name);
}
+static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *src_dev)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_root *root = fs_info->dev_root;
+ struct btrfs_dev_extent *dev_extent = NULL;
+ struct btrfs_block_group *cache;
+ struct btrfs_trans_handle *trans;
+ int ret = 0;
+ u64 chunk_offset;
+
+ /* Do not use "to_copy" on a non-zoned filesystem for now */
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ mutex_lock(&fs_info->chunk_mutex);
+
+ /* Ensure we don't have a pending new block group */
+ spin_lock(&fs_info->trans_lock);
+ while (fs_info->running_transaction &&
+ !list_empty(&fs_info->running_transaction->dev_update_list)) {
+ spin_unlock(&fs_info->trans_lock);
+ mutex_unlock(&fs_info->chunk_mutex);
+ trans = btrfs_attach_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ mutex_lock(&fs_info->chunk_mutex);
+ if (ret == -ENOENT) {
+ spin_lock(&fs_info->trans_lock);
+ continue;
+ } else {
+ goto unlock;
+ }
+ }
+
+ ret = btrfs_commit_transaction(trans);
+ mutex_lock(&fs_info->chunk_mutex);
+ if (ret)
+ goto unlock;
+
+ spin_lock(&fs_info->trans_lock);
+ }
+ spin_unlock(&fs_info->trans_lock);
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ path->reada = READA_FORWARD;
+ path->search_commit_root = 1;
+ path->skip_locking = 1;
+
+ key.objectid = src_dev->devid;
+ key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto free_path;
+ if (ret > 0) {
+ if (path->slots[0] >=
+ btrfs_header_nritems(path->nodes[0])) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto free_path;
+ if (ret > 0) {
+ ret = 0;
+ goto free_path;
+ }
+ } else {
+ ret = 0;
+ }
+ }
+
+ while (1) {
+ struct extent_buffer *leaf = path->nodes[0];
+ int slot = path->slots[0];
+
+ btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+ if (found_key.objectid != src_dev->devid)
+ break;
+
+ if (found_key.type != BTRFS_DEV_EXTENT_KEY)
+ break;
+
+ if (found_key.offset < key.offset)
+ break;
+
+ dev_extent = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
+
+ chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dev_extent);
+
+ cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+ if (!cache)
+ goto skip;
+
+ spin_lock(&cache->lock);
+ cache->to_copy = 1;
+ spin_unlock(&cache->lock);
+
+ btrfs_put_block_group(cache);
+
+skip:
+ ret = btrfs_next_item(root, path);
+ if (ret != 0) {
+ if (ret > 0)
+ ret = 0;
+ break;
+ }
+ }
+
+free_path:
+ btrfs_free_path(path);
+unlock:
+ mutex_unlock(&fs_info->chunk_mutex);
+
+ return ret;
+}
+
+bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
+ struct btrfs_block_group *cache,
+ u64 physical)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct extent_map *em;
+ struct map_lookup *map;
+ u64 chunk_offset = cache->start;
+ int num_extents, cur_extent;
+ int i;
+
+ /* Do not use "to_copy" on a non-zoned filesystem for now */
+ if (!btrfs_is_zoned(fs_info))
+ return true;
+
+ spin_lock(&cache->lock);
+ if (cache->removed) {
+ spin_unlock(&cache->lock);
+ return true;
+ }
+ spin_unlock(&cache->lock);
+
+ em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
+ ASSERT(!IS_ERR(em));
+ map = em->map_lookup;
+
+ num_extents = cur_extent = 0;
+ for (i = 0; i < map->num_stripes; i++) {
+ /* Only count device extents that live on the source device */
+ if (srcdev != map->stripes[i].dev)
+ continue;
+
+ num_extents++;
+ if (physical == map->stripes[i].physical)
+ cur_extent = i;
+ }
+
+ free_extent_map(em);
+
+ if (num_extents > 1 && cur_extent < num_extents - 1) {
+ /*
+ * There are more stripes of this chunk on the source device. Keep
+ * the block group read-only until all of them are copied.
+ */
+ return false;
+ }
+
+ /* Last stripe on this device */
+ spin_lock(&cache->lock);
+ cache->to_copy = 0;
+ spin_unlock(&cache->lock);
+
+ return true;
+}
+
static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
int read_src)
@@ -500,6 +680,10 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
if (ret)
return ret;
+ ret = mark_block_group_to_copy(fs_info, src_device);
+ if (ret)
+ return ret;
+
down_write(&dev_replace->rwsem);
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
@@ -715,7 +899,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* flush all outstanding I/O and inode extent mappings before the
* copy operation is declared as being finished
*/
- ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
+ ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
if (ret) {
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return ret;
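Taken together, the zoned replace flow is: mark_block_group_to_copy() sets cache->to_copy on every block group that has an extent on the source device before the copy starts, scrub then copies the device extents, and btrfs_finish_block_group_to_copy() clears the flag once the last stripe on the source device is done. A hypothetical consumer-side check (the real one is expected to live in scrub.c, which is not part of this excerpt):

/* Skip block groups that have nothing left to copy for this replace. */
spin_lock(&cache->lock);
if (!cache->to_copy) {
	spin_unlock(&cache->lock);
	btrfs_put_block_group(cache);
	goto skip;
}
spin_unlock(&cache->lock);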
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
index 60b70dacc299..3911049a5f23 100644
--- a/fs/btrfs/dev-replace.h
+++ b/fs/btrfs/dev-replace.h
@@ -18,5 +18,8 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info);
void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info);
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info);
int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
+bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
+ struct btrfs_block_group *cache,
+ u64 physical);
#endif
diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index 2b8383d41144..306ff20af70f 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -185,10 +185,12 @@ static struct btrfs_block_group *find_next_block_group(
}
/**
- * peek_discard_list - wrap find_next_block_group()
- * @discard_ctl: discard control
+ * Wrap find_next_block_group()
+ *
+ * @discard_ctl: discard control
* @discard_state: the discard_state of the block_group after state management
* @discard_index: the discard_index of the block_group after state management
+ * @now: time when discard was invoked, in ns
*
* This wraps find_next_block_group() and sets the block_group to be in use.
* discard_state's control flow is managed here. Variables related to
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 07a2b4f69b10..41b718cfea40 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -459,6 +459,12 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct bio_vec *bvec
return 0;
found_start = btrfs_header_bytenr(eb);
+
+ if (test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags)) {
+ WARN_ON(found_start != 0);
+ return 0;
+ }
+
/*
* Please do not consolidate these warnings into a single if.
* It is useful to know what went wrong.
@@ -591,6 +597,59 @@ out:
return ret;
}
+static int validate_subpage_buffer(struct page *page, u64 start, u64 end,
+ int mirror)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
+ struct extent_buffer *eb;
+ bool reads_done;
+ int ret = 0;
+
+ /*
+ * We don't allow bio merging for subpage metadata reads, so we should
+ * get exactly one eb for each endio hook.
+ */
+ ASSERT(end == start + fs_info->nodesize - 1);
+ ASSERT(PagePrivate(page));
+
+ eb = find_extent_buffer(fs_info, start);
+ /*
+ * When we are reading one tree block, eb must have been inserted into
+ * the radix tree. If not, something is wrong.
+ */
+ ASSERT(eb);
+
+ reads_done = atomic_dec_and_test(&eb->io_pages);
+ /* Subpage read must finish in page read */
+ ASSERT(reads_done);
+
+ eb->read_mirror = mirror;
+ if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
+ ret = -EIO;
+ goto err;
+ }
+ ret = validate_extent_buffer(eb);
+ if (ret < 0)
+ goto err;
+
+ if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
+ btree_readahead_hook(eb, ret);
+
+ set_extent_buffer_uptodate(eb);
+
+ free_extent_buffer(eb);
+ return ret;
+err:
+ /*
+ * end_bio_extent_readpage decrements io_pages in case of error, so
+ * make sure it has something to decrement.
+ */
+ atomic_inc(&eb->io_pages);
+ clear_extent_buffer_uptodate(eb);
+ free_extent_buffer(eb);
+ return ret;
+}
+
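On a 64 KiB page several 16 KiB tree blocks share one page, which is why the eb is looked up by its start offset instead of being taken from page->private. A small worked example of the arithmetic assumed above:

/*
 * Example with a 64 KiB page and the default 16 KiB nodesize:
 *   page_offset(page) = 128 KiB
 *   start = 144 KiB, end = start + nodesize - 1 = 160 KiB - 1
 * The endio hook therefore covers exactly one of the four tree blocks
 * sharing the page, and find_extent_buffer(fs_info, start) returns it.
 */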
int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio,
struct page *page, u64 start, u64 end,
int mirror)
@@ -600,6 +659,10 @@ int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio,
int reads_done;
ASSERT(page->private);
+
+ if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
+ return validate_subpage_buffer(page, start, end, mirror);
+
eb = (struct extent_buffer *)page->private;
/*
@@ -646,7 +709,7 @@ static void end_workqueue_bio(struct bio *bio)
fs_info = end_io_wq->info;
end_io_wq->status = bio->bi_status;
- if (bio_op(bio) == REQ_OP_WRITE) {
+ if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
wq = fs_info->endio_meta_write_workers;
else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
@@ -808,6 +871,8 @@ static blk_status_t btree_submit_bio_start(struct inode *inode, struct bio *bio,
static int check_async_write(struct btrfs_fs_info *fs_info,
struct btrfs_inode *bi)
{
+ if (btrfs_is_zoned(fs_info))
+ return 0;
if (atomic_read(&bi->sync_writers))
return 0;
if (test_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags))
@@ -822,7 +887,7 @@ blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
int async = check_async_write(fs_info, BTRFS_I(inode));
blk_status_t ret;
- if (bio_op(bio) != REQ_OP_WRITE) {
+ if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
/*
* called for a read, do the setup so that checksum validation
* can happen in the async kernel threads
@@ -1016,7 +1081,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
root->orphan_cleanup_state = 0;
root->last_trans = 0;
- root->highest_objectid = 0;
+ root->free_objectid = 0;
root->nr_delalloc_inodes = 0;
root->nr_ordered_extents = 0;
root->inode_tree = RB_ROOT;
@@ -1189,7 +1254,6 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
- struct extent_buffer *leaf;
root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
if (!root)
@@ -1199,6 +1263,14 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
root->root_key.type = BTRFS_ROOT_ITEM_KEY;
root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
+ return root;
+}
+
+int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct extent_buffer *leaf;
+
/*
* DON'T set SHAREABLE bit for log trees.
*
@@ -1211,16 +1283,15 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
NULL, 0, 0, 0, BTRFS_NESTING_NORMAL);
- if (IS_ERR(leaf)) {
- btrfs_put_root(root);
- return ERR_CAST(leaf);
- }
+ if (IS_ERR(leaf))
+ return PTR_ERR(leaf);
root->node = leaf;
btrfs_mark_buffer_dirty(root->node);
btrfs_tree_unlock(root->node);
- return root;
+
+ return 0;
}
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
@@ -1231,6 +1302,16 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
log_root = alloc_log_tree(trans, fs_info);
if (IS_ERR(log_root))
return PTR_ERR(log_root);
+
+ if (!btrfs_is_zoned(fs_info)) {
+ int ret = btrfs_alloc_log_tree_node(trans, log_root);
+
+ if (ret) {
+ btrfs_put_root(log_root);
+ return ret;
+ }
+ }
+
WARN_ON(fs_info->log_root_tree);
fs_info->log_root_tree = log_root;
return 0;
@@ -1242,11 +1323,18 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log_root;
struct btrfs_inode_item *inode_item;
+ int ret;
log_root = alloc_log_tree(trans, fs_info);
if (IS_ERR(log_root))
return PTR_ERR(log_root);
+ ret = btrfs_alloc_log_tree_node(trans, log_root);
+ if (ret) {
+ btrfs_put_root(log_root);
+ return ret;
+ }
+
log_root->last_trans = trans->transid;
log_root->root_key.offset = root->root_key.objectid;
@@ -1367,14 +1455,13 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
}
mutex_lock(&root->objectid_mutex);
- ret = btrfs_find_highest_objectid(root,
- &root->highest_objectid);
+ ret = btrfs_init_root_free_objectid(root);
if (ret) {
mutex_unlock(&root->objectid_mutex);
goto fail;
}
- ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
+ ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
mutex_unlock(&root->objectid_mutex);
@@ -1470,7 +1557,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
percpu_counter_destroy(&fs_info->delalloc_bytes);
- percpu_counter_destroy(&fs_info->dio_bytes);
+ percpu_counter_destroy(&fs_info->ordered_bytes);
percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
btrfs_free_csum_hash(fs_info);
btrfs_free_stripe_hash_table(fs_info);
@@ -2427,13 +2514,21 @@ static int validate_super(struct btrfs_fs_info *fs_info,
btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
ret = -EINVAL;
}
- /* Only PAGE SIZE is supported yet */
- if (sectorsize != PAGE_SIZE) {
+
+ /*
+ * For 4K page size, we only support 4K sector size.
+ * For 64K page size, we support read-write for 64K sector size, and
+ * read-only for 4K sector size.
+ */
+ if ((PAGE_SIZE == SZ_4K && sectorsize != PAGE_SIZE) ||
+ (PAGE_SIZE == SZ_64K && (sectorsize != SZ_4K &&
+ sectorsize != SZ_64K))) {
btrfs_err(fs_info,
- "sectorsize %llu not supported yet, only support %lu",
+ "sectorsize %llu not yet supported for page size %lu",
sectorsize, PAGE_SIZE);
ret = -EINVAL;
}
+
if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
@@ -2646,14 +2741,13 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
* No need to hold btrfs_root::objectid_mutex since the fs
* hasn't been fully initialised and we are the only user
*/
- ret = btrfs_find_highest_objectid(tree_root,
- &tree_root->highest_objectid);
+ ret = btrfs_init_root_free_objectid(tree_root);
if (ret < 0) {
handle_error = true;
continue;
}
- ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
+ ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
ret = btrfs_read_roots(fs_info);
if (ret < 0) {
@@ -2695,11 +2789,13 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
spin_lock_init(&fs_info->super_lock);
spin_lock_init(&fs_info->buffer_lock);
spin_lock_init(&fs_info->unused_bgs_lock);
+ spin_lock_init(&fs_info->treelog_bg_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
mutex_init(&fs_info->unused_bg_unpin_mutex);
mutex_init(&fs_info->delete_unused_bgs_mutex);
mutex_init(&fs_info->reloc_mutex);
mutex_init(&fs_info->delalloc_root_mutex);
+ mutex_init(&fs_info->zoned_meta_io_lock);
seqlock_init(&fs_info->profiles_lock);
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
@@ -2804,7 +2900,7 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
- ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL);
+ ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
if (ret)
return ret;
@@ -3140,8 +3236,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
btrfs_info(fs_info, "has skinny extents");
- fs_info->zoned = (features & BTRFS_FEATURE_INCOMPAT_ZONED);
-
/*
* flag our filesystem as having big metadata blocks if
* they are bigger than the page size
@@ -3194,6 +3288,17 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
goto fail_alloc;
}
+ /* For now, 4K sector size support is read-only */
+ if (PAGE_SIZE == SZ_64K && sectorsize == SZ_4K) {
+ if (!sb_rdonly(sb) || btrfs_super_log_root(disk_super)) {
+ btrfs_err(fs_info,
+ "subpage sectorsize %u only supported read-only for page size %lu",
+ sectorsize, PAGE_SIZE);
+ err = -EINVAL;
+ goto fail_alloc;
+ }
+ }
+
ret = btrfs_init_workqueues(fs_info, fs_devices);
if (ret) {
err = ret;
@@ -3261,6 +3366,19 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
goto fail_tree_roots;
/*
+ * Get zone type information of zoned block devices. This will also
+ * handle emulation of a zoned filesystem if a regular device has the
+ * zoned incompat feature flag set.
+ */
+ ret = btrfs_get_dev_zone_info_all_devices(fs_info);
+ if (ret) {
+ btrfs_err(fs_info,
+ "zoned: failed to read device zone info: %d",
+ ret);
+ goto fail_block_groups;
+ }
+
+ /*
* If we have a uuid root and we're not being told to rescan we need to
* check the generation here so we can set the
* BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the
@@ -4114,6 +4232,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
cancel_work_sync(&fs_info->async_reclaim_work);
cancel_work_sync(&fs_info->async_data_reclaim_work);
+ cancel_work_sync(&fs_info->preempt_reclaim_work);
/* Cancel or finish ongoing discard work */
btrfs_discard_cleanup(fs_info);
@@ -4166,9 +4285,9 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
percpu_counter_sum(&fs_info->delalloc_bytes));
}
- if (percpu_counter_sum(&fs_info->dio_bytes))
+ if (percpu_counter_sum(&fs_info->ordered_bytes))
btrfs_info(fs_info, "at unmount dio bytes count %lld",
- percpu_counter_sum(&fs_info->dio_bytes));
+ percpu_counter_sum(&fs_info->ordered_bytes));
btrfs_sysfs_remove_mounted(fs_info);
btrfs_sysfs_remove_fsid(fs_info->fs_devices);
@@ -4689,6 +4808,8 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
EXTENT_DIRTY);
btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
+ btrfs_free_redirty_list(cur_trans);
+
cur_trans->state =TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait);
}
@@ -4746,7 +4867,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
return 0;
}
-int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
+int btrfs_init_root_free_objectid(struct btrfs_root *root)
{
struct btrfs_path *path;
int ret;
@@ -4770,10 +4891,10 @@ int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
slot = path->slots[0] - 1;
l = path->nodes[0];
btrfs_item_key_to_cpu(l, &found_key, slot);
- *objectid = max_t(u64, found_key.objectid,
- BTRFS_FIRST_FREE_OBJECTID - 1);
+ root->free_objectid = max_t(u64, found_key.objectid + 1,
+ BTRFS_FIRST_FREE_OBJECTID);
} else {
- *objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
+ root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
}
ret = 0;
error:
@@ -4781,12 +4902,12 @@ error:
return ret;
}
-int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
+int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
{
int ret;
mutex_lock(&root->objectid_mutex);
- if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
+ if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
btrfs_warn(root->fs_info,
"the objectid of root %llu reaches its highest value",
root->root_key.objectid);
@@ -4794,7 +4915,7 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
goto out;
}
- *objectid = ++root->highest_objectid;
+ *objectid = root->free_objectid++;
ret = 0;
out:
mutex_unlock(&root->objectid_mutex);
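The rename from highest_objectid to free_objectid also flips what the cached value means: it is now the next objectid to hand out, not the last one used. A short worked example:

/*
 * Example: the highest key found on disk has objectid 260.
 *   old scheme: highest_objectid = 260, next ID = ++highest_objectid = 261
 *   new scheme: free_objectid    = 261, next ID = free_objectid++    = 261
 * The first objectid handed out is 261 either way; only the stored value
 * and the increment order change.
 */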
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e45057c0c016..0e7e9526b6a8 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -120,6 +120,8 @@ blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
extent_submit_bio_start_t *submit_bio_start);
blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
int mirror_num);
+int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
@@ -133,8 +135,8 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
int btree_lock_page_hook(struct page *page, void *data,
void (*flush_fn)(void *));
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
-int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
-int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
+int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid);
+int btrfs_init_root_free_objectid(struct btrfs_root *root);
int __init btrfs_end_io_wq_init(void);
void __cold btrfs_end_io_wq_exit(void);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0c335dae5af7..36a3c973fda1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -34,6 +34,8 @@
#include "block-group.h"
#include "discard.h"
#include "rcu-string.h"
+#include "zoned.h"
+#include "dev-replace.h"
#undef SCRAMBLE_DELAYED_REFS
@@ -82,41 +84,6 @@ void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
EXTENT_UPTODATE);
}
-static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
-{
- if (ref->type == BTRFS_REF_METADATA) {
- if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
- return BTRFS_BLOCK_GROUP_SYSTEM;
- else
- return BTRFS_BLOCK_GROUP_METADATA;
- }
- return BTRFS_BLOCK_GROUP_DATA;
-}
-
-static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_ref *ref)
-{
- struct btrfs_space_info *space_info;
- u64 flags = generic_ref_to_space_flags(ref);
-
- space_info = btrfs_find_space_info(fs_info, flags);
- ASSERT(space_info);
- percpu_counter_add_batch(&space_info->total_bytes_pinned, ref->len,
- BTRFS_TOTAL_BYTES_PINNED_BATCH);
-}
-
-static void sub_pinned_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_ref *ref)
-{
- struct btrfs_space_info *space_info;
- u64 flags = generic_ref_to_space_flags(ref);
-
- space_info = btrfs_find_space_info(fs_info, flags);
- ASSERT(space_info);
- percpu_counter_add_batch(&space_info->total_bytes_pinned, -ref->len,
- BTRFS_TOTAL_BYTES_PINNED_BATCH);
-}
-
/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
@@ -1299,6 +1266,46 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
return ret;
}
+static int do_discard_extent(struct btrfs_bio_stripe *stripe, u64 *bytes)
+{
+ struct btrfs_device *dev = stripe->dev;
+ struct btrfs_fs_info *fs_info = dev->fs_info;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ u64 phys = stripe->physical;
+ u64 len = stripe->length;
+ u64 discarded = 0;
+ int ret = 0;
+
+ /* Zone reset on a zoned filesystem */
+ if (btrfs_can_zone_reset(dev, phys, len)) {
+ u64 src_disc;
+
+ ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
+ if (ret)
+ goto out;
+
+ if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
+ dev != dev_replace->srcdev)
+ goto out;
+
+ src_disc = discarded;
+
+ /* Send to replace target as well */
+ ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
+ &discarded);
+ discarded += src_disc;
+ } else if (blk_queue_discard(bdev_get_queue(stripe->dev->bdev))) {
+ ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
+ } else {
+ ret = 0;
+ *bytes = 0;
+ }
+
+out:
+ *bytes = discarded;
+ return ret;
+}
+
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes)
{
@@ -1333,20 +1340,13 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
stripe = bbio->stripes;
for (i = 0; i < bbio->num_stripes; i++, stripe++) {
u64 bytes;
- struct request_queue *req_q;
if (!stripe->dev->bdev) {
ASSERT(btrfs_test_opt(fs_info, DEGRADED));
continue;
}
- req_q = bdev_get_queue(stripe->dev->bdev);
- if (!blk_queue_discard(req_q))
- continue;
- ret = btrfs_issue_discard(stripe->dev->bdev,
- stripe->physical,
- stripe->length,
- &bytes);
+ ret = do_discard_extent(stripe, &bytes);
if (!ret) {
discarded_bytes += bytes;
} else if (ret != -EOPNOTSUPP) {
@@ -1388,7 +1388,6 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- int old_ref_mod, new_ref_mod;
int ret;
ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
@@ -1397,17 +1396,12 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID);
if (generic_ref->type == BTRFS_REF_METADATA)
- ret = btrfs_add_delayed_tree_ref(trans, generic_ref,
- NULL, &old_ref_mod, &new_ref_mod);
+ ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
else
- ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0,
- &old_ref_mod, &new_ref_mod);
+ ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);
btrfs_ref_tree_mod(fs_info, generic_ref);
- if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
- sub_pinned_bytes(fs_info, generic_ref);
-
return ret;
}
@@ -1795,34 +1789,28 @@ void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
{
int nr_items = 1; /* Dropping this ref head update. */
- if (head->total_ref_mod < 0) {
- struct btrfs_space_info *space_info;
- u64 flags;
+ /*
+ * We had csum deletions accounted for in our delayed refs rsv, so we
+ * need to drop the csum leaves for this update from our delayed_refs_rsv.
+ */
+ if (head->total_ref_mod < 0 && head->is_data) {
+ spin_lock(&delayed_refs->lock);
+ delayed_refs->pending_csums -= head->num_bytes;
+ spin_unlock(&delayed_refs->lock);
+ nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
+ }
- if (head->is_data)
- flags = BTRFS_BLOCK_GROUP_DATA;
- else if (head->is_system)
- flags = BTRFS_BLOCK_GROUP_SYSTEM;
- else
- flags = BTRFS_BLOCK_GROUP_METADATA;
- space_info = btrfs_find_space_info(fs_info, flags);
- ASSERT(space_info);
- percpu_counter_add_batch(&space_info->total_bytes_pinned,
- -head->num_bytes,
- BTRFS_TOTAL_BYTES_PINNED_BATCH);
+ /*
+ * We were dropping refs, or had a new ref and dropped it, so we must
+ * adjust total_bytes_pinned down. The space may or may not have been
+ * pinned, but either way it is accounted for properly in the pinned
+ * space by now.
+ */
+ if (head->total_ref_mod < 0 ||
+ (head->total_ref_mod == 0 && head->must_insert_reserved)) {
+ u64 flags = btrfs_ref_head_to_space_flags(head);
- /*
- * We had csum deletions accounted for in our delayed refs rsv,
- * we need to drop the csum leaves for this update from our
- * delayed_refs_rsv.
- */
- if (head->is_data) {
- spin_lock(&delayed_refs->lock);
- delayed_refs->pending_csums -= head->num_bytes;
- spin_unlock(&delayed_refs->lock);
- nr_items += btrfs_csum_bytes_to_leaves(fs_info,
- head->num_bytes);
- }
+ btrfs_mod_total_bytes_pinned(fs_info, flags, -head->num_bytes);
}
btrfs_delayed_refs_rsv_release(fs_info, nr_items);
@@ -2160,7 +2148,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
if (count == 0)
- count = atomic_read(&delayed_refs->num_entries) * 2;
+ count = delayed_refs->num_heads_ready;
again:
#ifdef SCRAMBLE_DELAYED_REFS
@@ -2572,8 +2560,7 @@ static int pin_down_extent(struct btrfs_trans_handle *trans,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
- percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
- num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
+ __btrfs_mod_total_bytes_pinned(cache->space_info, num_bytes);
set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
return 0;
@@ -2784,11 +2771,14 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
cache->pinned -= len;
btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
space_info->max_extent_size = 0;
- percpu_counter_add_batch(&space_info->total_bytes_pinned,
- -len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
+ __btrfs_mod_total_bytes_pinned(space_info, -len);
if (cache->ro) {
space_info->bytes_readonly += len;
readonly = true;
+ } else if (btrfs_is_zoned(fs_info)) {
+ /* Need reset before reusing in a zoned block group */
+ space_info->bytes_zone_unusable += len;
+ readonly = true;
}
spin_unlock(&cache->lock);
if (!readonly && return_free_space &&
@@ -3318,7 +3308,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ref generic_ref = { 0 };
- int pin = 1;
int ret;
btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
@@ -3327,25 +3316,23 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
root->root_key.objectid);
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
- int old_ref_mod, new_ref_mod;
-
btrfs_ref_tree_mod(fs_info, &generic_ref);
- ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL,
- &old_ref_mod, &new_ref_mod);
+ ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
BUG_ON(ret); /* -ENOMEM */
- pin = old_ref_mod >= 0 && new_ref_mod < 0;
}
if (last_ref && btrfs_header_generation(buf) == trans->transid) {
struct btrfs_block_group *cache;
+ bool must_pin = false;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
ret = check_ref_cleanup(trans, buf->start);
- if (!ret)
+ if (!ret) {
+ btrfs_redirty_list_add(trans->transaction, buf);
goto out;
+ }
}
- pin = 0;
cache = btrfs_lookup_block_group(fs_info, buf->start);
if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
@@ -3354,6 +3341,33 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
goto out;
}
+ /*
+ * If this is a leaf and there are tree mod log users, we may
+ * have recorded mod log operations that point to this leaf.
+ * So we must make sure no one reuses this leaf's extent before
+ * mod log operations are applied to a node, otherwise after
+ * rewinding a node using the mod log operations we get an
+ * inconsistent btree, as the leaf's extent may now be used as
+ * a node or leaf for another different btree.
+ * We are safe from races here because at this point no other
+ * node or root points to this extent buffer, so if after this
+ * check a new tree mod log user joins, it will not be able to
+ * find a node pointing to this leaf and record operations that
+ * point to this leaf.
+ */
+ if (btrfs_header_level(buf) == 0) {
+ read_lock(&fs_info->tree_mod_log_lock);
+ must_pin = !list_empty(&fs_info->tree_mod_seq_list);
+ read_unlock(&fs_info->tree_mod_log_lock);
+ }
+
+ if (must_pin || btrfs_is_zoned(fs_info)) {
+ btrfs_redirty_list_add(trans->transaction, buf);
+ pin_down_extent(trans, cache, buf->start, buf->len, 1);
+ btrfs_put_block_group(cache);
+ goto out;
+ }
+
WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
btrfs_add_free_space(cache, buf->start, buf->len);
@@ -3362,9 +3376,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
}
out:
- if (pin)
- add_pinned_bytes(fs_info, &generic_ref);
-
if (last_ref) {
/*
* Deleting the buffer, clear the corrupt flag since it doesn't
@@ -3378,7 +3389,6 @@ out:
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- int old_ref_mod, new_ref_mod;
int ret;
if (btrfs_is_testing(fs_info))
@@ -3394,14 +3404,11 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
/* unlocks the pinned mutex */
btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
- old_ref_mod = new_ref_mod = 0;
ret = 0;
} else if (ref->type == BTRFS_REF_METADATA) {
- ret = btrfs_add_delayed_tree_ref(trans, ref, NULL,
- &old_ref_mod, &new_ref_mod);
+ ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
} else {
- ret = btrfs_add_delayed_data_ref(trans, ref, 0,
- &old_ref_mod, &new_ref_mod);
+ ret = btrfs_add_delayed_data_ref(trans, ref, 0);
}
if (!((ref->type == BTRFS_REF_METADATA &&
@@ -3410,9 +3417,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
btrfs_ref_tree_mod(fs_info, ref);
- if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
- add_pinned_bytes(fs_info, ref);
-
return ret;
}
@@ -3489,6 +3493,7 @@ btrfs_release_block_group(struct btrfs_block_group *cache,
enum btrfs_extent_allocation_policy {
BTRFS_EXTENT_ALLOC_CLUSTERED,
+ BTRFS_EXTENT_ALLOC_ZONED,
};
/*
@@ -3513,6 +3518,9 @@ struct find_free_extent_ctl {
bool have_caching_bg;
bool orig_have_caching_bg;
+ /* Allocation is called for tree-log */
+ bool for_treelog;
+
/* RAID index, converted from flags */
int index;
@@ -3741,6 +3749,118 @@ static int do_allocation_clustered(struct btrfs_block_group *block_group,
return find_free_extent_unclustered(block_group, ffe_ctl);
}
+/*
+ * Tree-log block group locking
+ * ============================
+ *
+ * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which
+ * indicates the starting address of a block group, which is reserved only
+ * for tree-log metadata.
+ *
+ * Lock nesting
+ * ============
+ *
+ * space_info::lock
+ * block_group::lock
+ * fs_info::treelog_bg_lock
+ */
+
+/*
+ * Simple allocator for sequential-only block groups: allocation is always
+ * sequential, so there are no free-space trees to manage. This function also
+ * reserves the bytes as in btrfs_add_reserved_bytes.
+ */
+static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ struct find_free_extent_ctl *ffe_ctl,
+ struct btrfs_block_group **bg_ret)
+{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct btrfs_space_info *space_info = block_group->space_info;
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ u64 start = block_group->start;
+ u64 num_bytes = ffe_ctl->num_bytes;
+ u64 avail;
+ u64 bytenr = block_group->start;
+ u64 log_bytenr;
+ int ret = 0;
+ bool skip;
+
+ ASSERT(btrfs_is_zoned(block_group->fs_info));
+
+ /*
+ * Do not allow non-tree-log blocks in the dedicated tree-log block
+ * group, and vice versa.
+ */
+ spin_lock(&fs_info->treelog_bg_lock);
+ log_bytenr = fs_info->treelog_bg;
+ skip = log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) ||
+ (!ffe_ctl->for_treelog && bytenr == log_bytenr));
+ spin_unlock(&fs_info->treelog_bg_lock);
+ if (skip)
+ return 1;
+
+ spin_lock(&space_info->lock);
+ spin_lock(&block_group->lock);
+ spin_lock(&fs_info->treelog_bg_lock);
+
+ ASSERT(!ffe_ctl->for_treelog ||
+ block_group->start == fs_info->treelog_bg ||
+ fs_info->treelog_bg == 0);
+
+ if (block_group->ro) {
+ ret = 1;
+ goto out;
+ }
+
+ /*
+ * Do not allow a block group that is already in use to become the
+ * dedicated tree-log block group.
+ */
+ if (ffe_ctl->for_treelog && !fs_info->treelog_bg &&
+ (block_group->used || block_group->reserved)) {
+ ret = 1;
+ goto out;
+ }
+
+ avail = block_group->length - block_group->alloc_offset;
+ if (avail < num_bytes) {
+ if (ffe_ctl->max_extent_size < avail) {
+ /*
+ * With the sequential allocator, free space is always
+ * contiguous
+ */
+ ffe_ctl->max_extent_size = avail;
+ ffe_ctl->total_free_space = avail;
+ }
+ ret = 1;
+ goto out;
+ }
+
+ if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
+ fs_info->treelog_bg = block_group->start;
+
+ ffe_ctl->found_offset = start + block_group->alloc_offset;
+ block_group->alloc_offset += num_bytes;
+ spin_lock(&ctl->tree_lock);
+ ctl->free_space -= num_bytes;
+ spin_unlock(&ctl->tree_lock);
+
+ /*
+ * We do not check if found_offset is aligned to stripesize. The
+ * address is rewritten anyway when zone append writes are used.
+ */
+
+ ffe_ctl->search_start = ffe_ctl->found_offset;
+
+out:
+ if (ret && ffe_ctl->for_treelog)
+ fs_info->treelog_bg = 0;
+ spin_unlock(&fs_info->treelog_bg_lock);
+ spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
+ return ret;
+}
+
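Because a zoned block group is append-only, the allocator above is essentially a bump of alloc_offset. A small worked example of the bookkeeping:

/*
 * Example: 256 MiB zoned block group, alloc_offset currently at 96 MiB.
 *   avail = length - alloc_offset = 160 MiB
 * A 1 MiB allocation succeeds with found_offset = start + 96 MiB and
 * alloc_offset becomes 97 MiB; ctl->free_space drops by 1 MiB. Free
 * space is always the single contiguous tail of the zone, which is why
 * max_extent_size can simply be set to avail on failure.
 */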
static int do_allocation(struct btrfs_block_group *block_group,
struct find_free_extent_ctl *ffe_ctl,
struct btrfs_block_group **bg_ret)
@@ -3748,6 +3868,8 @@ static int do_allocation(struct btrfs_block_group *block_group,
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
return do_allocation_clustered(block_group, ffe_ctl, bg_ret);
+ case BTRFS_EXTENT_ALLOC_ZONED:
+ return do_allocation_zoned(block_group, ffe_ctl, bg_ret);
default:
BUG();
}
@@ -3762,6 +3884,9 @@ static void release_block_group(struct btrfs_block_group *block_group,
ffe_ctl->retry_clustered = false;
ffe_ctl->retry_unclustered = false;
break;
+ case BTRFS_EXTENT_ALLOC_ZONED:
+ /* Nothing to do */
+ break;
default:
BUG();
}
@@ -3790,6 +3915,9 @@ static void found_extent(struct find_free_extent_ctl *ffe_ctl,
case BTRFS_EXTENT_ALLOC_CLUSTERED:
found_extent_clustered(ffe_ctl, ins);
break;
+ case BTRFS_EXTENT_ALLOC_ZONED:
+ /* Nothing to do */
+ break;
default:
BUG();
}
@@ -3805,6 +3933,9 @@ static int chunk_allocation_failed(struct find_free_extent_ctl *ffe_ctl)
*/
ffe_ctl->loop = LOOP_NO_EMPTY_SIZE;
return 0;
+ case BTRFS_EXTENT_ALLOC_ZONED:
+ /* Give up here */
+ return -ENOSPC;
default:
BUG();
}
@@ -3973,6 +4104,14 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
case BTRFS_EXTENT_ALLOC_CLUSTERED:
return prepare_allocation_clustered(fs_info, ffe_ctl,
space_info, ins);
+ case BTRFS_EXTENT_ALLOC_ZONED:
+ if (ffe_ctl->for_treelog) {
+ spin_lock(&fs_info->treelog_bg_lock);
+ if (fs_info->treelog_bg)
+ ffe_ctl->hint_byte = fs_info->treelog_bg;
+ spin_unlock(&fs_info->treelog_bg_lock);
+ }
+ return 0;
default:
BUG();
}
@@ -4015,6 +4154,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
struct find_free_extent_ctl ffe_ctl = {0};
struct btrfs_space_info *space_info;
bool full_search = false;
+ bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
WARN_ON(num_bytes < fs_info->sectorsize);
@@ -4028,6 +4168,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
ffe_ctl.orig_have_caching_bg = false;
ffe_ctl.found_offset = 0;
ffe_ctl.hint_byte = hint_byte_orig;
+ ffe_ctl.for_treelog = for_treelog;
ffe_ctl.policy = BTRFS_EXTENT_ALLOC_CLUSTERED;
/* For clustered allocation */
@@ -4036,6 +4177,9 @@ static noinline int find_free_extent(struct btrfs_root *root,
ffe_ctl.last_ptr = NULL;
ffe_ctl.use_cluster = true;
+ if (btrfs_is_zoned(fs_info))
+ ffe_ctl.policy = BTRFS_EXTENT_ALLOC_ZONED;
+
ins->type = BTRFS_EXTENT_ITEM_KEY;
ins->objectid = 0;
ins->offset = 0;
@@ -4099,8 +4243,11 @@ search:
struct btrfs_block_group *bg_ret;
/* If the block group is read-only, we can skip it entirely. */
- if (unlikely(block_group->ro))
+ if (unlikely(block_group->ro)) {
+ if (for_treelog)
+ btrfs_clear_treelog_bg(block_group);
continue;
+ }
btrfs_grab_block_group(block_group, delalloc);
ffe_ctl.search_start = block_group->start;
@@ -4178,20 +4325,21 @@ have_block_group:
/* move on to the next group */
if (ffe_ctl.search_start + num_bytes >
block_group->start + block_group->length) {
- btrfs_add_free_space(block_group, ffe_ctl.found_offset,
- num_bytes);
+ btrfs_add_free_space_unused(block_group,
+ ffe_ctl.found_offset, num_bytes);
goto loop;
}
if (ffe_ctl.found_offset < ffe_ctl.search_start)
- btrfs_add_free_space(block_group, ffe_ctl.found_offset,
- ffe_ctl.search_start - ffe_ctl.found_offset);
+ btrfs_add_free_space_unused(block_group,
+ ffe_ctl.found_offset,
+ ffe_ctl.search_start - ffe_ctl.found_offset);
ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
num_bytes, delalloc);
if (ret == -EAGAIN) {
- btrfs_add_free_space(block_group, ffe_ctl.found_offset,
- num_bytes);
+ btrfs_add_free_space_unused(block_group,
+ ffe_ctl.found_offset, num_bytes);
goto loop;
}
btrfs_inc_block_group_reservations(block_group);
@@ -4285,6 +4433,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
bool final_tried = num_bytes == min_alloc_size;
u64 flags;
int ret;
+ bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
flags = get_alloc_profile_by_root(root, is_data);
again:
@@ -4308,8 +4457,8 @@ again:
sinfo = btrfs_find_space_info(fs_info, flags);
btrfs_err(fs_info,
- "allocation failed flags %llu, wanted %llu",
- flags, num_bytes);
+ "allocation failed flags %llu, wanted %llu tree-log %d",
+ flags, num_bytes, for_treelog);
if (sinfo)
btrfs_dump_space_info(fs_info, sinfo,
num_bytes, 1);
@@ -4491,7 +4640,6 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
}
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
- BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
btrfs_set_extent_inline_ref_type(leaf, iref,
BTRFS_SHARED_BLOCK_REF_KEY);
btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
@@ -4528,7 +4676,6 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_key *ins)
{
struct btrfs_ref generic_ref = { 0 };
- int ret;
BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
@@ -4536,9 +4683,8 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
ins->objectid, ins->offset, 0);
btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, offset);
btrfs_ref_tree_mod(root->fs_info, &generic_ref);
- ret = btrfs_add_delayed_data_ref(trans, &generic_ref,
- ram_bytes, NULL, NULL);
- return ret;
+
+ return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);
}
/*
@@ -4620,6 +4766,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
__btrfs_tree_lock(buf, nest);
btrfs_clean_tree_block(buf);
clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
+ clear_bit(EXTENT_BUFFER_NO_CHECK, &buf->bflags);
set_extent_buffer_uptodate(buf);
@@ -4730,8 +4877,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
generic_ref.real_root = root->root_key.objectid;
btrfs_init_tree_ref(&generic_ref, level, root_objectid);
btrfs_ref_tree_mod(fs_info, &generic_ref);
- ret = btrfs_add_delayed_tree_ref(trans, &generic_ref,
- extent_op, NULL, NULL);
+ ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
if (ret)
goto out_free_delayed;
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c9cee458e001..910769d5fcdb 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -24,6 +24,9 @@
#include "rcu-string.h"
#include "backref.h"
#include "disk-io.h"
+#include "subpage.h"
+#include "zoned.h"
+#include "block-group.h"
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
@@ -389,16 +392,16 @@ do_insert:
}
/**
- * __etree_search - searche @tree for an entry that contains @offset. Such
- * entry would have entry->start <= offset && entry->end >= offset.
+ * Search @tree for an entry that contains @offset. Such an entry would have
+ * entry->start <= offset && entry->end >= offset.
*
- * @tree - the tree to search
- * @offset - offset that should fall within an entry in @tree
- * @next_ret - pointer to the first entry whose range ends after @offset
- * @prev - pointer to the first entry whose range begins before @offset
- * @p_ret - pointer where new node should be anchored (used when inserting an
- * entry in the tree)
- * @parent_ret - points to entry which would have been the parent of the entry,
+ * @tree: the tree to search
+ * @offset: offset that should fall within an entry in @tree
+ * @next_ret: pointer to the first entry whose range ends after @offset
+ * @prev_ret: pointer to the first entry whose range begins before @offset
+ * @p_ret: pointer where new node should be anchored (used when inserting an
+ * entry in the tree)
+ * @parent_ret: points to entry which would have been the parent of the entry,
* containing @offset
*
* This function returns a pointer to the entry that contains @offset byte
@@ -1588,12 +1591,13 @@ out:
}
/**
- * find_contiguous_extent_bit: find a contiguous area of bits
- * @tree - io tree to check
- * @start - offset to start the search from
- * @start_ret - the first offset we found with the bits set
- * @end_ret - the final contiguous range of the bits that were set
- * @bits - bits to look for
+ * Find a contiguous area of bits
+ *
+ * @tree: io tree to check
+ * @start: offset to start the search from
+ * @start_ret: the first offset we found with the bits set
+ * @end_ret: the final contiguous range of the bits that were set
+ * @bits: bits to look for
*
* set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
* to set bits appropriately, and then merge them again. During this time it
@@ -1625,14 +1629,14 @@ int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
}
/**
- * find_first_clear_extent_bit - find the first range that has @bits not set.
- * This range could start before @start.
+ * Find the first range that has @bits not set. This range could start before
+ * @start.
*
- * @tree - the tree to search
- * @start - the offset at/after which the found extent should start
- * @start_ret - records the beginning of the range
- * @end_ret - records the end of the range (inclusive)
- * @bits - the set of bits which must be unset
+ * @tree: the tree to search
+ * @start: offset at/after which the found extent should start
+ * @start_ret: records the beginning of the range
+ * @end_ret: records the end of the range (inclusive)
+ * @bits: the set of bits which must be unset
*
* Since unallocated range is also considered one which doesn't have the bits
* set it's possible that @end_ret contains -1, this happens in case the range
@@ -1975,10 +1979,10 @@ static int __process_pages_contig(struct address_space *mapping,
pages_processed++;
continue;
}
- if (page_ops & PAGE_CLEAR_DIRTY)
+ if (page_ops & PAGE_START_WRITEBACK) {
clear_page_dirty_for_io(pages[i]);
- if (page_ops & PAGE_SET_WRITEBACK)
set_page_writeback(pages[i]);
+ }
if (page_ops & PAGE_SET_ERROR)
SetPageError(pages[i]);
if (page_ops & PAGE_END_WRITEBACK)
@@ -2256,6 +2260,9 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
BUG_ON(!mirror_num);
+ if (btrfs_is_zoned(fs_info))
+ return btrfs_repair_one_zone(fs_info, logical);
+
bio = btrfs_io_bio_alloc(1);
bio->bi_iter.bi_size = 0;
map_length = length;
@@ -2732,6 +2739,7 @@ static void end_bio_extent_writepage(struct bio *bio)
u64 start;
u64 end;
struct bvec_iter_all iter_all;
+ bool first_bvec = true;
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all) {
@@ -2758,6 +2766,11 @@ static void end_bio_extent_writepage(struct bio *bio)
start = page_offset(page);
end = start + bvec->bv_offset + bvec->bv_len - 1;
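+ /* For a zone append write, record the physical location the device chose for it */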
+ if (first_bvec) {
+ btrfs_record_physical_zoned(inode, start, bio);
+ first_bvec = false;
+ }
+
end_extent_writepage(page, error, start, end);
end_page_writeback(page);
}
@@ -2775,7 +2788,7 @@ struct processed_extent {
struct btrfs_inode *inode;
/* Start of the range in @inode */
u64 start;
- /* End of the range in in @inode */
+ /* End of the range in @inode */
u64 end;
bool uptodate;
};
@@ -2838,15 +2851,67 @@ update:
processed->uptodate = uptodate;
}
-static void endio_readpage_update_page_status(struct page *page, bool uptodate)
+static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
{
+ ASSERT(PageLocked(page));
+ if (fs_info->sectorsize == PAGE_SIZE)
+ return;
+
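+ /* For subpage, track the readers covering this page so it is unlocked only by the last one */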
+ ASSERT(PagePrivate(page));
+ btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
+}
+
+static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
+
+ ASSERT(page_offset(page) <= start &&
+ start + len <= page_offset(page) + PAGE_SIZE);
+
if (uptodate) {
- SetPageUptodate(page);
+ btrfs_page_set_uptodate(fs_info, page, start, len);
} else {
- ClearPageUptodate(page);
- SetPageError(page);
+ btrfs_page_clear_uptodate(fs_info, page, start, len);
+ btrfs_page_set_error(fs_info, page, start, len);
}
- unlock_page(page);
+
+ if (fs_info->sectorsize == PAGE_SIZE)
+ unlock_page(page);
+ else if (is_data_inode(page->mapping->host))
+ /*
+ * For subpage data, unlock the page if we're the last reader.
+ * For subpage metadata, page lock is not utilized for read.
+ */
+ btrfs_subpage_end_reader(fs_info, page, start, len);
+}
+
+/*
+ * Find the extent buffer for a given bytenr.
+ *
+ * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
+ * in endio context.
+ */
+static struct extent_buffer *find_extent_buffer_readpage(
+ struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
+{
+ struct extent_buffer *eb;
+
+ /*
+ * For regular sectorsize, we can use page->private to grab the
+ * extent buffer.
+ */
+ if (fs_info->sectorsize == PAGE_SIZE) {
+ ASSERT(PagePrivate(page) && page->private);
+ return (struct extent_buffer *)page->private;
+ }
+
+ /* For the subpage case, we need to look up the buffer radix tree */
+ rcu_read_lock();
+ eb = radix_tree_lookup(&fs_info->buffer_radix,
+ bytenr >> fs_info->sectorsize_bits);
+ rcu_read_unlock();
+ ASSERT(eb);
+ return eb;
}
/*
@@ -2960,7 +3025,7 @@ static void end_bio_extent_readpage(struct bio *bio)
} else {
struct extent_buffer *eb;
- eb = (struct extent_buffer *)page->private;
+ eb = find_extent_buffer_readpage(fs_info, page, start);
set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = mirror;
atomic_dec(&eb->io_pages);
@@ -2972,18 +3037,29 @@ readpage_ok:
if (likely(uptodate)) {
loff_t i_size = i_size_read(inode);
pgoff_t end_index = i_size >> PAGE_SHIFT;
- unsigned off;
- /* Zero out the end if this page straddles i_size */
- off = offset_in_page(i_size);
- if (page->index == end_index && off)
- zero_user_segment(page, off, PAGE_SIZE);
+ /*
+ * Zero out the remaining part if this range straddles
+ * i_size.
+ *
+ * Here we should only zero the range inside the bvec,
+ * not touch anything else.
+ *
+ * NOTE: i_size is exclusive while end is inclusive.
+ */
+ if (page->index == end_index && i_size <= end) {
+ u32 zero_start = max(offset_in_page(i_size),
+ offset_in_page(start));
+
+ zero_user_segment(page, zero_start,
+ offset_in_page(end) + 1);
+ }
}
ASSERT(bio_offset + len > bio_offset);
bio_offset += len;
/* Update page status and unlock */
- endio_readpage_update_page_status(page, uptodate);
+ end_page_read(page, uptodate, start, len);
endio_readpage_release_extent(&processed, BTRFS_I(inode),
start, end, uptodate);
}
@@ -3012,7 +3088,7 @@ struct bio *btrfs_bio_alloc(u64 first_byte)
{
struct bio *bio;
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
+ bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &btrfs_bioset);
bio->bi_iter.bi_sector = first_byte >> 9;
btrfs_io_bio_init(btrfs_io_bio(bio));
return bio;
@@ -3058,14 +3134,67 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
return bio;
}
+/**
+ * Attempt to add a page to bio
+ *
+ * @bio: destination bio
+ * @page: page to add to the bio
+ * @disk_bytenr: disk bytenr of the page being added, used to start a new bio
+ * or to check whether we are adding a contiguous page to the previous one
+ * @pg_offset: starting offset in the page
+ * @size: portion of page that we want to write
+ * @prev_bio_flags: flags of previous bio to see if we can merge the current one
+ * @bio_flags: flags of the current bio to see if we can merge them
+ * @return: true if page was added, false otherwise
+ *
+ * Attempt to add a page to bio considering stripe alignment etc.
+ *
+ * Return true if the page was added successfully. Otherwise, return false.
+ */
+static bool btrfs_bio_add_page(struct bio *bio, struct page *page,
+ u64 disk_bytenr, unsigned int size,
+ unsigned int pg_offset,
+ unsigned long prev_bio_flags,
+ unsigned long bio_flags)
+{
+ const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
+ bool contig;
+ int ret;
+
+ if (prev_bio_flags != bio_flags)
+ return false;
+
+ if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
+ contig = bio->bi_iter.bi_sector == sector;
+ else
+ contig = bio_end_sector(bio) == sector;
+ if (!contig)
+ return false;
+
+ if (btrfs_bio_fits_in_stripe(page, size, bio, bio_flags))
+ return false;
+
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ struct page *first_page = bio_first_bvec_all(bio)->bv_page;
+
+ if (!btrfs_bio_fits_in_ordered_extent(first_page, bio, size))
+ return false;
+ ret = bio_add_zone_append_page(bio, page, size, pg_offset);
+ } else {
+ ret = bio_add_page(bio, page, size, pg_offset);
+ }
+
+ return ret == size;
+}
+
/*
* @opf: bio REQ_OP_* and REQ_* flags as one value
* @wbc: optional writeback control for io accounting
* @page: page to add to the bio
+ * @disk_bytenr: logical bytenr where the write will be
+ * @size: portion of page that we want to write to
* @pg_offset: offset of the new bio or to check whether we are adding
* a contiguous page to the previous one
- * @size: portion of page that we want to write
- * @offset: starting offset in the page
* @bio_ret: must be valid pointer, newly allocated bio will be stored there
* @end_io_func: end_io callback for new bio
* @mirror_num: desired mirror to read/write
@@ -3074,7 +3203,7 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
*/
static int submit_extent_page(unsigned int opf,
struct writeback_control *wbc,
- struct page *page, u64 offset,
+ struct page *page, u64 disk_bytenr,
size_t size, unsigned long pg_offset,
struct bio **bio_ret,
bio_end_io_t end_io_func,
@@ -3086,27 +3215,17 @@ static int submit_extent_page(unsigned int opf,
int ret = 0;
struct bio *bio;
size_t io_size = min_t(size_t, size, PAGE_SIZE);
- sector_t sector = offset >> 9;
- struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
+ struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
+ struct extent_io_tree *tree = &inode->io_tree;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
ASSERT(bio_ret);
if (*bio_ret) {
- bool contig;
- bool can_merge = true;
-
bio = *bio_ret;
- if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
- contig = bio->bi_iter.bi_sector == sector;
- else
- contig = bio_end_sector(bio) == sector;
-
- if (btrfs_bio_fits_in_stripe(page, io_size, bio, bio_flags))
- can_merge = false;
-
- if (prev_bio_flags != bio_flags || !contig || !can_merge ||
- force_bio_submit ||
- bio_add_page(bio, page, io_size, pg_offset) < io_size) {
+ if (force_bio_submit ||
+ !btrfs_bio_add_page(bio, page, disk_bytenr, io_size,
+ pg_offset, prev_bio_flags, bio_flags)) {
ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
if (ret < 0) {
*bio_ret = NULL;
@@ -3120,7 +3239,7 @@ static int submit_extent_page(unsigned int opf,
}
}
- bio = btrfs_bio_alloc(offset);
+ bio = btrfs_bio_alloc(disk_bytenr);
bio_add_page(bio, page, io_size, pg_offset);
bio->bi_end_io = end_io_func;
bio->bi_private = tree;
@@ -3129,20 +3248,39 @@ static int submit_extent_page(unsigned int opf,
if (wbc) {
struct block_device *bdev;
- bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev;
+ bdev = fs_info->fs_devices->latest_bdev;
bio_set_dev(bio, bdev);
wbc_init_bio(wbc, bio);
wbc_account_cgroup_owner(wbc, page, io_size);
}
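+ /* Zone append needs the target device recorded on the bio; look it up from the chunk map */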
+ if (btrfs_is_zoned(fs_info) && bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ struct extent_map *em;
+ struct map_lookup *map;
+
+ em = btrfs_get_chunk_map(fs_info, disk_bytenr, io_size);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+
+ map = em->map_lookup;
+ /* We only support single profile for now */
+ ASSERT(map->num_stripes == 1);
+ btrfs_io_bio(bio)->device = map->stripes[0].dev;
+
+ free_extent_map(em);
+ }
*bio_ret = bio;
return ret;
}
-static void attach_extent_buffer_page(struct extent_buffer *eb,
- struct page *page)
+static int attach_extent_buffer_page(struct extent_buffer *eb,
+ struct page *page,
+ struct btrfs_subpage *prealloc)
{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ int ret = 0;
+
/*
* If the page is mapped to btree inode, we should hold the private
* lock to prevent race.
@@ -3152,16 +3290,62 @@ static void attach_extent_buffer_page(struct extent_buffer *eb,
if (page->mapping)
lockdep_assert_held(&page->mapping->private_lock);
- if (!PagePrivate(page))
- attach_page_private(page, eb);
+ if (fs_info->sectorsize == PAGE_SIZE) {
+ if (!PagePrivate(page))
+ attach_page_private(page, eb);
+ else
+ WARN_ON(page->private != (unsigned long)eb);
+ return 0;
+ }
+
+ /* Already mapped, just free prealloc */
+ if (PagePrivate(page)) {
+ btrfs_free_subpage(prealloc);
+ return 0;
+ }
+
+ if (prealloc)
+ /* Has preallocated memory for subpage */
+ attach_page_private(page, prealloc);
else
- WARN_ON(page->private != (unsigned long)eb);
+ /* Do new allocation to attach subpage */
+ ret = btrfs_attach_subpage(fs_info, page,
+ BTRFS_SUBPAGE_METADATA);
+ return ret;
+}
+
+int set_page_extent_mapped(struct page *page)
+{
+ struct btrfs_fs_info *fs_info;
+
+ ASSERT(page->mapping);
+
+ if (PagePrivate(page))
+ return 0;
+
+ fs_info = btrfs_sb(page->mapping->host->i_sb);
+
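+ /* Subpage data needs a full btrfs_subpage structure, regular sectorsize only a marker */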
+ if (fs_info->sectorsize < PAGE_SIZE)
+ return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
+
+ attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
+ return 0;
}
-void set_page_extent_mapped(struct page *page)
+void clear_page_extent_mapped(struct page *page)
{
+ struct btrfs_fs_info *fs_info;
+
+ ASSERT(page->mapping);
+
if (!PagePrivate(page))
- attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
+ return;
+
+ fs_info = btrfs_sb(page->mapping->host->i_sb);
+ if (fs_info->sectorsize < PAGE_SIZE)
+ return btrfs_detach_subpage(fs_info, page);
+
+ detach_page_private(page);
}
static struct extent_map *
@@ -3202,6 +3386,7 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
unsigned int read_flags, u64 *prev_em_start)
{
struct inode *inode = page->mapping->host;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 start = page_offset(page);
const u64 end = start + PAGE_SIZE - 1;
u64 cur = start;
@@ -3218,12 +3403,19 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
unsigned long this_bio_flag = 0;
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
- set_page_extent_mapped(page);
+ ret = set_page_extent_mapped(page);
+ if (ret < 0) {
+ unlock_extent(tree, start, end);
+ btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
+ unlock_page(page);
+ goto out;
+ }
if (!PageUptodate(page)) {
if (cleancache_get_page(page) == 0) {
BUG_ON(blocksize != PAGE_SIZE);
unlock_extent(tree, start, end);
+ unlock_page(page);
goto out;
}
}
@@ -3240,9 +3432,10 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
kunmap_atomic(userpage);
}
}
+ begin_page_read(fs_info, page);
while (cur <= end) {
bool force_bio_submit = false;
- u64 offset;
+ u64 disk_bytenr;
if (cur >= last_byte) {
char *userpage;
@@ -3257,13 +3450,14 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
&cached, GFP_NOFS);
unlock_extent_cached(tree, cur,
cur + iosize - 1, &cached);
+ end_page_read(page, true, cur, iosize);
break;
}
em = __get_extent_map(inode, page, pg_offset, cur,
end - cur + 1, em_cached);
if (IS_ERR_OR_NULL(em)) {
- SetPageError(page);
unlock_extent(tree, cur, end);
+ end_page_read(page, false, cur, end + 1 - cur);
break;
}
extent_offset = cur - em->start;
@@ -3280,9 +3474,9 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
cur_end = min(extent_map_end(em) - 1, end);
iosize = ALIGN(iosize, blocksize);
if (this_bio_flag & EXTENT_BIO_COMPRESSED)
- offset = em->block_start;
+ disk_bytenr = em->block_start;
else
- offset = em->block_start + extent_offset;
+ disk_bytenr = em->block_start + extent_offset;
block_start = em->block_start;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
block_start = EXTENT_MAP_HOLE;
@@ -3346,6 +3540,7 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
&cached, GFP_NOFS);
unlock_extent_cached(tree, cur,
cur + iosize - 1, &cached);
+ end_page_read(page, true, cur, iosize);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -3355,6 +3550,7 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
EXTENT_UPTODATE, 1, NULL)) {
check_page_uptodate(tree, page);
unlock_extent(tree, cur, cur + iosize - 1);
+ end_page_read(page, true, cur, iosize);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -3363,15 +3559,15 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
* to date. Error out
*/
if (block_start == EXTENT_MAP_INLINE) {
- SetPageError(page);
unlock_extent(tree, cur, cur + iosize - 1);
+ end_page_read(page, false, cur, iosize);
cur = cur + iosize;
pg_offset += iosize;
continue;
}
ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
- page, offset, iosize,
+ page, disk_bytenr, iosize,
pg_offset, bio,
end_bio_extent_readpage, 0,
*bio_flags,
@@ -3381,19 +3577,14 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
nr++;
*bio_flags = this_bio_flag;
} else {
- SetPageError(page);
unlock_extent(tree, cur, cur + iosize - 1);
+ end_page_read(page, false, cur, iosize);
goto out;
}
cur = cur + iosize;
pg_offset += iosize;
}
out:
- if (!nr) {
- if (!PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
- }
return ret;
}
@@ -3513,23 +3704,21 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
unsigned long nr_written,
int *nr_ret)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_io_tree *tree = &inode->io_tree;
u64 start = page_offset(page);
- u64 page_end = start + PAGE_SIZE - 1;
- u64 end;
+ u64 end = start + PAGE_SIZE - 1;
u64 cur = start;
u64 extent_offset;
u64 block_start;
- u64 iosize;
struct extent_map *em;
- size_t pg_offset = 0;
- size_t blocksize;
int ret = 0;
int nr = 0;
+ u32 opf = REQ_OP_WRITE;
const unsigned int write_flags = wbc_to_write_flags(wbc);
bool compressed;
- ret = btrfs_writepage_cow_fixup(page, start, page_end);
+ ret = btrfs_writepage_cow_fixup(page, start, end);
if (ret) {
/* Fixup worker will requeue */
redirty_page_for_writepage(wbc, page);
@@ -3544,16 +3733,13 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
*/
update_nr_written(wbc, nr_written + 1);
- end = page_end;
- blocksize = inode->vfs_inode.i_sb->s_blocksize;
-
while (cur <= end) {
+ u64 disk_bytenr;
u64 em_end;
- u64 offset;
+ u32 iosize;
if (cur >= i_size) {
- btrfs_writepage_endio_finish_ordered(page, cur,
- page_end, 1);
+ btrfs_writepage_endio_finish_ordered(page, cur, end, 1);
break;
}
em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
@@ -3565,13 +3751,20 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
extent_offset = cur - em->start;
em_end = extent_map_end(em);
- BUG_ON(em_end <= cur);
- BUG_ON(end < cur);
- iosize = min(em_end - cur, end - cur + 1);
- iosize = ALIGN(iosize, blocksize);
- offset = em->block_start + extent_offset;
+ ASSERT(cur <= em_end);
+ ASSERT(cur < end);
+ ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
+ ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
block_start = em->block_start;
compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+ disk_bytenr = em->block_start + extent_offset;
+
+ /* Note that em_end from extent_map_end() is exclusive */
+ iosize = min(em_end, end + 1) - cur;
+
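+ /* Writes into sequential zones have to use zone append commands */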
+ if (btrfs_use_zone_append(inode, em))
+ opf = REQ_OP_ZONE_APPEND;
+
free_extent_map(em);
em = NULL;
@@ -3587,7 +3780,6 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
btrfs_writepage_endio_finish_ordered(page, cur,
cur + iosize - 1, 1);
cur += iosize;
- pg_offset += iosize;
continue;
}
@@ -3598,9 +3790,9 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
page->index, cur, end);
}
- ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
- page, offset, iosize, pg_offset,
- &epd->bio,
+ ret = submit_extent_page(opf | write_flags, wbc, page,
+ disk_bytenr, iosize,
+ cur - page_offset(page), &epd->bio,
end_bio_extent_writepage,
0, 0, 0, false);
if (ret) {
@@ -3609,8 +3801,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
end_page_writeback(page);
}
- cur = cur + iosize;
- pg_offset += iosize;
+ cur += iosize;
nr++;
}
*nr_ret = nr;
@@ -3663,7 +3854,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
flush_dcache_page(page);
}
- set_page_extent_mapped(page);
+ ret = set_page_extent_mapped(page);
+ if (ret < 0) {
+ SetPageError(page);
+ goto done;
+ }
if (!epd->extent_locked) {
ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start,
@@ -3923,7 +4118,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct writeback_control *wbc,
struct extent_page_data *epd)
{
- u64 offset = eb->start;
+ u64 disk_bytenr = eb->start;
u32 nritems;
int i, num_pages;
unsigned long start, end;
@@ -3956,7 +4151,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
clear_page_dirty_for_io(p);
set_page_writeback(p);
ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
- p, offset, PAGE_SIZE, 0,
+ p, disk_bytenr, PAGE_SIZE, 0,
&epd->bio,
end_bio_extent_buffer_writepage,
0, 0, 0, false);
@@ -3969,7 +4164,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
ret = -EIO;
break;
}
- offset += PAGE_SIZE;
+ disk_bytenr += PAGE_SIZE;
update_nr_written(wbc, 1);
unlock_page(p);
}
@@ -4010,6 +4205,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
struct extent_buffer **eb_context)
{
struct address_space *mapping = page->mapping;
+ struct btrfs_block_group *cache = NULL;
struct extent_buffer *eb;
int ret;
@@ -4042,13 +4238,31 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
if (!ret)
return 0;
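+ /*
+ * On zoned filesystems an eb can only be submitted when it is at the
+ * block group's meta write pointer; otherwise it has to wait.
+ */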
+ if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
+ /*
+ * If for_sync, this hole will be filled with a
+ * transaction commit.
+ */
+ if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
+ ret = -EAGAIN;
+ else
+ ret = 0;
+ free_extent_buffer(eb);
+ return ret;
+ }
+
*eb_context = eb;
ret = lock_extent_buffer_for_io(eb, epd);
if (ret <= 0) {
+ btrfs_revert_meta_write_pointer(cache, eb);
+ if (cache)
+ btrfs_put_block_group(cache);
free_extent_buffer(eb);
return ret;
}
+ if (cache)
+ btrfs_put_block_group(cache);
ret = write_one_eb(eb, wbc, epd);
free_extent_buffer(eb);
if (ret < 0)
@@ -4094,6 +4308,7 @@ int btree_write_cache_pages(struct address_space *mapping,
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
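+ /* Zoned filesystems serialize metadata writeback to keep the meta write pointers consistent */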
+ btrfs_zoned_meta_io_lock(fs_info);
retry:
if (wbc->sync_mode == WB_SYNC_ALL)
tag_pages_for_writeback(mapping, index, end);
@@ -4134,7 +4349,7 @@ retry:
}
if (ret < 0) {
end_write_bio(&epd, ret);
- return ret;
+ goto out;
}
/*
* If something went wrong, don't allow any metadata write bio to be
@@ -4169,14 +4384,17 @@ retry:
ret = -EROFS;
end_write_bio(&epd, ret);
}
+out:
+ btrfs_zoned_meta_io_unlock(fs_info);
return ret;
}
/**
- * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
+ * Walk the list of dirty pages of the given address space and write all of them.
+ *
* @mapping: address space structure to write
- * @wbc: subtract the number of written pages from *@wbc->nr_to_write
- * @data: data passed to __extent_writepage function
+ * @wbc: subtract the number of written pages from *@wbc->nr_to_write
+ * @epd: holds context for the write, namely the bio
*
* If a page is already under I/O, write_cache_pages() skips it, even
* if it's dirty. This is desirable behaviour for memory-cleaning writeback,
@@ -4975,25 +5193,39 @@ int extent_buffer_under_io(const struct extent_buffer *eb)
test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
}
-/*
- * Release all pages attached to the extent buffer.
- */
-static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
+static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
{
- int i;
- int num_pages;
- int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
+ struct btrfs_subpage *subpage;
- BUG_ON(extent_buffer_under_io(eb));
+ lockdep_assert_held(&page->mapping->private_lock);
- num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++) {
- struct page *page = eb->pages[i];
+ if (PagePrivate(page)) {
+ subpage = (struct btrfs_subpage *)page->private;
+ if (atomic_read(&subpage->eb_refs))
+ return true;
+ }
+ return false;
+}
- if (!page)
- continue;
+static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
+{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
+
+ /*
+ * For mapped eb, we're going to change the page private, which should
+ * be done under the private_lock.
+ */
+ if (mapped)
+ spin_lock(&page->mapping->private_lock);
+
+ if (!PagePrivate(page)) {
if (mapped)
- spin_lock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->private_lock);
+ return;
+ }
+
+ if (fs_info->sectorsize == PAGE_SIZE) {
/*
* We do this since we'll remove the pages after we've
* removed the eb from the radix tree, so we could race
@@ -5012,9 +5244,49 @@ static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
*/
detach_page_private(page);
}
-
if (mapped)
spin_unlock(&page->mapping->private_lock);
+ return;
+ }
+
+ /*
+ * For subpage, we can have dummy eb with page private. In this case,
+ * we can directly detach the private as such page is only attached to
+ * one dummy eb, no sharing.
+ */
+ if (!mapped) {
+ btrfs_detach_subpage(fs_info, page);
+ return;
+ }
+
+ btrfs_page_dec_eb_refs(fs_info, page);
+
+ /*
+ * We can only detach the page private if there are no other ebs in the
+ * page range.
+ */
+ if (!page_range_has_eb(fs_info, page))
+ btrfs_detach_subpage(fs_info, page);
+
+ spin_unlock(&page->mapping->private_lock);
+}
+
+/* Release all pages attached to the extent buffer */
+static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
+{
+ int i;
+ int num_pages;
+
+ ASSERT(!extent_buffer_under_io(eb));
+
+ num_pages = num_extent_pages(eb);
+ for (i = 0; i < num_pages; i++) {
+ struct page *page = eb->pages[i];
+
+ if (!page)
+ continue;
+
+ detach_extent_buffer_page(eb, page);
/* One for when we allocated the page */
put_page(page);
@@ -5046,6 +5318,7 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
&fs_info->allocated_ebs);
+ INIT_LIST_HEAD(&eb->release_list);
spin_lock_init(&eb->refs_lock);
atomic_set(&eb->refs, 1);
@@ -5067,21 +5340,32 @@ struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
if (new == NULL)
return NULL;
+ /*
+ * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
+ * btrfs_release_extent_buffer() has different behavior for an
+ * UNMAPPED subpage extent buffer.
+ */
+ set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
+
for (i = 0; i < num_pages; i++) {
+ int ret;
+
p = alloc_page(GFP_NOFS);
if (!p) {
btrfs_release_extent_buffer(new);
return NULL;
}
- attach_extent_buffer_page(new, p);
+ ret = attach_extent_buffer_page(new, p, NULL);
+ if (ret < 0) {
+ put_page(p);
+ btrfs_release_extent_buffer(new);
+ return NULL;
+ }
WARN_ON(PageDirty(p));
- SetPageUptodate(p);
new->pages[i] = p;
copy_page(page_address(p), page_address(src->pages[i]));
}
-
- set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
- set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
+ set_extent_buffer_uptodate(new);
return new;
}
@@ -5099,9 +5383,14 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
+ int ret;
+
eb->pages[i] = alloc_page(GFP_NOFS);
if (!eb->pages[i])
goto err;
+ ret = attach_extent_buffer_page(eb, eb->pages[i], NULL);
+ if (ret < 0)
+ goto err;
}
set_extent_buffer_uptodate(eb);
btrfs_set_header_nritems(eb, 0);
@@ -5109,8 +5398,10 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
return eb;
err:
- for (; i > 0; i--)
+ for (; i > 0; i--) {
+ detach_extent_buffer_page(eb, eb->pages[i - 1]);
__free_page(eb->pages[i - 1]);
+ }
__free_extent_buffer(eb);
return NULL;
}
@@ -5252,6 +5543,38 @@ free_eb:
}
#endif
+static struct extent_buffer *grab_extent_buffer(
+ struct btrfs_fs_info *fs_info, struct page *page)
+{
+ struct extent_buffer *exists;
+
+ /*
+ * For subpage case, we completely rely on radix tree to ensure we
+ * don't try to insert two ebs for the same bytenr. So here we always
+ * return NULL and just continue.
+ */
+ if (fs_info->sectorsize < PAGE_SIZE)
+ return NULL;
+
+ /* Page not yet attached to an extent buffer */
+ if (!PagePrivate(page))
+ return NULL;
+
+ /*
+ * We could have already allocated an eb for this page and attached one,
+ * so let's see if we can get a ref on the existing eb. If we can, we
+ * know it's good and we can just return that one; otherwise we know we
+ * can just overwrite page->private.
+ */
+ exists = (struct extent_buffer *)page->private;
+ if (atomic_inc_not_zero(&exists->refs))
+ return exists;
+
+ WARN_ON(PageDirty(page));
+ detach_page_private(page);
+ return NULL;
+}
+
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u64 owner_root, int level)
{
@@ -5290,36 +5613,58 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++, index++) {
+ struct btrfs_subpage *prealloc = NULL;
+
p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
if (!p) {
exists = ERR_PTR(-ENOMEM);
goto free_eb;
}
- spin_lock(&mapping->private_lock);
- if (PagePrivate(p)) {
- /*
- * We could have already allocated an eb for this page
- * and attached one so lets see if we can get a ref on
- * the existing eb, and if we can we know it's good and
- * we can just return that one, else we know we can just
- * overwrite page->private.
- */
- exists = (struct extent_buffer *)p->private;
- if (atomic_inc_not_zero(&exists->refs)) {
- spin_unlock(&mapping->private_lock);
- unlock_page(p);
- put_page(p);
- mark_extent_buffer_accessed(exists, p);
- goto free_eb;
- }
- exists = NULL;
+ /*
+ * Preallocate page->private for subpage case, so that we won't
+ * allocate memory with private_lock held. The memory will be
+ * freed by attach_extent_buffer_page() or freed manually if
+ * we exit earlier.
+ *
+ * Although we have ensured one subpage eb can only have one
+ * page, it may change in the future for 16K page size
+ * support, so we still preallocate the memory in the loop.
+ */
+ ret = btrfs_alloc_subpage(fs_info, &prealloc,
+ BTRFS_SUBPAGE_METADATA);
+ if (ret < 0) {
+ unlock_page(p);
+ put_page(p);
+ exists = ERR_PTR(ret);
+ goto free_eb;
+ }
- WARN_ON(PageDirty(p));
- detach_page_private(p);
+ spin_lock(&mapping->private_lock);
+ exists = grab_extent_buffer(fs_info, p);
+ if (exists) {
+ spin_unlock(&mapping->private_lock);
+ unlock_page(p);
+ put_page(p);
+ mark_extent_buffer_accessed(exists, p);
+ btrfs_free_subpage(prealloc);
+ goto free_eb;
}
- attach_extent_buffer_page(eb, p);
+ /* Should not fail, as we have preallocated the memory */
+ ret = attach_extent_buffer_page(eb, p, prealloc);
+ ASSERT(!ret);
+ /*
+ * To inform that we have an extra eb under allocation, so that
+ * detach_extent_buffer_page() won't release the page private
+ * when the eb hasn't yet been inserted into the radix tree.
+ *
+ * The ref will be decreased when the eb releases the page, in
+ * detach_extent_buffer_page().
+ * Thus it needs no special handling in the error path.
+ */
+ btrfs_page_inc_eb_refs(fs_info, p);
spin_unlock(&mapping->private_lock);
+
WARN_ON(PageDirty(p));
eb->pages[i] = p;
if (!PageUptodate(p))
@@ -5525,33 +5870,103 @@ bool set_extent_buffer_dirty(struct extent_buffer *eb)
void clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
- int i;
+ struct btrfs_fs_info *fs_info = eb->fs_info;
struct page *page;
int num_pages;
+ int i;
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (page)
- ClearPageUptodate(page);
+ btrfs_page_clear_uptodate(fs_info, page,
+ eb->start, eb->len);
}
}
void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
- int i;
+ struct btrfs_fs_info *fs_info = eb->fs_info;
struct page *page;
int num_pages;
+ int i;
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
- SetPageUptodate(page);
+ btrfs_page_set_uptodate(fs_info, page, eb->start, eb->len);
}
}
+static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
+ int mirror_num)
+{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct extent_io_tree *io_tree;
+ struct page *page = eb->pages[0];
+ struct bio *bio = NULL;
+ int ret = 0;
+
+ ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
+ ASSERT(PagePrivate(page));
+ io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
+
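+ /* Subpage metadata reads are serialized by the extent io tree lock, not the page lock */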
+ if (wait == WAIT_NONE) {
+ ret = try_lock_extent(io_tree, eb->start,
+ eb->start + eb->len - 1);
+ if (ret <= 0)
+ return ret;
+ } else {
+ ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = 0;
+ if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
+ PageUptodate(page) ||
+ btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
+ set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+ unlock_extent(io_tree, eb->start, eb->start + eb->len - 1);
+ return ret;
+ }
+
+ clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
+ eb->read_mirror = 0;
+ atomic_set(&eb->io_pages, 1);
+ check_buffer_tree_ref(eb);
+ btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
+
+ ret = submit_extent_page(REQ_OP_READ | REQ_META, NULL, page, eb->start,
+ eb->len, eb->start - page_offset(page), &bio,
+ end_bio_extent_readpage, mirror_num, 0, 0,
+ true);
+ if (ret) {
+ /*
+ * In the endio function, if we hit something wrong we will
+ * increase the io_pages, so here we need to decrease it for
+ * error path.
+ */
+ atomic_dec(&eb->io_pages);
+ }
+ if (bio) {
+ int tmp;
+
+ tmp = submit_one_bio(bio, mirror_num, 0);
+ if (tmp < 0)
+ return tmp;
+ }
+ if (ret || wait != WAIT_COMPLETE)
+ return ret;
+
+ wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, EXTENT_LOCKED);
+ if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
+ ret = -EIO;
+ return ret;
+}
+
int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
{
int i;
@@ -5568,10 +5983,20 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
+ if (eb->fs_info->sectorsize < PAGE_SIZE)
+ return read_extent_buffer_subpage(eb, wait, mirror_num);
+
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (wait == WAIT_NONE) {
+ /*
+ * WAIT_NONE is only utilized by readahead. If we can't
+ * acquire the lock atomically it means either the eb
+ * is being read out or under modification.
+ * Either way the eb will be or has been cached,
+ * so readahead can exit safely.
+ */
if (!trylock_page(page))
goto unlock_exit;
} else {
@@ -5823,6 +6248,8 @@ void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
char *src = (char *)srcv;
unsigned long i = get_eb_page_index(start);
+ WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
+
if (check_eb_range(eb, start, len))
return;
@@ -6169,13 +6596,115 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
}
}
+static struct extent_buffer *get_next_extent_buffer(
+ struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
+{
+ struct extent_buffer *gang[BTRFS_SUBPAGE_BITMAP_SIZE];
+ struct extent_buffer *found = NULL;
+ u64 page_start = page_offset(page);
+ int ret;
+ int i;
+
+ ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
+ ASSERT(PAGE_SIZE / fs_info->nodesize <= BTRFS_SUBPAGE_BITMAP_SIZE);
+ lockdep_assert_held(&fs_info->buffer_lock);
+
+ ret = radix_tree_gang_lookup(&fs_info->buffer_radix, (void **)gang,
+ bytenr >> fs_info->sectorsize_bits,
+ PAGE_SIZE / fs_info->nodesize);
+ for (i = 0; i < ret; i++) {
+ /* Already beyond page end */
+ if (gang[i]->start >= page_start + PAGE_SIZE)
+ break;
+ /* Found one */
+ if (gang[i]->start >= bytenr) {
+ found = gang[i];
+ break;
+ }
+ }
+ return found;
+}
+
+static int try_release_subpage_extent_buffer(struct page *page)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
+ u64 cur = page_offset(page);
+ const u64 end = page_offset(page) + PAGE_SIZE;
+ int ret;
+
+ while (cur < end) {
+ struct extent_buffer *eb = NULL;
+
+ /*
+ * Unlike try_release_extent_buffer() which uses page->private
+ * to grab buffer, for subpage case we rely on radix tree, thus
+ * we need to ensure radix tree consistency.
+ *
+ * We also want an atomic snapshot of the radix tree, thus go
+ * with spinlock rather than RCU.
+ */
+ spin_lock(&fs_info->buffer_lock);
+ eb = get_next_extent_buffer(fs_info, page, cur);
+ if (!eb) {
+ /* No more eb in the page range after or at cur */
+ spin_unlock(&fs_info->buffer_lock);
+ break;
+ }
+ cur = eb->start + eb->len;
+
+ /*
+ * The same as try_release_extent_buffer(), to ensure the eb
+ * won't disappear out from under us.
+ */
+ spin_lock(&eb->refs_lock);
+ if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+ spin_unlock(&eb->refs_lock);
+ spin_unlock(&fs_info->buffer_lock);
+ break;
+ }
+ spin_unlock(&fs_info->buffer_lock);
+
+ /*
+ * If tree ref isn't set then we know the ref on this eb is a
+ * real ref, so just return, this eb will likely be freed soon
+ * anyway.
+ */
+ if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
+ spin_unlock(&eb->refs_lock);
+ break;
+ }
+
+ /*
+ * Here we don't care about the return value, we will always
+ * check the page private at the end. And
+ * release_extent_buffer() will release the refs_lock.
+ */
+ release_extent_buffer(eb);
+ }
+ /*
+ * Finally, check if we have cleared page private: if we have released
+ * all ebs in the page, the page private should be cleared by now.
+ */
+ spin_lock(&page->mapping->private_lock);
+ if (!PagePrivate(page))
+ ret = 1;
+ else
+ ret = 0;
+ spin_unlock(&page->mapping->private_lock);
+ return ret;
+
+}
+
int try_release_extent_buffer(struct page *page)
{
struct extent_buffer *eb;
+ if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
+ return try_release_subpage_extent_buffer(page);
+
/*
- * We need to make sure nobody is attaching this page to an eb right
- * now.
+ * We need to make sure nobody is changing page->private, as we rely on
+ * page->private as the pointer to extent buffer.
*/
spin_lock(&page->mapping->private_lock);
if (!PagePrivate(page)) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 19221095c635..824640cb0ace 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -31,16 +31,17 @@ enum {
EXTENT_BUFFER_IN_TREE,
/* write IO error */
EXTENT_BUFFER_WRITE_ERR,
+ EXTENT_BUFFER_NO_CHECK,
};
/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK (1 << 0)
-#define PAGE_CLEAR_DIRTY (1 << 1)
-#define PAGE_SET_WRITEBACK (1 << 2)
-#define PAGE_END_WRITEBACK (1 << 3)
-#define PAGE_SET_PRIVATE2 (1 << 4)
-#define PAGE_SET_ERROR (1 << 5)
-#define PAGE_LOCK (1 << 6)
+/* Page starts writeback, clear dirty bit and set writeback bit */
+#define PAGE_START_WRITEBACK (1 << 1)
+#define PAGE_END_WRITEBACK (1 << 2)
+#define PAGE_SET_PRIVATE2 (1 << 3)
+#define PAGE_SET_ERROR (1 << 4)
+#define PAGE_LOCK (1 << 5)
/*
* page->private values. Every page that is controlled by the extent
@@ -93,6 +94,7 @@ struct extent_buffer {
struct rw_semaphore lock;
struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
+ struct list_head release_list;
#ifdef CONFIG_BTRFS_DEBUG
struct list_head leak_list;
#endif
@@ -178,7 +180,8 @@ int btree_write_cache_pages(struct address_space *mapping,
void extent_readahead(struct readahead_control *rac);
int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
-void set_page_extent_mapped(struct page *page);
+int set_page_extent_mapped(struct page *page);
+void clear_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u64 owner_root, int level);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index bd6229fb2b6f..4a8e02f7b6c7 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -385,9 +385,12 @@ static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
}
/**
- * add_extent_mapping - add new extent map to the extent tree
+ * Add new extent map to the extent tree
+ *
* @tree: tree to insert new map in
* @em: map to insert
+ * @modified: indicates whether the given @em should be added to the
+ * modified list, which indicates the extent needs to be logged
*
* Insert @em into @tree or perform a simple forward/backward merge with
* existing mappings. The extent_map struct passed in will be inserted
@@ -574,12 +577,13 @@ static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
}
/**
- * btrfs_add_extent_mapping - add extent mapping into em_tree
- * @fs_info - used for tracepoint
- * @em_tree - the extent tree into which we want to insert the extent mapping
- * @em_in - extent we are inserting
- * @start - start of the logical range btrfs_get_extent() is requesting
- * @len - length of the logical range btrfs_get_extent() is requesting
+ * Add extent mapping into em_tree
+ *
+ * @fs_info: the filesystem
+ * @em_tree: extent tree into which we want to insert the extent mapping
+ * @em_in: extent we are inserting
+ * @start: start of the logical range btrfs_get_extent() is requesting
+ * @len: length of the logical range btrfs_get_extent() is requesting
*
* Note that @em_in's range may be different from [start, start+len),
* but they must be overlapped.
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 6ccfc019ad90..47cd3a6dc635 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -24,8 +24,10 @@
PAGE_SIZE))
/**
- * @inode - the inode we want to update the disk_i_size for
- * @new_i_size - the i_size we want to set to, 0 if we use i_size
+ * Set inode's size according to filesystem options
+ *
+ * @inode: inode we want to update the disk_i_size for
+ * @new_i_size: i_size we want to set to, 0 if we use i_size
*
* With NO_HOLES set this simply sets the disk_is_size to whatever i_size_read()
* returns as it is perfectly fine with a file that has holes without hole file
@@ -62,9 +64,11 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
}
/**
- * @inode - the inode we're modifying
- * @start - the start file offset of the file extent we've inserted
- * @len - the logical length of the file extent item
+ * Mark range within a file as having a new extent inserted
+ *
+ * @inode: inode being modified
+ * @start: start file offset of the file extent we've inserted
+ * @len: logical length of the file extent item
*
* Call when we are inserting a new file extent where there was none before.
* Does not need to call this in the case where we're replacing an existing file
@@ -88,9 +92,11 @@ int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
}
/**
- * @inode - the inode we're modifying
- * @start - the start file offset of the file extent we've inserted
- * @len - the logical length of the file extent item
+ * Mark an inode range as not having a backing extent
+ *
+ * @inode: inode being modified
+ * @start: start file offset of the file extent we've dropped
+ * @len: logical length of the file extent item
*
* Called when we drop a file extent, for example when we truncate. Doesn't
* need to be called for cases where we're replacing a file extent, like when
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 0e41459b8de6..0e155f013839 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -453,12 +453,11 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
}
/*
- * after copy_from_user, pages need to be dirtied and we need to make
- * sure holes are created between the current EOF and the start of
- * any next extents (if required).
- *
- * this also makes the decision about creating an inline extent vs
- * doing real data extents, marking pages dirty and delalloc as required.
+ * After btrfs_copy_from_user(), update the following things for delalloc:
+ * - Mark newly dirtied pages as DELALLOC in the io tree.
+ * Used to advise which range is to be written back.
+ * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
+ * - Update inode size for past EOF write
*/
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
size_t num_pages, loff_t pos, size_t write_bytes,
@@ -1370,6 +1369,12 @@ again:
goto fail;
}
+ err = set_page_extent_mapped(pages[i]);
+ if (err < 0) {
+ faili = i;
+ goto fail;
+ }
+
if (i == 0)
err = prepare_uptodate_page(inode, pages[i], pos,
force_uptodate);
@@ -1454,23 +1459,11 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
}
/*
- * It's possible the pages are dirty right now, but we don't want
- * to clean them yet because copy_from_user may catch a page fault
- * and we might have to fall back to one page at a time. If that
- * happens, we'll unlock these pages and we'd have a window where
- * reclaim could sneak in and drop the once-dirty page on the floor
- * without writing it.
- *
- * We have the pages locked and the extent range locked, so there's
- * no way someone can start IO on any dirty pages in this range.
- *
- * We'll call btrfs_dirty_pages() later on, and that will flip around
- * delalloc bits and dirty the pages as required.
+ * We should be called after prepare_pages() which should have locked
+ * all pages in the range.
*/
- for (i = 0; i < num_pages; i++) {
- set_page_extent_mapped(pages[i]);
+ for (i = 0; i < num_pages; i++)
WARN_ON(!PageLocked(pages[i]));
- }
return ret;
}
@@ -1949,8 +1942,8 @@ relock:
goto buffered;
}
- dio = __iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops,
- &btrfs_dio_ops, is_sync_kiocb(iocb));
+ dio = __iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
+ 0);
btrfs_inode_unlock(inode, ilock_flags);
@@ -1997,9 +1990,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_inode *inode = BTRFS_I(file_inode(file));
ssize_t num_written = 0;
const bool sync = iocb->ki_flags & IOCB_DSYNC;
@@ -2008,7 +1999,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
* have opened a file as writable, we have to stop this write operation
* to ensure consistency.
*/
- if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+ if (test_bit(BTRFS_FS_STATE_ERROR, &inode->root->fs_info->fs_state))
return -EROFS;
if (!(iocb->ki_flags & IOCB_DIRECT) &&
@@ -2016,7 +2007,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
return -EOPNOTSUPP;
if (sync)
- atomic_inc(&BTRFS_I(inode)->sync_writers);
+ atomic_inc(&inode->sync_writers);
if (iocb->ki_flags & IOCB_DIRECT)
num_written = btrfs_direct_write(iocb, from);
@@ -2028,14 +2019,14 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
* otherwise subsequent syncs to a file that's been synced in this
* transaction will appear to have already occurred.
*/
- spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->last_sub_trans = root->log_transid;
- spin_unlock(&BTRFS_I(inode)->lock);
+ spin_lock(&inode->lock);
+ inode->last_sub_trans = inode->root->log_transid;
+ spin_unlock(&inode->lock);
if (num_written > 0)
num_written = generic_write_sync(iocb, num_written);
if (sync)
- atomic_dec(&BTRFS_I(inode)->sync_writers);
+ atomic_dec(&inode->sync_writers);
current->backing_dev_info = NULL;
return num_written;
@@ -2177,8 +2168,12 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* commit waits for their completion, to avoid data loss if we fsync,
* the current transaction commits before the ordered extents complete
* and a power failure happens right after that.
+ *
+ * For a zoned filesystem, if a write IO uses a ZONE_APPEND command, the
+ * logical address recorded in the ordered extent may change. We need
+ * to wait for the IO to stabilize the logical address.
*/
- if (full_sync) {
+ if (full_sync || btrfs_is_zoned(fs_info)) {
ret = btrfs_wait_ordered_range(inode, start, len);
} else {
/*
@@ -2241,6 +2236,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
ret = PTR_ERR(trans);
goto out_release_extents;
}
+ trans->in_fsync = true;
ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
btrfs_release_log_ctx_extents(&ctx);
@@ -3264,8 +3260,11 @@ reserve_space:
goto out;
ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
alloc_start, bytes_to_reserve);
- if (ret)
+ if (ret) {
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state);
goto out;
+ }
ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
alloc_end - alloc_start,
i_blocksize(inode),
@@ -3622,8 +3621,7 @@ static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
return 0;
btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
- ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
- is_sync_kiocb(iocb));
+ ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 0);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
return ret;
}
@@ -3639,7 +3637,7 @@ static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
return ret;
}
- return generic_file_buffered_read(iocb, to, ret);
+ return filemap_read(iocb, to, ret);
}
const struct file_operations btrfs_file_operations = {
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4d8897879c9c..9988decd5717 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -198,7 +198,7 @@ int create_free_space_inode(struct btrfs_trans_handle *trans,
int ret;
u64 ino;
- ret = btrfs_find_free_objectid(trans->fs_info->tree_root, &ino);
+ ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino);
if (ret < 0)
return ret;
@@ -431,11 +431,22 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
int i;
for (i = 0; i < io_ctl->num_pages; i++) {
+ int ret;
+
page = find_or_create_page(inode->i_mapping, i, mask);
if (!page) {
io_ctl_drop_pages(io_ctl);
return -ENOMEM;
}
+
+ ret = set_page_extent_mapped(page);
+ if (ret < 0) {
+ unlock_page(page);
+ put_page(page);
+ io_ctl_drop_pages(io_ctl);
+ return ret;
+ }
+
io_ctl->pages[i] = page;
if (uptodate && !PageUptodate(page)) {
btrfs_readpage(NULL, page);
@@ -455,10 +466,8 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
}
}
- for (i = 0; i < io_ctl->num_pages; i++) {
+ for (i = 0; i < io_ctl->num_pages; i++)
clear_page_dirty_for_io(io_ctl->pages[i]);
- set_page_extent_mapped(io_ctl->pages[i]);
- }
return 0;
}
@@ -775,8 +784,10 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
while (num_entries) {
e = kmem_cache_zalloc(btrfs_free_space_cachep,
GFP_NOFS);
- if (!e)
+ if (!e) {
+ ret = -ENOMEM;
goto free_cache;
+ }
ret = io_ctl_read_entry(&io_ctl, e, &type);
if (ret) {
@@ -785,6 +796,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
}
if (!e->bytes) {
+ ret = -1;
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
}
@@ -805,6 +817,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
e->bitmap = kmem_cache_zalloc(
btrfs_free_space_bitmap_cachep, GFP_NOFS);
if (!e->bitmap) {
+ ret = -ENOMEM;
kmem_cache_free(
btrfs_free_space_cachep, e);
goto free_cache;
@@ -1295,11 +1308,14 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
}
/**
- * __btrfs_write_out_cache - write out cached info to an inode
- * @root - the root the inode belongs to
- * @ctl - the free space cache we are going to write out
- * @block_group - the block_group for this cache if it belongs to a block_group
- * @trans - the trans handle
+ * Write out cached info to an inode
+ *
+ * @root: root the inode belongs to
+ * @inode: freespace inode we are writing out
+ * @ctl: free space cache we are going to write out
+ * @block_group: block_group for this cache if it belongs to a block_group
+ * @io_ctl: holds context for the io
+ * @trans: the trans handle
*
* This function writes out a free space cache struct to disk for quick recovery
* on mount. This will return 0 if it was successful in writing the cache out,
@@ -2461,6 +2477,8 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
int ret = 0;
u64 filter_bytes = bytes;
+ ASSERT(!btrfs_is_zoned(fs_info));
+
info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
if (!info)
return -ENOMEM;
@@ -2518,11 +2536,54 @@ out:
return ret;
}
+static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ u64 bytenr, u64 size, bool used)
+{
+ struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+ u64 offset = bytenr - block_group->start;
+ u64 to_free, to_unusable;
+
+ spin_lock(&ctl->tree_lock);
+ if (!used)
+ to_free = size;
+ else if (offset >= block_group->alloc_offset)
+ to_free = size;
+ else if (offset + size <= block_group->alloc_offset)
+ to_free = 0;
+ else
+ to_free = offset + size - block_group->alloc_offset;
+ to_unusable = size - to_free;
+
+ ctl->free_space += to_free;
+ /*
+ * If the block group is read-only, we should account freed space into
+ * bytes_readonly.
+ */
+ if (!block_group->ro)
+ block_group->zone_unusable += to_unusable;
+ spin_unlock(&ctl->tree_lock);
+ if (!used) {
+ spin_lock(&block_group->lock);
+ block_group->alloc_offset -= size;
+ spin_unlock(&block_group->lock);
+ }
+
+ /* The whole region is now unusable. Mark it as unused and reclaim */
+ if (block_group->zone_unusable == block_group->length)
+ btrfs_mark_bg_unused(block_group);
+
+ return 0;
+}
+
int btrfs_add_free_space(struct btrfs_block_group *block_group,
u64 bytenr, u64 size)
{
enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
+ if (btrfs_is_zoned(block_group->fs_info))
+ return __btrfs_add_free_space_zoned(block_group, bytenr, size,
+ true);
+
if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC))
trim_state = BTRFS_TRIM_STATE_TRIMMED;
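For a zoned block group, anything behind the zone write pointer (alloc_offset) cannot be rewritten until the zone is reset, so freeing such a range only grows zone_unusable rather than free_space. Below is a minimal userspace sketch of the to_free/to_unusable split used above; struct bg and its fields merely mirror the kernel structures for illustration, and locking plus the read-only case are omitted.

/* Illustrative only: mirrors the to_free/to_unusable split shown above. */
#include <stdint.h>
#include <stdio.h>

struct bg {
	uint64_t start;
	uint64_t alloc_offset;   /* zone write pointer, relative to start */
	uint64_t free_space;
	uint64_t zone_unusable;
};

static void add_free_space_zoned(struct bg *bg, uint64_t bytenr, uint64_t size,
				 int used)
{
	uint64_t offset = bytenr - bg->start;
	uint64_t to_free, to_unusable;

	if (!used)                                  /* never written, fully reusable */
		to_free = size;
	else if (offset >= bg->alloc_offset)        /* entirely beyond the write pointer */
		to_free = size;
	else if (offset + size <= bg->alloc_offset) /* entirely behind the write pointer */
		to_free = 0;
	else                                        /* straddles the write pointer */
		to_free = offset + size - bg->alloc_offset;
	to_unusable = size - to_free;

	bg->free_space += to_free;
	bg->zone_unusable += to_unusable;
	if (!used)
		bg->alloc_offset -= size;
}

int main(void)
{
	struct bg bg = { .start = 0, .alloc_offset = 1024 * 1024 };

	/* Freeing 256K written behind the write pointer: all of it is unusable. */
	add_free_space_zoned(&bg, 512 * 1024, 256 * 1024, 1);
	printf("free=%llu unusable=%llu\n",
	       (unsigned long long)bg.free_space,
	       (unsigned long long)bg.zone_unusable);
	return 0;
}

Freeing a range that was never written (used == false) additionally pulls the write pointer back, which is what lets a fully unusable zoned block group be marked unused and reclaimed, as the hunk above does via btrfs_mark_bg_unused().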
@@ -2531,6 +2592,16 @@ int btrfs_add_free_space(struct btrfs_block_group *block_group,
bytenr, size, trim_state);
}
+int btrfs_add_free_space_unused(struct btrfs_block_group *block_group,
+ u64 bytenr, u64 size)
+{
+ if (btrfs_is_zoned(block_group->fs_info))
+ return __btrfs_add_free_space_zoned(block_group, bytenr, size,
+ false);
+
+ return btrfs_add_free_space(block_group, bytenr, size);
+}
+
/*
* This is a subtle distinction because when adding free space back in general,
* we want it to be added as untrimmed for async. But in the case where we add
@@ -2541,6 +2612,10 @@ int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
{
enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
+ if (btrfs_is_zoned(block_group->fs_info))
+ return __btrfs_add_free_space_zoned(block_group, bytenr, size,
+ true);
+
if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) ||
btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
trim_state = BTRFS_TRIM_STATE_TRIMMED;
@@ -2558,6 +2633,23 @@ int btrfs_remove_free_space(struct btrfs_block_group *block_group,
int ret;
bool re_search = false;
+ if (btrfs_is_zoned(block_group->fs_info)) {
+ /*
+ * This can happen with conventional zones when replaying the log.
+ * Since the allocation info of tree-log nodes is not recorded in
+ * the extent tree, calculate_alloc_pointer() failed to advance the
+ * allocation pointer past the last allocated tree-log node blocks.
+ *
+ * This function is called from
+ * btrfs_pin_extent_for_log_replay() when replaying the log.
+ * Advance the pointer not to overwrite the tree-log nodes.
+ */
+ if (block_group->alloc_offset < offset + bytes)
+ block_group->alloc_offset = offset + bytes;
+ return 0;
+ }
+
spin_lock(&ctl->tree_lock);
again:
@@ -2652,6 +2744,16 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
struct rb_node *n;
int count = 0;
+ /*
+ * Zoned btrfs does not use the free space tree or clusters. Just
+ * print out the free space after the allocation offset.
+ */
+ if (btrfs_is_zoned(fs_info)) {
+ btrfs_info(fs_info, "free space %llu",
+ block_group->length - block_group->alloc_offset);
+ return;
+ }
+
spin_lock(&ctl->tree_lock);
for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
info = rb_entry(n, struct btrfs_free_space, offset_index);
@@ -2704,8 +2806,10 @@ static void __btrfs_return_cluster_to_free_space(
struct rb_node *node;
spin_lock(&cluster->lock);
- if (cluster->block_group != block_group)
- goto out;
+ if (cluster->block_group != block_group) {
+ spin_unlock(&cluster->lock);
+ return;
+ }
cluster->block_group = NULL;
cluster->window_start = 0;
@@ -2743,8 +2847,6 @@ static void __btrfs_return_cluster_to_free_space(
entry->offset, &entry->offset_index, bitmap);
}
cluster->root = RB_ROOT;
-
-out:
spin_unlock(&cluster->lock);
btrfs_put_block_group(block_group);
}
@@ -2845,6 +2947,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
u64 align_gap_len = 0;
enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
+ ASSERT(!btrfs_is_zoned(block_group->fs_info));
+
spin_lock(&ctl->tree_lock);
entry = find_free_space(ctl, &offset, &bytes_search,
block_group->full_stripe_len, max_extent_size);
@@ -2976,6 +3080,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
struct rb_node *node;
u64 ret = 0;
+ ASSERT(!btrfs_is_zoned(block_group->fs_info));
+
spin_lock(&cluster->lock);
if (bytes > cluster->max_size)
goto out;
@@ -3024,8 +3130,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
entry->bytes -= bytes;
}
- if (entry->bytes == 0)
- rb_erase(&entry->offset_index, &cluster->root);
break;
}
out:
@@ -3042,7 +3146,10 @@ out:
ctl->free_space -= bytes;
if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
+
+ spin_lock(&cluster->lock);
if (entry->bytes == 0) {
+ rb_erase(&entry->offset_index, &cluster->root);
ctl->free_extents--;
if (entry->bitmap) {
kmem_cache_free(btrfs_free_space_bitmap_cachep,
@@ -3055,6 +3162,7 @@ out:
kmem_cache_free(btrfs_free_space_cachep, entry);
}
+ spin_unlock(&cluster->lock);
spin_unlock(&ctl->tree_lock);
return ret;
@@ -3752,6 +3860,8 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group,
int ret;
u64 rem = 0;
+ ASSERT(!btrfs_is_zoned(block_group->fs_info));
+
*trimmed = 0;
spin_lock(&block_group->lock);
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index ecb09a02d544..1f23088d43f9 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -107,6 +107,8 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
enum btrfs_trim_state trim_state);
int btrfs_add_free_space(struct btrfs_block_group *block_group,
u64 bytenr, u64 size);
+int btrfs_add_free_space_unused(struct btrfs_block_group *block_group,
+ u64 bytenr, u64 size);
int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
u64 bytenr, u64 size);
int btrfs_remove_free_space(struct btrfs_block_group *block_group,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a8e0a6b038d3..7cdf65be3707 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -50,6 +50,7 @@
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
+#include "zoned.h"
struct btrfs_iget_args {
u64 ino;
@@ -692,8 +693,7 @@ cont:
NULL,
clear_flags,
PAGE_UNLOCK |
- PAGE_CLEAR_DIRTY |
- PAGE_SET_WRITEBACK |
+ PAGE_START_WRITEBACK |
page_error_op |
PAGE_END_WRITEBACK);
@@ -917,7 +917,6 @@ retry:
ins.objectid,
async_extent->ram_size,
ins.offset,
- BTRFS_ORDERED_COMPRESSED,
async_extent->compress_type);
if (ret) {
btrfs_drop_extent_cache(inode, async_extent->start,
@@ -934,8 +933,7 @@ retry:
async_extent->start +
async_extent->ram_size - 1,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
- PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
- PAGE_SET_WRITEBACK);
+ PAGE_UNLOCK | PAGE_START_WRITEBACK);
if (btrfs_submit_compressed_write(inode, async_extent->start,
async_extent->ram_size,
ins.objectid,
@@ -971,9 +969,8 @@ out_free:
NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
- PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
- PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
- PAGE_SET_ERROR);
+ PAGE_UNLOCK | PAGE_START_WRITEBACK |
+ PAGE_END_WRITEBACK | PAGE_SET_ERROR);
free_async_extent_pages(async_extent);
kfree(async_extent);
goto again;
@@ -1071,8 +1068,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
- PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
- PAGE_END_WRITEBACK);
+ PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
*nr_written = *nr_written +
(end - start + PAGE_SIZE) / PAGE_SIZE;
*page_started = 1;
@@ -1127,7 +1123,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
free_extent_map(em);
ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
- ram_size, cur_alloc_size, 0);
+ ram_size, cur_alloc_size,
+ BTRFS_ORDERED_REGULAR);
if (ret)
goto out_drop_extent_cache;
@@ -1194,8 +1191,7 @@ out_reserve:
out_unlock:
clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
- page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
- PAGE_END_WRITEBACK;
+ page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;
/*
* If we reserved an extent for our delalloc range (or a subrange) and
* failed to create the respective ordered extent, then it means that
@@ -1320,9 +1316,8 @@ static int cow_file_range_async(struct btrfs_inode *inode,
unsigned clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING;
- unsigned long page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
- PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
- PAGE_SET_ERROR;
+ unsigned long page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK |
+ PAGE_END_WRITEBACK | PAGE_SET_ERROR;
extent_clear_unlock_delalloc(inode, start, end, locked_page,
clear_bits, page_ops);
@@ -1399,6 +1394,29 @@ static int cow_file_range_async(struct btrfs_inode *inode,
return 0;
}
+static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
+ struct page *locked_page, u64 start,
+ u64 end, int *page_started,
+ unsigned long *nr_written)
+{
+ int ret;
+
+ ret = cow_file_range(inode, locked_page, start, end, page_started,
+ nr_written, 0);
+ if (ret)
+ return ret;
+
+ if (*page_started)
+ return 0;
+
+ __set_page_dirty_nobuffers(locked_page);
+ account_page_redirty(locked_page);
+ extent_write_locked_range(&inode->vfs_inode, start, end, WB_SYNC_ALL);
+ *page_started = 1;
+
+ return 0;
+}
+
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes)
{
@@ -1519,8 +1537,7 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, PAGE_UNLOCK |
- PAGE_CLEAR_DIRTY |
- PAGE_SET_WRITEBACK |
+ PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
return -ENOMEM;
}
@@ -1657,9 +1674,6 @@ next_slot:
*/
btrfs_release_path(path);
- /* If extent is RO, we must COW it */
- if (btrfs_extent_readonly(fs_info, disk_bytenr))
- goto out_check;
ret = btrfs_cross_ref_exist(root, ino,
found_key.offset -
extent_offset, disk_bytenr, false);
@@ -1706,6 +1720,7 @@ next_slot:
WARN_ON_ONCE(freespace_inode);
goto out_check;
}
+ /* If the extent's block group is RO, we must COW */
if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
goto out_check;
nocow = true;
@@ -1842,8 +1857,7 @@ error:
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
- PAGE_CLEAR_DIRTY |
- PAGE_SET_WRITEBACK |
+ PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
btrfs_free_path(path);
return ret;
@@ -1878,17 +1892,24 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
{
int ret;
int force_cow = need_force_cow(inode, start, end);
+ const bool zoned = btrfs_is_zoned(inode->root->fs_info);
if (inode->flags & BTRFS_INODE_NODATACOW && !force_cow) {
+ ASSERT(!zoned);
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 1, nr_written);
} else if (inode->flags & BTRFS_INODE_PREALLOC && !force_cow) {
+ ASSERT(!zoned);
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
} else if (!inode_can_compress(inode) ||
!inode_need_compress(inode, start, end)) {
- ret = cow_file_range(inode, locked_page, start, end,
- page_started, nr_written, 1);
+ if (zoned)
+ ret = run_delalloc_zoned(inode, locked_page, start, end,
+ page_started, nr_written);
+ else
+ ret = cow_file_range(inode, locked_page, start, end,
+ page_started, nr_written, 1);
} else {
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
ret = cow_file_range_async(inode, wbc, locked_page, start, end,
@@ -2183,9 +2204,10 @@ int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 logical = bio->bi_iter.bi_sector << 9;
+ struct extent_map *em;
u64 length = 0;
u64 map_length;
- int ret;
+ int ret = 0;
struct btrfs_io_geometry geom;
if (bio_flags & EXTENT_BIO_COMPRESSED)
@@ -2193,14 +2215,19 @@ int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
length = bio->bi_iter.bi_size;
map_length = length;
- ret = btrfs_get_io_geometry(fs_info, btrfs_op(bio), logical, map_length,
- &geom);
+ em = btrfs_get_chunk_map(fs_info, logical, map_length);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+ ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), logical,
+ map_length, &geom);
if (ret < 0)
- return ret;
+ goto out;
if (geom.len < length + size)
- return 1;
- return 0;
+ ret = 1;
+out:
+ free_extent_map(em);
+ return ret;
}
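The check above answers one question: starting at @logical, does the remaining length of the current stripe (geom.len) still cover the bio after it grows by @size bytes? A tiny sketch of that predicate follows; the modulo-based geometry and the stripe_len constant are made-up simplifications, not how btrfs_get_io_geometry() actually computes geom.len.

#include <stdint.h>
#include <stdio.h>

/* Remaining bytes in the stripe that contains @logical. */
static uint64_t stripe_len_remaining(uint64_t logical, uint64_t stripe_len)
{
	return stripe_len - (logical % stripe_len);
}

/* 0: the grown bio still fits in one stripe, 1: it would cross a boundary. */
static int bio_would_cross_stripe(uint64_t logical, uint64_t bio_len,
				  uint64_t add, uint64_t stripe_len)
{
	return stripe_len_remaining(logical, stripe_len) < bio_len + add;
}

int main(void)
{
	const uint64_t stripe_len = 64 * 1024;	/* assumed stripe size */

	/* 48K already queued at 16K into a stripe; adding 4K would cross. */
	printf("cross=%d\n",
	       bio_would_cross_stripe(16 * 1024, 48 * 1024, 4 * 1024, stripe_len));
	return 0;
}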
/*
@@ -2217,6 +2244,119 @@ static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
}
+bool btrfs_bio_fits_in_ordered_extent(struct page *page, struct bio *bio,
+ unsigned int size)
+{
+ struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct btrfs_ordered_extent *ordered;
+ u64 len = bio->bi_iter.bi_size + size;
+ bool ret = true;
+
+ ASSERT(btrfs_is_zoned(fs_info));
+ ASSERT(fs_info->max_zone_append_size > 0);
+ ASSERT(bio_op(bio) == REQ_OP_ZONE_APPEND);
+
+ /* Ordered extent not yet created, so we're good */
+ ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
+ if (!ordered)
+ return ret;
+
+ if ((bio->bi_iter.bi_sector << SECTOR_SHIFT) + len >
+ ordered->disk_bytenr + ordered->disk_num_bytes)
+ ret = false;
+
+ btrfs_put_ordered_extent(ordered);
+
+ return ret;
+}
+
+static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
+ struct bio *bio, loff_t file_offset)
+{
+ struct btrfs_ordered_extent *ordered;
+ struct extent_map *em = NULL, *em_new = NULL;
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ u64 len = bio->bi_iter.bi_size;
+ u64 end = start + len;
+ u64 ordered_end;
+ u64 pre, post;
+ int ret = 0;
+
+ ordered = btrfs_lookup_ordered_extent(inode, file_offset);
+ if (WARN_ON_ONCE(!ordered))
+ return BLK_STS_IOERR;
+
+ /* No need to split */
+ if (ordered->disk_num_bytes == len)
+ goto out;
+
+ /* We cannot split an ordered extent that has already been end_bio'd */
+ if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* We cannot split a compressed ordered extent */
+ if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ordered_end = ordered->disk_bytenr + ordered->disk_num_bytes;
+ /* bio must be in one ordered extent */
+ if (WARN_ON_ONCE(start < ordered->disk_bytenr || end > ordered_end)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Checksum list should be empty */
+ if (WARN_ON_ONCE(!list_empty(&ordered->list))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pre = start - ordered->disk_bytenr;
+ post = ordered_end - end;
+
+ ret = btrfs_split_ordered_extent(ordered, pre, post);
+ if (ret)
+ goto out;
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, ordered->file_offset, len);
+ if (!em) {
+ read_unlock(&em_tree->lock);
+ ret = -EIO;
+ goto out;
+ }
+ read_unlock(&em_tree->lock);
+
+ ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
+ /*
+ * We cannot reuse the existing extent map here but have to create a
+ * new one, as unpin_extent_cache() expects the start of the extent
+ * map to be the logical offset of the file, which no longer holds
+ * true after splitting.
+ */
+ em_new = create_io_em(inode, em->start + pre, len,
+ em->start + pre, em->block_start + pre, len,
+ len, len, BTRFS_COMPRESS_NONE,
+ BTRFS_ORDERED_REGULAR);
+ if (IS_ERR(em_new)) {
+ ret = PTR_ERR(em_new);
+ goto out;
+ }
+ free_extent_map(em_new);
+
+out:
+ free_extent_map(em);
+ btrfs_put_ordered_extent(ordered);
+
+ return errno_to_blk_status(ret);
+}
+
/*
* extent_io.c submission hook. This does the right thing for csum calculation
* on write, or reading the csums from the tree before a read.
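With zone append, each bio lands at a device-chosen location, so extract_ordered_extent() above trims the ordered extent down to exactly the range covered by one bio and clones the leftover pieces in front of it (pre) and behind it (post). A rough, self-contained sketch of that boundary arithmetic follows; the flattened struct oe and struct bio here are hypothetical stand-ins for the kernel structures, with example numbers only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct oe  { uint64_t disk_bytenr, disk_num_bytes; };
struct bio { uint64_t sector, size; };	/* sector in 512-byte units */

int main(void)
{
	struct oe  oe  = { .disk_bytenr = 1 << 20, .disk_num_bytes = 256 * 1024 };
	struct bio bio = { .sector = (1 << 20) / 512 + 64, .size = 64 * 1024 };

	uint64_t start = bio.sector << 9;		/* SECTOR_SHIFT */
	uint64_t end = start + bio.size;
	uint64_t oe_end = oe.disk_bytenr + oe.disk_num_bytes;

	/* The bio must sit fully inside the ordered extent. */
	assert(start >= oe.disk_bytenr && end <= oe_end);

	uint64_t pre = start - oe.disk_bytenr;	/* cloned in front */
	uint64_t post = oe_end - end;		/* cloned behind */

	printf("pre=%llu post=%llu kept=%llu\n",
	       (unsigned long long)pre, (unsigned long long)post,
	       (unsigned long long)(oe.disk_num_bytes - pre - post));
	return 0;
}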
@@ -2252,7 +2392,16 @@ blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
if (btrfs_is_free_space_inode(BTRFS_I(inode)))
metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
- if (bio_op(bio) != REQ_OP_WRITE) {
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ struct page *page = bio_first_bvec_all(bio)->bv_page;
+ loff_t file_offset = page_offset(page);
+
+ ret = extract_ordered_extent(BTRFS_I(inode), bio, file_offset);
+ if (ret)
+ goto out;
+ }
+
+ if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
if (ret)
goto out;
@@ -2754,6 +2903,9 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
+ if (ordered_extent->disk)
+ btrfs_rewrite_logical_zoned(ordered_extent);
+
btrfs_free_io_failure_record(inode, start, end);
if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
@@ -3103,14 +3255,16 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
}
/**
- * btrfs_wait_on_delayed_iputs - wait on the delayed iputs to be done running
- * @fs_info - the fs_info for this fs
- * @return - EINTR if we were killed, 0 if nothing's pending
+ * Wait for all delayed iputs to be flushed
+ *
+ * @fs_info: the filesystem
*
* This will wait on any delayed iputs that are currently running with KILLABLE
* set. Once they are all done running we will return, unless we are killed in
* which case we return EINTR. This helps in user operations like fallocate etc
* that might get blocked on the iputs.
+ *
+ * Return EINTR if we were killed, 0 if nothing's pending
*/
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
@@ -4720,6 +4874,9 @@ again:
ret = -ENOMEM;
goto out;
}
+ ret = set_page_extent_mapped(page);
+ if (ret < 0)
+ goto out_unlock;
if (!PageUptodate(page)) {
ret = btrfs_readpage(NULL, page);
@@ -4737,7 +4894,6 @@ again:
wait_on_page_writeback(page);
lock_extent_bits(io_tree, block_start, block_end, &cached_state);
- set_page_extent_mapped(page);
ordered = btrfs_lookup_ordered_extent(inode, block_start);
if (ordered) {
@@ -5011,6 +5167,15 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
btrfs_drew_write_unlock(&root->snapshot_lock);
btrfs_end_transaction(trans);
} else {
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
+ if (btrfs_is_zoned(fs_info)) {
+ ret = btrfs_wait_ordered_range(inode,
+ ALIGN(newsize, fs_info->sectorsize),
+ (u64)-1);
+ if (ret)
+ return ret;
+ }
/*
* We're truncating a file that used to have good data down to
@@ -5045,7 +5210,8 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
return ret;
}
-static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
+static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5054,7 +5220,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
if (btrfs_root_readonly(root))
return -EROFS;
- err = setattr_prepare(dentry, attr);
+ err = setattr_prepare(&init_user_ns, dentry, attr);
if (err)
return err;
@@ -5065,12 +5231,13 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid) {
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
inode_inc_iversion(inode);
err = btrfs_dirty_inode(inode);
if (!err && attr->ia_valid & ATTR_MODE)
- err = posix_acl_chmod(inode, inode->i_mode);
+ err = posix_acl_chmod(&init_user_ns, inode,
+ inode->i_mode);
}
return err;
@@ -5916,7 +6083,7 @@ static int btrfs_dirty_inode(struct inode *inode)
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
- if (ret && ret == -ENOSPC) {
+ if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans);
trans = btrfs_start_transaction(root, 1);
@@ -6190,7 +6357,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
if (ret != 0)
goto fail_unlock;
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
inode_set_bytes(inode, 0);
inode->i_mtime = current_time(inode);
@@ -6351,8 +6518,8 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
return err;
}
-static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
+static int btrfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
@@ -6371,7 +6538,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_objectid(root, &objectid);
+ err = btrfs_get_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@@ -6415,8 +6582,8 @@ out_unlock:
return err;
}
-static int btrfs_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, bool excl)
+static int btrfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
@@ -6435,7 +6602,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_objectid(root, &objectid);
+ err = btrfs_get_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@@ -6560,7 +6727,8 @@ fail:
return err;
}
-static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int btrfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct inode *inode = NULL;
@@ -6579,7 +6747,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_objectid(root, &objectid);
+ err = btrfs_get_free_objectid(root, &objectid);
if (err)
goto out_fail;
@@ -7103,9 +7271,6 @@ static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode,
* @strict: if true, omit optimizations that might force us into unnecessary
* cow. e.g., don't trust generation number.
*
- * This function will flush ordered extents in the range to ensure proper
- * nocow checks for (nowait == false) case.
- *
* Return:
* >0 and update @len if we can do nocow write
* 0 if we can't do nocow write
@@ -7613,6 +7778,9 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
iomap->bdev = fs_info->fs_devices->latest_bdev;
iomap->length = len;
+ if (write && btrfs_use_zone_append(BTRFS_I(inode), em))
+ iomap->flags |= IOMAP_F_ZONE_APPEND;
+
free_extent_map(em);
return 0;
@@ -7682,7 +7850,7 @@ static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
if (!refcount_dec_and_test(&dip->refs))
return;
- if (bio_op(dip->dio_bio) == REQ_OP_WRITE) {
+ if (btrfs_op(dip->dio_bio) == BTRFS_MAP_WRITE) {
__endio_write_update_ordered(BTRFS_I(dip->inode),
dip->logical_offset,
dip->bytes,
@@ -7797,10 +7965,8 @@ static void __endio_write_update_ordered(struct btrfs_inode *inode,
NULL);
btrfs_queue_work(wq, &ordered->work);
}
- /*
- * If btrfs_dec_test_ordered_pending does not find any ordered
- * extent in the range, we can exit.
- */
+
+ /* No ordered extent found in the range, exit */
if (ordered_offset == last_offset)
return;
/*
@@ -7841,6 +8007,8 @@ static void btrfs_end_dio_bio(struct bio *bio)
if (err)
dip->dio_bio->bi_status = err;
+ btrfs_record_physical_zoned(dip->inode, dip->logical_offset, bio);
+
bio_put(bio);
btrfs_dio_private_put(dip);
}
@@ -7850,7 +8018,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_dio_private *dip = bio->bi_private;
- bool write = bio_op(bio) == REQ_OP_WRITE;
+ bool write = btrfs_op(bio) == BTRFS_MAP_WRITE;
blk_status_t ret;
/* Check btrfs_submit_bio_hook() for rules about async submit. */
@@ -7900,7 +8068,7 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
struct inode *inode,
loff_t file_offset)
{
- const bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
+ const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
size_t dip_size;
struct btrfs_dio_private *dip;
@@ -7930,7 +8098,7 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
struct bio *dio_bio, loff_t file_offset)
{
- const bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
+ const bool write = (btrfs_op(dio_bio) == BTRFS_MAP_WRITE);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
BTRFS_BLOCK_GROUP_RAID56_MASK);
@@ -7941,10 +8109,12 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
u64 submit_len;
int clone_offset = 0;
int clone_len;
+ u64 logical;
int ret;
blk_status_t status;
struct btrfs_io_geometry geom;
struct btrfs_dio_data *dio_data = iomap->private;
+ struct extent_map *em = NULL;
dip = btrfs_create_dio_private(dio_bio, inode, file_offset);
if (!dip) {
@@ -7973,12 +8143,18 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
submit_len = dio_bio->bi_iter.bi_size;
do {
- ret = btrfs_get_io_geometry(fs_info, btrfs_op(dio_bio),
- start_sector << 9, submit_len,
- &geom);
+ logical = start_sector << 9;
+ em = btrfs_get_chunk_map(fs_info, logical, submit_len);
+ if (IS_ERR(em)) {
+ status = errno_to_blk_status(PTR_ERR(em));
+ em = NULL;
+ goto out_err_em;
+ }
+ ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(dio_bio),
+ logical, submit_len, &geom);
if (ret) {
status = errno_to_blk_status(ret);
- goto out_err;
+ goto out_err_em;
}
ASSERT(geom.len <= INT_MAX);
@@ -7993,6 +8169,19 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
+ WARN_ON_ONCE(write && btrfs_is_zoned(fs_info) &&
+ fs_info->max_zone_append_size &&
+ bio_op(bio) != REQ_OP_ZONE_APPEND);
+
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ status = extract_ordered_extent(BTRFS_I(inode), bio,
+ file_offset);
+ if (status) {
+ bio_put(bio);
+ goto out_err;
+ }
+ }
+
ASSERT(submit_len >= clone_len);
submit_len -= clone_len;
@@ -8023,19 +8212,24 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
bio_put(bio);
if (submit_len > 0)
refcount_dec(&dip->refs);
- goto out_err;
+ goto out_err_em;
}
dio_data->submitted += clone_len;
clone_offset += clone_len;
start_sector += clone_len >> 9;
file_offset += clone_len;
+
+ free_extent_map(em);
} while (submit_len > 0);
return BLK_QC_T_NONE;
+out_err_em:
+ free_extent_map(em);
out_err:
dip->dio_bio->bi_status = status;
btrfs_dio_private_put(dip);
+
return BLK_QC_T_NONE;
}
@@ -8117,7 +8311,7 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
int ret = try_release_extent_mapping(page, gfp_flags);
if (ret == 1)
- detach_page_private(page);
+ clear_page_extent_mapped(page);
return ret;
}
@@ -8186,8 +8380,9 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
if (!inode_evicting)
lock_extent_bits(tree, page_start, page_end, &cached_state);
-again:
+
start = page_start;
+again:
ordered = btrfs_lookup_ordered_range(inode, start, page_end - start + 1);
if (ordered) {
found_ordered = true;
@@ -8276,7 +8471,7 @@ again:
}
ClearPageChecked(page);
- detach_page_private(page);
+ clear_page_extent_mapped(page);
}
/*
@@ -8355,7 +8550,12 @@ again:
wait_on_page_writeback(page);
lock_extent_bits(io_tree, page_start, page_end, &cached_state);
- set_page_extent_mapped(page);
+ ret2 = set_page_extent_mapped(page);
+ if (ret2 < 0) {
+ ret = vmf_error(ret2);
+ unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
+ goto out_unlock;
+ }
/*
* we can't set the delalloc bits if there are pending ordered
@@ -8592,15 +8792,18 @@ out:
*/
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root,
- struct btrfs_root *parent_root,
- u64 new_dirid)
+ struct btrfs_root *parent_root)
{
struct inode *inode;
int err;
u64 index = 0;
+ u64 ino;
+
+ err = btrfs_get_free_objectid(new_root, &ino);
+ if (err < 0)
+ return err;
- inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
- new_dirid, new_dirid,
+ inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, ino, ino,
S_IFDIR | (~current_umask() & S_IRWXUGO),
&index);
if (IS_ERR(inode))
@@ -8805,7 +9008,7 @@ int __init btrfs_init_cachep(void)
btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
PAGE_SIZE, PAGE_SIZE,
- SLAB_RED_ZONE, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_free_space_bitmap_cachep)
goto fail;
@@ -8815,7 +9018,8 @@ fail:
return -ENOMEM;
}
-static int btrfs_getattr(const struct path *path, struct kstat *stat,
+static int btrfs_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
u64 delalloc_bytes;
@@ -8841,7 +9045,7 @@ static int btrfs_getattr(const struct path *path, struct kstat *stat,
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
spin_lock(&BTRFS_I(inode)->lock);
@@ -9079,7 +9283,7 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
u64 objectid;
u64 index;
- ret = btrfs_find_free_objectid(root, &objectid);
+ ret = btrfs_get_free_objectid(root, &objectid);
if (ret)
return ret;
@@ -9332,9 +9536,9 @@ out_notrans:
return ret;
}
-static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int btrfs_rename2(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
@@ -9486,11 +9690,11 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
return start_delalloc_inodes(root, &wbc, true, false);
}
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
bool in_reclaim_context)
{
struct writeback_control wbc = {
- .nr_to_write = (nr == U64_MAX) ? LONG_MAX : (unsigned long)nr,
+ .nr_to_write = nr,
.sync_mode = WB_SYNC_NONE,
.range_start = 0,
.range_end = LLONG_MAX,
@@ -9507,12 +9711,12 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
mutex_lock(&fs_info->delalloc_root_mutex);
spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
- while (!list_empty(&splice) && nr) {
+ while (!list_empty(&splice)) {
/*
* Reset nr_to_write here so we know that we're doing a full
* flush.
*/
- if (nr == U64_MAX)
+ if (nr == LONG_MAX)
wbc.nr_to_write = LONG_MAX;
root = list_first_entry(&splice, struct btrfs_root,
@@ -9542,8 +9746,8 @@ out:
return ret;
}
-static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
+static int btrfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
@@ -9575,7 +9779,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (IS_ERR(trans))
return PTR_ERR(trans);
- err = btrfs_find_free_objectid(root, &objectid);
+ err = btrfs_get_free_objectid(root, &objectid);
if (err)
goto out_unlock;
@@ -9673,6 +9877,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
struct btrfs_path *path;
u64 start = ins->objectid;
u64 len = ins->offset;
+ int qgroup_released;
int ret;
memset(&stack_fi, 0, sizeof(stack_fi));
@@ -9685,16 +9890,16 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
/* Encryption and other encoding is reserved and all 0 */
- ret = btrfs_qgroup_release_data(inode, file_offset, len);
- if (ret < 0)
- return ERR_PTR(ret);
+ qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
+ if (qgroup_released < 0)
+ return ERR_PTR(qgroup_released);
if (trans) {
ret = insert_reserved_file_extent(trans, inode,
file_offset, &stack_fi,
- true, ret);
+ true, qgroup_released);
if (ret)
- return ERR_PTR(ret);
+ goto free_qgroup;
return trans;
}
@@ -9705,21 +9910,35 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
extent_info.file_offset = file_offset;
extent_info.extent_buf = (char *)&stack_fi;
extent_info.is_new_extent = true;
- extent_info.qgroup_reserved = ret;
+ extent_info.qgroup_reserved = qgroup_released;
extent_info.insertions = 0;
path = btrfs_alloc_path();
- if (!path)
- return ERR_PTR(-ENOMEM);
+ if (!path) {
+ ret = -ENOMEM;
+ goto free_qgroup;
+ }
ret = btrfs_replace_file_extents(&inode->vfs_inode, path, file_offset,
file_offset + len - 1, &extent_info,
&trans);
btrfs_free_path(path);
if (ret)
- return ERR_PTR(ret);
-
+ goto free_qgroup;
return trans;
+
+free_qgroup:
+ /*
+ * We have released the qgroup data range at the beginning of the
+ * function, and normally the qgroup_released bytes will be freed when
+ * the transaction commits. But if we error out early, we have to free
+ * what we have released, or we leak the qgroup data reservation.
+ */
+ btrfs_qgroup_free_refroot(inode->root->fs_info,
+ inode->root->root_key.objectid, qgroup_released,
+ BTRFS_QGROUP_RSV_DATA);
+ return ERR_PTR(ret);
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
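The new free_qgroup label reflects a release-early/free-on-error shape: the data reservation is released up front in insert_prealloc_file_extent(), so any later failure must explicitly hand the same byte count back or the reservation leaks. The toy sketch below only illustrates that shape; every name in it is made up and none of it is a btrfs API.

#include <stdio.h>

static long released;	/* stands in for the qgroup reservation accounting */

static long release_data(long bytes) { released += bytes; return bytes; }
static void free_refroot(long bytes) { released -= bytes; }
static int do_insert(int fail) { return fail ? -1 : 0; }

static int prealloc(long bytes, int fail)
{
	long qgroup_released = release_data(bytes);	/* released early */
	int ret;

	ret = do_insert(fail);
	if (ret)
		goto free_qgroup;
	return 0;

free_qgroup:
	/* Undo the early release so nothing is leaked on the error path. */
	free_refroot(qgroup_released);
	return ret;
}

int main(void)
{
	prealloc(4096, 1);
	printf("leaked=%ld\n", released);	/* prints 0 */
	return 0;
}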
@@ -9877,7 +10096,8 @@ static int btrfs_set_page_dirty(struct page *page)
return __set_page_dirty_nobuffers(page);
}
-static int btrfs_permission(struct inode *inode, int mask)
+static int btrfs_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
umode_t mode = inode->i_mode;
@@ -9889,10 +10109,11 @@ static int btrfs_permission(struct inode *inode, int mask)
if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
return -EACCES;
}
- return generic_permission(inode, mask);
+ return generic_permission(&init_user_ns, inode, mask);
}
-static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int btrfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
@@ -9909,7 +10130,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_find_free_objectid(root, &objectid);
+ ret = btrfs_get_free_objectid(root, &objectid);
if (ret)
goto out;
@@ -9992,6 +10213,7 @@ static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
sp->ptr = ptr;
sp->inode = inode;
sp->is_block_group = is_block_group;
+ sp->bg_extent_count = 1;
spin_lock(&fs_info->swapfile_pins_lock);
p = &fs_info->swapfile_pins.rb_node;
@@ -10005,6 +10227,8 @@ static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
(sp->ptr == entry->ptr && sp->inode > entry->inode)) {
p = &(*p)->rb_right;
} else {
+ if (is_block_group)
+ entry->bg_extent_count++;
spin_unlock(&fs_info->swapfile_pins_lock);
kfree(sp);
return 1;
@@ -10030,8 +10254,11 @@ static void btrfs_free_swapfile_pins(struct inode *inode)
sp = rb_entry(node, struct btrfs_swapfile_pin, node);
if (sp->inode == inode) {
rb_erase(&sp->node, &fs_info->swapfile_pins);
- if (sp->is_block_group)
+ if (sp->is_block_group) {
+ btrfs_dec_block_group_swap_extents(sp->ptr,
+ sp->bg_extent_count);
btrfs_put_block_group(sp->ptr);
+ }
kfree(sp);
}
node = next;
@@ -10092,7 +10319,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
sector_t *span)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
struct extent_map *em = NULL;
@@ -10143,13 +10371,27 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
"cannot activate swapfile while exclusive operation is running");
return -EBUSY;
}
+
+ /*
+ * Prevent snapshot creation while we are activating the swap file.
+ * We do not want to race with snapshot creation. If snapshot creation
+ * already started before we bumped nr_swapfiles from 0 to 1 and
+ * completes before the first write into the swap file after it is
+ * activated, then that write would fall back to COW.
+ */
+ if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
+ btrfs_exclop_finish(fs_info);
+ btrfs_warn(fs_info,
+ "cannot activate swapfile because snapshot creation is in progress");
+ return -EINVAL;
+ }
/*
* Snapshots can create extents which require COW even if NODATACOW is
* set. We use this counter to prevent snapshots. We must increment it
* before walking the extents because we don't want a concurrent
* snapshot to run after we've already checked the extents.
*/
- atomic_inc(&BTRFS_I(inode)->root->nr_swapfiles);
+ atomic_inc(&root->nr_swapfiles);
isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
@@ -10246,6 +10488,17 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
goto out;
}
+ if (!btrfs_inc_block_group_swap_extents(bg)) {
+ btrfs_warn(fs_info,
+ "block group for swapfile at %llu is read-only%s",
+ bg->start,
+ atomic_read(&fs_info->scrubs_running) ?
+ " (scrub running)" : "");
+ btrfs_put_block_group(bg);
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = btrfs_add_swapfile_pin(inode, bg, true);
if (ret) {
btrfs_put_block_group(bg);
@@ -10284,6 +10537,8 @@ out:
if (ret)
btrfs_swap_deactivate(file);
+ btrfs_drew_write_unlock(&root->snapshot_lock);
+
btrfs_exclop_finish(fs_info);
if (ret)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index dde49a791f3e..e8d53fea4c61 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -213,7 +213,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
const char *comp = NULL;
u32 binode_flags;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EPERM;
if (btrfs_root_readonly(root))
@@ -429,7 +429,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
unsigned old_i_flags;
int ret = 0;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EPERM;
if (btrfs_root_readonly(root))
@@ -528,6 +528,14 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
return -EPERM;
/*
+ * btrfs_trim_block_group() depends on the space cache, which is not
+ * available on a zoned filesystem. So, disallow fitrim on a zoned
+ * filesystem for now.
+ */
+ if (btrfs_is_zoned(fs_info))
+ return -EOPNOTSUPP;
+
+ /*
* If the fs is mounted with nologreplay, which requires it to be
* mounted in RO mode as well, we can not allow discard on free space
* inside block groups, because log trees refer to extents that are not
@@ -606,14 +614,13 @@ static noinline int create_subvol(struct inode *dir,
int err;
dev_t anon_dev = 0;
u64 objectid;
- u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
u64 index = 0;
root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
if (!root_item)
return -ENOMEM;
- ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
+ ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid);
if (ret)
goto fail_free;
@@ -693,7 +700,7 @@ static noinline int create_subvol(struct inode *dir,
free_extent_buffer(leaf);
leaf = NULL;
- btrfs_set_root_dirid(root_item, new_dirid);
+ btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);
key.objectid = objectid;
key.offset = 0;
@@ -716,7 +723,7 @@ static noinline int create_subvol(struct inode *dir,
btrfs_record_root_in_trans(trans, new_root);
- ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
+ ret = btrfs_create_subvol_root(trans, new_root, root);
btrfs_put_root(new_root);
if (ret) {
/* We potentially lose an unused inode item here */
@@ -724,10 +731,6 @@ static noinline int create_subvol(struct inode *dir,
goto fail;
}
- mutex_lock(&new_root->objectid_mutex);
- new_root->highest_objectid = new_dirid;
- mutex_unlock(&new_root->objectid_mutex);
-
/*
* insert the directory item
*/
@@ -922,13 +925,14 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
BUG_ON(d_inode(victim->d_parent) != dir);
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
- error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ error = inode_permission(&init_user_ns, dir, MAY_WRITE | MAY_EXEC);
if (error)
return error;
if (IS_APPEND(dir))
return -EPERM;
- if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
- IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
+ if (check_sticky(&init_user_ns, dir, d_inode(victim)) ||
+ IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) ||
+ IS_SWAPFILE(d_inode(victim)))
return -EPERM;
if (isdir) {
if (!d_is_dir(victim))
@@ -951,7 +955,7 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
- return inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ return inode_permission(&init_user_ns, dir, MAY_WRITE | MAY_EXEC);
}
/*
@@ -1319,6 +1323,13 @@ again:
if (!page)
break;
+ ret = set_page_extent_mapped(page);
+ if (ret < 0) {
+ unlock_page(page);
+ put_page(page);
+ break;
+ }
+
page_start = page_offset(page);
page_end = page_start + PAGE_SIZE - 1;
while (1) {
@@ -1440,7 +1451,6 @@ again:
for (i = 0; i < i_done; i++) {
clear_page_dirty_for_io(pages[i]);
ClearPageChecked(pages[i]);
- set_page_extent_mapped(pages[i]);
set_page_dirty(pages[i]);
unlock_page(pages[i]);
put_page(pages[i]);
@@ -1862,7 +1872,7 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
"Snapshot src from another FS");
ret = -EXDEV;
- } else if (!inode_owner_or_capable(src_inode)) {
+ } else if (!inode_owner_or_capable(&init_user_ns, src_inode)) {
/*
* Subvolume creation is not restricted, but snapshots
* are limited to own subvolumes only
@@ -1926,7 +1936,10 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
readonly = true;
if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
- if (vol_args->size > PAGE_SIZE) {
+ u64 nums;
+
+ if (vol_args->size < sizeof(*inherit) ||
+ vol_args->size > PAGE_SIZE) {
ret = -EINVAL;
goto free_args;
}
@@ -1935,6 +1948,20 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
ret = PTR_ERR(inherit);
goto free_args;
}
+
+ if (inherit->num_qgroups > PAGE_SIZE ||
+ inherit->num_ref_copies > PAGE_SIZE ||
+ inherit->num_excl_copies > PAGE_SIZE) {
+ ret = -EINVAL;
+ goto free_inherit;
+ }
+
+ nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
+ 2 * inherit->num_excl_copies;
+ if (vol_args->size != struct_size(inherit, qgroups, nums)) {
+ ret = -EINVAL;
+ goto free_inherit;
+ }
}
ret = __btrfs_ioctl_snap_create(file, vol_args->name, vol_args->fd,
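The added checks follow the usual pattern for validating a user-supplied structure that ends in a flexible array: reject a buffer shorter than the fixed header, bound the element counts, then require the copied size to match struct_size() for exactly those counts. A small self-contained sketch of the same idea follows, using generic made-up names rather than the btrfs ioctl structures, with a userspace stand-in for struct_size().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct inherit_args {
	uint64_t num_items;
	uint64_t items[];	/* flexible array sized by num_items */
};

/* Userspace stand-in for the kernel's struct_size() helper. */
#define STRUCT_SIZE(ptr, member, n) \
	(sizeof(*(ptr)) + sizeof((ptr)->member[0]) * (size_t)(n))

static int validate(const struct inherit_args *args, size_t copied_size,
		    size_t max_items)
{
	if (copied_size < sizeof(*args))
		return -1;			/* header truncated */
	if (args->num_items > max_items)
		return -1;			/* bound the count first */
	if (copied_size != STRUCT_SIZE(args, items, args->num_items))
		return -1;			/* size must match the count */
	return 0;
}

int main(void)
{
	/* Contrived stand-in for a buffer copied in from user space. */
	uint64_t buf[3] = { 2, 10, 20 };	/* num_items = 2, then two items */
	const struct inherit_args *args = (const struct inherit_args *)buf;

	printf("valid=%d\n", validate(args, sizeof(buf), 16) == 0);
	return 0;
}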
@@ -1982,7 +2009,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
u64 flags;
int ret = 0;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EPERM;
ret = mnt_want_write_file(file);
@@ -2538,7 +2565,8 @@ static int btrfs_search_path_in_tree_user(struct inode *inode,
ret = PTR_ERR(temp_inode);
goto out_put;
}
- ret = inode_permission(temp_inode, MAY_READ | MAY_EXEC);
+ ret = inode_permission(&init_user_ns, temp_inode,
+ MAY_READ | MAY_EXEC);
iput(temp_inode);
if (ret) {
ret = -EACCES;
@@ -3068,7 +3096,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
if (root == dest)
goto out_dput;
- err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
+ err = inode_permission(&init_user_ns, inode,
+ MAY_WRITE | MAY_EXEC);
if (err)
goto out_dput;
}
@@ -3139,7 +3168,7 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
* running and allows defrag on files open in read-only mode.
*/
if (!capable(CAP_SYS_ADMIN) &&
- inode_permission(inode, MAY_WRITE)) {
+ inode_permission(&init_user_ns, inode, MAY_WRITE)) {
ret = -EPERM;
goto out;
}
@@ -4451,7 +4480,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
int ret = 0;
int received_uuid_changed;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EPERM;
ret = mnt_want_write_file(file);
@@ -4951,7 +4980,7 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_SYNC: {
int ret;
- ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
+ ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
if (ret)
return ret;
ret = btrfs_sync_fs(inode->i_sb, 1);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index aa9cd11f4b78..9084a950dc09 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -467,7 +467,7 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
destlen = min_t(unsigned long, destlen, PAGE_SIZE);
bytes = min_t(unsigned long, destlen, out_len - start_byte);
- kaddr = kmap_atomic(dest_page);
+ kaddr = kmap_local_page(dest_page);
memcpy(kaddr, workspace->buf + start_byte, bytes);
/*
@@ -477,7 +477,7 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
*/
if (bytes < destlen)
memset(kaddr+bytes, 0, destlen-bytes);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
out:
return ret;
}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 79d366a36223..985a21558437 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -199,14 +199,21 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset
entry->compress_type = compress_type;
entry->truncated_len = (u64)-1;
entry->qgroup_rsv = ret;
- if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
- set_bit(type, &entry->flags);
+ entry->physical = (u64)-1;
+ entry->disk = NULL;
+ entry->partno = (u8)-1;
- if (dio) {
- percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
- fs_info->delalloc_batch);
+ ASSERT(type == BTRFS_ORDERED_REGULAR ||
+ type == BTRFS_ORDERED_NOCOW ||
+ type == BTRFS_ORDERED_PREALLOC ||
+ type == BTRFS_ORDERED_COMPRESSED);
+ set_bit(type, &entry->flags);
+
+ percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
+ fs_info->delalloc_batch);
+
+ if (dio)
set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
- }
/* one ref for the tree */
refcount_set(&entry->refs, 1);
@@ -256,6 +263,9 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
int type)
{
+ ASSERT(type == BTRFS_ORDERED_REGULAR ||
+ type == BTRFS_ORDERED_NOCOW ||
+ type == BTRFS_ORDERED_PREALLOC);
return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
num_bytes, disk_num_bytes, type, 0,
BTRFS_COMPRESS_NONE);
@@ -265,6 +275,9 @@ int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
u64 disk_bytenr, u64 num_bytes,
u64 disk_num_bytes, int type)
{
+ ASSERT(type == BTRFS_ORDERED_REGULAR ||
+ type == BTRFS_ORDERED_NOCOW ||
+ type == BTRFS_ORDERED_PREALLOC);
return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
num_bytes, disk_num_bytes, type, 1,
BTRFS_COMPRESS_NONE);
@@ -272,11 +285,12 @@ int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
u64 disk_bytenr, u64 num_bytes,
- u64 disk_num_bytes, int type,
- int compress_type)
+ u64 disk_num_bytes, int compress_type)
{
+ ASSERT(compress_type != BTRFS_COMPRESS_NONE);
return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
- num_bytes, disk_num_bytes, type, 0,
+ num_bytes, disk_num_bytes,
+ BTRFS_ORDERED_COMPRESSED, 0,
compress_type);
}
@@ -297,26 +311,33 @@ void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
}
/*
- * this is used to account for finished IO across a given range
- * of the file. The IO may span ordered extents. If
- * a given ordered_extent is completely done, 1 is returned, otherwise
- * 0.
+ * Finish IO for one ordered extent across a given range. The range can
+ * contain several ordered extents.
+ *
+ * @found_ret: Return the finished ordered extent
+ * @file_offset: File offset for the finished IO
+ * Will also be updated to one byte past the range that is
+ * recorded as finished. This allows the caller to walk forward.
+ * @io_size: Length of the finished IO range
+ * @uptodate: If the IO finished without problem
*
- * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
- * to make sure this function only returns 1 once for a given ordered extent.
+ * Return true if any ordered extent is finished in the range, and update
+ * @found_ret and @file_offset.
+ * Return false otherwise.
*
- * file_offset is updated to one byte past the range that is recorded as
- * complete. This allows you to walk forward in the file.
+ * NOTE: Although the range can cross multiple ordered extents, only one
+ * ordered extent will be updated during one call. The caller is
+ * responsible for iterating over all ordered extents in the range.
*/
-int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
- struct btrfs_ordered_extent **cached,
+bool btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
+ struct btrfs_ordered_extent **finished_ret,
u64 *file_offset, u64 io_size, int uptodate)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- int ret;
+ bool finished = false;
unsigned long flags;
u64 dec_end;
u64 dec_start;
@@ -324,16 +345,12 @@ int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
spin_lock_irqsave(&tree->lock, flags);
node = tree_search(tree, *file_offset);
- if (!node) {
- ret = 1;
+ if (!node)
goto out;
- }
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
- if (!offset_in_entry(entry, *file_offset)) {
- ret = 1;
+ if (!offset_in_entry(entry, *file_offset))
goto out;
- }
dec_start = max(*file_offset, entry->file_offset);
dec_end = min(*file_offset + io_size,
@@ -354,39 +371,50 @@ int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
if (entry->bytes_left == 0) {
- ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+ /*
+ * Ensure only one caller can set the flag and finished_ret
+ * accordingly
+ */
+ finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
/* test_and_set_bit implies a barrier */
cond_wake_up_nomb(&entry->wait);
- } else {
- ret = 1;
}
out:
- if (!ret && cached && entry) {
- *cached = entry;
+ if (finished && finished_ret && entry) {
+ *finished_ret = entry;
refcount_inc(&entry->refs);
}
spin_unlock_irqrestore(&tree->lock, flags);
- return ret == 0;
+ return finished;
}
/*
- * this is used to account for finished IO across a given range
- * of the file. The IO should not span ordered extents. If
- * a given ordered_extent is completely done, 1 is returned, otherwise
- * 0.
+ * Finish IO for one ordered extent across a given range. The range can only
+ * contain one ordered extent.
+ *
+ * @cached: The cached ordered extent. If not NULL, we can skip the tree
+ * search and use the ordered extent directly.
+ * Will also be used to store the finished ordered extent.
+ * @file_offset: File offset for the finished IO
+ * @io_size: Length of the finished IO range
+ * @uptodate: If the IO finishes without problem
*
- * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
- * to make sure this function only returns 1 once for a given ordered extent.
+ * Return true if the ordered extent is finished in the range, and update
+ * @cached.
+ * Return false otherwise.
+ *
+ * NOTE: The range can NOT cross multiple ordered extents.
+ * Thus the caller should ensure the range doesn't cross ordered extents.
*/
-int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
- struct btrfs_ordered_extent **cached,
- u64 file_offset, u64 io_size, int uptodate)
+bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
+ struct btrfs_ordered_extent **cached,
+ u64 file_offset, u64 io_size, int uptodate)
{
struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
unsigned long flags;
- int ret;
+ bool finished = false;
spin_lock_irqsave(&tree->lock, flags);
if (cached && *cached) {
@@ -395,41 +423,39 @@ int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
}
node = tree_search(tree, file_offset);
- if (!node) {
- ret = 1;
+ if (!node)
goto out;
- }
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
- if (!offset_in_entry(entry, file_offset)) {
- ret = 1;
+ if (!offset_in_entry(entry, file_offset))
goto out;
- }
- if (io_size > entry->bytes_left) {
+ if (io_size > entry->bytes_left)
btrfs_crit(inode->root->fs_info,
"bad ordered accounting left %llu size %llu",
entry->bytes_left, io_size);
- }
+
entry->bytes_left -= io_size;
if (!uptodate)
set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
if (entry->bytes_left == 0) {
- ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+ /*
+ * Ensure only one caller can set the flag and finished_ret
+ * accordingly
+ */
+ finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
/* test_and_set_bit implies a barrier */
cond_wake_up_nomb(&entry->wait);
- } else {
- ret = 1;
}
out:
- if (!ret && cached && entry) {
+ if (finished && cached && entry) {
*cached = entry;
refcount_inc(&entry->refs);
}
spin_unlock_irqrestore(&tree->lock, flags);
- return ret == 0;
+ return finished;
}
/*
@@ -480,9 +506,8 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
false);
- if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
- percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
- fs_info->delalloc_batch);
+ percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
+ fs_info->delalloc_batch);
tree = &btrfs_inode->ordered_tree;
spin_lock_irq(&tree->lock);
@@ -745,9 +770,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *ino
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
+ unsigned long flags;
tree = &inode->ordered_tree;
- spin_lock_irq(&tree->lock);
+ spin_lock_irqsave(&tree->lock, flags);
node = tree_search(tree, file_offset);
if (!node)
goto out;
@@ -758,7 +784,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *ino
if (entry)
refcount_inc(&entry->refs);
out:
- spin_unlock_irq(&tree->lock);
+ spin_unlock_irqrestore(&tree->lock, flags);
return entry;
}
@@ -898,6 +924,84 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
}
}
+static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
+ u64 len)
+{
+ struct inode *inode = ordered->inode;
+ u64 file_offset = ordered->file_offset + pos;
+ u64 disk_bytenr = ordered->disk_bytenr + pos;
+ u64 num_bytes = len;
+ u64 disk_num_bytes = len;
+ int type;
+ unsigned long flags_masked = ordered->flags & ~(1 << BTRFS_ORDERED_DIRECT);
+ int compress_type = ordered->compress_type;
+ unsigned long weight;
+ int ret;
+
+ weight = hweight_long(flags_masked);
+ WARN_ON_ONCE(weight > 1);
+ if (!weight)
+ type = 0;
+ else
+ type = __ffs(flags_masked);
+
+ if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
+ WARN_ON_ONCE(1);
+ ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
+ file_offset, disk_bytenr, num_bytes,
+ disk_num_bytes, compress_type);
+ } else if (test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
+ ret = btrfs_add_ordered_extent_dio(BTRFS_I(inode), file_offset,
+ disk_bytenr, num_bytes, disk_num_bytes, type);
+ } else {
+ ret = btrfs_add_ordered_extent(BTRFS_I(inode), file_offset,
+ disk_bytenr, num_bytes, disk_num_bytes, type);
+ }
+
+ return ret;
+}
+
+int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
+ u64 post)
+{
+ struct inode *inode = ordered->inode;
+ struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
+ struct rb_node *node;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ int ret = 0;
+
+ spin_lock_irq(&tree->lock);
+ /* Remove from tree once */
+ node = &ordered->rb_node;
+ rb_erase(node, &tree->tree);
+ RB_CLEAR_NODE(node);
+ if (tree->last == node)
+ tree->last = NULL;
+
+ ordered->file_offset += pre;
+ ordered->disk_bytenr += pre;
+ ordered->num_bytes -= (pre + post);
+ ordered->disk_num_bytes -= (pre + post);
+ ordered->bytes_left -= (pre + post);
+
+ /* Re-insert the node */
+ node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
+ if (node)
+ btrfs_panic(fs_info, -EEXIST,
+ "zoned: inconsistency in ordered tree at offset %llu",
+ ordered->file_offset);
+
+ spin_unlock_irq(&tree->lock);
+
+ if (pre)
+ ret = clone_ordered_extent(ordered, 0, pre);
+ if (post)
+ ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
+ post);
+
+ return ret;
+}
+
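As a rough orientation, the sketch below shows how a caller might use the new split helper once an I/O could only cover the middle of an existing ordered extent. The wrapper name and its pre/post arguments are hypothetical; only btrfs_lookup_ordered_extent(), btrfs_split_ordered_extent() and btrfs_put_ordered_extent() come from this file.

static int split_ordered_around_io(struct btrfs_inode *inode, u64 file_offset,
				   u64 pre, u64 post)
{
	struct btrfs_ordered_extent *ordered;
	int ret;

	/* Find the ordered extent covering the range that was written */
	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
	if (!ordered)
		return -ENOENT;

	/* Keep the middle; re-queue the untouched head (pre) and tail (post) */
	ret = btrfs_split_ordered_extent(ordered, pre, post);
	btrfs_put_ordered_extent(ordered);
	return ret;
}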
int __init ordered_data_init(void)
{
btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 0bfa82b58e23..99e0853e4d3b 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -27,7 +27,7 @@ struct btrfs_ordered_sum {
};
/*
- * bits for the flags field:
+ * Bits for btrfs_ordered_extent::flags.
*
* BTRFS_ORDERED_IO_DONE is set when all of the blocks are written.
* It is used to make sure metadata is inserted into the tree only once
@@ -38,24 +38,36 @@ struct btrfs_ordered_sum {
* IO is done and any metadata is inserted into the tree.
*/
enum {
+ /*
+ * Different types of ordered extents; one and only one of the 4 types
+ * can be set when creating an ordered extent.
+ *
+ * REGULAR: For regular non-compressed COW write
+ * NOCOW: For NOCOW write into existing non-hole extent
+ * PREALLOC: For NOCOW write into preallocated extent
+ * COMPRESSED: For compressed COW write
+ */
+ BTRFS_ORDERED_REGULAR,
+ BTRFS_ORDERED_NOCOW,
+ BTRFS_ORDERED_PREALLOC,
+ BTRFS_ORDERED_COMPRESSED,
+
+ /*
+ * Extra bit for direct io; it can only be set for
+ * REGULAR/NOCOW/PREALLOC. There is no direct io for compressed extents.
+ */
+ BTRFS_ORDERED_DIRECT,
+
+ /* Extra status bits for ordered extents */
+
/* set when all the pages are written */
BTRFS_ORDERED_IO_DONE,
/* set when removed from the tree */
BTRFS_ORDERED_COMPLETE,
- /* set when we want to write in place */
- BTRFS_ORDERED_NOCOW,
- /* writing a zlib compressed extent */
- BTRFS_ORDERED_COMPRESSED,
- /* set when writing to preallocated extent */
- BTRFS_ORDERED_PREALLOC,
- /* set when we're doing DIO with this extent */
- BTRFS_ORDERED_DIRECT,
/* We had an io error when writing this out */
BTRFS_ORDERED_IOERR,
/* Set when we have to truncate an extent */
BTRFS_ORDERED_TRUNCATED,
- /* Regular IO for COW */
- BTRFS_ORDERED_REGULAR,
/* Used during fsync to track already logged extents */
BTRFS_ORDERED_LOGGED,
/* We have already logged all the csums of the ordered extent */
@@ -127,6 +139,14 @@ struct btrfs_ordered_extent {
struct completion completion;
struct btrfs_work flush_work;
struct list_head work_list;
+
+ /*
+ * Used to reverse-map the physical address returned from a ZONE_APPEND
+ * write command in a workqueue context
+ */
+ u64 physical;
+ struct gendisk *disk;
+ u8 partno;
};
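These three fields let the endio path remember where a ZONE_APPEND write actually landed, so the logical address can be rewritten later from a workqueue. A minimal sketch of that bookkeeping, assuming the completed bio's bi_iter.bi_sector holds the device-chosen location; the helper name is hypothetical and not part of this header.

static void record_zone_append_location(struct btrfs_ordered_extent *ordered,
					struct bio *bio)
{
	/* Physical byte offset the device picked for the ZONE_APPEND write */
	ordered->physical = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
	/* Remember the disk and partition so the mapping can be reversed */
	ordered->disk = bio->bi_bdev->bd_disk;
	ordered->partno = bio->bi_bdev->bd_partno;
}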
/*
@@ -152,11 +172,11 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
struct btrfs_ordered_extent *entry);
-int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
- struct btrfs_ordered_extent **cached,
- u64 file_offset, u64 io_size, int uptodate);
-int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
- struct btrfs_ordered_extent **cached,
+bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
+ struct btrfs_ordered_extent **cached,
+ u64 file_offset, u64 io_size, int uptodate);
+bool btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
+ struct btrfs_ordered_extent **finished_ret,
u64 *file_offset, u64 io_size,
int uptodate);
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
@@ -167,8 +187,7 @@ int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
u64 disk_num_bytes, int type);
int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
u64 disk_bytenr, u64 num_bytes,
- u64 disk_num_bytes, int type,
- int compress_type);
+ u64 disk_num_bytes, int compress_type);
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
struct btrfs_ordered_sum *sum);
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
@@ -190,6 +209,8 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
u64 end,
struct extent_state **cached_state);
+int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
+ u64 post);
int __init ordered_data_init(void);
void __cold ordered_data_exit(void);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 808370ada888..14ff388fd3bd 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3841,8 +3841,8 @@ static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
return num_bytes;
}
-static int qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
- enum btrfs_qgroup_rsv_type type, bool enforce)
+int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+ enum btrfs_qgroup_rsv_type type, bool enforce)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
@@ -3873,14 +3873,14 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
{
int ret;
- ret = qgroup_reserve_meta(root, num_bytes, type, enforce);
+ ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
if (ret <= 0 && ret != -EDQUOT)
return ret;
ret = try_flush_qgroup(root);
if (ret < 0)
return ret;
- return qgroup_reserve_meta(root, num_bytes, type, enforce);
+ return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
}
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 50dea9a2d8fb..7283e4f549af 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -361,6 +361,8 @@ int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start,
u64 len);
+int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+ enum btrfs_qgroup_rsv_type type, bool enforce);
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
enum btrfs_qgroup_rsv_type type, bool enforce);
/* Reserve metadata space for pertrans and prealloc type */
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 93fbf87bdc8d..8c31357f08ed 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -233,8 +233,7 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
}
x = cmpxchg(&info->stripe_hash_table, NULL, table);
- if (x)
- kvfree(x);
+ kvfree(x);
return 0;
}
@@ -250,8 +249,6 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
int i;
- char *s;
- char *d;
int ret;
ret = alloc_rbio_pages(rbio);
@@ -262,13 +259,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
if (!rbio->bio_pages[i])
continue;
- s = kmap(rbio->bio_pages[i]);
- d = kmap(rbio->stripe_pages[i]);
-
- copy_page(d, s);
-
- kunmap(rbio->bio_pages[i]);
- kunmap(rbio->stripe_pages[i]);
+ copy_highpage(rbio->stripe_pages[i], rbio->bio_pages[i]);
SetPageUptodate(rbio->stripe_pages[i]);
}
set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
@@ -1105,8 +1096,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
* devices or if they are not contiguous
*/
if (last_end == disk_start && !last->bi_status &&
- last->bi_disk == stripe->dev->bdev->bd_disk &&
- last->bi_partno == stripe->dev->bdev->bd_partno) {
+ last->bi_bdev == stripe->dev->bdev) {
ret = bio_add_page(last, page, PAGE_SIZE, 0);
if (ret == PAGE_SIZE)
return 0;
@@ -1357,9 +1347,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
for (i = 0; i < rbio->bbio->num_stripes; i++) {
stripe = &rbio->bbio->stripes[i];
if (in_range(physical, stripe->physical, rbio->stripe_len) &&
- stripe->dev->bdev &&
- bio->bi_disk == stripe->dev->bdev->bd_disk &&
- bio->bi_partno == stripe->dev->bdev->bd_partno) {
+ stripe->dev->bdev && bio->bi_bdev == stripe->dev->bdev) {
return i;
}
}
@@ -2363,16 +2351,21 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
SetPageUptodate(p_page);
if (has_qstripe) {
+ /* RAID6, allocate and map temp space for the Q stripe */
q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!q_page) {
__free_page(p_page);
goto cleanup;
}
SetPageUptodate(q_page);
+ pointers[rbio->real_stripes - 1] = kmap(q_page);
}
atomic_set(&rbio->error, 0);
+ /* Map the parity stripe just once */
+ pointers[nr_data] = kmap(p_page);
+
for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
struct page *p;
void *parity;
@@ -2382,16 +2375,8 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
pointers[stripe] = kmap(p);
}
- /* then add the parity stripe */
- pointers[stripe++] = kmap(p_page);
-
if (has_qstripe) {
- /*
- * raid6, add the qstripe and call the
- * library function to fill in our p/q
- */
- pointers[stripe++] = kmap(q_page);
-
+ /* RAID6, call the library function to fill in our P/Q */
raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
pointers);
} else {
@@ -2412,12 +2397,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
for (stripe = 0; stripe < nr_data; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
- kunmap(p_page);
}
+ kunmap(p_page);
__free_page(p_page);
- if (q_page)
+ if (q_page) {
+ kunmap(q_page);
__free_page(q_page);
+ }
writeback:
/*
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 20fd4aa48a8c..06713a8fe26b 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -209,7 +209,7 @@ int btree_readahead_hook(struct extent_buffer *eb, int err)
/* find extent */
spin_lock(&fs_info->reada_lock);
re = radix_tree_lookup(&fs_info->reada_tree,
- eb->start >> PAGE_SHIFT);
+ eb->start >> fs_info->sectorsize_bits);
if (re)
re->refcnt++;
spin_unlock(&fs_info->reada_lock);
@@ -240,7 +240,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
zone = NULL;
spin_lock(&fs_info->reada_lock);
ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
- logical >> PAGE_SHIFT, 1);
+ logical >> fs_info->sectorsize_bits, 1);
if (ret == 1 && logical >= zone->start && logical <= zone->end) {
kref_get(&zone->refcnt);
spin_unlock(&fs_info->reada_lock);
@@ -283,13 +283,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
spin_lock(&fs_info->reada_lock);
ret = radix_tree_insert(&dev->reada_zones,
- (unsigned long)(zone->end >> PAGE_SHIFT),
- zone);
+ (unsigned long)(zone->end >> fs_info->sectorsize_bits),
+ zone);
if (ret == -EEXIST) {
kfree(zone);
ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
- logical >> PAGE_SHIFT, 1);
+ logical >> fs_info->sectorsize_bits, 1);
if (ret == 1 && logical >= zone->start && logical <= zone->end)
kref_get(&zone->refcnt);
else
@@ -315,7 +315,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
u64 length;
int real_stripes;
int nzones = 0;
- unsigned long index = logical >> PAGE_SHIFT;
+ unsigned long index = logical >> fs_info->sectorsize_bits;
int dev_replace_is_ongoing;
int have_zone = 0;
@@ -497,7 +497,7 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info,
struct reada_extent *re)
{
int i;
- unsigned long index = re->logical >> PAGE_SHIFT;
+ unsigned long index = re->logical >> fs_info->sectorsize_bits;
spin_lock(&fs_info->reada_lock);
if (--re->refcnt) {
@@ -538,11 +538,12 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info,
static void reada_zone_release(struct kref *kref)
{
struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
+ struct btrfs_fs_info *fs_info = zone->device->fs_info;
- lockdep_assert_held(&zone->device->fs_info->reada_lock);
+ lockdep_assert_held(&fs_info->reada_lock);
radix_tree_delete(&zone->device->reada_zones,
- zone->end >> PAGE_SHIFT);
+ zone->end >> fs_info->sectorsize_bits);
kfree(zone);
}
@@ -593,7 +594,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
int i;
- unsigned long index = zone->end >> PAGE_SHIFT;
+ unsigned long index = zone->end >> zone->device->fs_info->sectorsize_bits;
for (i = 0; i < zone->ndevs; ++i) {
struct reada_zone *peer;
@@ -628,7 +629,7 @@ static int reada_pick_zone(struct btrfs_device *dev)
(void **)&zone, index, 1);
if (ret == 0)
break;
- index = (zone->end >> PAGE_SHIFT) + 1;
+ index = (zone->end >> dev->fs_info->sectorsize_bits) + 1;
if (zone->locked) {
if (zone->elems > top_locked_elems) {
top_locked_elems = zone->elems;
@@ -709,7 +710,7 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
* plugging to speed things up
*/
ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
- dev->reada_next >> PAGE_SHIFT, 1);
+ dev->reada_next >> fs_info->sectorsize_bits, 1);
if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
ret = reada_pick_zone(dev);
if (!ret) {
@@ -718,7 +719,7 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
}
re = NULL;
ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
- dev->reada_next >> PAGE_SHIFT, 1);
+ dev->reada_next >> fs_info->sectorsize_bits, 1);
}
if (ret == 0) {
spin_unlock(&fs_info->reada_lock);
@@ -885,7 +886,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
pr_cont(" curr off %llu",
device->reada_next - zone->start);
pr_cont("\n");
- index = (zone->end >> PAGE_SHIFT) + 1;
+ index = (zone->end >> fs_info->sectorsize_bits) + 1;
}
cnt = 0;
index = 0;
@@ -910,7 +911,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
}
}
pr_cont("\n");
- index = (re->logical >> PAGE_SHIFT) + 1;
+ index = (re->logical >> fs_info->sectorsize_bits) + 1;
if (++cnt > 15)
break;
}
@@ -926,7 +927,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
if (ret == 0)
break;
if (!re->scheduled) {
- index = (re->logical >> PAGE_SHIFT) + 1;
+ index = (re->logical >> fs_info->sectorsize_bits) + 1;
continue;
}
pr_debug("re: logical %llu size %u list empty %d scheduled %d",
@@ -942,7 +943,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
}
}
pr_cont("\n");
- index = (re->logical >> PAGE_SHIFT) + 1;
+ index = (re->logical >> fs_info->sectorsize_bits) + 1;
}
spin_unlock(&fs_info->reada_lock);
}
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 4b9b6c52a83b..8e026de74c44 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -218,11 +218,11 @@ static void __print_stack_trace(struct btrfs_fs_info *fs_info,
stack_trace_print(ra->trace, ra->trace_len, 2);
}
#else
-static void inline __save_stack_trace(struct ref_action *ra)
+static inline void __save_stack_trace(struct ref_action *ra)
{
}
-static void inline __print_stack_trace(struct btrfs_fs_info *fs_info,
+static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
struct ref_action *ra)
{
btrfs_err(fs_info, " ref-verify: no stacktrace support");
@@ -495,14 +495,15 @@ static int process_extent_item(struct btrfs_fs_info *fs_info,
}
static int process_leaf(struct btrfs_root *root,
- struct btrfs_path *path, u64 *bytenr, u64 *num_bytes)
+ struct btrfs_path *path, u64 *bytenr, u64 *num_bytes,
+ int *tree_block_level)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
u32 count;
- int i = 0, tree_block_level = 0, ret = 0;
+ int i = 0, ret = 0;
struct btrfs_key key;
int nritems = btrfs_header_nritems(leaf);
@@ -515,15 +516,15 @@ static int process_leaf(struct btrfs_root *root,
case BTRFS_METADATA_ITEM_KEY:
*bytenr = key.objectid;
ret = process_extent_item(fs_info, path, &key, i,
- &tree_block_level);
+ tree_block_level);
break;
case BTRFS_TREE_BLOCK_REF_KEY:
ret = add_tree_block(fs_info, key.offset, 0,
- key.objectid, tree_block_level);
+ key.objectid, *tree_block_level);
break;
case BTRFS_SHARED_BLOCK_REF_KEY:
ret = add_tree_block(fs_info, 0, key.offset,
- key.objectid, tree_block_level);
+ key.objectid, *tree_block_level);
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = btrfs_item_ptr(leaf, i,
@@ -549,7 +550,8 @@ static int process_leaf(struct btrfs_root *root,
/* Walk down to the leaf from the given level */
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
- int level, u64 *bytenr, u64 *num_bytes)
+ int level, u64 *bytenr, u64 *num_bytes,
+ int *tree_block_level)
{
struct extent_buffer *eb;
int ret = 0;
@@ -565,7 +567,8 @@ static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
path->slots[level-1] = 0;
path->locks[level-1] = BTRFS_READ_LOCK;
} else {
- ret = process_leaf(root, path, bytenr, num_bytes);
+ ret = process_leaf(root, path, bytenr, num_bytes,
+ tree_block_level);
if (ret)
break;
}
@@ -666,18 +669,18 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
u64 bytenr = generic_ref->bytenr;
u64 num_bytes = generic_ref->len;
u64 parent = generic_ref->parent;
- u64 ref_root;
- u64 owner;
- u64 offset;
+ u64 ref_root = 0;
+ u64 owner = 0;
+ u64 offset = 0;
if (!btrfs_test_opt(fs_info, REF_VERIFY))
return 0;
if (generic_ref->type == BTRFS_REF_METADATA) {
- ref_root = generic_ref->tree_ref.root;
+ if (!parent)
+ ref_root = generic_ref->tree_ref.root;
owner = generic_ref->tree_ref.level;
- offset = 0;
- } else {
+ } else if (!parent) {
ref_root = generic_ref->data_ref.ref_root;
owner = generic_ref->data_ref.ino;
offset = generic_ref->data_ref.offset;
@@ -693,13 +696,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
goto out;
}
- if (parent) {
- ref->parent = parent;
- } else {
- ref->root_objectid = ref_root;
- ref->owner = owner;
- ref->offset = offset;
- }
+ ref->parent = parent;
+ ref->owner = owner;
+ ref->root_objectid = ref_root;
+ ref->offset = offset;
ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;
memcpy(&ra->ref, ref, sizeof(struct ref_entry));
@@ -974,6 +974,7 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
struct btrfs_path *path;
struct extent_buffer *eb;
+ int tree_block_level = 0;
u64 bytenr = 0, num_bytes = 0;
int ret, level;
@@ -998,7 +999,7 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
* different leaf from the original extent item.
*/
ret = walk_down_tree(fs_info->extent_root, path, level,
- &bytenr, &num_bytes);
+ &bytenr, &num_bytes, &tree_block_level);
if (ret)
break;
ret = walk_up_tree(path, &level);
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index b03e7891394e..762881b777b3 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -81,7 +81,10 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
goto out_unlock;
}
- set_page_extent_mapped(page);
+ ret = set_page_extent_mapped(page);
+ if (ret < 0)
+ goto out_unlock;
+
clear_extent_bit(&inode->io_tree, file_offset, range_end,
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, NULL);
@@ -103,12 +106,8 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
if (comp_type == BTRFS_COMPRESS_NONE) {
- char *map;
-
- map = kmap(page);
- memcpy(map, data_start, datal);
+ memcpy_to_page(page, 0, data_start, datal);
flush_dcache_page(page);
- kunmap(page);
} else {
ret = btrfs_decompress(comp_type, data_start, page, 0,
inline_size, datal);
@@ -550,6 +549,24 @@ process_slot:
*/
btrfs_release_path(path);
+ /*
+ * When using NO_HOLES and we are cloning a range that covers
+ * only a hole (no extents) into a range beyond the current
+ * i_size, punching a hole in the target range will not create
+ * an extent map defining a hole, because the range starts at or
+ * beyond current i_size. If the file previously had an i_size
+ * greater than the new i_size set by this clone operation, we
+ * need to make sure the next fsync is a full fsync, so that it
+ * detects and logs a hole covering a range from the current
+ * i_size to the new i_size. If the clone range covers extents,
+ * besides a hole, then we know the full sync flag was already
+ * set by previous calls to btrfs_replace_file_extents() that
+ * replaced file extent items.
+ */
+ if (last_dest_end >= i_size_read(inode))
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags);
+
ret = btrfs_replace_file_extents(inode, path, last_dest_end,
destoff + len - 1, NULL, &trans);
if (ret)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index df63ef64c5c0..232d5da7b7be 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -97,6 +97,7 @@ struct tree_block {
struct rb_node rb_node;
u64 bytenr;
}; /* Use rb_simple_node for search/insert */
+ u64 owner;
struct btrfs_key key;
unsigned int level:8;
unsigned int key_ready:1;
@@ -668,9 +669,7 @@ static void __del_reloc_root(struct btrfs_root *root)
RB_CLEAR_NODE(&node->rb_node);
}
spin_unlock(&rc->reloc_root_tree.lock);
- if (!node)
- return;
- BUG_ON((struct btrfs_root *)node->data != root);
+ ASSERT(!node || (struct btrfs_root *)node->data == root);
}
/*
@@ -2393,8 +2392,8 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info,
{
struct extent_buffer *eb;
- eb = read_tree_block(fs_info, block->bytenr, 0, block->key.offset,
- block->level, NULL);
+ eb = read_tree_block(fs_info, block->bytenr, block->owner,
+ block->key.offset, block->level, NULL);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
@@ -2493,7 +2492,8 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
/* Kick in readahead for tree blocks with missing keys */
rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
if (!block->key_ready)
- btrfs_readahead_tree_block(fs_info, block->bytenr, 0, 0,
+ btrfs_readahead_tree_block(fs_info, block->bytenr,
+ block->owner, 0,
block->level);
}
@@ -2553,6 +2553,31 @@ static noinline_for_stack int prealloc_file_extent_cluster(
if (ret)
return ret;
+ /*
+ * On a zoned filesystem, we cannot preallocate the file region.
+ * Instead, we dirty and fiemap_write the region.
+ */
+ if (btrfs_is_zoned(inode->root->fs_info)) {
+ struct btrfs_root *root = inode->root;
+ struct btrfs_trans_handle *trans;
+
+ end = cluster->end - offset + 1;
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
+ i_size_write(&inode->vfs_inode, end);
+ ret = btrfs_update_inode(trans, root, inode);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ return ret;
+ }
+
+ return btrfs_end_transaction(trans);
+ }
+
inode_lock(&inode->vfs_inode);
for (nr = 0; nr < cluster->nr; nr++) {
start = cluster->boundary[nr] - offset;
@@ -2615,7 +2640,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
/*
* Allow error injection to test balance cancellation
*/
-int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
+noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
{
return atomic_read(&fs_info->balance_cancel_req) ||
fatal_signal_pending(current);
@@ -2679,6 +2704,15 @@ static int relocate_file_extent_cluster(struct inode *inode,
goto out;
}
}
+ ret = set_page_extent_mapped(page);
+ if (ret < 0) {
+ btrfs_delalloc_release_metadata(BTRFS_I(inode),
+ PAGE_SIZE, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
+ unlock_page(page);
+ put_page(page);
+ goto out;
+ }
if (PageReadahead(page)) {
page_cache_async_readahead(inode->i_mapping,
@@ -2706,8 +2740,6 @@ static int relocate_file_extent_cluster(struct inode *inode,
lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
- set_page_extent_mapped(page);
-
if (nr < cluster->nr &&
page_start + offset == cluster->boundary[nr]) {
set_extent_bits(&BTRFS_I(inode)->io_tree,
@@ -2749,6 +2781,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
}
}
WARN_ON(nr != cluster->nr);
+ if (btrfs_is_zoned(fs_info) && !ret)
+ ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
out:
kfree(ra);
return ret;
@@ -2801,21 +2835,58 @@ static int add_tree_block(struct reloc_control *rc,
u32 item_size;
int level = -1;
u64 generation;
+ u64 owner = 0;
eb = path->nodes[0];
item_size = btrfs_item_size_nr(eb, path->slots[0]);
if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
item_size >= sizeof(*ei) + sizeof(*bi)) {
+ unsigned long ptr = 0, end;
+
ei = btrfs_item_ptr(eb, path->slots[0],
struct btrfs_extent_item);
+ end = (unsigned long)ei + item_size;
if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
bi = (struct btrfs_tree_block_info *)(ei + 1);
level = btrfs_tree_block_level(eb, bi);
+ ptr = (unsigned long)(bi + 1);
} else {
level = (int)extent_key->offset;
+ ptr = (unsigned long)(ei + 1);
}
generation = btrfs_extent_generation(eb, ei);
+
+ /*
+ * We're reading random blocks without knowing their owner ahead
+ * of time. This is ok most of the time, as all reloc roots and
+ * fs roots have the same lock type. However normal trees do
+ * not, and the only way to know ahead of time is to read the
+ * inline ref offset. We know it's an fs root if
+ *
+ * 1. There's more than one ref.
+ * 2. There's a SHARED_DATA_REF_KEY set.
+ * 3. FULL_BACKREF is set on the flags.
+ *
+ * Otherwise it's safe to assume that the ref offset == the
+ * owner of this block, so we can use that when calling
+ * read_tree_block.
+ */
+ if (btrfs_extent_refs(eb, ei) == 1 &&
+ !(btrfs_extent_flags(eb, ei) &
+ BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
+ ptr < end) {
+ struct btrfs_extent_inline_ref *iref;
+ int type;
+
+ iref = (struct btrfs_extent_inline_ref *)ptr;
+ type = btrfs_get_extent_inline_ref_type(eb, iref,
+ BTRFS_REF_TYPE_BLOCK);
+ if (type == BTRFS_REF_TYPE_INVALID)
+ return -EINVAL;
+ if (type == BTRFS_TREE_BLOCK_REF_KEY)
+ owner = btrfs_extent_inline_ref_offset(eb, iref);
+ }
} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
btrfs_print_v0_err(eb->fs_info);
btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
@@ -2837,6 +2908,7 @@ static int add_tree_block(struct reloc_control *rc,
block->key.offset = generation;
block->level = level;
block->key_ready = 0;
+ block->owner = owner;
rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
if (rb_node)
@@ -3389,8 +3461,12 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct btrfs_inode_item *item;
struct extent_buffer *leaf;
+ u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
int ret;
+ if (btrfs_is_zoned(trans->fs_info))
+ flags &= ~BTRFS_INODE_PREALLOC;
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -3405,8 +3481,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
btrfs_set_inode_generation(leaf, item, 1);
btrfs_set_inode_size(leaf, item, 0);
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
- btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
- BTRFS_INODE_PREALLOC);
+ btrfs_set_inode_flags(leaf, item, flags);
btrfs_mark_buffer_dirty(leaf);
out:
btrfs_free_path(path);
@@ -3434,7 +3509,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
return ERR_CAST(trans);
}
- err = btrfs_find_free_objectid(root, &objectid);
+ err = btrfs_get_free_objectid(root, &objectid);
if (err)
goto out;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 5f4f88a4d2c8..3d9088eab2fc 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -166,6 +166,7 @@ struct scrub_ctx {
int pages_per_rd_bio;
int is_dev_replace;
+ u64 write_pointer;
struct scrub_bio *wr_curr_bio;
struct mutex wr_lock;
@@ -856,6 +857,9 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
have_csum = sblock_to_check->pagev[0]->have_csum;
dev = sblock_to_check->pagev[0]->dev;
+ if (btrfs_is_zoned(fs_info) && !sctx->is_dev_replace)
+ return btrfs_repair_one_zone(fs_info, logical);
+
/*
* We must use GFP_NOFS because the scrub task might be waiting for a
* worker task executing this function and in turn a transaction commit
@@ -1424,7 +1428,7 @@ static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
if (!first_page->dev->bdev)
goto out;
- bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
+ bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
bio_set_dev(bio, first_page->dev->bdev);
for (page_num = 0; page_num < sblock->page_count; page_num++) {
@@ -1619,6 +1623,28 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}
+static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
+{
+ int ret = 0;
+ u64 length;
+
+ if (!btrfs_is_zoned(sctx->fs_info))
+ return 0;
+
+ if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
+ return 0;
+
+ if (sctx->write_pointer < physical) {
+ length = physical - sctx->write_pointer;
+
+ ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
+ sctx->write_pointer, length);
+ if (!ret)
+ sctx->write_pointer = physical;
+ }
+ return ret;
+}
+
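To make the gap handling above concrete, a worked example with illustrative numbers: if the copy target's recorded write pointer is at physical 0x40000000 and the next page to mirror belongs at 0x40080000, a sequential zone only accepts the write once the pointer has advanced, so the gap is padded with zeroes first.

	length              = 0x40080000 - 0x40000000 = 0x80000 (512 KiB)
	btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, 0x40000000, 0x80000);
	sctx->write_pointer = 0x40080000   /* now matches the next write */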
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
struct scrub_page *spage)
{
@@ -1641,6 +1667,13 @@ again:
if (sbio->page_count == 0) {
struct bio *bio;
+ ret = fill_writer_pointer_gap(sctx,
+ spage->physical_for_dev_replace);
+ if (ret) {
+ mutex_unlock(&sctx->wr_lock);
+ return ret;
+ }
+
sbio->physical = spage->physical_for_dev_replace;
sbio->logical = spage->logical;
sbio->dev = sctx->wr_tgtdev;
@@ -1695,13 +1728,16 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
sbio = sctx->wr_curr_bio;
sctx->wr_curr_bio = NULL;
- WARN_ON(!sbio->bio->bi_disk);
+ WARN_ON(!sbio->bio->bi_bdev);
scrub_pending_bio_inc(sctx);
/* process all writes in a single worker thread. Then the block layer
* orders the requests before sending them to the driver which
* doubled the write performance on spinning disks when measured
* with Linux 3.5 */
btrfsic_submit_bio(sbio->bio);
+
+ if (btrfs_is_zoned(sctx->fs_info))
+ sctx->write_pointer = sbio->physical + sbio->page_count * PAGE_SIZE;
}
static void scrub_wr_bio_end_io(struct bio *bio)
@@ -3025,6 +3061,46 @@ out:
return ret < 0 ? ret : 0;
}
+static void sync_replace_for_zoned(struct scrub_ctx *sctx)
+{
+ if (!btrfs_is_zoned(sctx->fs_info))
+ return;
+
+ sctx->flush_all_writes = true;
+ scrub_submit(sctx);
+ mutex_lock(&sctx->wr_lock);
+ scrub_wr_submit(sctx);
+ mutex_unlock(&sctx->wr_lock);
+
+ wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
+}
+
+static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
+ u64 physical, u64 physical_end)
+{
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
+ int ret = 0;
+
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
+
+ mutex_lock(&sctx->wr_lock);
+ if (sctx->write_pointer < physical_end) {
+ ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
+ physical,
+ sctx->write_pointer);
+ if (ret)
+ btrfs_err(fs_info,
+ "zoned: failed to recover write pointer");
+ }
+ mutex_unlock(&sctx->wr_lock);
+ btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
+
+ return ret;
+}
+
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct map_lookup *map,
struct btrfs_device *scrub_dev,
@@ -3165,6 +3241,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
*/
blk_start_plug(&plug);
+ if (sctx->is_dev_replace &&
+ btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
+ mutex_lock(&sctx->wr_lock);
+ sctx->write_pointer = physical;
+ mutex_unlock(&sctx->wr_lock);
+ sctx->flush_all_writes = true;
+ }
+
/*
* now find all extents for each stripe and scrub them
*/
@@ -3353,6 +3437,9 @@ again:
if (ret)
goto out;
+ if (sctx->is_dev_replace)
+ sync_replace_for_zoned(sctx);
+
if (extent_logical + extent_len <
key.objectid + bytes) {
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
@@ -3420,6 +3507,17 @@ out:
blk_finish_plug(&plug);
btrfs_free_path(path);
btrfs_free_path(ppath);
+
+ if (sctx->is_dev_replace && ret >= 0) {
+ int ret2;
+
+ ret2 = sync_write_pointer_for_zoned(sctx, base + offset,
+ map->stripes[num].physical,
+ physical_end);
+ if (ret2)
+ ret = ret2;
+ }
+
return ret < 0 ? ret : 0;
}
@@ -3475,6 +3573,25 @@ out:
return ret;
}
+static int finish_extent_writes_for_zoned(struct btrfs_root *root,
+ struct btrfs_block_group *cache)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_trans_handle *trans;
+
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ btrfs_wait_block_group_reservations(cache);
+ btrfs_wait_nocow_writers(cache);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ return btrfs_commit_transaction(trans);
+}
+
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev, u64 start, u64 end)
@@ -3561,6 +3678,16 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (!cache)
goto skip;
+ if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
+ spin_lock(&cache->lock);
+ if (!cache->to_copy) {
+ spin_unlock(&cache->lock);
+ ro_set = 0;
+ goto done;
+ }
+ spin_unlock(&cache->lock);
+ }
+
/*
* Make sure that while we are scrubbing the corresponding block
* group doesn't get its logical address and its device extents
@@ -3619,6 +3746,16 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* group is not RO.
*/
ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
+ if (!ret && sctx->is_dev_replace) {
+ ret = finish_extent_writes_for_zoned(root, cache);
+ if (ret) {
+ btrfs_dec_block_group_ro(cache);
+ scrub_pause_off(fs_info);
+ btrfs_put_block_group(cache);
+ break;
+ }
+ }
+
if (ret == 0) {
ro_set = 1;
} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
@@ -3630,6 +3767,13 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* commit_transactions.
*/
ro_set = 0;
+ } else if (ret == -ETXTBSY) {
+ btrfs_warn(fs_info,
+ "skipping scrub of block group %llu due to active swapfile",
+ cache->start);
+ scrub_pause_off(fs_info);
+ ret = 0;
+ goto skip_unfreeze;
} else {
btrfs_warn(fs_info,
"failed setting block group ro: %d", ret);
@@ -3692,6 +3836,12 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
scrub_pause_off(fs_info);
+ if (sctx->is_dev_replace &&
+ !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
+ cache, found_key.offset))
+ ro_set = 0;
+
+done:
down_write(&dev_replace->rwsem);
dev_replace->cursor_left = dev_replace->cursor_right;
dev_replace->item_needs_writeback = 1;
@@ -3719,7 +3869,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
} else {
spin_unlock(&cache->lock);
}
-
+skip_unfreeze:
btrfs_unfreeze_block_group(cache);
btrfs_put_block_group(cache);
if (ret)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 78a35374d492..8f323859156b 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1191,9 +1191,6 @@ struct backref_ctx {
/* may be truncated in case it's the last extent in a file */
u64 extent_len;
- /* data offset in the file extent item */
- u64 data_offset;
-
/* Just to check for bugs in backref resolving */
int found_itself;
};
@@ -1401,19 +1398,6 @@ static int find_extent_clone(struct send_ctx *sctx,
backref_ctx->cur_offset = data_offset;
backref_ctx->found_itself = 0;
backref_ctx->extent_len = num_bytes;
- /*
- * For non-compressed extents iterate_extent_inodes() gives us extent
- * offsets that already take into account the data offset, but not for
- * compressed extents, since the offset is logical and not relative to
- * the physical extent locations. We must take this into account to
- * avoid sending clone offsets that go beyond the source file's size,
- * which would result in the clone ioctl failing with -EINVAL on the
- * receiving end.
- */
- if (compressed == BTRFS_COMPRESS_NONE)
- backref_ctx->data_offset = 0;
- else
- backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);
/*
* The last extent of a file may be too large due to page alignment.
@@ -4948,7 +4932,6 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode;
struct page *page;
- char *addr;
pgoff_t index = offset >> PAGE_SHIFT;
pgoff_t last_index;
unsigned pg_offset = offset_in_page(offset);
@@ -5001,10 +4984,8 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
}
}
- addr = kmap(page);
- memcpy(sctx->send_buf + sctx->send_size, addr + pg_offset,
- cur_len);
- kunmap(page);
+ memcpy_from_page(sctx->send_buf + sctx->send_size, page,
+ pg_offset, cur_len);
unlock_page(page);
put_page(page);
index++;
@@ -6607,10 +6588,9 @@ static int changed_cb(struct btrfs_path *left_path,
struct btrfs_path *right_path,
struct btrfs_key *key,
enum btrfs_compare_tree_result result,
- void *ctx)
+ struct send_ctx *sctx)
{
int ret = 0;
- struct send_ctx *sctx = ctx;
if (result == BTRFS_COMPARE_TREE_SAME) {
if (key->type == BTRFS_INODE_REF_KEY ||
@@ -6815,7 +6795,7 @@ static int tree_compare_item(struct btrfs_path *left_path,
* If it detects a change, it aborts immediately.
*/
static int btrfs_compare_trees(struct btrfs_root *left_root,
- struct btrfs_root *right_root, void *ctx)
+ struct btrfs_root *right_root, struct send_ctx *sctx)
{
struct btrfs_fs_info *fs_info = left_root->fs_info;
int ret;
@@ -6967,7 +6947,7 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
ret = changed_cb(left_path, right_path,
&right_key,
BTRFS_COMPARE_TREE_DELETED,
- ctx);
+ sctx);
if (ret < 0)
goto out;
}
@@ -6978,7 +6958,7 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
ret = changed_cb(left_path, right_path,
&left_key,
BTRFS_COMPARE_TREE_NEW,
- ctx);
+ sctx);
if (ret < 0)
goto out;
}
@@ -6992,7 +6972,7 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
ret = changed_cb(left_path, right_path,
&left_key,
BTRFS_COMPARE_TREE_NEW,
- ctx);
+ sctx);
if (ret < 0)
goto out;
advance_left = ADVANCE;
@@ -7000,7 +6980,7 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
ret = changed_cb(left_path, right_path,
&right_key,
BTRFS_COMPARE_TREE_DELETED,
- ctx);
+ sctx);
if (ret < 0)
goto out;
advance_right = ADVANCE;
@@ -7015,7 +6995,7 @@ static int btrfs_compare_trees(struct btrfs_root *left_root,
else
result = BTRFS_COMPARE_TREE_SAME;
ret = changed_cb(left_path, right_path,
- &left_key, result, ctx);
+ &left_key, result, sctx);
if (ret < 0)
goto out;
advance_left = ADVANCE;
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index e8347461c8dd..2da6177f4b0b 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -140,6 +140,12 @@
* be freed, plus any delayed work we may not have gotten rid of in the case
* of metadata.
*
+ * FORCE_COMMIT_TRANS
+ * For use by the preemptive flusher. We use this to bypass the ticketing
+ * checks in may_commit_transaction, as we have more information about the
+ * overall state of the system and may want to commit the transaction ahead
+ * of actual ENOSPC conditions.
+ *
* OVERCOMMIT
*
* Because we hold so many reservations for metadata we will allow you to
@@ -163,6 +169,7 @@ u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
ASSERT(s_info);
return s_info->bytes_used + s_info->bytes_reserved +
s_info->bytes_pinned + s_info->bytes_readonly +
+ s_info->bytes_zone_unusable +
(may_use_included ? s_info->bytes_may_use : 0);
}
@@ -206,6 +213,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
INIT_LIST_HEAD(&space_info->ro_bgs);
INIT_LIST_HEAD(&space_info->tickets);
INIT_LIST_HEAD(&space_info->priority_tickets);
+ space_info->clamp = 1;
ret = btrfs_sysfs_add_space_info_type(info, space_info);
if (ret)
@@ -257,7 +265,7 @@ out:
void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
- u64 bytes_readonly,
+ u64 bytes_readonly, u64 bytes_zone_unusable,
struct btrfs_space_info **space_info)
{
struct btrfs_space_info *found;
@@ -273,6 +281,7 @@ void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
found->bytes_used += bytes_used;
found->disk_used += bytes_used * factor;
found->bytes_readonly += bytes_readonly;
+ found->bytes_zone_unusable += bytes_zone_unusable;
if (total_bytes > 0)
found->full = 0;
btrfs_try_granting_tickets(info, found);
@@ -422,10 +431,10 @@ static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
info->total_bytes - btrfs_space_info_used(info, true),
info->full ? "" : "not ");
btrfs_info(fs_info,
- "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
+ "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
info->total_bytes, info->bytes_used, info->bytes_pinned,
info->bytes_reserved, info->bytes_may_use,
- info->bytes_readonly);
+ info->bytes_readonly, info->bytes_zone_unusable);
DUMP_BLOCK_RSV(fs_info, global_block_rsv);
DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
@@ -454,9 +463,10 @@ again:
list_for_each_entry(cache, &info->block_groups[index], list) {
spin_lock(&cache->lock);
btrfs_info(fs_info,
- "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
+ "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
cache->start, cache->length, cache->used, cache->pinned,
- cache->reserved, cache->ro ? "[readonly]" : "");
+ cache->reserved, cache->zone_unusable,
+ cache->ro ? "[readonly]" : "");
spin_unlock(&cache->lock);
btrfs_dump_free_space(cache, bytes);
}
@@ -489,7 +499,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
{
struct btrfs_trans_handle *trans;
u64 delalloc_bytes;
- u64 dio_bytes;
+ u64 ordered_bytes;
u64 items;
long time_left;
int loops;
@@ -513,26 +523,22 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
delalloc_bytes = percpu_counter_sum_positive(
&fs_info->delalloc_bytes);
- dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
- if (delalloc_bytes == 0 && dio_bytes == 0) {
- if (trans)
- return;
- if (wait_ordered)
- btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
+ ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
+ if (delalloc_bytes == 0 && ordered_bytes == 0)
return;
- }
/*
* If we are doing more ordered than delalloc we need to just wait on
* ordered extents, otherwise we'll waste time trying to flush delalloc
* that likely won't give us the space back we need.
*/
- if (dio_bytes > delalloc_bytes)
+ if (ordered_bytes > delalloc_bytes)
wait_ordered = true;
loops = 0;
- while ((delalloc_bytes || dio_bytes) && loops < 3) {
- u64 nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
+ while ((delalloc_bytes || ordered_bytes) && loops < 3) {
+ u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
+ long nr_pages = min_t(u64, temp, LONG_MAX);
btrfs_start_delalloc_roots(fs_info, nr_pages, true);
@@ -555,15 +561,16 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
delalloc_bytes = percpu_counter_sum_positive(
&fs_info->delalloc_bytes);
- dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
+ ordered_bytes = percpu_counter_sum_positive(
+ &fs_info->ordered_bytes);
}
}
/**
- * maybe_commit_transaction - possibly commit the transaction if its ok to
- * @root - the root we're allocating for
- * @bytes - the number of bytes we want to reserve
- * @force - force the commit
+ * Possibly commit the transaction if it's ok to do so
+ *
+ * @fs_info: the filesystem
+ * @space_info: space_info we are checking for commit, either data or metadata
*
* This will check to make sure that committing the transaction will actually
* get us somewhere and then commit the transaction if it does. Otherwise it
@@ -669,7 +676,7 @@ enospc:
*/
static void flush_space(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, u64 num_bytes,
- int state)
+ enum btrfs_flush_state state, bool for_preempt)
{
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_trans_handle *trans;
@@ -738,13 +745,21 @@ static void flush_space(struct btrfs_fs_info *fs_info,
case COMMIT_TRANS:
ret = may_commit_transaction(fs_info, space_info);
break;
+ case FORCE_COMMIT_TRANS:
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ break;
+ }
+ ret = btrfs_commit_transaction(trans);
+ break;
default:
ret = -ENOSPC;
break;
}
trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
- ret);
+ ret, for_preempt);
return;
}
@@ -754,7 +769,6 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
{
u64 used;
u64 avail;
- u64 expected;
u64 to_reclaim = space_info->reclaim_size;
lockdep_assert_held(&space_info->lock);
@@ -772,43 +786,88 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
if (space_info->total_bytes + avail < used)
to_reclaim += used - (space_info->total_bytes + avail);
- if (to_reclaim)
- return to_reclaim;
-
- to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
- if (btrfs_can_overcommit(fs_info, space_info, to_reclaim,
- BTRFS_RESERVE_FLUSH_ALL))
- return 0;
-
- used = btrfs_space_info_used(space_info, true);
-
- if (btrfs_can_overcommit(fs_info, space_info, SZ_1M,
- BTRFS_RESERVE_FLUSH_ALL))
- expected = div_factor_fine(space_info->total_bytes, 95);
- else
- expected = div_factor_fine(space_info->total_bytes, 90);
-
- if (used > expected)
- to_reclaim = used - expected;
- else
- to_reclaim = 0;
- to_reclaim = min(to_reclaim, space_info->bytes_may_use +
- space_info->bytes_reserved);
return to_reclaim;
}
-static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- u64 used)
+static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info)
{
+ u64 ordered, delalloc;
u64 thresh = div_factor_fine(space_info->total_bytes, 98);
+ u64 used;
/* If we're just plain full then async reclaim just slows us down. */
if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
- return 0;
+ return false;
- if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info))
- return 0;
+ /*
+ * We have tickets queued, bail so we don't compete with the async
+ * flushers.
+ */
+ if (space_info->reclaim_size)
+ return false;
+
+ /*
+ * If we have over half of the free space occupied by reservations or
+ * pinned then we want to start flushing.
+ *
+ * We do not do the traditional thing here, which is to say
+ *
+ * if (used >= ((total_bytes + avail) / 2))
+ * return 1;
+ *
+ * because this doesn't quite work how we want. If we had more than 50%
+ * of the space_info used by bytes_used and we had 0 available we'd just
+ * constantly run the background flusher. Instead we want it to kick in
+ * if our reclaimable space exceeds our clamped free space.
+ *
+ * Our clamping range is 2^1 -> 2^8. Practically speaking that means
+ * the following:
+ *
+ * Amount of RAM Minimum threshold Maximum threshold
+ *
+ * 256GiB 1GiB 128GiB
+ * 128GiB 512MiB 64GiB
+ * 64GiB 256MiB 32GiB
+ * 32GiB 128MiB 16GiB
+ * 16GiB 64MiB 8GiB
+ *
+	 * This is the range our thresholds will fall in, corresponding to how
+ * much delalloc we need for the background flusher to kick in.
+ */
+
+ thresh = calc_available_free_space(fs_info, space_info,
+ BTRFS_RESERVE_FLUSH_ALL);
+ thresh += (space_info->total_bytes - space_info->bytes_used -
+ space_info->bytes_reserved - space_info->bytes_readonly);
+ thresh >>= space_info->clamp;
+
+ used = space_info->bytes_pinned;
+
+ /*
+ * If we have more ordered bytes than delalloc bytes then we're either
+ * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
+ * around. Preemptive flushing is only useful in that it can free up
+ * space before tickets need to wait for things to finish. In the case
+ * of ordered extents, preemptively waiting on ordered extents gets us
+	 * nothing; if our reservations are tied up in ordered extents we'll
+ * simply have to slow down writers by forcing them to wait on ordered
+ * extents.
+ *
+ * In the case that ordered is larger than delalloc, only include the
+ * block reserves that we would actually be able to directly reclaim
+ * from. In this case if we're heavy on metadata operations this will
+ * clearly be heavy enough to warrant preemptive flushing. In the case
+ * of heavy DIO or ordered reservations, preemptive flushing will just
+ * waste time and cause us to slow down.
+ */
+ ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
+ delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
+ if (ordered >= delalloc)
+ used += fs_info->delayed_refs_rsv.reserved +
+ fs_info->delayed_block_rsv.reserved;
+ else
+ used += space_info->bytes_may_use;
return (used >= thresh && !btrfs_fs_closing(fs_info) &&
!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
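A worked example of the clamped threshold, with illustrative numbers: suppose calc_available_free_space() reports 8 GiB, another 8 GiB of the space_info is neither used, reserved nor readonly, and clamp is currently 3.

	thresh = (8 GiB + 8 GiB) >> 3 = 2 GiB

Preemptive flushing then starts once bytes_pinned plus either the delayed reserves (when ordered bytes dominate) or bytes_may_use (when delalloc dominates) crosses 2 GiB. Each time a ticket has to be queued while delalloc dominates, the clamp grows toward 8, halving the threshold; one clean preemptive pass relaxes it again.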
@@ -922,7 +981,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
struct btrfs_fs_info *fs_info;
struct btrfs_space_info *space_info;
u64 to_reclaim;
- int flush_state;
+ enum btrfs_flush_state flush_state;
int commit_cycles = 0;
u64 last_tickets_id;
@@ -941,7 +1000,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
flush_state = FLUSH_DELAYED_ITEMS_NR;
do {
- flush_space(fs_info, space_info, to_reclaim, flush_state);
+ flush_space(fs_info, space_info, to_reclaim, flush_state, false);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
space_info->flush = 0;
@@ -990,6 +1049,105 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
}
/*
+ * This handles pre-flushing of metadata space before we get to the point that
+ * we need to start blocking threads on tickets. The logic here is different
+ * from the other flush paths because it doesn't rely on tickets to tell us how
+ * much we need to flush, instead it attempts to keep us below the 80% full
+ * watermark of space by flushing whichever reservation pool is currently the
+ * largest.
+ */
+static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
+{
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_space_info *space_info;
+ struct btrfs_block_rsv *delayed_block_rsv;
+ struct btrfs_block_rsv *delayed_refs_rsv;
+ struct btrfs_block_rsv *global_rsv;
+ struct btrfs_block_rsv *trans_rsv;
+ int loops = 0;
+
+ fs_info = container_of(work, struct btrfs_fs_info,
+ preempt_reclaim_work);
+ space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+ delayed_block_rsv = &fs_info->delayed_block_rsv;
+ delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+ global_rsv = &fs_info->global_block_rsv;
+ trans_rsv = &fs_info->trans_block_rsv;
+
+ spin_lock(&space_info->lock);
+ while (need_preemptive_reclaim(fs_info, space_info)) {
+ enum btrfs_flush_state flush;
+ u64 delalloc_size = 0;
+ u64 to_reclaim, block_rsv_size;
+ u64 global_rsv_size = global_rsv->reserved;
+
+ loops++;
+
+ /*
+ * We don't have a precise counter for the metadata being
+ * reserved for delalloc, so we'll approximate it by subtracting
+ * out the block rsv's space from the bytes_may_use. If that
+ * amount is higher than the individual reserves, then we can
+ * assume it's tied up in delalloc reservations.
+ */
+ block_rsv_size = global_rsv_size +
+ delayed_block_rsv->reserved +
+ delayed_refs_rsv->reserved +
+ trans_rsv->reserved;
+ if (block_rsv_size < space_info->bytes_may_use)
+ delalloc_size = space_info->bytes_may_use - block_rsv_size;
+ spin_unlock(&space_info->lock);
+
+ /*
+ * We don't want to include the global_rsv in our calculation,
+ * because that's space we can't touch. Subtract it from the
+ * block_rsv_size for the next checks.
+ */
+ block_rsv_size -= global_rsv_size;
+
+ /*
+ * We really want to avoid flushing delalloc too much, as it
+ * could result in poor allocation patterns, so only flush it if
+ * it's larger than the rest of the pools combined.
+ */
+ if (delalloc_size > block_rsv_size) {
+ to_reclaim = delalloc_size;
+ flush = FLUSH_DELALLOC;
+ } else if (space_info->bytes_pinned >
+ (delayed_block_rsv->reserved +
+ delayed_refs_rsv->reserved)) {
+ to_reclaim = space_info->bytes_pinned;
+ flush = FORCE_COMMIT_TRANS;
+ } else if (delayed_block_rsv->reserved >
+ delayed_refs_rsv->reserved) {
+ to_reclaim = delayed_block_rsv->reserved;
+ flush = FLUSH_DELAYED_ITEMS_NR;
+ } else {
+ to_reclaim = delayed_refs_rsv->reserved;
+ flush = FLUSH_DELAYED_REFS_NR;
+ }
+
+ /*
+ * We don't want to reclaim everything, just a portion, so scale
+		 * down to_reclaim by 1/4. If that takes us down to 0,
+		 * reclaim 1 item's worth.
+ */
+ to_reclaim >>= 2;
+ if (!to_reclaim)
+ to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
+ flush_space(fs_info, space_info, to_reclaim, flush, true);
+ cond_resched();
+ spin_lock(&space_info->lock);
+ }
+
+ /* We only went through once, back off our clamping. */
+ if (loops == 1 && !space_info->reclaim_size)
+ space_info->clamp = max(1, space_info->clamp - 1);
+ trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
+ spin_unlock(&space_info->lock);
+}
+
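A worked pass through the pool selection above, with illustrative numbers: say bytes_may_use is 3 GiB, the global rsv holds 512 MiB, delayed items 256 MiB, delayed refs 128 MiB, the trans rsv 64 MiB, and 200 MiB is pinned.

	block_rsv_size  = 512M + 256M + 128M + 64M = 960M
	delalloc_size   = 3G - 960M                = 2112M
	block_rsv_size -= 512M (global)            = 448M
	2112M > 448M                               -> flush = FLUSH_DELALLOC
	to_reclaim      = 2112M >> 2               = 528M

Had delalloc come out below 448M, the 200M of pinned bytes would lose to the 384M of combined delayed reserves, and the larger of the two (the 256M delayed-items rsv) would be flushed instead.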
+/*
* FLUSH_DELALLOC_WAIT:
* Space is freed from flushing delalloc in one of two ways.
*
@@ -1054,7 +1212,7 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
struct btrfs_fs_info *fs_info;
struct btrfs_space_info *space_info;
u64 last_tickets_id;
- int flush_state = 0;
+ enum btrfs_flush_state flush_state = 0;
fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
space_info = fs_info->data_sinfo;
@@ -1069,7 +1227,7 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
spin_unlock(&space_info->lock);
while (!space_info->full) {
- flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
+ flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
space_info->flush = 0;
@@ -1082,7 +1240,7 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
while (flush_state < ARRAY_SIZE(data_flush_states)) {
flush_space(fs_info, space_info, U64_MAX,
- data_flush_states[flush_state]);
+ data_flush_states[flush_state], false);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
space_info->flush = 0;
@@ -1115,6 +1273,8 @@ void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
+ INIT_WORK(&fs_info->preempt_reclaim_work,
+ btrfs_preempt_reclaim_metadata_space);
}
static const enum btrfs_flush_state priority_flush_states[] = {
@@ -1153,7 +1313,8 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
flush_state = 0;
do {
- flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
+ flush_space(fs_info, space_info, to_reclaim, states[flush_state],
+ false);
flush_state++;
spin_lock(&space_info->lock);
if (ticket->bytes == 0) {
@@ -1169,7 +1330,7 @@ static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
struct reserve_ticket *ticket)
{
while (!space_info->full) {
- flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
+ flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
spin_lock(&space_info->lock);
if (ticket->bytes == 0) {
spin_unlock(&space_info->lock);
@@ -1214,11 +1375,14 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
}
/**
- * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
- * @fs_info - the fs
- * @space_info - the space_info for the reservation
- * @ticket - the ticket for the reservation
- * @flush - how much we can flush
+ * Do the appropriate flushing and waiting for a ticket
+ *
+ * @fs_info: the filesystem
+ * @space_info: space info for the reservation
+ * @ticket: ticket for the reservation
+ * @start_ns: timestamp when the reservation started
+ * @orig_bytes: number of bytes originally reserved
+ * @flush: how much we can flush
*
* This does the work of figuring out how to flush for the ticket, waiting for
* the reservation, and returning the appropriate error if there is one.
@@ -1226,6 +1390,7 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
struct reserve_ticket *ticket,
+ u64 start_ns, u64 orig_bytes,
enum btrfs_reserve_flush_enum flush)
{
int ret;
@@ -1281,6 +1446,8 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
* space wasn't reserved at all).
*/
ASSERT(!(ticket->bytes == 0 && ticket->error));
+ trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
+ start_ns, flush, ticket->error);
return ret;
}
@@ -1294,12 +1461,31 @@ static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}
+static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info)
+{
+ u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
+ u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
+
+ /*
+ * If we're heavy on ordered operations then clamping won't help us. We
+ * need to clamp specifically to keep up with dirtying buffered
+ * writers, because there isn't a 1:1 correlation between writing delalloc
+ * and freeing space, like there is with flushing delayed refs or
+ * delayed nodes. If we're already more ordered than delalloc then
+ * we're keeping up, otherwise we aren't and should probably clamp.
+ */
+ if (ordered < delalloc)
+ space_info->clamp = min(space_info->clamp + 1, 8);
+}
+
/**
- * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
- * @root - the root we're allocating for
- * @space_info - the space info we want to allocate from
- * @orig_bytes - the number of bytes we want
- * @flush - whether or not we can flush to make our reservation
+ * Try to reserve bytes from the block_rsv's space
+ *
+ * @fs_info: the filesystem
+ * @space_info: space info we want to allocate from
+ * @orig_bytes: number of bytes we want
+ * @flush: whether or not we can flush to make our reservation
*
* This will reserve orig_bytes number of bytes from the space info associated
* with the block_rsv. If there is not enough space it will make an attempt to
@@ -1314,6 +1500,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
{
struct work_struct *async_work;
struct reserve_ticket ticket;
+ u64 start_ns = 0;
u64 used;
int ret = 0;
bool pending_tickets;
@@ -1366,6 +1553,9 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
space_info->reclaim_size += ticket.bytes;
init_waitqueue_head(&ticket.wait);
ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
+ if (trace_btrfs_reserve_ticket_enabled())
+ start_ns = ktime_get_ns();
+
if (flush == BTRFS_RESERVE_FLUSH_ALL ||
flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
flush == BTRFS_RESERVE_FLUSH_DATA) {
@@ -1382,6 +1572,14 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
list_add_tail(&ticket.list,
&space_info->priority_tickets);
}
+
+ /*
+ * We were forced to add a reserve ticket, so our preemptive
+ * flushing is unable to keep up. Clamp down on the threshold
+ * for the preemptive flushing in order to keep up with the
+ * workload.
+ */
+ maybe_clamp_preempt(fs_info, space_info);
} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
used += orig_bytes;
/*
@@ -1390,27 +1588,29 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* the async reclaim as we will panic.
*/
if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
- need_do_async_reclaim(fs_info, space_info, used) &&
- !work_busy(&fs_info->async_reclaim_work)) {
+ need_preemptive_reclaim(fs_info, space_info) &&
+ !work_busy(&fs_info->preempt_reclaim_work)) {
trace_btrfs_trigger_flush(fs_info, space_info->flags,
orig_bytes, flush, "preempt");
queue_work(system_unbound_wq,
- &fs_info->async_reclaim_work);
+ &fs_info->preempt_reclaim_work);
}
}
spin_unlock(&space_info->lock);
if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
return ret;
- return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
+ return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
+ orig_bytes, flush);
}
/**
- * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
- * @root - the root we're allocating for
- * @block_rsv - the block_rsv we're allocating for
- * @orig_bytes - the number of bytes we want
- * @flush - whether or not we can flush to make our reservation
+ * Try to reserve metadata bytes from the block_rsv's space
+ *
+ * @root: the root we're allocating for
+ * @block_rsv: block_rsv we're allocating for
+ * @orig_bytes: number of bytes we want
+ * @flush: whether or not we can flush to make our reservation
*
* This will reserve orig_bytes number of bytes from the space info associated
* with the block_rsv. If there is not enough space it will make an attempt to
@@ -1448,10 +1648,11 @@ int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
}
/**
- * btrfs_reserve_data_bytes - try to reserve data bytes for an allocation
- * @fs_info - the filesystem
- * @bytes - the number of bytes we need
- * @flush - how we are allowed to flush
+ * Try to reserve data bytes for an allocation
+ *
+ * @fs_info: the filesystem
+ * @bytes: number of bytes we need
+ * @flush: how we are allowed to flush
*
* This will reserve bytes from the data space info. If there is not enough
* space then we will attempt to flush space as specified by flush.
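
The clamp mechanism above works in both directions: maybe_clamp_preempt() bumps space_info->clamp (capped at 8) whenever a reservation is forced onto a ticket, and the preemptive worker backs it off once a pass finishes with nothing left to reclaim. The threshold that triggers preemptive flushing is shifted right by clamp, so each bump halves the free-space headroom tolerated before background reclaim kicks in. A minimal user-space sketch of that scaling (preempt_thresh and the 8 GiB figure are illustrative only; the kernel calculation also folds in the global reserve and unallocated device space):

#include <stdio.h>

/* Sketch of the ">> clamp" scaling used for the preemptive reclaim threshold. */
static unsigned long long preempt_thresh(unsigned long long avail, int clamp)
{
	return avail >> clamp;
}

int main(void)
{
	const unsigned long long avail = 8ULL << 30;	/* pretend 8 GiB of headroom */

	for (int clamp = 1; clamp <= 8; clamp++)
		printf("clamp=%d -> threshold=%llu MiB\n",
		       clamp, preempt_thresh(avail, clamp) >> 20);
	return 0;
}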
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index 5646393b928c..b1a8ffb03b3e 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -17,11 +17,17 @@ struct btrfs_space_info {
u64 bytes_may_use; /* number of bytes that may be used for
delalloc/allocations */
u64 bytes_readonly; /* total bytes that are read only */
+ u64 bytes_zone_unusable; /* total bytes that are unusable until
+ resetting the device zone */
u64 max_extent_size; /* This will hold the maximum extent size of
the space info if we had an ENOSPC in the
allocator. */
+ int clamp; /* Used to scale our threshold for preemptive
+ flushing. The value is >> clamp, so turns
+ out to be a 2^clamp divisor. */
+
unsigned int full:1; /* indicates that we cannot allocate any more
chunks for this space */
unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
@@ -119,7 +125,7 @@ DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
- u64 bytes_readonly,
+ u64 bytes_readonly, u64 bytes_zone_unusable,
struct btrfs_space_info **space_info);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
u64 flags);
@@ -152,4 +158,21 @@ static inline void btrfs_space_info_free_bytes_may_use(
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
enum btrfs_reserve_flush_enum flush);
+static inline void __btrfs_mod_total_bytes_pinned(
+ struct btrfs_space_info *space_info,
+ s64 mod)
+{
+ percpu_counter_add_batch(&space_info->total_bytes_pinned, mod,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
+}
+
+static inline void btrfs_mod_total_bytes_pinned(struct btrfs_fs_info *fs_info,
+ u64 flags, s64 mod)
+{
+ struct btrfs_space_info *space_info = btrfs_find_space_info(fs_info, flags);
+
+ ASSERT(space_info);
+ __btrfs_mod_total_bytes_pinned(space_info, mod);
+}
+
#endif /* BTRFS_SPACE_INFO_H */
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
new file mode 100644
index 000000000000..c69049e7daa9
--- /dev/null
+++ b/fs/btrfs/subpage.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+#include "ctree.h"
+#include "subpage.h"
+
+int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
+ struct page *page, enum btrfs_subpage_type type)
+{
+ struct btrfs_subpage *subpage = NULL;
+ int ret;
+
+ /*
+ * We have cases like a dummy extent buffer page, which is not mapped
+ * and doesn't need to be locked.
+ */
+ if (page->mapping)
+ ASSERT(PageLocked(page));
+ /* Either not subpage, or the page already has private attached */
+ if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
+ return 0;
+
+ ret = btrfs_alloc_subpage(fs_info, &subpage, type);
+ if (ret < 0)
+ return ret;
+ attach_page_private(page, subpage);
+ return 0;
+}
+
+void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
+ struct page *page)
+{
+ struct btrfs_subpage *subpage;
+
+ /* Either not subpage, or already detached */
+ if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
+ return;
+
+ subpage = (struct btrfs_subpage *)detach_page_private(page);
+ ASSERT(subpage);
+ btrfs_free_subpage(subpage);
+}
+
+int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
+ struct btrfs_subpage **ret,
+ enum btrfs_subpage_type type)
+{
+ if (fs_info->sectorsize == PAGE_SIZE)
+ return 0;
+
+ *ret = kzalloc(sizeof(struct btrfs_subpage), GFP_NOFS);
+ if (!*ret)
+ return -ENOMEM;
+ spin_lock_init(&(*ret)->lock);
+ if (type == BTRFS_SUBPAGE_METADATA)
+ atomic_set(&(*ret)->eb_refs, 0);
+ else
+ atomic_set(&(*ret)->readers, 0);
+ return 0;
+}
+
+void btrfs_free_subpage(struct btrfs_subpage *subpage)
+{
+ kfree(subpage);
+}
+
+/*
+ * Increase the eb_refs of current subpage.
+ *
+ * This is important for eb allocation, to prevent race with last eb freeing
+ * of the same page.
+ * With the eb_refs increased before the eb inserted into radix tree,
+ * detach_extent_buffer_page() won't detach the page private while we're still
+ * allocating the extent buffer.
+ */
+void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
+ struct page *page)
+{
+ struct btrfs_subpage *subpage;
+
+ if (fs_info->sectorsize == PAGE_SIZE)
+ return;
+
+ ASSERT(PagePrivate(page) && page->mapping);
+ lockdep_assert_held(&page->mapping->private_lock);
+
+ subpage = (struct btrfs_subpage *)page->private;
+ atomic_inc(&subpage->eb_refs);
+}
+
+void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
+ struct page *page)
+{
+ struct btrfs_subpage *subpage;
+
+ if (fs_info->sectorsize == PAGE_SIZE)
+ return;
+
+ ASSERT(PagePrivate(page) && page->mapping);
+ lockdep_assert_held(&page->mapping->private_lock);
+
+ subpage = (struct btrfs_subpage *)page->private;
+ ASSERT(atomic_read(&subpage->eb_refs));
+ atomic_dec(&subpage->eb_refs);
+}
+
+static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ /* Basic checks */
+ ASSERT(PagePrivate(page) && page->private);
+ ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
+ IS_ALIGNED(len, fs_info->sectorsize));
+ /*
+ * The range check only works for mapped pages; we can still have
+ * unmapped pages like dummy extent buffer pages.
+ */
+ if (page->mapping)
+ ASSERT(page_offset(page) <= start &&
+ start + len <= page_offset(page) + PAGE_SIZE);
+}
+
+void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ const int nbits = len >> fs_info->sectorsize_bits;
+ int ret;
+
+ btrfs_subpage_assert(fs_info, page, start, len);
+
+ ret = atomic_add_return(nbits, &subpage->readers);
+ ASSERT(ret == nbits);
+}
+
+void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ const int nbits = len >> fs_info->sectorsize_bits;
+
+ btrfs_subpage_assert(fs_info, page, start, len);
+ ASSERT(atomic_read(&subpage->readers) >= nbits);
+ if (atomic_sub_and_test(nbits, &subpage->readers))
+ unlock_page(page);
+}
+
+/*
+ * Convert the [start, start + len) range into a u16 bitmap
+ *
+ * For example: if start == page_offset() + 16K, len = 16K, we get 0x00f0.
+ */
+static u16 btrfs_subpage_calc_bitmap(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ const int bit_start = offset_in_page(start) >> fs_info->sectorsize_bits;
+ const int nbits = len >> fs_info->sectorsize_bits;
+
+ btrfs_subpage_assert(fs_info, page, start, len);
+
+ /*
+ * Here nbits can be 16, which would overflow a u16. Do the left shift
+ * in unsigned long (at least u32) first and only then truncate the
+ * result to u16.
+ */
+ return (u16)(((1UL << nbits) - 1) << bit_start);
+}
+
+void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
+ unsigned long flags;
+
+ spin_lock_irqsave(&subpage->lock, flags);
+ subpage->uptodate_bitmap |= tmp;
+ if (subpage->uptodate_bitmap == U16_MAX)
+ SetPageUptodate(page);
+ spin_unlock_irqrestore(&subpage->lock, flags);
+}
+
+void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
+ unsigned long flags;
+
+ spin_lock_irqsave(&subpage->lock, flags);
+ subpage->uptodate_bitmap &= ~tmp;
+ ClearPageUptodate(page);
+ spin_unlock_irqrestore(&subpage->lock, flags);
+}
+
+void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
+ unsigned long flags;
+
+ spin_lock_irqsave(&subpage->lock, flags);
+ subpage->error_bitmap |= tmp;
+ SetPageError(page);
+ spin_unlock_irqrestore(&subpage->lock, flags);
+}
+
+void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len)
+{
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+ const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
+ unsigned long flags;
+
+ spin_lock_irqsave(&subpage->lock, flags);
+ subpage->error_bitmap &= ~tmp;
+ if (subpage->error_bitmap == 0)
+ ClearPageError(page);
+ spin_unlock_irqrestore(&subpage->lock, flags);
+}
+
+/*
+ * Unlike set/clear, which depends on each page's status, the test helpers
+ * check all bits in the same way.
+ */
+#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name) \
+bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len) \
+{ \
+ struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
+ const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len); \
+ unsigned long flags; \
+ bool ret; \
+ \
+ spin_lock_irqsave(&subpage->lock, flags); \
+ ret = ((subpage->name##_bitmap & tmp) == tmp); \
+ spin_unlock_irqrestore(&subpage->lock, flags); \
+ return ret; \
+}
+IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
+IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
+
+/*
+ * Note that in selftests (extent-io-tests) a NULL fs_info can be passed in.
+ * We only test sectorsize == PAGE_SIZE cases so far, so we can fall back to
+ * the regular sectorsize branch.
+ */
+#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func, \
+ test_page_func) \
+void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len) \
+{ \
+ if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
+ set_page_func(page); \
+ return; \
+ } \
+ btrfs_subpage_set_##name(fs_info, page, start, len); \
+} \
+void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len) \
+{ \
+ if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) { \
+ clear_page_func(page); \
+ return; \
+ } \
+ btrfs_subpage_clear_##name(fs_info, page, start, len); \
+} \
+bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len) \
+{ \
+ if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) \
+ return test_page_func(page); \
+ return btrfs_subpage_test_##name(fs_info, page, start, len); \
+}
+IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
+ PageUptodate);
+IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
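
The bitmap conversion above is the heart of the subpage tracking: each sector of a page maps to one bit of a u16, so a 64K page with 4K sectors uses all 16 bits. The following user-space sketch (calc_bitmap is a stand-in for btrfs_subpage_calc_bitmap(), assuming 64K pages and 4K sectors) reproduces the example from the comment, start == page_offset() + 16K with len == 16K yielding 0x00f0:

#include <stdio.h>
#include <stdint.h>

#define SECTORSIZE_BITS		12		/* 4K sectors */
#define SUBPAGE_PAGE_SIZE	(64 * 1024)	/* 64K pages */

static uint16_t calc_bitmap(unsigned int offset_in_page, unsigned int len)
{
	const int bit_start = offset_in_page >> SECTORSIZE_BITS;
	const int nbits = len >> SECTORSIZE_BITS;

	/* Shift in unsigned long first so nbits == 16 does not overflow a u16. */
	return (uint16_t)(((1UL << nbits) - 1) << bit_start);
}

int main(void)
{
	printf("0x%04x\n", calc_bitmap(16 * 1024, 16 * 1024));	/* 0x00f0 */
	printf("0x%04x\n", calc_bitmap(0, SUBPAGE_PAGE_SIZE));	/* 0xffff */
	return 0;
}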
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
new file mode 100644
index 000000000000..b86a4881475d
--- /dev/null
+++ b/fs/btrfs/subpage.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_SUBPAGE_H
+#define BTRFS_SUBPAGE_H
+
+#include <linux/spinlock.h>
+
+/*
+ * The maximum page size we support is 64K and the minimum sector size is 4K,
+ * so a u16 bitmap is sufficient. The regular bitmap_* helpers are not used
+ * for size reasons.
+ */
+#define BTRFS_SUBPAGE_BITMAP_SIZE 16
+
+/*
+ * Structure to trace status of each sector inside a page, attached to
+ * page::private for both data and metadata inodes.
+ */
+struct btrfs_subpage {
+ /* Common members for both data and metadata pages */
+ spinlock_t lock;
+ u16 uptodate_bitmap;
+ u16 error_bitmap;
+ union {
+ /*
+ * Structures only used by metadata
+ *
+ * @eb_refs should only be operated under private_lock, as it
+ * manages whether the subpage can be detached.
+ */
+ atomic_t eb_refs;
+ /* Structures only used by data */
+ struct {
+ atomic_t readers;
+ };
+ };
+};
+
+enum btrfs_subpage_type {
+ BTRFS_SUBPAGE_METADATA,
+ BTRFS_SUBPAGE_DATA,
+};
+
+int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
+ struct page *page, enum btrfs_subpage_type type);
+void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
+ struct page *page);
+
+/* Allocate additional data where page represents more than one sector */
+int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
+ struct btrfs_subpage **ret,
+ enum btrfs_subpage_type type);
+void btrfs_free_subpage(struct btrfs_subpage *subpage);
+
+void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
+ struct page *page);
+void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
+ struct page *page);
+
+void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len);
+void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
+ struct page *page, u64 start, u32 len);
+
+/*
+ * Template for subpage related operations.
+ *
+ * btrfs_subpage_*() are for call sites where the page has subpage attached and
+ * the range is ensured to be inside the page.
+ *
+ * btrfs_page_*() are for call sites where the page can either be a subpage
+ * page or a regular page; the helpers handle both cases, but the range still
+ * needs to be inside the page.
+ */
+#define DECLARE_BTRFS_SUBPAGE_OPS(name) \
+void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len); \
+void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len); \
+bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len); \
+void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len); \
+void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len); \
+bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info, \
+ struct page *page, u64 start, u32 len);
+
+DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
+DECLARE_BTRFS_SUBPAGE_OPS(error);
+
+#endif
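
For reference, each DECLARE_BTRFS_SUBPAGE_OPS(name) use above expands to six prototypes; the uptodate case, written out by hand (the expansion is mechanical, shown only to make the generated API visible), is:

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
bool btrfs_subpage_test_uptodate(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
void btrfs_page_set_uptodate(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
void btrfs_page_clear_uptodate(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);
bool btrfs_page_test_uptodate(const struct btrfs_fs_info *fs_info,
		struct page *page, u64 start, u32 len);

The corresponding definitions are generated by IMPLEMENT_BTRFS_SUBPAGE_TEST_OP() and IMPLEMENT_BTRFS_PAGE_OPS() in subpage.c above.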
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 12d7d3be7cd4..f7a4ad86adee 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -48,7 +48,6 @@
#include "tests/btrfs-tests.h"
#include "block-group.h"
#include "discard.h"
-
#include "qgroup.h"
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>
@@ -1919,8 +1918,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
btrfs_resize_thread_pool(fs_info,
fs_info->thread_pool_size, old_thread_pool_size);
- if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
- btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) !=
+ (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
(!sb_rdonly(sb) || (*flags & SB_RDONLY))) {
btrfs_warn(fs_info,
"remount supports changing free space tree only from ro to rw");
@@ -2028,6 +2027,13 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
ret = -EINVAL;
goto restore;
}
+ if (fs_info->sectorsize < PAGE_SIZE) {
+ btrfs_warn(fs_info,
+ "read-write mount is not yet allowed for sectorsize %u page size %lu",
+ fs_info->sectorsize, PAGE_SIZE);
+ ret = -EINVAL;
+ goto restore;
+ }
/*
* NOTE: when remounting with a change that does writes, don't
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 19b9fffa2c9c..6eb1c50fa98c 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -666,6 +666,7 @@ SPACE_INFO_ATTR(bytes_pinned);
SPACE_INFO_ATTR(bytes_reserved);
SPACE_INFO_ATTR(bytes_may_use);
SPACE_INFO_ATTR(bytes_readonly);
+SPACE_INFO_ATTR(bytes_zone_unusable);
SPACE_INFO_ATTR(disk_used);
SPACE_INFO_ATTR(disk_total);
BTRFS_ATTR(space_info, total_bytes_pinned,
@@ -679,6 +680,7 @@ static struct attribute *space_info_attrs[] = {
BTRFS_ATTR_PTR(space_info, bytes_reserved),
BTRFS_ATTR_PTR(space_info, bytes_may_use),
BTRFS_ATTR_PTR(space_info, bytes_readonly),
+ BTRFS_ATTR_PTR(space_info, bytes_zone_unusable),
BTRFS_ATTR_PTR(space_info, disk_used),
BTRFS_ATTR_PTR(space_info, disk_total),
BTRFS_ATTR_PTR(space_info, total_bytes_pinned),
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 6bd97bd4cb37..3a4099a2bf05 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -62,7 +62,7 @@ struct inode *btrfs_new_test_inode(void)
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- inode_init_owner(inode, NULL, S_IFREG);
+ inode_init_owner(&init_user_ns, inode, NULL, S_IFREG);
return inode;
}
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index 57379e96ccc9..c0aefe6dee0b 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -507,7 +507,7 @@ static int test_rmap_block(struct btrfs_fs_info *fs_info,
goto out_free;
}
- ret = btrfs_rmap_block(fs_info, em->start, btrfs_sb_offset(1),
+ ret = btrfs_rmap_block(fs_info, em->start, NULL, btrfs_sb_offset(1),
&logical, &out_ndaddrs, &out_stripe_len);
if (ret || (out_ndaddrs == 0 && test->expected_mapped_addr)) {
test_err("didn't rmap anything but expected %d",
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 6af7f2bf92de..acff6bb49a97 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -21,6 +21,7 @@
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
+#include "zoned.h"
#define BTRFS_ROOT_TRANS_TAG 0
@@ -107,6 +108,11 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
__TRANS_JOIN |
__TRANS_JOIN_NOLOCK |
__TRANS_JOIN_NOSTART),
+ [TRANS_STATE_SUPER_COMMITTED] = (__TRANS_START |
+ __TRANS_ATTACH |
+ __TRANS_JOIN |
+ __TRANS_JOIN_NOLOCK |
+ __TRANS_JOIN_NOSTART),
[TRANS_STATE_COMPLETED] = (__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
@@ -375,6 +381,8 @@ loop:
spin_lock_init(&cur_trans->dirty_bgs_lock);
INIT_LIST_HEAD(&cur_trans->deleted_bgs);
spin_lock_init(&cur_trans->dropped_roots_lock);
+ INIT_LIST_HEAD(&cur_trans->releasing_ebs);
+ spin_lock_init(&cur_trans->releasing_ebs_lock);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
@@ -826,10 +834,11 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
return trans;
}
-/* wait for a transaction commit to be fully complete */
-static noinline void wait_for_commit(struct btrfs_transaction *commit)
+/* Wait for a transaction commit to reach at least the given state. */
+static noinline void wait_for_commit(struct btrfs_transaction *commit,
+ const enum btrfs_trans_state min_state)
{
- wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
+ wait_event(commit->commit_wait, commit->state >= min_state);
}
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
@@ -884,7 +893,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
goto out; /* nothing committing|committed */
}
- wait_for_commit(cur_trans);
+ wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
btrfs_put_transaction(cur_trans);
out:
return ret;
@@ -909,9 +918,8 @@ bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_transaction *cur_trans = trans->transaction;
- smp_mb();
if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
- cur_trans->delayed_refs.flushing)
+ test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
return true;
return should_end_transaction(trans);
@@ -1230,10 +1238,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
if (ret)
return ret;
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
- if (ret)
- return ret;
-
ret = btrfs_run_dev_stats(trans);
if (ret)
return ret;
@@ -1248,10 +1252,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
if (ret)
return ret;
- /* run_qgroups might have added some more refs */
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
- if (ret)
- return ret;
again:
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
struct btrfs_root *root;
@@ -1266,15 +1266,24 @@ again:
ret = update_cowonly_root(trans, root);
if (ret)
return ret;
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
- if (ret)
- return ret;
}
+ /* Now flush any delayed refs generated by updating all of the roots */
+ ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+ if (ret)
+ return ret;
+
while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
ret = btrfs_write_dirty_block_groups(trans);
if (ret)
return ret;
+
+ /*
+ * We're writing the dirty block groups, which could generate
+ * delayed refs, which could generate more dirty block groups,
+ * so we want to keep this flushing in this loop to make sure
+ * everything gets run.
+ */
ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
if (ret)
return ret;
@@ -1319,7 +1328,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
struct btrfs_root *gang[8];
int i;
int ret;
- int err = 0;
spin_lock(&fs_info->fs_roots_radix_lock);
while (1) {
@@ -1331,6 +1339,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
break;
for (i = 0; i < ret; i++) {
struct btrfs_root *root = gang[i];
+ int ret2;
+
radix_tree_tag_clear(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
BTRFS_ROOT_TRANS_TAG);
@@ -1350,17 +1360,17 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
root->node);
}
- err = btrfs_update_root(trans, fs_info->tree_root,
+ ret2 = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key,
&root->root_item);
+ if (ret2)
+ return ret2;
spin_lock(&fs_info->fs_roots_radix_lock);
- if (err)
- break;
btrfs_qgroup_free_meta_all_pertrans(root);
}
}
spin_unlock(&fs_info->fs_roots_radix_lock);
- return err;
+ return 0;
}
/*
@@ -1433,6 +1443,23 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
record_root_in_trans(trans, src, 1);
/*
+ * btrfs_qgroup_inherit relies on a consistent view of the usage for the
+ * src root, so we must run the delayed refs here.
+ *
+ * However this isn't particularly foolproof, because there's no
+ * synchronization keeping us from changing the tree after this point
+ * before we do the qgroup_inherit, or even from making changes while
+ * we're doing the qgroup_inherit. But that's a problem for the future;
+ * for now, flush the delayed refs to narrow the race window where the
+ * qgroup counters could end up wrong.
+ */
+ ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+
+ /*
* We are going to commit transaction, see btrfs_commit_transaction()
* comment for reason locking tree_log_mutex
*/
@@ -1525,7 +1552,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ASSERT(pending->root_item);
new_root_item = pending->root_item;
- pending->error = btrfs_find_free_objectid(tree_root, &objectid);
+ pending->error = btrfs_get_free_objectid(tree_root, &objectid);
if (pending->error)
goto no_free_objectid;
@@ -1685,12 +1712,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto fail;
}
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto fail;
- }
-
/*
* Do special qgroup accounting for snapshot, as we do some qgroup
* snapshot hack to do fast snapshot.
@@ -1738,12 +1759,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
}
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto fail;
- }
-
fail:
pending->error = ret;
dir_item_existed:
@@ -2042,32 +2057,25 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
btrfs_trans_release_metadata(trans);
trans->block_rsv = NULL;
- /* make a pass through all the delayed refs we have so far
- * any runnings procs may add more while we are here
- */
- ret = btrfs_run_delayed_refs(trans, 0);
- if (ret) {
- btrfs_end_transaction(trans);
- return ret;
- }
-
- cur_trans = trans->transaction;
-
/*
- * set the flushing flag so procs in this transaction have to
- * start sending their work down.
+ * We only want one transaction commit doing the flushing so we do not
+ * waste a bunch of time on lock contention on the extent root node.
*/
- cur_trans->delayed_refs.flushing = 1;
- smp_wmb();
+ if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
+ &cur_trans->delayed_refs.flags)) {
+ /*
+ * Make a pass through all the delayed refs we have so far.
+ * Any running threads may add more while we are here.
+ */
+ ret = btrfs_run_delayed_refs(trans, 0);
+ if (ret) {
+ btrfs_end_transaction(trans);
+ return ret;
+ }
+ }
btrfs_create_pending_block_groups(trans);
- ret = btrfs_run_delayed_refs(trans, 0);
- if (ret) {
- btrfs_end_transaction(trans);
- return ret;
- }
-
if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
int run_it = 0;
@@ -2101,11 +2109,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
spin_lock(&fs_info->trans_lock);
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
+ enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
+
spin_unlock(&fs_info->trans_lock);
refcount_inc(&cur_trans->use_count);
- ret = btrfs_end_transaction(trans);
- wait_for_commit(cur_trans);
+ if (trans->in_fsync)
+ want_state = TRANS_STATE_SUPER_COMMITTED;
+ ret = btrfs_end_transaction(trans);
+ wait_for_commit(cur_trans, want_state);
if (TRANS_ABORTED(cur_trans))
ret = cur_trans->aborted;
@@ -2119,13 +2131,19 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
wake_up(&fs_info->transaction_blocked_wait);
if (cur_trans->list.prev != &fs_info->trans_list) {
+ enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
+
+ if (trans->in_fsync)
+ want_state = TRANS_STATE_SUPER_COMMITTED;
+
prev_trans = list_entry(cur_trans->list.prev,
struct btrfs_transaction, list);
- if (prev_trans->state != TRANS_STATE_COMPLETED) {
+ if (prev_trans->state < want_state) {
refcount_inc(&prev_trans->use_count);
spin_unlock(&fs_info->trans_lock);
- wait_for_commit(prev_trans);
+ wait_for_commit(prev_trans, want_state);
+
ret = READ_ONCE(prev_trans->aborted);
btrfs_put_transaction(prev_trans);
@@ -2335,6 +2353,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
goto scrub_continue;
}
+ /*
+ * At this point, we should have written all the tree blocks allocated
+ * in this transaction. So it's now safe to free the redirtied extent
+ * buffers.
+ */
+ btrfs_free_redirty_list(cur_trans);
+
ret = write_all_supers(fs_info, 0);
/*
* the super is written, we can safely allow the tree-loggers
@@ -2344,6 +2369,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (ret)
goto scrub_continue;
+ /*
+ * We needn't acquire the lock here because there is no other task
+ * which can change it.
+ */
+ cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
+ wake_up(&cur_trans->commit_wait);
+
btrfs_finish_extent_commit(trans);
if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 31ca81bad822..6335716e513f 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -16,6 +16,7 @@ enum btrfs_trans_state {
TRANS_STATE_COMMIT_START,
TRANS_STATE_COMMIT_DOING,
TRANS_STATE_UNBLOCKED,
+ TRANS_STATE_SUPER_COMMITTED,
TRANS_STATE_COMPLETED,
TRANS_STATE_MAX,
};
@@ -92,6 +93,9 @@ struct btrfs_transaction {
*/
atomic_t pending_ordered;
wait_queue_head_t pending_wait;
+
+ spinlock_t releasing_ebs_lock;
+ struct list_head releasing_ebs;
};
#define __TRANS_FREEZABLE (1U << 0)
@@ -133,6 +137,7 @@ struct btrfs_trans_handle {
bool can_flush_pending_bgs;
bool reloc_reserved;
bool dirty;
+ bool in_fsync;
struct btrfs_root *root;
struct btrfs_fs_info *fs_info;
struct list_head new_bgs;
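
With TRANS_STATE_SUPER_COMMITTED slotted in before TRANS_STATE_COMPLETED, wait_for_commit() only needs the ordered comparison commit->state >= min_state: an fsync waiter (trans->in_fsync) can be released as soon as the super block is written, while everyone else keeps waiting for full completion. A minimal user-space sketch of that comparison (state names mirror the enum above; the real enum also has earlier states such as the running state):

#include <stdio.h>

enum trans_state {
	TRANS_STATE_COMMIT_START,
	TRANS_STATE_COMMIT_DOING,
	TRANS_STATE_UNBLOCKED,
	TRANS_STATE_SUPER_COMMITTED,
	TRANS_STATE_COMPLETED,
};

/* wait_for_commit() reduces to this check each time it is woken up. */
static int commit_reached(enum trans_state cur, enum trans_state min_state)
{
	return cur >= min_state;
}

int main(void)
{
	/* An fsync waiter is satisfied once the super block is written... */
	printf("fsync done: %d\n", commit_reached(TRANS_STATE_SUPER_COMMITTED,
						  TRANS_STATE_SUPER_COMMITTED));
	/* ...while a regular waiter still has to wait for full completion. */
	printf("commit done: %d\n", commit_reached(TRANS_STATE_SUPER_COMMITTED,
						   TRANS_STATE_COMPLETED));
	return 0;
}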
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 582061c7b547..f4ade821307d 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1453,22 +1453,14 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
return -EUCLEAN;
}
for (; ptr < end; ptr += sizeof(*dref)) {
- u64 root_objectid;
- u64 owner;
u64 offset;
- u64 hash;
+ /*
+ * We cannot check the extent_data_ref hash because of possible
+ * overflow from the leaf due to hash collisions.
+ */
dref = (struct btrfs_extent_data_ref *)ptr;
- root_objectid = btrfs_extent_data_ref_root(leaf, dref);
- owner = btrfs_extent_data_ref_objectid(leaf, dref);
offset = btrfs_extent_data_ref_offset(leaf, dref);
- hash = hash_extent_data_ref(root_objectid, owner, offset);
- if (unlikely(hash != key->offset)) {
- extent_err(leaf, slot,
- "invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
- hash, key->offset);
- return -EUCLEAN;
- }
if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
extent_err(leaf, slot,
"invalid extent data backref offset, have %llu expect aligned to %u",
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 254c2ee43aae..92a368627791 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -19,6 +19,7 @@
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
+#include "zoned.h"
/* magic values for the inode_only field in btrfs_log_inode:
*
@@ -104,6 +105,7 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
struct btrfs_root *log,
struct btrfs_path *path,
u64 dirid, int del_all);
+static void wait_log_commit(struct btrfs_root *root, int transid);
/*
* tree logging is a special write ahead log used to make sure that
@@ -139,7 +141,9 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *tree_root = fs_info->tree_root;
+ const bool zoned = btrfs_is_zoned(fs_info);
int ret = 0;
+ bool created = false;
/*
* First check if the log root tree was already created. If not, create
@@ -149,8 +153,10 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
mutex_lock(&tree_root->log_mutex);
if (!fs_info->log_root_tree) {
ret = btrfs_init_log_root_tree(trans, fs_info);
- if (!ret)
+ if (!ret) {
set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state);
+ created = true;
+ }
}
mutex_unlock(&tree_root->log_mutex);
if (ret)
@@ -159,12 +165,20 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
mutex_lock(&root->log_mutex);
+again:
if (root->log_root) {
+ int index = (root->log_transid + 1) % 2;
+
if (btrfs_need_log_full_commit(trans)) {
ret = -EAGAIN;
goto out;
}
+ if (zoned && atomic_read(&root->log_commit[index])) {
+ wait_log_commit(root, root->log_transid - 1);
+ goto again;
+ }
+
if (!root->log_start_pid) {
clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
root->log_start_pid = current->pid;
@@ -172,6 +186,17 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
}
} else {
+ /*
+ * This means fs_info->log_root_tree was already created
+ * for some other FS trees. Fall back to a full commit so that we do
+ * not mix nodes from multiple log transactions, which would break
+ * sequential writing.
+ */
+ if (zoned && !created) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
ret = btrfs_add_log_tree(trans, root);
if (ret)
goto out;
@@ -200,14 +225,22 @@ out:
*/
static int join_running_log_trans(struct btrfs_root *root)
{
+ const bool zoned = btrfs_is_zoned(root->fs_info);
int ret = -ENOENT;
if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
return ret;
mutex_lock(&root->log_mutex);
+again:
if (root->log_root) {
+ int index = (root->log_transid + 1) % 2;
+
ret = 0;
+ if (zoned && atomic_read(&root->log_commit[index])) {
+ wait_log_commit(root, root->log_transid - 1);
+ goto again;
+ }
atomic_inc(&root->log_writers);
}
mutex_unlock(&root->log_mutex);
@@ -2752,6 +2785,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
free_extent_buffer(next);
return ret;
}
+ btrfs_redirty_list_add(
+ trans->transaction, next);
} else {
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
clear_extent_buffer_dirty(next);
@@ -3085,6 +3120,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
*/
blk_start_plug(&plug);
ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
+ /*
+ * -EAGAIN happens when someone, e.g., a concurrent transaction
+ * commit, writes a dirty extent in this tree-log commit. This
+ * concurrent write leaves a hole in the range being written out, and
+ * we cannot proceed on a zoned filesystem, which requires sequential
+ * writing. We could bail out to a full commit here, but instead we
+ * continue, hoping the concurrent write fills the hole.
+ */
+ if (ret == -EAGAIN && btrfs_is_zoned(fs_info))
+ ret = 0;
if (ret) {
blk_finish_plug(&plug);
btrfs_abort_transaction(trans, ret);
@@ -3123,6 +3169,16 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_lock(&log_root_tree->log_mutex);
+ if (btrfs_is_zoned(fs_info)) {
+ if (!log_root_tree->node) {
+ ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
+ if (ret) {
+ mutex_unlock(&log_root_tree->log_mutex);
+ goto out;
+ }
+ }
+ }
+
index2 = log_root_tree->log_transid % 2;
list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
root_log_ctx.log_transid = log_root_tree->log_transid;
@@ -3194,7 +3250,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
&log_root_tree->dirty_log_pages,
EXTENT_DIRTY | EXTENT_NEW);
blk_finish_plug(&plug);
- if (ret) {
+ /*
+ * As described above, -EAGAIN indicates a hole in the extents. We
+ * cannot wait for these writeouts since the waiting could cause a
+ * deadlock. Bail out to a full commit instead.
+ */
+ if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) {
+ btrfs_set_log_full_commit(trans);
+ btrfs_wait_tree_log_extents(log, mark);
+ mutex_unlock(&log_root_tree->log_mutex);
+ goto out_wake_log_root;
+ } else if (ret) {
btrfs_set_log_full_commit(trans);
btrfs_abort_transaction(trans, ret);
mutex_unlock(&log_root_tree->log_mutex);
@@ -3285,17 +3351,22 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
.process_func = process_one_buffer
};
- ret = walk_log_tree(trans, log, &wc);
- if (ret) {
- if (trans)
- btrfs_abort_transaction(trans, ret);
- else
- btrfs_handle_fs_error(log->fs_info, ret, NULL);
+ if (log->node) {
+ ret = walk_log_tree(trans, log, &wc);
+ if (ret) {
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(log->fs_info, ret, NULL);
+ }
}
clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
extent_io_tree_release(&log->log_csum_range);
+
+ if (trans && log->node)
+ btrfs_redirty_list_add(trans->transaction, log->node);
btrfs_put_root(log);
}
@@ -3379,7 +3450,6 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
int ret;
int err = 0;
- int bytes_del = 0;
u64 dir_ino = btrfs_ino(dir);
if (!inode_logged(trans, dir))
@@ -3406,7 +3476,6 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
}
if (di) {
ret = btrfs_delete_one_dir_name(trans, log, path, di);
- bytes_del += name_len;
if (ret) {
err = ret;
goto fail;
@@ -3421,46 +3490,17 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
}
if (di) {
ret = btrfs_delete_one_dir_name(trans, log, path, di);
- bytes_del += name_len;
if (ret) {
err = ret;
goto fail;
}
}
- /* update the directory size in the log to reflect the names
- * we have removed
+ /*
+ * We do not need to update the size field of the directory's inode item
+ * because on log replay we update the field to reflect all existing
+ * entries in the directory (see overwrite_item()).
*/
- if (bytes_del) {
- struct btrfs_key key;
-
- key.objectid = dir_ino;
- key.offset = 0;
- key.type = BTRFS_INODE_ITEM_KEY;
- btrfs_release_path(path);
-
- ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
- if (ret < 0) {
- err = ret;
- goto fail;
- }
- if (ret == 0) {
- struct btrfs_inode_item *item;
- u64 i_size;
-
- item = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_inode_item);
- i_size = btrfs_inode_size(path->nodes[0], item);
- if (i_size > bytes_del)
- i_size -= bytes_del;
- else
- i_size = 0;
- btrfs_set_inode_size(path->nodes[0], item, i_size);
- btrfs_mark_buffer_dirty(path->nodes[0]);
- } else
- ret = 0;
- btrfs_release_path(path);
- }
fail:
btrfs_free_path(path);
out_unlock:
@@ -3889,7 +3929,14 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_token_timespec_nsec(&token, &item->ctime,
inode->i_ctime.tv_nsec);
- btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
+ /*
+ * We do not need to set the nbytes field, in fact during a fast fsync
+ * its value may not even be correct, since a fast fsync does not wait
+ * for ordered extent completion, which is where we update nbytes, it
+ * only waits for writeback to complete. During log replay as we find
+ * file extent items and replay them, we adjust the nbytes field of the
+ * inode item in subvolume tree as needed (see overwrite_item()).
+ */
btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
btrfs_set_token_inode_transid(&token, item, trans->transid);
@@ -5290,12 +5337,28 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
}
/*
+ * This is for cases where logging a directory could result in losing
+ * a file after replaying the log. For example, if we move a file from
+ * directory A to directory B, then fsync directory A, we have no way
+ * to know the file was moved from A to B, so logging just A would
+ * result in losing the file after a log replay.
+ */
+ if (S_ISDIR(inode->vfs_inode.i_mode) &&
+ inode_only == LOG_INODE_ALL &&
+ inode->last_unlink_trans >= trans->transid) {
+ btrfs_set_log_full_commit(trans);
+ err = 1;
+ goto out_unlock;
+ }
+
+ /*
* a brute force approach to making sure we get the most uptodate
* copies of everything.
*/
if (S_ISDIR(inode->vfs_inode.i_mode)) {
int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
+ clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
if (inode_only == LOG_INODE_EXISTS)
max_key_type = BTRFS_XATTR_ITEM_KEY;
ret = drop_objectid_items(trans, log, path, ino, max_key_type);
@@ -5452,96 +5515,31 @@ out_unlock:
}
/*
- * Check if we must fallback to a transaction commit when logging an inode.
- * This must be called after logging the inode and is used only in the context
- * when fsyncing an inode requires the need to log some other inode - in which
- * case we can't lock the i_mutex of each other inode we need to log as that
- * can lead to deadlocks with concurrent fsync against other inodes (as we can
- * log inodes up or down in the hierarchy) or rename operations for example. So
- * we take the log_mutex of the inode after we have logged it and then check for
- * its last_unlink_trans value - this is safe because any task setting
- * last_unlink_trans must take the log_mutex and it must do this before it does
- * the actual unlink operation, so if we do this check before a concurrent task
- * sets last_unlink_trans it means we've logged a consistent version/state of
- * all the inode items, otherwise we are not sure and must do a transaction
- * commit (the concurrent task might have only updated last_unlink_trans before
- * we logged the inode or it might have also done the unlink).
+ * Check if we need to log an inode. This is used in contexts where while
+ * logging an inode we need to log another inode (either that it exists or in
+ * full mode). This is used instead of btrfs_inode_in_log() because the latter
+ * requires the inode to be in the log and have the log transaction committed,
+ * while here we do not care if the log transaction was already committed - our
+ * caller will commit the log later - and we want to avoid logging an inode
+ * multiple times when multiple tasks have joined the same log transaction.
*/
-static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode)
+static bool need_log_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode)
{
- bool ret = false;
-
- mutex_lock(&inode->log_mutex);
- if (inode->last_unlink_trans >= trans->transid) {
- /*
- * Make sure any commits to the log are forced to be full
- * commits.
- */
- btrfs_set_log_full_commit(trans);
- ret = true;
- }
- mutex_unlock(&inode->log_mutex);
-
- return ret;
-}
-
-/*
- * follow the dentry parent pointers up the chain and see if any
- * of the directories in it require a full commit before they can
- * be logged. Returns zero if nothing special needs to be done or 1 if
- * a full commit is required.
- */
-static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode,
- struct dentry *parent,
- struct super_block *sb)
-{
- int ret = 0;
- struct dentry *old_parent = NULL;
-
/*
- * for regular files, if its inode is already on disk, we don't
- * have to worry about the parents at all. This is because
- * we can use the last_unlink_trans field to record renames
- * and other fun in this file.
+ * If this inode does not have new/updated/deleted xattrs since the last
+ * time it was logged and is flagged as logged in the current transaction,
+ * we can skip logging it. As for new/deleted names, those are updated in
+ * the log by link/unlink/rename operations.
+ * In case the inode was logged and then evicted and reloaded, its
+ * logged_trans will be 0, in which case we have to fully log it since
+ * logged_trans is a transient field, not persisted.
*/
- if (S_ISREG(inode->vfs_inode.i_mode) &&
- inode->generation < trans->transid &&
- inode->last_unlink_trans < trans->transid)
- goto out;
-
- if (!S_ISDIR(inode->vfs_inode.i_mode)) {
- if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
- goto out;
- inode = BTRFS_I(d_inode(parent));
- }
-
- while (1) {
- if (btrfs_must_commit_transaction(trans, inode)) {
- ret = 1;
- break;
- }
-
- if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
- break;
-
- if (IS_ROOT(parent)) {
- inode = BTRFS_I(d_inode(parent));
- if (btrfs_must_commit_transaction(trans, inode))
- ret = 1;
- break;
- }
-
- parent = dget_parent(parent);
- dput(old_parent);
- old_parent = parent;
- inode = BTRFS_I(d_inode(parent));
+ if (inode->logged_trans == trans->transid &&
+ !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
+ return false;
- }
- dput(old_parent);
-out:
- return ret;
+ return true;
}
struct btrfs_dir_list {
@@ -5671,7 +5669,7 @@ process_leaf:
goto next_dir_inode;
}
- if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
+ if (!need_log_inode(trans, BTRFS_I(di_inode))) {
btrfs_add_delayed_iput(di_inode);
break;
}
@@ -5681,9 +5679,6 @@ process_leaf:
log_mode = LOG_INODE_ALL;
ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
log_mode, ctx);
- if (!ret &&
- btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
- ret = 1;
btrfs_add_delayed_iput(di_inode);
if (ret)
goto next_dir_inode;
@@ -5821,13 +5816,15 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
goto out;
}
+ if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
+ btrfs_add_delayed_iput(dir_inode);
+ continue;
+ }
+
if (ctx)
ctx->log_new_dentries = false;
ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
LOG_INODE_ALL, ctx);
- if (!ret &&
- btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
- ret = 1;
if (!ret && ctx && ctx->log_new_dentries)
ret = log_new_dir_dentries(trans, root,
BTRFS_I(dir_inode), ctx);
@@ -5872,7 +5869,8 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (BTRFS_I(inode)->generation >= trans->transid)
+ if (BTRFS_I(inode)->generation >= trans->transid &&
+ need_log_inode(trans, BTRFS_I(inode)))
ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
LOG_INODE_EXISTS, ctx);
btrfs_add_delayed_iput(inode);
@@ -5926,7 +5924,8 @@ static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
if (root != inode->root)
break;
- if (inode->generation >= trans->transid) {
+ if (inode->generation >= trans->transid &&
+ need_log_inode(trans, inode)) {
ret = btrfs_log_inode(trans, root, inode,
LOG_INODE_EXISTS, ctx);
if (ret)
@@ -6041,12 +6040,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
- struct super_block *sb;
int ret = 0;
bool log_dentries = false;
- sb = inode->vfs_inode.i_sb;
-
if (btrfs_test_opt(fs_info, NOTREELOG)) {
ret = 1;
goto end_no_trans;
@@ -6057,10 +6053,6 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
goto end_no_trans;
}
- ret = check_parent_dirs_for_sync(trans, inode, parent, sb);
- if (ret)
- goto end_no_trans;
-
/*
* Skip already logged inodes or inodes corresponding to tmpfiles
* (since logging them is pointless, a link count of 0 means they
@@ -6307,8 +6299,7 @@ again:
* root->objectid_mutex is not acquired as log replay
* could only happen during mount.
*/
- ret = btrfs_find_highest_objectid(root,
- &root->highest_objectid);
+ ret = btrfs_init_root_free_objectid(root);
}
wc.replay_dest->log_root = NULL;
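
On zoned filesystems, start_log_trans() and join_running_log_trans() above refuse to join a new log transaction while the previous one is still committing, so that the log tree keeps being written sequentially. The slot they poll in root->log_commit[] comes from the alternating log_transid; a small sketch of that indexing (plain arithmetic, illustrative only):

#include <stdio.h>

int main(void)
{
	for (int log_transid = 0; log_transid < 4; log_transid++) {
		int cur = log_transid % 2;		/* slot of the log transaction being built */
		int prev = (log_transid + 1) % 2;	/* slot checked for a still-running commit */

		printf("log_transid=%d current slot=%d previous slot=%d\n",
		       log_transid, cur, prev);
	}
	return 0;
}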
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d6c24c8ad749..bc3b33efddc5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -421,7 +421,7 @@ static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
* Preallocate a bio that's always going to be used for flushing device
* barriers and matches the device lifespan
*/
- dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
+ dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
if (!dev->flush_bio) {
kfree(dev);
return ERR_PTR(-ENOMEM);
@@ -669,10 +669,6 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
device->mode = flags;
- ret = btrfs_get_dev_zone_info(device);
- if (ret != 0)
- goto error_free_page;
-
fs_devices->open_devices++;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
@@ -1418,11 +1414,62 @@ static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
* make sure to start at an offset of at least 1MB.
*/
return max_t(u64, start, SZ_1M);
+ case BTRFS_CHUNK_ALLOC_ZONED:
+ /*
+ * Unlike the regular allocator, we don't care about the starting
+ * region, because the first two zones are used/reserved for
+ * superblock logging anyway.
+ */
+ return ALIGN(start, device->zone_info->zone_size);
default:
BUG();
}
}
+static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
+ u64 *hole_start, u64 *hole_size,
+ u64 num_bytes)
+{
+ u64 zone_size = device->zone_info->zone_size;
+ u64 pos;
+ int ret;
+ bool changed = false;
+
+ ASSERT(IS_ALIGNED(*hole_start, zone_size));
+
+ while (*hole_size > 0) {
+ pos = btrfs_find_allocatable_zones(device, *hole_start,
+ *hole_start + *hole_size,
+ num_bytes);
+ if (pos != *hole_start) {
+ *hole_size = *hole_start + *hole_size - pos;
+ *hole_start = pos;
+ changed = true;
+ if (*hole_size < num_bytes)
+ break;
+ }
+
+ ret = btrfs_ensure_empty_zones(device, pos, num_bytes);
+
+ /* Range is ensured to be empty */
+ if (!ret)
+ return changed;
+
+ /* Given hole range was invalid (outside of device) */
+ if (ret == -ERANGE) {
+ *hole_start += *hole_size;
+ *hole_size = 0;
+ return true;
+ }
+
+ *hole_start += zone_size;
+ *hole_size -= zone_size;
+ changed = true;
+ }
+
+ return changed;
+}
+
/**
* dev_extent_hole_check - check if specified hole is suitable for allocation
* @device: the device which we have the hole
@@ -1430,7 +1477,7 @@ static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
* @hole_size: the size of the hole
* @num_bytes: the size of the free space that we need
*
- * This function may modify @hole_start and @hole_end to reflect the suitable
+ * This function may modify @hole_start and @hole_size to reflect the suitable
* position for allocation. Returns 1 if hole position is updated, 0 otherwise.
*/
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
@@ -1439,24 +1486,39 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
bool changed = false;
u64 hole_end = *hole_start + *hole_size;
- /*
- * Check before we set max_hole_start, otherwise we could end up
- * sending back this offset anyway.
- */
- if (contains_pending_extent(device, hole_start, *hole_size)) {
- if (hole_end >= *hole_start)
- *hole_size = hole_end - *hole_start;
- else
- *hole_size = 0;
- changed = true;
- }
+ for (;;) {
+ /*
+ * Check before we set max_hole_start, otherwise we could end up
+ * sending back this offset anyway.
+ */
+ if (contains_pending_extent(device, hole_start, *hole_size)) {
+ if (hole_end >= *hole_start)
+ *hole_size = hole_end - *hole_start;
+ else
+ *hole_size = 0;
+ changed = true;
+ }
+
+ switch (device->fs_devices->chunk_alloc_policy) {
+ case BTRFS_CHUNK_ALLOC_REGULAR:
+ /* No extra check */
+ break;
+ case BTRFS_CHUNK_ALLOC_ZONED:
+ if (dev_extent_hole_check_zoned(device, hole_start,
+ hole_size, num_bytes)) {
+ changed = true;
+ /*
+ * The changed hole can contain pending extent.
+ * Loop again to check that.
+ */
+ continue;
+ }
+ break;
+ default:
+ BUG();
+ }
- switch (device->fs_devices->chunk_alloc_policy) {
- case BTRFS_CHUNK_ALLOC_REGULAR:
- /* No extra check */
break;
- default:
- BUG();
}
return changed;
@@ -1509,6 +1571,9 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
search_start = dev_extent_search_start(device, search_start);
+ WARN_ON(device->zone_info &&
+ !IS_ALIGNED(num_bytes, device->zone_info->zone_size));
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -4668,11 +4733,10 @@ again:
}
ret = btrfs_previous_item(root, path, 0, key.type);
- if (ret)
- mutex_unlock(&fs_info->delete_unused_bgs_mutex);
- if (ret < 0)
- goto done;
if (ret) {
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+ if (ret < 0)
+ goto done;
ret = 0;
btrfs_release_path(path);
break;
@@ -4904,6 +4968,37 @@ static void init_alloc_chunk_ctl_policy_regular(
ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
}
+static void init_alloc_chunk_ctl_policy_zoned(
+ struct btrfs_fs_devices *fs_devices,
+ struct alloc_chunk_ctl *ctl)
+{
+ u64 zone_size = fs_devices->fs_info->zone_size;
+ u64 limit;
+ int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
+ int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
+ u64 min_chunk_size = min_data_stripes * zone_size;
+ u64 type = ctl->type;
+
+ ctl->max_stripe_size = zone_size;
+ if (type & BTRFS_BLOCK_GROUP_DATA) {
+ ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
+ zone_size);
+ } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
+ ctl->max_chunk_size = ctl->max_stripe_size;
+ } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
+ ctl->max_chunk_size = 2 * ctl->max_stripe_size;
+ ctl->devs_max = min_t(int, ctl->devs_max,
+ BTRFS_MAX_DEVS_SYS_CHUNK);
+ }
+
+ /* We don't want a chunk larger than 10% of writable space */
+ limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
+ zone_size),
+ min_chunk_size);
+ ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
+ ctl->dev_extent_min = zone_size * ctl->dev_stripes;
+}
+
static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
struct alloc_chunk_ctl *ctl)
{
@@ -4924,6 +5019,9 @@ static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
case BTRFS_CHUNK_ALLOC_REGULAR:
init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
break;
+ case BTRFS_CHUNK_ALLOC_ZONED:
+ init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
+ break;
default:
BUG();
}
@@ -5050,6 +5148,38 @@ static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
return 0;
}
+static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
+ struct btrfs_device_info *devices_info)
+{
+ u64 zone_size = devices_info[0].dev->zone_info->zone_size;
+ /* Number of stripes that count for block group size */
+ int data_stripes;
+
+ /*
+ * It should hold because:
+ * dev_extent_min == dev_extent_want == zone_size * dev_stripes
+ */
+ ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
+
+ ctl->stripe_size = zone_size;
+ ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
+ data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
+
+ /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
+ if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
+ ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
+ ctl->stripe_size) + ctl->nparity,
+ ctl->dev_stripes);
+ ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
+ data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
+ ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
+ }
+
+ ctl->chunk_size = ctl->stripe_size * data_stripes;
+
+ return 0;
+}
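/*
 * Editor's note (illustrative, not part of the patch): with 256 MiB zones the
 * stripe size is pinned to 256 MiB. For a RAID0 data chunk on 4 devices
 * (dev_stripes = 1, ncopies = 1, nparity = 0) the first pass gives
 * num_stripes = 4, data_stripes = 4 and a 1 GiB chunk. If max_chunk_size were
 * 512 MiB, ndevs is recomputed as ((512 MiB * 1 / 256 MiB) + 0) / 1 = 2, so
 * the chunk shrinks to 2 stripes * 256 MiB = 512 MiB. All numbers here are
 * assumptions chosen for the example.
 */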
+
static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
struct alloc_chunk_ctl *ctl,
struct btrfs_device_info *devices_info)
@@ -5077,6 +5207,8 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
switch (fs_devices->chunk_alloc_policy) {
case BTRFS_CHUNK_ALLOC_REGULAR:
return decide_stripe_size_regular(ctl, devices_info);
+ case BTRFS_CHUNK_ALLOC_ZONED:
+ return decide_stripe_size_zoned(ctl, devices_info);
default:
BUG();
}
@@ -5841,9 +5973,29 @@ static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
return ret;
}
+static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
+{
+ struct btrfs_block_group *cache;
+ bool ret;
+
+ /* A non-zoned filesystem does not use the "to_copy" flag */
+ if (!btrfs_is_zoned(fs_info))
+ return false;
+
+ cache = btrfs_lookup_block_group(fs_info, logical);
+
+ spin_lock(&cache->lock);
+ ret = cache->to_copy;
+ spin_unlock(&cache->lock);
+
+ btrfs_put_block_group(cache);
+ return ret;
+}
+
static void handle_ops_on_dev_replace(enum btrfs_map_op op,
struct btrfs_bio **bbio_ret,
struct btrfs_dev_replace *dev_replace,
+ u64 logical,
int *num_stripes_ret, int *max_errors_ret)
{
struct btrfs_bio *bbio = *bbio_ret;
@@ -5857,6 +6009,13 @@ static void handle_ops_on_dev_replace(enum btrfs_map_op op,
int index_where_to_add;
/*
+ * A block group which has the "to_copy" flag set will eventually be
+ * copied by the dev-replace process. We can avoid cloning IO here.
+ */
+ if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
+ return;
+
+ /*
* duplicate the write operations while the dev replace
* procedure is running. Since the copying of the old disk to
* the new disk takes place at run time while the filesystem is
@@ -5941,23 +6100,24 @@ static bool need_full_stripe(enum btrfs_map_op op)
}
/*
- * btrfs_get_io_geometry - calculates the geomery of a particular (address, len)
- * tuple. This information is used to calculate how big a
- * particular bio can get before it straddles a stripe.
+ * Calculate the geometry of a particular (address, len) tuple. This
+ * information is used to calculate how big a particular bio can get before it
+ * straddles a stripe.
*
- * @fs_info - the filesystem
- * @logical - address that we want to figure out the geometry of
- * @len - the length of IO we are going to perform, starting at @logical
- * @op - type of operation - write or read
- * @io_geom - pointer used to return values
+ * @fs_info: the filesystem
+ * @em: mapping containing the logical extent
+ * @op: type of operation - write or read
+ * @logical: address that we want to figure out the geometry of
+ * @len: the length of IO we are going to perform, starting at @logical
+ * @io_geom: pointer used to return values
*
* Returns < 0 in case a chunk for the given logical address cannot be found,
* usually shouldn't happen unless @logical is corrupted, 0 otherwise.
*/
-int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
- u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
+int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
+ enum btrfs_map_op op, u64 logical, u64 len,
+ struct btrfs_io_geometry *io_geom)
{
- struct extent_map *em;
struct map_lookup *map;
u64 offset;
u64 stripe_offset;
@@ -5965,14 +6125,9 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 stripe_len;
u64 raid56_full_stripe_start = (u64)-1;
int data_stripes;
- int ret = 0;
ASSERT(op != BTRFS_MAP_DISCARD);
- em = btrfs_get_chunk_map(fs_info, logical, len);
- if (IS_ERR(em))
- return PTR_ERR(em);
-
map = em->map_lookup;
/* Offset of this logical address in the chunk */
offset = logical - em->start;
@@ -5986,8 +6141,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
stripe_offset, offset, em->start, logical, stripe_len);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* stripe_offset is the offset of this block in its stripe */
@@ -6034,10 +6188,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
io_geom->stripe_offset = stripe_offset;
io_geom->raid56_stripe_offset = raid56_full_stripe_start;
-out:
- /* once for us */
- free_extent_map(em);
- return ret;
+ return 0;
}
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
@@ -6070,12 +6221,13 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
ASSERT(bbio_ret);
ASSERT(op != BTRFS_MAP_DISCARD);
- ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
+ em = btrfs_get_chunk_map(fs_info, logical, *length);
+ ASSERT(!IS_ERR(em));
+
+ ret = btrfs_get_io_geometry(fs_info, em, op, logical, *length, &geom);
if (ret < 0)
return ret;
- em = btrfs_get_chunk_map(fs_info, logical, *length);
- ASSERT(!IS_ERR(em));
map = em->map_lookup;
*length = geom.len;
@@ -6251,8 +6403,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
need_full_stripe(op)) {
- handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
- &max_errors);
+ handle_ops_on_dev_replace(op, &bbio, dev_replace, logical,
+ &num_stripes, &max_errors);
}
*bbio_ret = bbio;
@@ -6323,7 +6475,7 @@ static void btrfs_end_bio(struct bio *bio)
struct btrfs_device *dev = btrfs_io_bio(bio)->device;
ASSERT(dev->bdev);
- if (bio_op(bio) == REQ_OP_WRITE)
+ if (btrfs_op(bio) == BTRFS_MAP_WRITE)
btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_WRITE_ERRS);
else if (!(bio->bi_opf & REQ_RAHEAD))
@@ -6375,6 +6527,20 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
btrfs_io_bio(bio)->device = dev;
bio->bi_end_io = btrfs_end_bio;
bio->bi_iter.bi_sector = physical >> 9;
+ /*
+ * For zone append writing, bi_sector must point to the beginning of the
+ * zone.
+ */
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ if (btrfs_dev_is_sequential(dev, physical)) {
+ u64 zone_start = round_down(physical, fs_info->zone_size);
+
+ bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
+ } else {
+ bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
+ bio->bi_opf |= REQ_OP_WRITE;
+ }
+ }
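	/*
	 * Editor's note (illustrative, not part of the patch): with 256 MiB
	 * zones, a write aimed at physical offset 1216 MiB is submitted as a
	 * zone append against the zone start at 1024 MiB; the device picks the
	 * actual write location and reports it back in bi_sector on
	 * completion, which btrfs_record_physical_zoned() later reads. The
	 * offsets are assumptions for the example.
	 */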
btrfs_debug_in_rcu(fs_info,
"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
@@ -6436,10 +6602,10 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
atomic_set(&bbio->stripes_pending, bbio->num_stripes);
if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
- ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
+ ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
/* In this case, map_length has been set to the length of
a single stripe; not the whole write */
- if (bio_op(bio) == REQ_OP_WRITE) {
+ if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
ret = raid56_parity_write(fs_info, bio, bbio,
map_length);
} else {
@@ -6462,7 +6628,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
dev = bbio->stripes[dev_nr].dev;
if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
&dev->dev_state) ||
- (bio_op(first_bio) == REQ_OP_WRITE &&
+ (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
bbio_error(bbio, first_bio, logical);
continue;
@@ -7644,6 +7810,20 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
ret = -EUCLEAN;
goto out;
}
+
+ if (dev->zone_info) {
+ u64 zone_size = dev->zone_info->zone_size;
+
+ if (!IS_ALIGNED(physical_offset, zone_size) ||
+ !IS_ALIGNED(physical_len, zone_size)) {
+ btrfs_err(fs_info,
+"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
+ devid, physical_offset, physical_len);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+
out:
free_extent_map(em);
return ret;
@@ -7800,3 +7980,75 @@ bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
spin_unlock(&fs_info->swapfile_pins_lock);
return node != NULL;
}
+
+static int relocating_repair_kthread(void *data)
+{
+ struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ u64 target;
+ int ret = 0;
+
+ target = cache->start;
+ btrfs_put_block_group(cache);
+
+ if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
+ btrfs_info(fs_info,
+ "zoned: skip relocating block group %llu to repair: EBUSY",
+ target);
+ return -EBUSY;
+ }
+
+ mutex_lock(&fs_info->delete_unused_bgs_mutex);
+
+ /* Ensure block group still exists */
+ cache = btrfs_lookup_block_group(fs_info, target);
+ if (!cache)
+ goto out;
+
+ if (!cache->relocating_repair)
+ goto out;
+
+ ret = btrfs_may_alloc_data_chunk(fs_info, target);
+ if (ret < 0)
+ goto out;
+
+ btrfs_info(fs_info,
+ "zoned: relocating block group %llu to repair IO failure",
+ target);
+ ret = btrfs_relocate_chunk(fs_info, target);
+
+out:
+ if (cache)
+ btrfs_put_block_group(cache);
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+ btrfs_exclop_finish(fs_info);
+
+ return ret;
+}
+
+int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
+{
+ struct btrfs_block_group *cache;
+
+ /* Do not attempt to repair in degraded state */
+ if (btrfs_test_opt(fs_info, DEGRADED))
+ return 0;
+
+ cache = btrfs_lookup_block_group(fs_info, logical);
+ if (!cache)
+ return 0;
+
+ spin_lock(&cache->lock);
+ if (cache->relocating_repair) {
+ spin_unlock(&cache->lock);
+ btrfs_put_block_group(cache);
+ return 0;
+ }
+ cache->relocating_repair = 1;
+ spin_unlock(&cache->lock);
+
+ kthread_run(relocating_repair_kthread, cache,
+ "btrfs-relocating-repair");
+
+ return 0;
+}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index c43663d9c22e..d4c3e0dd32b8 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -214,6 +214,7 @@ BTRFS_DEVICE_GETSET_FUNCS(bytes_used);
enum btrfs_chunk_allocation_policy {
BTRFS_CHUNK_ALLOC_REGULAR,
+ BTRFS_CHUNK_ALLOC_ZONED,
};
/*
@@ -423,6 +424,7 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
case REQ_OP_DISCARD:
return BTRFS_MAP_DISCARD;
case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
return BTRFS_MAP_WRITE;
default:
WARN_ON_ONCE(1);
@@ -440,8 +442,9 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret);
-int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
- u64 logical, u64 len, struct btrfs_io_geometry *io_geom);
+int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
+ enum btrfs_map_op op, u64 logical, u64 len,
+ struct btrfs_io_geometry *io_geom);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
@@ -596,5 +599,6 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
+int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);
#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index af6246f36a9e..8a4514283a4b 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -229,11 +229,33 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
+ const bool start_trans = (current->journal_info == NULL);
int ret;
- trans = btrfs_start_transaction(root, 2);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ if (start_trans) {
+ /*
+ * 1 unit for inserting/updating/deleting the xattr
+ * 1 unit for the inode item update
+ */
+ trans = btrfs_start_transaction(root, 2);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ } else {
+ /*
+ * This can happen when smack is enabled and a directory is being
+ * created. It happens through d_instantiate_new(), which calls
+ * smack_d_instantiate(), which in turn calls __vfs_setxattr() to
+ * set the transmute xattr (XATTR_NAME_SMACKTRANSMUTE) on the
+ * inode. We have already reserved space for the xattr and inode
+ * update at btrfs_mkdir(), so just use the transaction handle.
+ * We don't join or start a transaction, as that will reset the
+ * block_rsv of the handle and trigger a warning for the start
+ * case.
+ */
+ ASSERT(strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN) == 0);
+ trans = current->journal_info;
+ }
ret = btrfs_setxattr(trans, inode, name, value, size, flags);
if (ret)
@@ -244,7 +266,8 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
BUG_ON(ret);
out:
- btrfs_end_transaction(trans);
+ if (start_trans)
+ btrfs_end_transaction(trans);
return ret;
}
@@ -362,6 +385,7 @@ static int btrfs_xattr_handler_get(const struct xattr_handler *handler,
}
static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
@@ -371,6 +395,7 @@ static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
}
static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 05615a1099db..d524acf7b3e5 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -432,9 +432,8 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
PAGE_SIZE - (buf_offset % PAGE_SIZE));
bytes = min(bytes, bytes_left);
- kaddr = kmap_atomic(dest_page);
- memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
- kunmap_atomic(kaddr);
+ memcpy_to_page(dest_page, pg_offset,
+ workspace->buf + buf_offset, bytes);
pg_offset += bytes;
bytes_left -= bytes;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index c38846659019..1f972b75a9ab 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1,14 +1,25 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
+#include <linux/sched/mm.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
+#include "disk-io.h"
+#include "block-group.h"
+#include "transaction.h"
+#include "dev-replace.h"
+#include "space-info.h"
/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES 4096
+/* Invalid allocation pointer value for missing devices */
+#define WP_MISSING_DEV ((u64)-1)
+/* Pseudo write pointer value for conventional zone */
+#define WP_CONVENTIONAL ((u64)-2)
/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES 2
@@ -119,6 +130,36 @@ static inline u32 sb_zone_number(int shift, int mirror)
return 0;
}
+/*
+ * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
+ * device into fixed-size chunks and fakes a conventional zone on each of
+ * them.
+ */
+static int emulate_report_zones(struct btrfs_device *device, u64 pos,
+ struct blk_zone *zones, unsigned int nr_zones)
+{
+ const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
+ sector_t bdev_size = bdev_nr_sectors(device->bdev);
+ unsigned int i;
+
+ pos >>= SECTOR_SHIFT;
+ for (i = 0; i < nr_zones; i++) {
+ zones[i].start = i * zone_sectors + pos;
+ zones[i].len = zone_sectors;
+ zones[i].capacity = zone_sectors;
+ zones[i].wp = zones[i].start + zone_sectors;
+ zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
+ zones[i].cond = BLK_ZONE_COND_NOT_WP;
+
+ if (zones[i].wp >= bdev_size) {
+ i++;
+ break;
+ }
+ }
+
+ return i;
+}
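/*
 * Editor's note (illustrative, not part of the patch): with an emulated zone
 * size of 256 MiB, a 10 GiB regular device is reported as 40 conventional
 * zones in condition BLK_ZONE_COND_NOT_WP, mirroring how conventional zones
 * appear on real zoned hardware. The device and zone sizes are assumptions
 * for the example only.
 */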
+
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
struct blk_zone *zones, unsigned int *nr_zones)
{
@@ -127,6 +168,12 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
if (!*nr_zones)
return 0;
+ if (!bdev_is_zoned(device->bdev)) {
+ ret = emulate_report_zones(device, pos, zones, *nr_zones);
+ *nr_zones = ret;
+ return 0;
+ }
+
ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
copy_zone_info_cb, zones);
if (ret < 0) {
@@ -143,8 +190,78 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
return 0;
}
+/* The emulated zone size is determined from the size of the first device extent */
+static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_path *path;
+ struct btrfs_root *root = fs_info->dev_root;
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct btrfs_dev_extent *dext;
+ int ret = 0;
+
+ key.objectid = 1;
+ key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+ ret = btrfs_next_item(root, path);
+ if (ret < 0)
+ goto out;
+ /* No dev extents at all? Not good */
+ if (ret > 0) {
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+
+ leaf = path->nodes[0];
+ dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
+ fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
+ ret = 0;
+
+out:
+ btrfs_free_path(path);
+
+ return ret;
+}
+
+int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_device *device;
+ int ret = 0;
+
+ /* fs_info->zone_size might not be set yet. Use the incompat flag here. */
+ if (!btrfs_fs_incompat(fs_info, ZONED))
+ return 0;
+
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ /* We can skip reading of zone info for missing devices */
+ if (!device->bdev)
+ continue;
+
+ ret = btrfs_get_dev_zone_info(device);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+
+ return ret;
+}
+
int btrfs_get_dev_zone_info(struct btrfs_device *device)
{
+ struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_zoned_device_info *zone_info = NULL;
struct block_device *bdev = device->bdev;
struct request_queue *queue = bdev_get_queue(bdev);
@@ -152,10 +269,15 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
sector_t sector = 0;
struct blk_zone *zones = NULL;
unsigned int i, nreported = 0, nr_zones;
- unsigned int zone_sectors;
+ sector_t zone_sectors;
+ char *model, *emulated;
int ret;
- if (!bdev_is_zoned(bdev))
+ /*
+ * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
+ * yet be set.
+ */
+ if (!btrfs_fs_incompat(fs_info, ZONED))
return 0;
if (device->zone_info)
@@ -165,8 +287,20 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
if (!zone_info)
return -ENOMEM;
+ if (!bdev_is_zoned(bdev)) {
+ if (!fs_info->zone_size) {
+ ret = calculate_emulated_zone_size(fs_info);
+ if (ret)
+ goto out;
+ }
+
+ ASSERT(fs_info->zone_size);
+ zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
+ } else {
+ zone_sectors = bdev_zone_sectors(bdev);
+ }
+
nr_sectors = bdev_nr_sectors(bdev);
- zone_sectors = bdev_zone_sectors(bdev);
/* Check if it's power of 2 (see is_power_of_2) */
ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
@@ -272,20 +406,42 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
device->zone_info = zone_info;
- /* device->fs_info is not safe to use for printing messages */
- btrfs_info_in_rcu(NULL,
- "host-%s zoned block device %s, %u zones of %llu bytes",
- bdev_zoned_model(bdev) == BLK_ZONED_HM ? "managed" : "aware",
- rcu_str_deref(device->name), zone_info->nr_zones,
- zone_info->zone_size);
+ switch (bdev_zoned_model(bdev)) {
+ case BLK_ZONED_HM:
+ model = "host-managed zoned";
+ emulated = "";
+ break;
+ case BLK_ZONED_HA:
+ model = "host-aware zoned";
+ emulated = "";
+ break;
+ case BLK_ZONED_NONE:
+ model = "regular";
+ emulated = "emulated ";
+ break;
+ default:
+ /* Just in case */
+ btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
+ bdev_zoned_model(bdev),
+ rcu_str_deref(device->name));
+ ret = -EOPNOTSUPP;
+ goto out_free_zone_info;
+ }
+
+ btrfs_info_in_rcu(fs_info,
+ "%s block device %s, %u %szones of %llu bytes",
+ model, rcu_str_deref(device->name), zone_info->nr_zones,
+ emulated, zone_info->zone_size);
return 0;
out:
kfree(zones);
+out_free_zone_info:
bitmap_free(zone_info->empty_zones);
bitmap_free(zone_info->seq_zones);
kfree(zone_info);
+ device->zone_info = NULL;
return ret;
}
@@ -324,7 +480,7 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
u64 nr_devices = 0;
u64 zone_size = 0;
u64 max_zone_append_size = 0;
- const bool incompat_zoned = btrfs_is_zoned(fs_info);
+ const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
int ret = 0;
/* Count zoned devices */
@@ -335,9 +491,17 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
continue;
model = bdev_zoned_model(device->bdev);
+ /*
+ * A Host-Managed zoned device must be used as a zoned device.
+ * A Host-Aware zoned device and a non-zoned devices can be
+ * treated as a zoned device, if ZONED flag is enabled in the
+ * superblock.
+ */
if (model == BLK_ZONED_HM ||
- (model == BLK_ZONED_HA && incompat_zoned)) {
- struct btrfs_zoned_device_info *zone_info;
+ (model == BLK_ZONED_HA && incompat_zoned) ||
+ (model == BLK_ZONED_NONE && incompat_zoned)) {
+ struct btrfs_zoned_device_info *zone_info =
+ device->zone_info;
zone_info = device->zone_info;
zoned_devices++;
@@ -406,6 +570,15 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
fs_info->zone_size = zone_size;
fs_info->max_zone_append_size = max_zone_append_size;
+ fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;
+
+ /*
+ * Check mount options here, because we might change fs_info->zoned
+ * from fs_info->zone_size.
+ */
+ ret = btrfs_check_mountopts_zoned(fs_info);
+ if (ret)
+ goto out;
btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
out:
@@ -485,10 +658,9 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
u64 *bytenr_ret)
{
struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
- unsigned int zone_sectors;
+ sector_t zone_sectors;
u32 sb_zone;
int ret;
- u64 zone_size;
u8 zone_sectors_shift;
sector_t nr_sectors;
u32 nr_zones;
@@ -503,7 +675,6 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
zone_sectors = bdev_zone_sectors(bdev);
if (!is_power_of_2(zone_sectors))
return -EINVAL;
- zone_size = zone_sectors << SECTOR_SHIFT;
zone_sectors_shift = ilog2(zone_sectors);
nr_sectors = bdev_nr_sectors(bdev);
nr_zones = nr_sectors >> zone_sectors_shift;
@@ -529,7 +700,13 @@ int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
struct btrfs_zoned_device_info *zinfo = device->zone_info;
u32 zone_num;
- if (!zinfo) {
+ /*
+ * For a zoned filesystem on a non-zoned block device, use the same
+ * super block locations as a regular filesystem does. Doing so, the
+ * super block can always be retrieved and the zoned flag of the volume
+ * can be detected from the super block information.
+ */
+ if (!bdev_is_zoned(device->bdev)) {
*bytenr_ret = btrfs_sb_offset(mirror);
return 0;
}
@@ -614,3 +791,671 @@ int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
sb_zone << zone_sectors_shift,
zone_sectors * BTRFS_NR_SB_LOG_ZONES, GFP_NOFS);
}
+
+/**
+ * btrfs_find_allocatable_zones - find allocatable zones within a given region
+ *
+ * @device: the device to allocate a region on
+ * @hole_start: the start of the hole in which to allocate a region
+ * @hole_end: the end of the hole
+ * @num_bytes: the size of the wanted region
+ *
+ * The allocatable region must not contain any superblock locations.
+ *
+ * Return: the position of an allocatable region within the hole
+ */
+u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
+ u64 hole_end, u64 num_bytes)
+{
+ struct btrfs_zoned_device_info *zinfo = device->zone_info;
+ const u8 shift = zinfo->zone_size_shift;
+ u64 nzones = num_bytes >> shift;
+ u64 pos = hole_start;
+ u64 begin, end;
+ bool have_sb;
+ int i;
+
+ ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
+ ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));
+
+ while (pos < hole_end) {
+ begin = pos >> shift;
+ end = begin + nzones;
+
+ if (end > zinfo->nr_zones)
+ return hole_end;
+
+ /* Check if zones in the region are all empty */
+ if (btrfs_dev_is_sequential(device, pos) &&
+ find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
+ pos += zinfo->zone_size;
+ continue;
+ }
+
+ have_sb = false;
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ u32 sb_zone;
+ u64 sb_pos;
+
+ sb_zone = sb_zone_number(shift, i);
+ if (!(end <= sb_zone ||
+ sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
+ have_sb = true;
+ pos = ((u64)sb_zone + BTRFS_NR_SB_LOG_ZONES) << shift;
+ break;
+ }
+
+ /* We also need to exclude regular superblock positions */
+ sb_pos = btrfs_sb_offset(i);
+ if (!(pos + num_bytes <= sb_pos ||
+ sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
+ have_sb = true;
+ pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
+ zinfo->zone_size);
+ break;
+ }
+ }
+ if (!have_sb)
+ break;
+ }
+
+ return pos;
+}
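/*
 * Editor's illustrative sketch, not part of the patch: both superblock checks
 * above are the usual half-open interval overlap test, written with the
 * negation inline. A hypothetical helper expressing the same condition:
 */
static inline bool ranges_overlap(u64 a_start, u64 a_end, u64 b_start, u64 b_end)
{
	/* [a_start, a_end) and [b_start, b_end) overlap iff neither ends before the other begins */
	return !(a_end <= b_start || b_end <= a_start);
}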
+
+int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
+ u64 length, u64 *bytes)
+{
+ int ret;
+
+ *bytes = 0;
+ ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
+ physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
+ GFP_NOFS);
+ if (ret)
+ return ret;
+
+ *bytes = length;
+ while (length) {
+ btrfs_dev_set_zone_empty(device, physical);
+ physical += device->zone_info->zone_size;
+ length -= device->zone_info->zone_size;
+ }
+
+ return 0;
+}
+
+int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
+{
+ struct btrfs_zoned_device_info *zinfo = device->zone_info;
+ const u8 shift = zinfo->zone_size_shift;
+ unsigned long begin = start >> shift;
+ unsigned long end = (start + size) >> shift;
+ u64 pos;
+ int ret;
+
+ ASSERT(IS_ALIGNED(start, zinfo->zone_size));
+ ASSERT(IS_ALIGNED(size, zinfo->zone_size));
+
+ if (end > zinfo->nr_zones)
+ return -ERANGE;
+
+ /* All the zones are conventional */
+ if (find_next_bit(zinfo->seq_zones, begin, end) == end)
+ return 0;
+
+ /* All the zones are sequential and empty */
+ if (find_next_zero_bit(zinfo->seq_zones, begin, end) == end &&
+ find_next_zero_bit(zinfo->empty_zones, begin, end) == end)
+ return 0;
+
+ for (pos = start; pos < start + size; pos += zinfo->zone_size) {
+ u64 reset_bytes;
+
+ if (!btrfs_dev_is_sequential(device, pos) ||
+ btrfs_dev_is_empty_zone(device, pos))
+ continue;
+
+ /* Free regions should be empty */
+ btrfs_warn_in_rcu(
+ device->fs_info,
+ "zoned: resetting device %s (devid %llu) zone %llu for allocation",
+ rcu_str_deref(device->name), device->devid, pos >> shift);
+ WARN_ON_ONCE(1);
+
+ ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
+ &reset_bytes);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate an allocation pointer from the extent allocation information
+ * for a block group consisting of conventional zones. The pointer is set
+ * to the end of the highest addressed extent in the block group, as an
+ * allocation offset from the start of the block group.
+ */
+static int calculate_alloc_pointer(struct btrfs_block_group *cache,
+ u64 *offset_ret)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ int ret;
+ u64 length;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = cache->start + cache->length;
+ key.type = 0;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ /* We should not find the exact match */
+ if (!ret)
+ ret = -EUCLEAN;
+ if (ret < 0)
+ goto out;
+
+ ret = btrfs_previous_extent_item(root, path, cache->start);
+ if (ret) {
+ if (ret == 1) {
+ ret = 0;
+ *offset_ret = 0;
+ }
+ goto out;
+ }
+
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
+
+ if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
+ length = found_key.offset;
+ else
+ length = fs_info->nodesize;
+
+ if (!(found_key.objectid >= cache->start &&
+ found_key.objectid + length <= cache->start + cache->length)) {
+ ret = -EUCLEAN;
+ goto out;
+ }
+ *offset_ret = found_key.objectid + length - cache->start;
+ ret = 0;
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
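/*
 * Editor's note (illustrative, not part of the patch): for a block group
 * starting at 1 GiB whose highest addressed extent item is
 * (objectid = 1 GiB + 192 MiB, EXTENT_ITEM, offset = 64 MiB), the allocation
 * offset becomes 192 MiB + 64 MiB = 256 MiB from the block group start. The
 * numbers are made up for the example.
 */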
+
+int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct extent_map_tree *em_tree = &fs_info->mapping_tree;
+ struct extent_map *em;
+ struct map_lookup *map;
+ struct btrfs_device *device;
+ u64 logical = cache->start;
+ u64 length = cache->length;
+ u64 physical = 0;
+ int ret;
+ int i;
+ unsigned int nofs_flag;
+ u64 *alloc_offsets = NULL;
+ u64 last_alloc = 0;
+ u32 num_sequential = 0, num_conventional = 0;
+
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ /* Sanity check */
+ if (!IS_ALIGNED(length, fs_info->zone_size)) {
+ btrfs_err(fs_info,
+ "zoned: block group %llu len %llu unaligned to zone size %llu",
+ logical, length, fs_info->zone_size);
+ return -EIO;
+ }
+
+ /* Get the chunk mapping */
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, logical, length);
+ read_unlock(&em_tree->lock);
+
+ if (!em)
+ return -EINVAL;
+
+ map = em->map_lookup;
+
+ alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
+ if (!alloc_offsets) {
+ free_extent_map(em);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < map->num_stripes; i++) {
+ bool is_sequential;
+ struct blk_zone zone;
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ int dev_replace_is_ongoing = 0;
+
+ device = map->stripes[i].dev;
+ physical = map->stripes[i].physical;
+
+ if (device->bdev == NULL) {
+ alloc_offsets[i] = WP_MISSING_DEV;
+ continue;
+ }
+
+ is_sequential = btrfs_dev_is_sequential(device, physical);
+ if (is_sequential)
+ num_sequential++;
+ else
+ num_conventional++;
+
+ if (!is_sequential) {
+ alloc_offsets[i] = WP_CONVENTIONAL;
+ continue;
+ }
+
+ /*
+ * This zone will be used for allocation, so mark this zone
+ * non-empty.
+ */
+ btrfs_dev_clear_zone_empty(device, physical);
+
+ down_read(&dev_replace->rwsem);
+ dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+ if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
+ btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical);
+ up_read(&dev_replace->rwsem);
+
+ /*
+ * The group is mapped to a sequential zone. Get the zone write
+ * pointer to determine the allocation offset within the zone.
+ */
+ WARN_ON(!IS_ALIGNED(physical, fs_info->zone_size));
+ nofs_flag = memalloc_nofs_save();
+ ret = btrfs_get_dev_zone(device, physical, &zone);
+ memalloc_nofs_restore(nofs_flag);
+ if (ret == -EIO || ret == -EOPNOTSUPP) {
+ ret = 0;
+ alloc_offsets[i] = WP_MISSING_DEV;
+ continue;
+ } else if (ret) {
+ goto out;
+ }
+
+ switch (zone.cond) {
+ case BLK_ZONE_COND_OFFLINE:
+ case BLK_ZONE_COND_READONLY:
+ btrfs_err(fs_info,
+ "zoned: offline/readonly zone %llu on device %s (devid %llu)",
+ physical >> device->zone_info->zone_size_shift,
+ rcu_str_deref(device->name), device->devid);
+ alloc_offsets[i] = WP_MISSING_DEV;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ alloc_offsets[i] = 0;
+ break;
+ case BLK_ZONE_COND_FULL:
+ alloc_offsets[i] = fs_info->zone_size;
+ break;
+ default:
+ /* Partially used zone */
+ alloc_offsets[i] =
+ ((zone.wp - zone.start) << SECTOR_SHIFT);
+ break;
+ }
+ }
+
+ if (num_sequential > 0)
+ cache->seq_zone = true;
+
+ if (num_conventional > 0) {
+ /*
+ * Avoid calling calculate_alloc_pointer() for a new BG. It is
+ * pointless for a new BG, as the offset must always be 0.
+ *
+ * Also, we have a lock chain of extent buffer lock ->
+ * chunk mutex. For a new BG, this function is called from
+ * btrfs_make_block_group(), which already holds the
+ * chunk mutex. Thus, we cannot call
+ * calculate_alloc_pointer(), which takes extent buffer
+ * locks, without risking a deadlock.
+ */
+ if (new) {
+ cache->alloc_offset = 0;
+ goto out;
+ }
+ ret = calculate_alloc_pointer(cache, &last_alloc);
+ if (ret || map->num_stripes == num_conventional) {
+ if (!ret)
+ cache->alloc_offset = last_alloc;
+ else
+ btrfs_err(fs_info,
+ "zoned: failed to determine allocation offset of bg %llu",
+ cache->start);
+ goto out;
+ }
+ }
+
+ switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ case 0: /* single */
+ cache->alloc_offset = alloc_offsets[0];
+ break;
+ case BTRFS_BLOCK_GROUP_DUP:
+ case BTRFS_BLOCK_GROUP_RAID1:
+ case BTRFS_BLOCK_GROUP_RAID0:
+ case BTRFS_BLOCK_GROUP_RAID10:
+ case BTRFS_BLOCK_GROUP_RAID5:
+ case BTRFS_BLOCK_GROUP_RAID6:
+ /* non-single profiles are not supported yet */
+ default:
+ btrfs_err(fs_info, "zoned: profile %s not yet supported",
+ btrfs_bg_type_to_raid_name(map->type));
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ /* An extent is allocated after the write pointer */
+ if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
+ btrfs_err(fs_info,
+ "zoned: got wrong write pointer in BG %llu: %llu > %llu",
+ logical, last_alloc, cache->alloc_offset);
+ ret = -EIO;
+ }
+
+ if (!ret)
+ cache->meta_write_pointer = cache->alloc_offset + cache->start;
+
+ kfree(alloc_offsets);
+ free_extent_map(em);
+
+ return ret;
+}
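/*
 * Editor's note (illustrative, not part of the patch): for a SINGLE profile
 * block group backed by one 256 MiB sequential zone, an EMPTY zone yields
 * alloc_offset = 0, a FULL zone yields 256 MiB, and a partially written zone
 * whose write pointer sits 96 MiB past the zone start yields
 * (zone.wp - zone.start) << SECTOR_SHIFT = 96 MiB. The numbers are
 * assumptions for the example.
 */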
+
+void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
+{
+ u64 unusable, free;
+
+ if (!btrfs_is_zoned(cache->fs_info))
+ return;
+
+ WARN_ON(cache->bytes_super != 0);
+ unusable = cache->alloc_offset - cache->used;
+ free = cache->length - cache->alloc_offset;
+
+ /* We only need ->free_space in ALLOC_SEQ block groups */
+ cache->last_byte_to_unpin = (u64)-1;
+ cache->cached = BTRFS_CACHE_FINISHED;
+ cache->free_space_ctl->free_space = free;
+ cache->zone_unusable = unusable;
+
+ /* Should not have any excluded extents. Just in case, though */
+ btrfs_free_excluded_extents(cache);
+}
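/*
 * Editor's illustrative sketch, not part of the patch: the accounting above
 * in a hypothetical standalone helper. Bytes between the used amount and the
 * allocation offset were written and later freed; they cannot be reused until
 * the zone is reset, so they count as unusable rather than free.
 */
static void example_zone_accounting(u64 length, u64 alloc_offset, u64 used,
				    u64 *unusable, u64 *free_bytes)
{
	*unusable = alloc_offset - used;     /* written then freed, needs a zone reset */
	*free_bytes = length - alloc_offset; /* never written, still sequentially writable */
	/* e.g. length 256 MiB, alloc_offset 200 MiB, used 150 MiB -> 50 MiB unusable, 56 MiB free */
}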
+
+void btrfs_redirty_list_add(struct btrfs_transaction *trans,
+ struct extent_buffer *eb)
+{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
+
+ if (!btrfs_is_zoned(fs_info) ||
+ btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
+ !list_empty(&eb->release_list))
+ return;
+
+ set_extent_buffer_dirty(eb);
+ set_extent_bits_nowait(&trans->dirty_pages, eb->start,
+ eb->start + eb->len - 1, EXTENT_DIRTY);
+ memzero_extent_buffer(eb, 0, eb->len);
+ set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
+
+ spin_lock(&trans->releasing_ebs_lock);
+ list_add_tail(&eb->release_list, &trans->releasing_ebs);
+ spin_unlock(&trans->releasing_ebs_lock);
+ atomic_inc(&eb->refs);
+}
+
+void btrfs_free_redirty_list(struct btrfs_transaction *trans)
+{
+ spin_lock(&trans->releasing_ebs_lock);
+ while (!list_empty(&trans->releasing_ebs)) {
+ struct extent_buffer *eb;
+
+ eb = list_first_entry(&trans->releasing_ebs,
+ struct extent_buffer, release_list);
+ list_del_init(&eb->release_list);
+ free_extent_buffer(eb);
+ }
+ spin_unlock(&trans->releasing_ebs_lock);
+}
+
+bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct btrfs_block_group *cache;
+ bool ret = false;
+
+ if (!btrfs_is_zoned(fs_info))
+ return false;
+
+ if (!fs_info->max_zone_append_size)
+ return false;
+
+ if (!is_data_inode(&inode->vfs_inode))
+ return false;
+
+ cache = btrfs_lookup_block_group(fs_info, em->block_start);
+ ASSERT(cache);
+ if (!cache)
+ return false;
+
+ ret = cache->seq_zone;
+ btrfs_put_block_group(cache);
+
+ return ret;
+}
+
+void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
+ struct bio *bio)
+{
+ struct btrfs_ordered_extent *ordered;
+ const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+
+ if (bio_op(bio) != REQ_OP_ZONE_APPEND)
+ return;
+
+ ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
+ if (WARN_ON(!ordered))
+ return;
+
+ ordered->physical = physical;
+ ordered->disk = bio->bi_bdev->bd_disk;
+ ordered->partno = bio->bi_bdev->bd_partno;
+
+ btrfs_put_ordered_extent(ordered);
+}
+
+void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
+{
+ struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+ struct btrfs_ordered_sum *sum;
+ struct block_device *bdev;
+ u64 orig_logical = ordered->disk_bytenr;
+ u64 *logical = NULL;
+ int nr, stripe_len;
+
+ /* Zoned devices should not have partitions, so the partition number must be 0 */
+ ASSERT(ordered->partno == 0);
+ bdev = bdgrab(ordered->disk->part0);
+ if (WARN_ON(!bdev))
+ return;
+
+ if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, bdev,
+ ordered->physical, &logical, &nr,
+ &stripe_len)))
+ goto out;
+
+ WARN_ON(nr != 1);
+
+ if (orig_logical == *logical)
+ goto out;
+
+ ordered->disk_bytenr = *logical;
+
+ em_tree = &inode->extent_tree;
+ write_lock(&em_tree->lock);
+ em = search_extent_mapping(em_tree, ordered->file_offset,
+ ordered->num_bytes);
+ em->block_start = *logical;
+ free_extent_map(em);
+ write_unlock(&em_tree->lock);
+
+ list_for_each_entry(sum, &ordered->list, list) {
+ if (*logical < orig_logical)
+ sum->bytenr -= orig_logical - *logical;
+ else
+ sum->bytenr += *logical - orig_logical;
+ }
+
+out:
+ kfree(logical);
+ bdput(bdev);
+}
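/*
 * Editor's note (illustrative, not part of the patch): if an ordered extent
 * was set up for logical address L but the zone append completion shows the
 * data actually landed at a physical location mapping back to logical L', the
 * code above rewrites disk_bytenr to L', updates the cached extent map, and
 * shifts every checksum entry's bytenr by the same (L' - L) delta. L and L'
 * are placeholders for the example.
 */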
+
+bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb,
+ struct btrfs_block_group **cache_ret)
+{
+ struct btrfs_block_group *cache;
+ bool ret = true;
+
+ if (!btrfs_is_zoned(fs_info))
+ return true;
+
+ cache = *cache_ret;
+
+ if (cache && (eb->start < cache->start ||
+ cache->start + cache->length <= eb->start)) {
+ btrfs_put_block_group(cache);
+ cache = NULL;
+ *cache_ret = NULL;
+ }
+
+ if (!cache)
+ cache = btrfs_lookup_block_group(fs_info, eb->start);
+
+ if (cache) {
+ if (cache->meta_write_pointer != eb->start) {
+ btrfs_put_block_group(cache);
+ cache = NULL;
+ ret = false;
+ } else {
+ cache->meta_write_pointer = eb->start + eb->len;
+ }
+
+ *cache_ret = cache;
+ }
+
+ return ret;
+}
+
+void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
+ struct extent_buffer *eb)
+{
+ if (!btrfs_is_zoned(eb->fs_info) || !cache)
+ return;
+
+ ASSERT(cache->meta_write_pointer == eb->start + eb->len);
+ cache->meta_write_pointer = eb->start;
+}
+
+int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
+{
+ if (!btrfs_dev_is_sequential(device, physical))
+ return -EOPNOTSUPP;
+
+ return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
+ length >> SECTOR_SHIFT, GFP_NOFS, 0);
+}
+
+static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
+ struct blk_zone *zone)
+{
+ struct btrfs_bio *bbio = NULL;
+ u64 mapped_length = PAGE_SIZE;
+ unsigned int nofs_flag;
+ int nmirrors;
+ int i, ret;
+
+ ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
+ &mapped_length, &bbio);
+ if (ret || !bbio || mapped_length < PAGE_SIZE) {
+ btrfs_put_bbio(bbio);
+ return -EIO;
+ }
+
+ if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
+ return -EINVAL;
+
+ nofs_flag = memalloc_nofs_save();
+ nmirrors = (int)bbio->num_stripes;
+ for (i = 0; i < nmirrors; i++) {
+ u64 physical = bbio->stripes[i].physical;
+ struct btrfs_device *dev = bbio->stripes[i].dev;
+
+ /* Missing device */
+ if (!dev->bdev)
+ continue;
+
+ ret = btrfs_get_dev_zone(dev, physical, zone);
+ /* Failing device */
+ if (ret == -EIO || ret == -EOPNOTSUPP)
+ continue;
+ break;
+ }
+ memalloc_nofs_restore(nofs_flag);
+
+ return ret;
+}
+
+/*
+ * Synchronize the write pointer in a zone at @physical_start on @tgt_dev, by
+ * writing zeroes from @physical_pos up to the write pointer of the
+ * dev-replace source device.
+ */
+int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
+ u64 physical_start, u64 physical_pos)
+{
+ struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
+ struct blk_zone zone;
+ u64 length;
+ u64 wp;
+ int ret;
+
+ if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
+ return 0;
+
+ ret = read_zone_info(fs_info, logical, &zone);
+ if (ret)
+ return ret;
+
+ wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
+
+ if (physical_pos == wp)
+ return 0;
+
+ if (physical_pos > wp)
+ return -EUCLEAN;
+
+ length = wp - physical_pos;
+ return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
+}
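/*
 * Editor's note (illustrative, not part of the patch): if the source zone's
 * report shows its write pointer 128 MiB past the zone start, the matching
 * write pointer on the target is wp = physical_start + 128 MiB. With the
 * target's copy position at physical_pos = physical_start + 96 MiB, the
 * remaining 32 MiB gap is zeroed out so both write pointers line up. The
 * numbers are assumptions for the example.
 */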
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 8abe2f83272b..61e969652fe1 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -7,6 +7,7 @@
#include <linux/blkdev.h>
#include "volumes.h"
#include "disk-io.h"
+#include "block-group.h"
struct btrfs_zoned_device_info {
/*
@@ -25,6 +26,7 @@ struct btrfs_zoned_device_info {
#ifdef CONFIG_BLK_DEV_ZONED
int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
struct blk_zone *zone);
+int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info);
int btrfs_get_dev_zone_info(struct btrfs_device *device);
void btrfs_destroy_dev_zone_info(struct btrfs_device *device);
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info);
@@ -35,6 +37,28 @@ int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
u64 *bytenr_ret);
void btrfs_advance_sb_log(struct btrfs_device *device, int mirror);
int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror);
+u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
+ u64 hole_end, u64 num_bytes);
+int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
+ u64 length, u64 *bytes);
+int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size);
+int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new);
+void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
+void btrfs_redirty_list_add(struct btrfs_transaction *trans,
+ struct extent_buffer *eb);
+void btrfs_free_redirty_list(struct btrfs_transaction *trans);
+bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em);
+void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
+ struct bio *bio);
+void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
+bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb,
+ struct btrfs_block_group **cache_ret);
+void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
+ struct extent_buffer *eb);
+int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
+int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
+ u64 physical_start, u64 physical_pos);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
struct blk_zone *zone)
@@ -42,6 +66,11 @@ static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
return 0;
}
+static inline int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
+{
+ return 0;
+}
+
static inline int btrfs_get_dev_zone_info(struct btrfs_device *device)
{
return 0;
@@ -85,6 +114,78 @@ static inline int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror
return 0;
}
+static inline u64 btrfs_find_allocatable_zones(struct btrfs_device *device,
+ u64 hole_start, u64 hole_end,
+ u64 num_bytes)
+{
+ return hole_start;
+}
+
+static inline int btrfs_reset_device_zone(struct btrfs_device *device,
+ u64 physical, u64 length, u64 *bytes)
+{
+ *bytes = 0;
+ return 0;
+}
+
+static inline int btrfs_ensure_empty_zones(struct btrfs_device *device,
+ u64 start, u64 size)
+{
+ return 0;
+}
+
+static inline int btrfs_load_block_group_zone_info(
+ struct btrfs_block_group *cache, bool new)
+{
+ return 0;
+}
+
+static inline void btrfs_calc_zone_unusable(struct btrfs_block_group *cache) { }
+
+static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans,
+ struct extent_buffer *eb) { }
+static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { }
+
+static inline bool btrfs_use_zone_append(struct btrfs_inode *inode,
+ struct extent_map *em)
+{
+ return false;
+}
+
+static inline void btrfs_record_physical_zoned(struct inode *inode,
+ u64 file_offset, struct bio *bio)
+{
+}
+
+static inline void btrfs_rewrite_logical_zoned(
+ struct btrfs_ordered_extent *ordered) { }
+
+static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb,
+ struct btrfs_block_group **cache_ret)
+{
+ return true;
+}
+
+static inline void btrfs_revert_meta_write_pointer(
+ struct btrfs_block_group *cache,
+ struct extent_buffer *eb)
+{
+}
+
+static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device,
+ u64 physical, u64 length)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev,
+ u64 logical, u64 physical_start,
+ u64 physical_pos)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
@@ -136,12 +237,16 @@ static inline void btrfs_dev_clear_zone_empty(struct btrfs_device *device, u64 p
static inline bool btrfs_check_device_zone_type(const struct btrfs_fs_info *fs_info,
struct block_device *bdev)
{
- u64 zone_size;
-
if (btrfs_is_zoned(fs_info)) {
- zone_size = bdev_zone_sectors(bdev) << SECTOR_SHIFT;
- /* Do not allow non-zoned device */
- return bdev_is_zoned(bdev) && fs_info->zone_size == zone_size;
+ /*
+ * We can allow a regular device on a zoned filesystem, because
+ * we will emulate the zoned capabilities.
+ */
+ if (!bdev_is_zoned(bdev))
+ return true;
+
+ return fs_info->zone_size ==
+ (bdev_zone_sectors(bdev) << SECTOR_SHIFT);
}
/* Do not allow Host Managed zoned device */
@@ -157,4 +262,46 @@ static inline bool btrfs_check_super_location(struct btrfs_device *device, u64 p
return device->zone_info == NULL || !btrfs_dev_is_sequential(device, pos);
}
+static inline bool btrfs_can_zone_reset(struct btrfs_device *device,
+ u64 physical, u64 length)
+{
+ u64 zone_size;
+
+ if (!btrfs_dev_is_sequential(device, physical))
+ return false;
+
+ zone_size = device->zone_info->zone_size;
+ if (!IS_ALIGNED(physical, zone_size) || !IS_ALIGNED(length, zone_size))
+ return false;
+
+ return true;
+}
+
+static inline void btrfs_zoned_meta_io_lock(struct btrfs_fs_info *fs_info)
+{
+ if (!btrfs_is_zoned(fs_info))
+ return;
+ mutex_lock(&fs_info->zoned_meta_io_lock);
+}
+
+static inline void btrfs_zoned_meta_io_unlock(struct btrfs_fs_info *fs_info)
+{
+ if (!btrfs_is_zoned(fs_info))
+ return;
+ mutex_unlock(&fs_info->zoned_meta_io_lock);
+}
+
+static inline void btrfs_clear_treelog_bg(struct btrfs_block_group *bg)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+ if (!btrfs_is_zoned(fs_info))
+ return;
+
+ spin_lock(&fs_info->treelog_bg_lock);
+ if (fs_info->treelog_bg == bg->start)
+ fs_info->treelog_bg = 0;
+ spin_unlock(&fs_info->treelog_bg_lock);
+}
+
#endif
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 9a4871636c6c..8e9626d63976 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -688,10 +688,8 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
bytes = min_t(unsigned long, destlen - pg_offset,
workspace->out_buf.size - buf_offset);
- kaddr = kmap_atomic(dest_page);
- memcpy(kaddr + pg_offset, workspace->out_buf.dst + buf_offset,
- bytes);
- kunmap_atomic(kaddr);
+ memcpy_to_page(dest_page, pg_offset,
+ workspace->out_buf.dst + buf_offset, bytes);
pg_offset += bytes;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index 32647d2011df..0cb7ffd4977c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -847,7 +847,8 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
if (retry)
gfp |= __GFP_NOFAIL;
- memcg = get_mem_cgroup_from_page(page);
+ /* The page lock pins the memcg */
+ memcg = page_memcg(page);
old_memcg = set_active_memcg(memcg);
head = NULL;
@@ -868,7 +869,6 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
}
out:
set_active_memcg(old_memcg);
- mem_cgroup_put(memcg);
return head;
/*
* In case anything failed, we just free everything we got.
@@ -2083,7 +2083,8 @@ static int __block_commit_write(struct inode *inode, struct page *page,
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
}
- clear_buffer_new(bh);
+ if (buffer_new(bh))
+ clear_buffer_new(bh);
block_start = block_end;
bh = bh->b_this_page;
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 4cea5fbf695e..5efa6a3702c0 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -470,14 +470,14 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
_debug("discard tail %llx", oi_size);
newattrs.ia_valid = ATTR_SIZE;
newattrs.ia_size = oi_size & PAGE_MASK;
- ret = notify_change(object->backer, &newattrs, NULL);
+ ret = notify_change(&init_user_ns, object->backer, &newattrs, NULL);
if (ret < 0)
goto truncate_failed;
}
newattrs.ia_valid = ATTR_SIZE;
newattrs.ia_size = ni_size;
- ret = notify_change(object->backer, &newattrs, NULL);
+ ret = notify_change(&init_user_ns, object->backer, &newattrs, NULL);
truncate_failed:
inode_unlock(d_inode(object->backer));
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index ecc8ecbbfa5a..7bf0732ae25c 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -311,7 +311,8 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
cachefiles_io_error(cache, "Unlink security error");
} else {
trace_cachefiles_unlink(object, rep, why);
- ret = vfs_unlink(d_inode(dir), rep, NULL);
+ ret = vfs_unlink(&init_user_ns, d_inode(dir), rep,
+ NULL);
if (preemptive)
cachefiles_mark_object_buried(cache, rep, why);
@@ -412,9 +413,16 @@ try_again:
if (ret < 0) {
cachefiles_io_error(cache, "Rename security error %d", ret);
} else {
+ struct renamedata rd = {
+ .old_mnt_userns = &init_user_ns,
+ .old_dir = d_inode(dir),
+ .old_dentry = rep,
+ .new_mnt_userns = &init_user_ns,
+ .new_dir = d_inode(cache->graveyard),
+ .new_dentry = grave,
+ };
trace_cachefiles_rename(object, rep, grave, why);
- ret = vfs_rename(d_inode(dir), rep,
- d_inode(cache->graveyard), grave, NULL, 0);
+ ret = vfs_rename(&rd);
if (ret != 0 && ret != -ENOMEM)
cachefiles_io_error(cache,
"Rename failed with error %d", ret);
@@ -561,7 +569,7 @@ lookup_again:
if (ret < 0)
goto create_error;
start = jiffies;
- ret = vfs_mkdir(d_inode(dir), next, 0);
+ ret = vfs_mkdir(&init_user_ns, d_inode(dir), next, 0);
cachefiles_hist(cachefiles_mkdir_histogram, start);
if (!key)
trace_cachefiles_mkdir(object, next, ret);
@@ -597,7 +605,8 @@ lookup_again:
if (ret < 0)
goto create_error;
start = jiffies;
- ret = vfs_create(d_inode(dir), next, S_IFREG, true);
+ ret = vfs_create(&init_user_ns, d_inode(dir), next,
+ S_IFREG, true);
cachefiles_hist(cachefiles_create_histogram, start);
trace_cachefiles_create(object, next, ret);
if (ret < 0)
@@ -791,7 +800,7 @@ retry:
ret = security_path_mkdir(&path, subdir, 0700);
if (ret < 0)
goto mkdir_error;
- ret = vfs_mkdir(d_inode(dir), subdir, 0700);
+ ret = vfs_mkdir(&init_user_ns, d_inode(dir), subdir, 0700);
if (ret < 0)
goto mkdir_error;
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 72e42438f3d7..a591b5e09637 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -39,8 +39,8 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
_enter("%p{%s}", object, type);
/* attempt to install a type label directly */
- ret = vfs_setxattr(dentry, cachefiles_xattr_cache, type, 2,
- XATTR_CREATE);
+ ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache, type,
+ 2, XATTR_CREATE);
if (ret == 0) {
_debug("SET"); /* we succeeded */
goto error;
@@ -54,7 +54,8 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
}
/* read the current type label */
- ret = vfs_getxattr(dentry, cachefiles_xattr_cache, xtype, 3);
+ ret = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache, xtype,
+ 3);
if (ret < 0) {
if (ret == -ERANGE)
goto bad_type_length;
@@ -110,9 +111,8 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object,
_debug("SET #%u", auxdata->len);
clear_bit(FSCACHE_COOKIE_AUX_UPDATED, &object->fscache.cookie->flags);
- ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
- &auxdata->type, auxdata->len,
- XATTR_CREATE);
+ ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
+ &auxdata->type, auxdata->len, XATTR_CREATE);
if (ret < 0 && ret != -ENOMEM)
cachefiles_io_error_obj(
object,
@@ -140,9 +140,8 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
_debug("SET #%u", auxdata->len);
clear_bit(FSCACHE_COOKIE_AUX_UPDATED, &object->fscache.cookie->flags);
- ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
- &auxdata->type, auxdata->len,
- XATTR_REPLACE);
+ ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
+ &auxdata->type, auxdata->len, XATTR_REPLACE);
if (ret < 0 && ret != -ENOMEM)
cachefiles_io_error_obj(
object,
@@ -171,7 +170,7 @@ int cachefiles_check_auxdata(struct cachefiles_object *object)
if (!auxbuf)
return -ENOMEM;
- xlen = vfs_getxattr(dentry, cachefiles_xattr_cache,
+ xlen = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
&auxbuf->type, 512 + 1);
ret = -ESTALE;
if (xlen < 1 ||
@@ -213,7 +212,7 @@ int cachefiles_check_object_xattr(struct cachefiles_object *object,
}
/* read the current type label */
- ret = vfs_getxattr(dentry, cachefiles_xattr_cache,
+ ret = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
&auxbuf->type, 512 + 1);
if (ret < 0) {
if (ret == -ENODATA)
@@ -270,9 +269,9 @@ int cachefiles_check_object_xattr(struct cachefiles_object *object,
}
/* update the current label */
- ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
- &auxdata->type, auxdata->len,
- XATTR_REPLACE);
+ ret = vfs_setxattr(&init_user_ns, dentry,
+ cachefiles_xattr_cache, &auxdata->type,
+ auxdata->len, XATTR_REPLACE);
if (ret < 0) {
cachefiles_io_error_obj(object,
"Can't update xattr on %lu"
@@ -309,7 +308,7 @@ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
{
int ret;
- ret = vfs_removexattr(dentry, cachefiles_xattr_cache);
+ ret = vfs_removexattr(&init_user_ns, dentry, cachefiles_xattr_cache);
if (ret < 0) {
if (ret == -ENOENT || ret == -ENODATA)
ret = 0;
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index e0465741c591..529af59d9fd3 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -82,7 +82,8 @@ retry:
return acl;
}
-int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int ceph_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
int ret = 0, size = 0;
const char *name = NULL;
@@ -100,7 +101,8 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
- ret = posix_acl_update_mode(inode, &new_mode, &acl);
+ ret = posix_acl_update_mode(&init_user_ns, inode,
+ &new_mode, &acl);
if (ret)
goto out;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 950552944436..26e66436f005 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1662,7 +1662,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
inode, off, len, ceph_cap_string(got), ret);
- ceph_put_cap_refs(ci, got);
+ ceph_put_cap_refs_async(ci, got);
out_free:
ceph_restore_sigs(&oldset);
sb_end_pagefault(inode->i_sb);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 255a512f1277..570731c4d019 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3027,6 +3027,12 @@ static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
return 0;
}
+enum put_cap_refs_mode {
+ PUT_CAP_REFS_SYNC = 0,
+ PUT_CAP_REFS_NO_CHECK,
+ PUT_CAP_REFS_ASYNC,
+};
+
/*
* Release cap refs.
*
@@ -3037,10 +3043,11 @@ static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
* cap_snap, and wake up any waiters.
*/
static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
- bool skip_checking_caps)
+ enum put_cap_refs_mode mode)
{
struct inode *inode = &ci->vfs_inode;
int last = 0, put = 0, flushsnaps = 0, wake = 0;
+ bool check_flushsnaps = false;
spin_lock(&ci->i_ceph_lock);
if (had & CEPH_CAP_PIN)
@@ -3057,26 +3064,17 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
if (had & CEPH_CAP_FILE_BUFFER) {
if (--ci->i_wb_ref == 0) {
last++;
+ /* put the ref held by ceph_take_cap_refs() */
put++;
+ check_flushsnaps = true;
}
dout("put_cap_refs %p wb %d -> %d (?)\n",
inode, ci->i_wb_ref+1, ci->i_wb_ref);
}
- if (had & CEPH_CAP_FILE_WR)
+ if (had & CEPH_CAP_FILE_WR) {
if (--ci->i_wr_ref == 0) {
last++;
- if (__ceph_have_pending_cap_snap(ci)) {
- struct ceph_cap_snap *capsnap =
- list_last_entry(&ci->i_cap_snaps,
- struct ceph_cap_snap,
- ci_item);
- capsnap->writing = 0;
- if (ceph_try_drop_cap_snap(ci, capsnap))
- put++;
- else if (__ceph_finish_cap_snap(ci, capsnap))
- flushsnaps = 1;
- wake = 1;
- }
+ check_flushsnaps = true;
if (ci->i_wrbuffer_ref_head == 0 &&
ci->i_dirty_caps == 0 &&
ci->i_flushing_caps == 0) {
@@ -3088,15 +3086,42 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm)
drop_inode_snap_realm(ci);
}
+ }
+ if (check_flushsnaps && __ceph_have_pending_cap_snap(ci)) {
+ struct ceph_cap_snap *capsnap =
+ list_last_entry(&ci->i_cap_snaps,
+ struct ceph_cap_snap,
+ ci_item);
+
+ capsnap->writing = 0;
+ if (ceph_try_drop_cap_snap(ci, capsnap))
+ /* put the ref held by ceph_queue_cap_snap() */
+ put++;
+ else if (__ceph_finish_cap_snap(ci, capsnap))
+ flushsnaps = 1;
+ wake = 1;
+ }
spin_unlock(&ci->i_ceph_lock);
dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
last ? " last" : "", put ? " put" : "");
- if (last && !skip_checking_caps)
- ceph_check_caps(ci, 0, NULL);
- else if (flushsnaps)
- ceph_flush_snaps(ci, NULL);
+ switch (mode) {
+ case PUT_CAP_REFS_SYNC:
+ if (last)
+ ceph_check_caps(ci, 0, NULL);
+ else if (flushsnaps)
+ ceph_flush_snaps(ci, NULL);
+ break;
+ case PUT_CAP_REFS_ASYNC:
+ if (last)
+ ceph_queue_check_caps(inode);
+ else if (flushsnaps)
+ ceph_queue_flush_snaps(inode);
+ break;
+ default:
+ break;
+ }
if (wake)
wake_up_all(&ci->i_cap_wq);
while (put-- > 0)
@@ -3105,12 +3130,17 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
{
- __ceph_put_cap_refs(ci, had, false);
+ __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_SYNC);
+}
+
+void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had)
+{
+ __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_ASYNC);
}
void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had)
{
- __ceph_put_cap_refs(ci, had, true);
+ __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_NO_CHECK);
}
/*
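The new PUT_CAP_REFS_ASYNC mode lets callers that should not run ceph_check_caps()/ceph_flush_snaps() inline (such as the page-fault path converted in addr.c above) defer that work to the inode work queue. A hedged sketch of the caller-side choice; the in_fault flag is purely illustrative:

	if (in_fault)
		ceph_put_cap_refs_async(ci, got);  /* check_caps/flush_snaps queued via inode work */
	else
		ceph_put_cap_refs(ci, got);        /* check_caps/flush_snaps run synchronously */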
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 858ee7362ff5..83d9358854fb 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -823,8 +823,8 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
return PTR_ERR(result);
}
-static int ceph_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
+static int ceph_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_mds_request *req;
@@ -878,14 +878,14 @@ out:
return err;
}
-static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int ceph_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
- return ceph_mknod(dir, dentry, mode, 0);
+ return ceph_mknod(mnt_userns, dir, dentry, mode, 0);
}
-static int ceph_symlink(struct inode *dir, struct dentry *dentry,
- const char *dest)
+static int ceph_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *dest)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_mds_request *req;
@@ -937,7 +937,8 @@ out:
return err;
}
-static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int ceph_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_mds_request *req;
@@ -1183,9 +1184,9 @@ out:
return err;
}
-static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int ceph_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old_dir->i_sb);
struct ceph_mds_request *req;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index adc8fc3c5d85..156f849f5385 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1816,60 +1816,17 @@ void ceph_async_iput(struct inode *inode)
}
}
-/*
- * Write back inode data in a worker thread. (This can't be done
- * in the message handler context.)
- */
-void ceph_queue_writeback(struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- set_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask);
-
- ihold(inode);
- if (queue_work(ceph_inode_to_client(inode)->inode_wq,
- &ci->i_work)) {
- dout("ceph_queue_writeback %p\n", inode);
- } else {
- dout("ceph_queue_writeback %p already queued, mask=%lx\n",
- inode, ci->i_work_mask);
- iput(inode);
- }
-}
-
-/*
- * queue an async invalidation
- */
-void ceph_queue_invalidate(struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- set_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask);
-
- ihold(inode);
- if (queue_work(ceph_inode_to_client(inode)->inode_wq,
- &ceph_inode(inode)->i_work)) {
- dout("ceph_queue_invalidate %p\n", inode);
- } else {
- dout("ceph_queue_invalidate %p already queued, mask=%lx\n",
- inode, ci->i_work_mask);
- iput(inode);
- }
-}
-
-/*
- * Queue an async vmtruncate. If we fail to queue work, we will handle
- * the truncation the next time we call __ceph_do_pending_vmtruncate.
- */
-void ceph_queue_vmtruncate(struct inode *inode)
+void ceph_queue_inode_work(struct inode *inode, int work_bit)
{
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
- set_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask);
+ set_bit(work_bit, &ci->i_work_mask);
ihold(inode);
- if (queue_work(ceph_inode_to_client(inode)->inode_wq,
- &ci->i_work)) {
- dout("ceph_queue_vmtruncate %p\n", inode);
+ if (queue_work(fsc->inode_wq, &ci->i_work)) {
+ dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
} else {
- dout("ceph_queue_vmtruncate %p already queued, mask=%lx\n",
+ dout("queue_inode_work %p already queued, mask=%lx\n",
inode, ci->i_work_mask);
iput(inode);
}
@@ -2008,6 +1965,12 @@ static void ceph_inode_work(struct work_struct *work)
if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
__ceph_do_pending_vmtruncate(inode);
+ if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
+ ceph_check_caps(ci, 0, NULL);
+
+ if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
+ ceph_flush_snaps(ci, NULL);
+
iput(inode);
}
@@ -2238,7 +2201,8 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
/*
* setattr
*/
-int ceph_setattr(struct dentry *dentry, struct iattr *attr)
+int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -2247,7 +2211,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
- err = setattr_prepare(dentry, attr);
+ err = setattr_prepare(&init_user_ns, dentry, attr);
if (err != 0)
return err;
@@ -2262,7 +2226,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
err = __ceph_setattr(inode, attr);
if (err >= 0 && (attr->ia_valid & ATTR_MODE))
- err = posix_acl_chmod(inode, attr->ia_mode);
+ err = posix_acl_chmod(&init_user_ns, inode, attr->ia_mode);
return err;
}
@@ -2321,7 +2285,8 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
* Check inode permissions. We verify we have a valid value for
* the AUTH cap, then call the generic handler.
*/
-int ceph_permission(struct inode *inode, int mask)
+int ceph_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask)
{
int err;
@@ -2331,7 +2296,7 @@ int ceph_permission(struct inode *inode, int mask)
err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
if (!err)
- err = generic_permission(inode, mask);
+ err = generic_permission(&init_user_ns, inode, mask);
return err;
}
@@ -2368,8 +2333,8 @@ static int statx_to_caps(u32 want, umode_t mode)
* Get all the attributes. If we have sufficient caps for the requested attrs,
* then we can avoid talking to the MDS at all.
*/
-int ceph_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -2385,7 +2350,7 @@ int ceph_getattr(const struct path *path, struct kstat *stat,
return err;
}
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
stat->ino = ceph_present_inode(inode);
/*
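ceph_queue_inode_work() merges the three former queueing helpers; the per-work wrappers reappear as static inlines in super.h below, so existing call sites keep their names. A condensed sketch of the reference handling the new helper preserves:

	/* hold an inode reference while the work item is pending */
	set_bit(work_bit, &ci->i_work_mask);
	ihold(inode);
	if (!queue_work(fsc->inode_wq, &ci->i_work))
		iput(inode);	/* work already queued: drop the extra reference */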
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index b611f829cb61..0728b01d4d43 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -623,6 +623,16 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
return 0;
}
+ /* Fb cap still in use, delay it */
+ if (ci->i_wb_ref) {
+ dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
+ "used WRBUFFER, delaying\n", inode, capsnap,
+ capsnap->context, capsnap->context->seq,
+ ceph_cap_string(capsnap->dirty), capsnap->size);
+ capsnap->writing = 1;
+ return 0;
+ }
+
ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
inode, capsnap, capsnap->context,
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index b62d8fee3b86..c48bb30c8d70 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -562,9 +562,11 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
/*
* Masks of ceph inode work.
*/
-#define CEPH_I_WORK_WRITEBACK 0 /* writeback */
-#define CEPH_I_WORK_INVALIDATE_PAGES 1 /* invalidate pages */
-#define CEPH_I_WORK_VMTRUNCATE 2 /* vmtruncate */
+#define CEPH_I_WORK_WRITEBACK 0
+#define CEPH_I_WORK_INVALIDATE_PAGES 1
+#define CEPH_I_WORK_VMTRUNCATE 2
+#define CEPH_I_WORK_CHECK_CAPS 3
+#define CEPH_I_WORK_FLUSH_SNAPS 4
/*
* We set the ERROR_WRITE bit when we start seeing write errors on an inode
@@ -962,21 +964,49 @@ extern int ceph_inode_holds_cap(struct inode *inode, int mask);
extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
extern void __ceph_do_pending_vmtruncate(struct inode *inode);
-extern void ceph_queue_vmtruncate(struct inode *inode);
-extern void ceph_queue_invalidate(struct inode *inode);
-extern void ceph_queue_writeback(struct inode *inode);
+
extern void ceph_async_iput(struct inode *inode);
+void ceph_queue_inode_work(struct inode *inode, int work_bit);
+
+static inline void ceph_queue_vmtruncate(struct inode *inode)
+{
+ ceph_queue_inode_work(inode, CEPH_I_WORK_VMTRUNCATE);
+}
+
+static inline void ceph_queue_invalidate(struct inode *inode)
+{
+ ceph_queue_inode_work(inode, CEPH_I_WORK_INVALIDATE_PAGES);
+}
+
+static inline void ceph_queue_writeback(struct inode *inode)
+{
+ ceph_queue_inode_work(inode, CEPH_I_WORK_WRITEBACK);
+}
+
+static inline void ceph_queue_check_caps(struct inode *inode)
+{
+ ceph_queue_inode_work(inode, CEPH_I_WORK_CHECK_CAPS);
+}
+
+static inline void ceph_queue_flush_snaps(struct inode *inode)
+{
+ ceph_queue_inode_work(inode, CEPH_I_WORK_FLUSH_SNAPS);
+}
+
extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
int mask, bool force);
static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
{
return __ceph_do_getattr(inode, NULL, mask, force);
}
-extern int ceph_permission(struct inode *inode, int mask);
+extern int ceph_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask);
extern int __ceph_setattr(struct inode *inode, struct iattr *attr);
-extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
-extern int ceph_getattr(const struct path *path, struct kstat *stat,
+extern int ceph_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr);
+extern int ceph_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
/* xattr.c */
@@ -1037,7 +1067,8 @@ void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx);
#ifdef CONFIG_CEPH_FS_POSIX_ACL
struct posix_acl *ceph_get_acl(struct inode *, int);
-int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+int ceph_set_acl(struct user_namespace *mnt_userns,
+ struct inode *inode, struct posix_acl *acl, int type);
int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
struct ceph_acl_sec_ctx *as_ctx);
void ceph_init_inode_acls(struct inode *inode,
@@ -1105,6 +1136,7 @@ extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
bool snap_rwsem_locked);
extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
+extern void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had);
extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 24997982de01..02f59bcb4f27 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1238,6 +1238,7 @@ static int ceph_get_xattr_handler(const struct xattr_handler *handler,
}
static int ceph_set_xattr_handler(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index b231dcf1d1f9..88a7958170ee 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -133,11 +133,12 @@ cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan)
{
struct TCP_Server_Info *server = chan->server;
- seq_printf(m, "\t\tChannel %d Number of credits: %d Dialect 0x%x "
- "TCP status: %d Instance: %d Local Users To Server: %d "
- "SecMode: 0x%x Req On Wire: %d In Send: %d "
- "In MaxReq Wait: %d\n",
- i+1,
+ seq_printf(m, "\n\n\t\tChannel: %d ConnectionId: 0x%llx"
+ "\n\t\tNumber of credits: %d Dialect 0x%x"
+ "\n\t\tTCP status: %d Instance: %d"
+ "\n\t\tLocal Users To Server: %d SecMode: 0x%x Req On Wire: %d"
+ "\n\t\tIn Send: %d In MaxReq Wait: %d",
+ i+1, server->conn_id,
server->credits,
server->dialect,
server->tcpStatus,
@@ -197,16 +198,16 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
cfile = list_entry(tmp2, struct cifsFileInfo,
tlist);
seq_printf(m,
- "0x%x 0x%llx 0x%x %d %d %d %s",
+ "0x%x 0x%llx 0x%x %d %d %d %pd",
tcon->tid,
cfile->fid.persistent_fid,
cfile->f_flags,
cfile->count,
cfile->pid,
from_kuid(&init_user_ns, cfile->uid),
- cfile->dentry->d_name.name);
+ cfile->dentry);
#ifdef CONFIG_CIFS_DEBUG2
- seq_printf(m, " 0x%llx\n", cfile->fid.mid);
+ seq_printf(m, " %llu\n", cfile->fid.mid);
#else
seq_printf(m, "\n");
#endif /* CIFS_DEBUG2 */
@@ -227,7 +228,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- int i, j;
+ int c, i, j;
seq_puts(m,
"Display Internal CIFS Data Structures for Debugging\n"
@@ -275,14 +276,25 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_putc(m, '\n');
seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize);
seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
- seq_printf(m, "Servers:");
- i = 0;
+ seq_printf(m, "\nServers: ");
+
+ c = 0;
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+ /* channel info will be printed as a part of sessions below */
+ if (server->is_channel)
+ continue;
+
+ c++;
+ seq_printf(m, "\n%d) ConnectionId: 0x%llx ",
+ c, server->conn_id);
+
+ if (server->hostname)
+ seq_printf(m, "Hostname: %s ", server->hostname);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (!server->rdma)
goto skip_rdma;
@@ -362,46 +374,48 @@ skip_rdma:
if (server->posix_ext_supported)
seq_printf(m, " posix");
- i++;
+ if (server->rdma)
+ seq_printf(m, "\nRDMA ");
+ seq_printf(m, "\nTCP status: %d Instance: %d"
+ "\nLocal Users To Server: %d SecMode: 0x%x Req On Wire: %d",
+ server->tcpStatus,
+ server->reconnect_instance,
+ server->srv_count,
+ server->sec_mode, in_flight(server));
+
+ seq_printf(m, "\nIn Send: %d In MaxReq Wait: %d",
+ atomic_read(&server->in_send),
+ atomic_read(&server->num_waiters));
+
+ seq_printf(m, "\n\n\tSessions: ");
+ i = 0;
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
+ i++;
if ((ses->serverDomain == NULL) ||
(ses->serverOS == NULL) ||
(ses->serverNOS == NULL)) {
- seq_printf(m, "\n%d) Name: %s Uses: %d Capability: 0x%x\tSession Status: %d ",
- i, ses->serverName, ses->ses_count,
+ seq_printf(m, "\n\t%d) Address: %s Uses: %d Capability: 0x%x\tSession Status: %d ",
+ i, ses->ip_addr, ses->ses_count,
ses->capabilities, ses->status);
if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
- seq_printf(m, "Guest\t");
+ seq_printf(m, "Guest ");
else if (ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
- seq_printf(m, "Anonymous\t");
+ seq_printf(m, "Anonymous ");
} else {
seq_printf(m,
- "\n%d) Name: %s Domain: %s Uses: %d OS:"
- " %s\n\tNOS: %s\tCapability: 0x%x\n\tSMB"
- " session status: %d ",
- i, ses->serverName, ses->serverDomain,
+ "\n\t%d) Name: %s Domain: %s Uses: %d OS: %s "
+ "\n\tNOS: %s\tCapability: 0x%x"
+ "\n\tSMB session status: %d ",
+ i, ses->ip_addr, ses->serverDomain,
ses->ses_count, ses->serverOS, ses->serverNOS,
ses->capabilities, ses->status);
}
- seq_printf(m,"Security type: %s\n",
+ seq_printf(m, "\n\tSecurity type: %s ",
get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
- if (server->rdma)
- seq_printf(m, "RDMA\n\t");
- seq_printf(m, "TCP status: %d Instance: %d\n\tLocal Users To "
- "Server: %d SecMode: 0x%x Req On Wire: %d",
- server->tcpStatus,
- server->reconnect_instance,
- server->srv_count,
- server->sec_mode, in_flight(server));
-
- seq_printf(m, " In Send: %d In MaxReq Wait: %d",
- atomic_read(&server->in_send),
- atomic_read(&server->num_waiters));
-
/* dump session id helpful for use with network trace */
seq_printf(m, " SessionId: 0x%llx", ses->Suid);
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
@@ -414,13 +428,13 @@ skip_rdma:
from_kuid(&init_user_ns, ses->cred_uid));
if (ses->chan_count > 1) {
- seq_printf(m, "\n\n\tExtra Channels: %zu\n",
+ seq_printf(m, "\n\n\tExtra Channels: %zu ",
ses->chan_count-1);
for (j = 1; j < ses->chan_count; j++)
cifs_dump_channel(m, j, &ses->chans[j]);
}
- seq_puts(m, "\n\n\tShares:");
+ seq_puts(m, "\n\n\tShares: ");
j = 0;
seq_printf(m, "\n\t%d) IPC: ", j);
@@ -437,38 +451,43 @@ skip_rdma:
cifs_debug_tcon(m, tcon);
}
- seq_puts(m, "\n\tMIDs:\n");
-
- spin_lock(&GlobalMid_Lock);
- list_for_each(tmp3, &server->pending_mid_q) {
- mid_entry = list_entry(tmp3, struct mid_q_entry,
- qhead);
- seq_printf(m, "\tState: %d com: %d pid:"
- " %d cbdata: %p mid %llu\n",
- mid_entry->mid_state,
- le16_to_cpu(mid_entry->command),
- mid_entry->pid,
- mid_entry->callback_data,
- mid_entry->mid);
- }
- spin_unlock(&GlobalMid_Lock);
-
spin_lock(&ses->iface_lock);
if (ses->iface_count)
- seq_printf(m, "\n\tServer interfaces: %zu\n",
+ seq_printf(m, "\n\n\tServer interfaces: %zu",
ses->iface_count);
for (j = 0; j < ses->iface_count; j++) {
struct cifs_server_iface *iface;
iface = &ses->iface_list[j];
- seq_printf(m, "\t%d)", j);
+ seq_printf(m, "\n\t%d)", j+1);
cifs_dump_iface(m, iface);
if (is_ses_using_iface(ses, iface))
seq_puts(m, "\t\t[CONNECTED]\n");
}
spin_unlock(&ses->iface_lock);
}
+ if (i == 0)
+ seq_printf(m, "\n\t\t[NONE]");
+
+ seq_puts(m, "\n\n\tMIDs: ");
+ spin_lock(&GlobalMid_Lock);
+ list_for_each(tmp3, &server->pending_mid_q) {
+ mid_entry = list_entry(tmp3, struct mid_q_entry,
+ qhead);
+ seq_printf(m, "\n\tState: %d com: %d pid:"
+ " %d cbdata: %p mid %llu\n",
+ mid_entry->mid_state,
+ le16_to_cpu(mid_entry->command),
+ mid_entry->pid,
+ mid_entry->callback_data,
+ mid_entry->mid);
+ }
+ spin_unlock(&GlobalMid_Lock);
+ seq_printf(m, "\n--\n");
}
+ if (c == 0)
+ seq_printf(m, "\n\t[NONE]");
+
spin_unlock(&cifs_tcp_ses_lock);
seq_putc(m, '\n');
diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
index d35f599aa00e..d829b8bf833e 100644
--- a/fs/cifs/cifs_swn.c
+++ b/fs/cifs/cifs_swn.c
@@ -248,7 +248,7 @@ nlmsg_fail:
/*
* Try to find a matching registration for the tcon's server name and share name.
- * Calls to this funciton must be protected by cifs_swnreg_idr_mutex.
+ * Calls to this function must be protected by cifs_swnreg_idr_mutex.
* TODO Try to avoid memory allocations
*/
static struct cifs_swn_reg *cifs_find_swn_reg(struct cifs_tcon *tcon)
@@ -272,7 +272,7 @@ static struct cifs_swn_reg *cifs_find_swn_reg(struct cifs_tcon *tcon)
if (IS_ERR(share_name)) {
int ret;
- ret = PTR_ERR(net_name);
+ ret = PTR_ERR(share_name);
cifs_dbg(VFS, "%s: failed to extract share name from target '%s': %d\n",
__func__, tcon->treeName, ret);
kfree(net_name);
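The cifs_swn.c change fixes a copy-and-paste bug: when extracting the share name fails, the error has to be read from share_name rather than from the already-validated net_name. A minimal sketch of the corrected pattern; extract_share() is a placeholder name, not the real helper:

	share_name = extract_share(tcon->treeName);	/* hypothetical helper returning ERR_PTR() on failure */
	if (IS_ERR(share_name)) {
		ret = PTR_ERR(share_name);		/* take the error from the pointer that failed */
		kfree(net_name);
		return ERR_PTR(ret);
	}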
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 562913e2b3f2..2be22a5c690f 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -267,10 +267,11 @@ is_well_known_sid(const struct cifs_sid *psid, uint32_t *puid, bool is_group)
return true; /* well known sid found, uid returned */
}
-static void
+static __u16
cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
{
int i;
+ __u16 size = 1 + 1 + 6;
dst->revision = src->revision;
dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES);
@@ -278,6 +279,9 @@ cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
dst->authority[i] = src->authority[i];
for (i = 0; i < dst->num_subauth; ++i)
dst->sub_auth[i] = src->sub_auth[i];
+ size += (dst->num_subauth * 4);
+
+ return size;
}
static int
@@ -521,8 +525,11 @@ exit_cifs_idmap(void)
}
/* copy ntsd, owner sid, and group sid from a security descriptor to another */
-static void copy_sec_desc(const struct cifs_ntsd *pntsd,
- struct cifs_ntsd *pnntsd, __u32 sidsoffset)
+static __u32 copy_sec_desc(const struct cifs_ntsd *pntsd,
+ struct cifs_ntsd *pnntsd,
+ __u32 sidsoffset,
+ struct cifs_sid *pownersid,
+ struct cifs_sid *pgrpsid)
{
struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
@@ -536,19 +543,25 @@ static void copy_sec_desc(const struct cifs_ntsd *pntsd,
pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
/* copy owner sid */
- owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ if (pownersid)
+ owner_sid_ptr = pownersid;
+ else
+ owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
le32_to_cpu(pntsd->osidoffset));
nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
/* copy group sid */
- group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ if (pgrpsid)
+ group_sid_ptr = pgrpsid;
+ else
+ group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
le32_to_cpu(pntsd->gsidoffset));
ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
sizeof(struct cifs_sid));
cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
- return;
+ return sidsoffset + (2 * sizeof(struct cifs_sid));
}
@@ -663,6 +676,25 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
return;
}
+static __u16 cifs_copy_ace(struct cifs_ace *dst, struct cifs_ace *src, struct cifs_sid *psid)
+{
+ __u16 size = 1 + 1 + 2 + 4;
+
+ dst->type = src->type;
+ dst->flags = src->flags;
+ dst->access_req = src->access_req;
+
+ /* Check if there's a replacement sid specified */
+ if (psid)
+ size += cifs_copy_sid(&dst->sid, psid);
+ else
+ size += cifs_copy_sid(&dst->sid, &src->sid);
+
+ dst->size = cpu_to_le16(size);
+
+ return size;
+}
+
static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
const struct cifs_sid *psid, __u64 nmode,
umode_t bits, __u8 access_type,
@@ -907,29 +939,30 @@ unsigned int setup_special_user_owner_ACE(struct cifs_ace *pntace)
return ace_size;
}
-static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
- struct cifs_sid *pgrpsid, __u64 *pnmode, bool modefromsid)
+static void populate_new_aces(char *nacl_base,
+ struct cifs_sid *pownersid,
+ struct cifs_sid *pgrpsid,
+ __u64 *pnmode, u32 *pnum_aces, u16 *pnsize,
+ bool modefromsid)
{
- u16 size = 0;
- u32 num_aces = 0;
- struct cifs_acl *pnndacl;
__u64 nmode;
+ u32 num_aces = 0;
+ u16 nsize = 0;
__u64 user_mode;
__u64 group_mode;
__u64 other_mode;
__u64 deny_user_mode = 0;
__u64 deny_group_mode = 0;
bool sticky_set = false;
-
- pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));
+ struct cifs_ace *pnntace = NULL;
nmode = *pnmode;
+ num_aces = *pnum_aces;
+ nsize = *pnsize;
if (modefromsid) {
- struct cifs_ace *pntace =
- (struct cifs_ace *)((char *)pnndacl + size);
-
- size += setup_special_mode_ACE(pntace, nmode);
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ nsize += setup_special_mode_ACE(pnntace, nmode);
num_aces++;
goto set_size;
}
@@ -966,40 +999,173 @@ static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
sticky_set = true;
if (deny_user_mode) {
- size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
- pownersid, deny_user_mode, 0700, ACCESS_DENIED, false);
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, pownersid, deny_user_mode,
+ 0700, ACCESS_DENIED, false);
num_aces++;
}
+
/* Group DENY ACE does not conflict with owner ALLOW ACE. Keep in preferred order*/
if (deny_group_mode && !(deny_group_mode & (user_mode >> 3))) {
- size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
- pgrpsid, deny_group_mode, 0070, ACCESS_DENIED, false);
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode,
+ 0070, ACCESS_DENIED, false);
num_aces++;
}
- size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
- pownersid, user_mode, 0700, ACCESS_ALLOWED, true);
+
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, pownersid, user_mode,
+ 0700, ACCESS_ALLOWED, true);
num_aces++;
+
/* Group DENY ACE conflicts with owner ALLOW ACE. So keep it after. */
if (deny_group_mode && (deny_group_mode & (user_mode >> 3))) {
- size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
- pgrpsid, deny_group_mode, 0070, ACCESS_DENIED, false);
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode,
+ 0070, ACCESS_DENIED, false);
num_aces++;
}
- size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
- pgrpsid, group_mode, 0070, ACCESS_ALLOWED, !sticky_set);
+
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, pgrpsid, group_mode,
+ 0070, ACCESS_ALLOWED, !sticky_set);
num_aces++;
- size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
- &sid_everyone, other_mode, 0007, ACCESS_ALLOWED, !sticky_set);
+
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, &sid_everyone, other_mode,
+ 0007, ACCESS_ALLOWED, !sticky_set);
num_aces++;
set_size:
+ *pnum_aces = num_aces;
+ *pnsize = nsize;
+}
+
+static __u16 replace_sids_and_copy_aces(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
+ struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
+ struct cifs_sid *pnownersid, struct cifs_sid *pngrpsid)
+{
+ int i;
+ u16 size = 0;
+ struct cifs_ace *pntace = NULL;
+ char *acl_base = NULL;
+ u32 src_num_aces = 0;
+ u16 nsize = 0;
+ struct cifs_ace *pnntace = NULL;
+ char *nacl_base = NULL;
+ u16 ace_size = 0;
+
+ acl_base = (char *)pdacl;
+ size = sizeof(struct cifs_acl);
+ src_num_aces = le32_to_cpu(pdacl->num_aces);
+
+ nacl_base = (char *)pndacl;
+ nsize = sizeof(struct cifs_acl);
+
+ /* Go through all the ACEs */
+ for (i = 0; i < src_num_aces; ++i) {
+ pntace = (struct cifs_ace *) (acl_base + size);
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+
+ if (pnownersid && compare_sids(&pntace->sid, pownersid) == 0)
+ ace_size = cifs_copy_ace(pnntace, pntace, pnownersid);
+ else if (pngrpsid && compare_sids(&pntace->sid, pgrpsid) == 0)
+ ace_size = cifs_copy_ace(pnntace, pntace, pngrpsid);
+ else
+ ace_size = cifs_copy_ace(pnntace, pntace, NULL);
+
+ size += le16_to_cpu(pntace->size);
+ nsize += ace_size;
+ }
+
+ return nsize;
+}
+
+static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
+ struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
+ __u64 *pnmode, bool mode_from_sid)
+{
+ int i;
+ u16 size = 0;
+ struct cifs_ace *pntace = NULL;
+ char *acl_base = NULL;
+ u32 src_num_aces = 0;
+ u16 nsize = 0;
+ struct cifs_ace *pnntace = NULL;
+ char *nacl_base = NULL;
+ u32 num_aces = 0;
+ __u64 nmode;
+ bool new_aces_set = false;
+
+ /* Assuming that pndacl and pnmode are never NULL */
+ nmode = *pnmode;
+ nacl_base = (char *)pndacl;
+ nsize = sizeof(struct cifs_acl);
+
+ /* If pdacl is NULL, we don't have a src. Simply populate new ACL. */
+ if (!pdacl) {
+ populate_new_aces(nacl_base,
+ pownersid, pgrpsid,
+ pnmode, &num_aces, &nsize,
+ mode_from_sid);
+ goto finalize_dacl;
+ }
+
+ acl_base = (char *)pdacl;
+ size = sizeof(struct cifs_acl);
+ src_num_aces = le32_to_cpu(pdacl->num_aces);
+
+ /* Retain old ACEs which we can retain */
+ for (i = 0; i < src_num_aces; ++i) {
+ pntace = (struct cifs_ace *) (acl_base + size);
+
+ if (!new_aces_set && (pntace->flags & INHERITED_ACE)) {
+ /* Place the new ACEs in between existing explicit and inherited */
+ populate_new_aces(nacl_base,
+ pownersid, pgrpsid,
+ pnmode, &num_aces, &nsize,
+ mode_from_sid);
+
+ new_aces_set = true;
+ }
+
+ /* If it's any one of the ACEs we're replacing, skip! */
+ if (!mode_from_sid &&
+ ((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||
+ (compare_sids(&pntace->sid, pownersid) == 0) ||
+ (compare_sids(&pntace->sid, pgrpsid) == 0) ||
+ (compare_sids(&pntace->sid, &sid_everyone) == 0) ||
+ (compare_sids(&pntace->sid, &sid_authusers) == 0))) {
+ goto next_ace;
+ }
+
+ /* update the pointer to the next ACE to populate */
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+
+ nsize += cifs_copy_ace(pnntace, pntace, NULL);
+ num_aces++;
+
+next_ace:
+ size += le16_to_cpu(pntace->size);
+ }
+
+ /* If inherited ACEs are not present, place the new ones at the tail */
+ if (!new_aces_set) {
+ populate_new_aces(nacl_base,
+ pownersid, pgrpsid,
+ pnmode, &num_aces, &nsize,
+ mode_from_sid);
+
+ new_aces_set = true;
+ }
+
+finalize_dacl:
pndacl->num_aces = cpu_to_le32(num_aces);
- pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
+ pndacl->size = cpu_to_le16(nsize);
return 0;
}
-
static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
{
/* BB need to add parm so we can store the SID BB */
@@ -1094,7 +1260,7 @@ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
/* Convert permission bits from mode to equivalent CIFS ACL */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
- __u32 secdesclen, __u64 *pnmode, kuid_t uid, kgid_t gid,
+ __u32 secdesclen, __u32 *pnsecdesclen, __u64 *pnmode, kuid_t uid, kgid_t gid,
bool mode_from_sid, bool id_from_sid, int *aclflag)
{
int rc = 0;
@@ -1102,39 +1268,59 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
__u32 ndacloffset;
__u32 sidsoffset;
struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
- struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
+ struct cifs_sid *nowner_sid_ptr = NULL, *ngroup_sid_ptr = NULL;
struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */
struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
+ char *end_of_acl = ((char *)pntsd) + secdesclen;
+ u16 size = 0;
- if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
- owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
- le32_to_cpu(pntsd->osidoffset));
- group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
- le32_to_cpu(pntsd->gsidoffset));
- dacloffset = le32_to_cpu(pntsd->dacloffset);
+ dacloffset = le32_to_cpu(pntsd->dacloffset);
+ if (dacloffset) {
dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
+ if (end_of_acl < (char *)dacl_ptr + le16_to_cpu(dacl_ptr->size)) {
+ cifs_dbg(VFS, "Server returned illegal ACL size\n");
+ return -EINVAL;
+ }
+ }
+
+ owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->osidoffset));
+ group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->gsidoffset));
+
+ if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
ndacloffset = sizeof(struct cifs_ntsd);
ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
- ndacl_ptr->revision = dacl_ptr->revision;
- ndacl_ptr->size = 0;
- ndacl_ptr->num_aces = 0;
+ ndacl_ptr->revision =
+ dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
- rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
+ ndacl_ptr->size = cpu_to_le16(0);
+ ndacl_ptr->num_aces = cpu_to_le32(0);
+
+ rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr,
pnmode, mode_from_sid);
+
sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
- /* copy sec desc control portion & owner and group sids */
- copy_sec_desc(pntsd, pnntsd, sidsoffset);
- *aclflag = CIFS_ACL_DACL;
+ /* copy the non-dacl portion of secdesc */
+ *pnsecdesclen = copy_sec_desc(pntsd, pnntsd, sidsoffset,
+ NULL, NULL);
+
+ *aclflag |= CIFS_ACL_DACL;
} else {
- memcpy(pnntsd, pntsd, secdesclen);
+ ndacloffset = sizeof(struct cifs_ntsd);
+ ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
+ ndacl_ptr->revision =
+ dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
+ ndacl_ptr->num_aces = dacl_ptr->num_aces;
+
if (uid_valid(uid)) { /* chown */
uid_t id;
- owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
- le32_to_cpu(pnntsd->osidoffset));
nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
- if (!nowner_sid_ptr)
- return -ENOMEM;
+ if (!nowner_sid_ptr) {
+ rc = -ENOMEM;
+ goto chown_chgrp_exit;
+ }
id = from_kuid(&init_user_ns, uid);
if (id_from_sid) {
struct owner_sid *osid = (struct owner_sid *)nowner_sid_ptr;
@@ -1145,27 +1331,25 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
osid->SubAuthorities[0] = cpu_to_le32(88);
osid->SubAuthorities[1] = cpu_to_le32(1);
osid->SubAuthorities[2] = cpu_to_le32(id);
+
} else { /* lookup sid with upcall */
rc = id_to_sid(id, SIDOWNER, nowner_sid_ptr);
if (rc) {
cifs_dbg(FYI, "%s: Mapping error %d for owner id %d\n",
__func__, rc, id);
- kfree(nowner_sid_ptr);
- return rc;
+ goto chown_chgrp_exit;
}
}
- cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr);
- kfree(nowner_sid_ptr);
- *aclflag = CIFS_ACL_OWNER;
+ *aclflag |= CIFS_ACL_OWNER;
}
if (gid_valid(gid)) { /* chgrp */
gid_t id;
- group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
- le32_to_cpu(pnntsd->gsidoffset));
ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
- if (!ngroup_sid_ptr)
- return -ENOMEM;
+ if (!ngroup_sid_ptr) {
+ rc = -ENOMEM;
+ goto chown_chgrp_exit;
+ }
id = from_kgid(&init_user_ns, gid);
if (id_from_sid) {
struct owner_sid *gsid = (struct owner_sid *)ngroup_sid_ptr;
@@ -1176,19 +1360,35 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
gsid->SubAuthorities[0] = cpu_to_le32(88);
gsid->SubAuthorities[1] = cpu_to_le32(2);
gsid->SubAuthorities[2] = cpu_to_le32(id);
+
} else { /* lookup sid with upcall */
rc = id_to_sid(id, SIDGROUP, ngroup_sid_ptr);
if (rc) {
cifs_dbg(FYI, "%s: Mapping error %d for group id %d\n",
__func__, rc, id);
- kfree(ngroup_sid_ptr);
- return rc;
+ goto chown_chgrp_exit;
}
}
- cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr);
- kfree(ngroup_sid_ptr);
- *aclflag = CIFS_ACL_GROUP;
+ *aclflag |= CIFS_ACL_GROUP;
+ }
+
+ if (dacloffset) {
+ /* Replace ACEs for old owner with new one */
+ size = replace_sids_and_copy_aces(dacl_ptr, ndacl_ptr,
+ owner_sid_ptr, group_sid_ptr,
+ nowner_sid_ptr, ngroup_sid_ptr);
+ ndacl_ptr->size = cpu_to_le16(size);
}
+
+ sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
+ /* copy the non-dacl portion of secdesc */
+ *pnsecdesclen = copy_sec_desc(pntsd, pnntsd, sidsoffset,
+ nowner_sid_ptr, ngroup_sid_ptr);
+
+chown_chgrp_exit:
+ /* errors could jump here. So make sure we return soon after this */
+ kfree(nowner_sid_ptr);
+ kfree(ngroup_sid_ptr);
}
return rc;
@@ -1384,6 +1584,9 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
int rc = 0;
int aclflag = CIFS_ACL_DACL; /* default flag to set */
__u32 secdesclen = 0;
+ __u32 nsecdesclen = 0;
+ __u32 dacloffset = 0;
+ struct cifs_acl *dacl_ptr = NULL;
struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -1414,31 +1617,52 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
return rc;
}
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
+ mode_from_sid = true;
+ else
+ mode_from_sid = false;
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
+ id_from_sid = true;
+ else
+ id_from_sid = false;
+
+ /* Potentially, five new ACEs can be added to the ACL for U,G,O mapping */
+ nsecdesclen = secdesclen;
+ if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
+ if (mode_from_sid)
+ nsecdesclen += sizeof(struct cifs_ace);
+ else /* cifsacl */
+ nsecdesclen += 5 * sizeof(struct cifs_ace);
+ } else { /* chown */
+ /* When ownership changes, the new owner sid length could be different */
+ nsecdesclen = sizeof(struct cifs_ntsd) + (sizeof(struct cifs_sid) * 2);
+ dacloffset = le32_to_cpu(pntsd->dacloffset);
+ if (dacloffset) {
+ dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
+ if (mode_from_sid)
+ nsecdesclen +=
+ le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct cifs_ace);
+ else /* cifsacl */
+ nsecdesclen += le16_to_cpu(dacl_ptr->size);
+ }
+ }
+
/*
* Add three ACEs for owner, group, everyone getting rid of other ACEs
* as chmod disables ACEs and set the security descriptor. Allocate
* memory for the smb header, set security descriptor request security
* descriptor parameters, and security descriptor itself
*/
- secdesclen = max_t(u32, secdesclen, DEFAULT_SEC_DESC_LEN);
- pnntsd = kmalloc(secdesclen, GFP_KERNEL);
+ nsecdesclen = max_t(u32, nsecdesclen, DEFAULT_SEC_DESC_LEN);
+ pnntsd = kmalloc(nsecdesclen, GFP_KERNEL);
if (!pnntsd) {
kfree(pntsd);
cifs_put_tlink(tlink);
return -ENOMEM;
}
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
- mode_from_sid = true;
- else
- mode_from_sid = false;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
- id_from_sid = true;
- else
- id_from_sid = false;
-
- rc = build_sec_desc(pntsd, pnntsd, secdesclen, pnmode, uid, gid,
+ rc = build_sec_desc(pntsd, pnntsd, secdesclen, &nsecdesclen, pnmode, uid, gid,
mode_from_sid, id_from_sid, &aclflag);
cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
@@ -1448,7 +1672,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
if (!rc) {
/* Set the security descriptor */
- rc = ops->set_acl(pnntsd, secdesclen, inode, path, aclflag);
+ rc = ops->set_acl(pnntsd, nsecdesclen, inode, path, aclflag);
cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
}
cifs_put_tlink(tlink);
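cifs_copy_sid() and cifs_copy_ace() now return the number of bytes copied so set_chmod_dacl() and replace_sids_and_copy_aces() can keep a running size for the new DACL. As a worked check of the constants above: a SID with 5 sub-authorities copies as 1 + 1 + 6 + 5*4 = 28 bytes, and the ACE carrying it adds the 1 + 1 + 2 + 4 byte header for 36 bytes total. The same arithmetic as an illustrative helper (not part of the patch):

	static __u16 example_sid_size(__u8 num_subauth)
	{
		/* revision + num_subauth + 6-byte authority + 4 bytes per sub-authority */
		return 1 + 1 + 6 + 4 * num_subauth;
	}

	static __u16 example_ace_size(__u8 num_subauth)
	{
		/* type + flags + size + access_req, then the embedded SID */
		return 1 + 1 + 2 + 4 + example_sid_size(num_subauth);
	}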
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index ff7fd0862e28..d9e704979d99 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -31,8 +31,8 @@
#define EXEC_BIT 0x1
#define ACL_OWNER_MASK 0700
-#define ACL_GROUP_MASK 0770
-#define ACL_EVERYONE_MASK 0777
+#define ACL_GROUP_MASK 0070
+#define ACL_EVERYONE_MASK 0007
#define UBITSHIFT 6
#define GBITSHIFT 3
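The corrected masks matter because the group and everyone ACEs must only carry their own permission bits; with the old values the owner bits leaked into both. A quick check with mode 0750 (plain C, not kernel code):

	umode_t mode  = 0750;
	umode_t owner = mode & ACL_OWNER_MASK;		/* 0700 */
	umode_t group = mode & ACL_GROUP_MASK;		/* 0050 (old 0770 mask gave 0750) */
	umode_t other = mode & ACL_EVERYONE_MASK;	/* 0000 (old 0777 mask gave 0750) */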
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 51d53e4bdf6b..b8f1ff9a83f3 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -568,15 +568,15 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
return rc;
}
} else {
- /* We use ses->serverName if no domain name available */
- len = strlen(ses->serverName);
+ /* We use ses->ip_addr if no domain name available */
+ len = strlen(ses->ip_addr);
server = kmalloc(2 + (len * 2), GFP_KERNEL);
if (server == NULL) {
rc = -ENOMEM;
return rc;
}
- len = cifs_strtoUTF16((__le16 *)server, ses->serverName, len,
+ len = cifs_strtoUTF16((__le16 *)server, ses->ip_addr, len,
nls_cp);
rc =
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index ab883e84e116..099ad9f3660b 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -290,7 +290,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
free_xid(xid);
- return 0;
+ return rc;
}
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
@@ -305,7 +305,8 @@ static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
return -EOPNOTSUPP;
}
-static int cifs_permission(struct inode *inode, int mask)
+static int cifs_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
struct cifs_sb_info *cifs_sb;
@@ -320,7 +321,7 @@ static int cifs_permission(struct inode *inode, int mask)
on the client (above and beyond ACL on servers) for
servers which do not support setting and viewing mode bits,
so allowing client to check permissions is useful */
- return generic_permission(inode, mask);
+ return generic_permission(&init_user_ns, inode, mask);
}
static struct kmem_cache *cifs_inode_cachep;
@@ -637,8 +638,18 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
if (tcon->handle_timeout)
seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
- /* convert actimeo and display it in seconds */
- seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->actimeo / HZ);
+
+ /*
+ * Display file and directory attribute timeout in seconds.
+ * If the file and directory attribute timeouts are the same then actimeo
+ * was likely specified on mount
+ */
+ if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
+ seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
+ else {
+ seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
+ seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
+ }
if (tcon->ses->chan_max > 1)
seq_printf(s, ",multichannel,max_channels=%zu",
@@ -1525,6 +1536,7 @@ init_cifs(void)
*/
atomic_set(&sesInfoAllocCount, 0);
atomic_set(&tconInfoAllocCount, 0);
+ atomic_set(&tcpSesNextId, 0);
atomic_set(&tcpSesAllocCount, 0);
atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 2307bb0f6147..0d7ef150dbb2 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -62,19 +62,22 @@ extern void cifs_sb_deactive(struct super_block *sb);
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
extern struct inode *cifs_root_iget(struct super_block *);
-extern int cifs_create(struct inode *, struct dentry *, umode_t,
- bool excl);
+extern int cifs_create(struct user_namespace *, struct inode *,
+ struct dentry *, umode_t, bool excl);
extern int cifs_atomic_open(struct inode *, struct dentry *,
struct file *, unsigned, umode_t);
extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
unsigned int);
extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *);
-extern int cifs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
-extern int cifs_mkdir(struct inode *, struct dentry *, umode_t);
+extern int cifs_mknod(struct user_namespace *, struct inode *, struct dentry *,
+ umode_t, dev_t);
+extern int cifs_mkdir(struct user_namespace *, struct inode *, struct dentry *,
+ umode_t);
extern int cifs_rmdir(struct inode *, struct dentry *);
-extern int cifs_rename2(struct inode *, struct dentry *, struct inode *,
- struct dentry *, unsigned int);
+extern int cifs_rename2(struct user_namespace *, struct inode *,
+ struct dentry *, struct inode *, struct dentry *,
+ unsigned int);
extern int cifs_revalidate_file_attr(struct file *filp);
extern int cifs_revalidate_dentry_attr(struct dentry *);
extern int cifs_revalidate_file(struct file *filp);
@@ -82,8 +85,10 @@ extern int cifs_revalidate_dentry(struct dentry *);
extern int cifs_invalidate_mapping(struct inode *inode);
extern int cifs_revalidate_mapping(struct inode *inode);
extern int cifs_zap_mapping(struct inode *inode);
-extern int cifs_getattr(const struct path *, struct kstat *, u32, unsigned int);
-extern int cifs_setattr(struct dentry *, struct iattr *);
+extern int cifs_getattr(struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
+extern int cifs_setattr(struct user_namespace *, struct dentry *,
+ struct iattr *);
extern int cifs_fiemap(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
@@ -132,8 +137,8 @@ extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
/* Functions related to symlinks */
extern const char *cifs_get_link(struct dentry *, struct inode *,
struct delayed_call *);
-extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
- const char *symname);
+extern int cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode,
+ struct dentry *direntry, const char *symname);
#ifdef CONFIG_CIFS_XATTR
extern const struct xattr_handler *cifs_xattr_handlers[];
@@ -160,5 +165,5 @@ extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.30"
+#define CIFS_VERSION "2.31"
#endif /* _CIFSFS_H */
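These prototype changes track the same VFS conversion seen earlier: inode operations reachable through an idmapped mount gain a struct user_namespace * first argument. A hedged sketch of how such methods are still wired into an inode_operations table after the change; the table name is illustrative and only a few members are shown:

	static const struct inode_operations example_cifs_dir_iops = {
		.create  = cifs_create,   /* now takes (mnt_userns, dir, dentry, mode, excl) */
		.mkdir   = cifs_mkdir,
		.mknod   = cifs_mknod,
		.rename  = cifs_rename2,
		.setattr = cifs_setattr,
		.getattr = cifs_getattr,
	};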
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 50fcb65920e8..31fc8695abd6 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -21,6 +21,7 @@
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
@@ -256,7 +257,7 @@ struct smb_version_operations {
/* verify the message */
int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
- int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
+ int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *);
void (*downgrade_oplock)(struct TCP_Server_Info *server,
struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int epoch, bool *purge_cache);
@@ -504,6 +505,8 @@ struct smb_version_operations {
loff_t (*llseek)(struct file *, struct cifs_tcon *, loff_t, int);
/* Check for STATUS_IO_TIMEOUT */
bool (*is_status_io_timeout)(char *buf);
+ /* Check for STATUS_NETWORK_NAME_DELETED */
+ void (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
};
struct smb_version_values {
@@ -577,6 +580,7 @@ inc_rfc1001_len(void *buf, int count)
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
+ __u64 conn_id; /* connection identifier (useful for debugging) */
int srv_count; /* reference counter */
/* 15 character server name + 0x20 16th byte indicating type = srv */
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
@@ -901,7 +905,7 @@ struct cifs_ses {
kuid_t linux_uid; /* overriding owner of files on the mount */
kuid_t cred_uid; /* owner of credentials */
unsigned int capabilities;
- char serverName[SERVER_NAME_LEN_WITH_NULL];
+ char ip_addr[INET6_ADDRSTRLEN + 1]; /* Max ipv6 (or v4) addr string len */
char *user_name; /* must not be null except during init of sess
and after mount option parsing we fill it */
char *domainName;
@@ -1701,14 +1705,17 @@ static inline bool is_retryable_error(int error)
#define CIFS_NO_RSP_BUF 0x040 /* no response buffer required */
/* Type of request operation */
-#define CIFS_ECHO_OP 0x080 /* echo request */
-#define CIFS_OBREAK_OP 0x0100 /* oplock break request */
-#define CIFS_NEG_OP 0x0200 /* negotiate request */
-#define CIFS_OP_MASK 0x0380 /* mask request type */
-
-#define CIFS_HAS_CREDITS 0x0400 /* already has credits */
-#define CIFS_TRANSFORM_REQ 0x0800 /* transform request before sending */
-#define CIFS_NO_SRV_RSP 0x1000 /* there is no server response */
+#define CIFS_ECHO_OP 0x080 /* echo request */
+#define CIFS_OBREAK_OP 0x0100 /* oplock break request */
+#define CIFS_NEG_OP 0x0200 /* negotiate request */
+#define CIFS_CP_CREATE_CLOSE_OP 0x0400 /* compound create+close request */
+/* Lower bitmask values are reserved by others below. */
+#define CIFS_SESS_OP 0x2000 /* session setup request */
+#define CIFS_OP_MASK 0x2780 /* mask request type */
+
+#define CIFS_HAS_CREDITS 0x0400 /* already has credits */
+#define CIFS_TRANSFORM_REQ 0x0800 /* transform request before sending */
+#define CIFS_NO_SRV_RSP 0x1000 /* there is no server response */
/* Security Flags: indicate type of session setup needed */
#define CIFSSEC_MAY_SIGN 0x00001
@@ -1844,6 +1851,7 @@ GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */
*/
GLOBAL_EXTERN atomic_t sesInfoAllocCount;
GLOBAL_EXTERN atomic_t tconInfoAllocCount;
+GLOBAL_EXTERN atomic_t tcpSesNextId;
GLOBAL_EXTERN atomic_t tcpSesAllocCount;
GLOBAL_EXTERN atomic_t tcpSesReconnectCount;
GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 32f7a013402e..75ce6f742b8d 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -232,6 +232,8 @@ extern unsigned int setup_special_user_owner_ACE(struct cifs_ace *pace);
extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
unsigned int to_read);
+extern ssize_t cifs_discard_from_socket(struct TCP_Server_Info *server,
+ size_t to_read);
extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
struct page *page,
unsigned int page_offset,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 0496934feecb..c279527aae92 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1451,9 +1451,9 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
while (remaining > 0) {
int length;
- length = cifs_read_from_socket(server, server->bigbuf,
- min_t(unsigned int, remaining,
- CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
+ length = cifs_discard_from_socket(server,
+ min_t(size_t, remaining,
+ CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
if (length < 0)
return length;
server->total_read += length;
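With cifs_discard_from_socket() (added in connect.c below) the unwanted tail of a response is drained from the socket without being copied into server->bigbuf first. A condensed sketch of the resulting loop, assuming remaining has been computed from the PDU as before:

	while (remaining > 0) {
		ssize_t length;

		length = cifs_discard_from_socket(server,
				min_t(size_t, remaining,
				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}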
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4bb9decbbf27..eec8a2052da2 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -242,7 +242,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->max_read = 0;
cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
- trace_smb3_reconnect(server->CurrentMid, server->hostname);
+ trace_smb3_reconnect(server->CurrentMid, server->conn_id, server->hostname);
/* before reconnecting the tcp session, mark the smb session (uid)
and the tid bad so they are not used until reconnected */
@@ -564,6 +564,23 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
return cifs_readv_from_socket(server, &smb_msg);
}
+ssize_t
+cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
+{
+ struct msghdr smb_msg;
+
+ /*
+ * iov_iter_discard already sets smb_msg.type, count and iov_offset,
+ * and cifs_readv_from_socket sets msg_control and msg_controllen,
+ * so there is little to initialize in struct msghdr
+ */
+ smb_msg.msg_name = NULL;
+ smb_msg.msg_namelen = 0;
+ iov_iter_discard(&smb_msg.msg_iter, READ, to_read);
+
+ return cifs_readv_from_socket(server, &smb_msg);
+}
+
int
cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
unsigned int page_offset, unsigned int to_read)
@@ -724,7 +741,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid);
+ cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
kref_get(&mid_entry->refcount);
mid_entry->mid_state = MID_SHUTDOWN;
list_move(&mid_entry->qhead, &dispose_list);
@@ -735,7 +752,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
/* now walk dispose list and issue callbacks */
list_for_each_safe(tmp, tmp2, &dispose_list) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid);
+ cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
cifs_mid_q_entry_release(mid_entry);
@@ -846,7 +863,7 @@ static void
smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
{
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buffer;
- int scredits = server->credits;
+ int scredits, in_flight;
/*
* SMB1 does not use credits.
@@ -857,12 +874,14 @@ smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
if (shdr->CreditRequest) {
spin_lock(&server->req_lock);
server->credits += le16_to_cpu(shdr->CreditRequest);
+ scredits = server->credits;
+ in_flight = server->in_flight;
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
trace_smb3_add_credits(server->CurrentMid,
- server->hostname, scredits,
- le16_to_cpu(shdr->CreditRequest));
+ server->conn_id, server->hostname, scredits,
+ le16_to_cpu(shdr->CreditRequest), in_flight);
cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
__func__, le16_to_cpu(shdr->CreditRequest),
scredits);
@@ -993,6 +1012,10 @@ next_pdu:
if (mids[i] != NULL) {
mids[i]->resp_buf_size = server->pdu_size;
+ if (bufs[i] && server->ops->is_network_name_deleted)
+ server->ops->is_network_name_deleted(bufs[i],
+ server);
+
if (!mids[i]->multiRsp || mids[i]->multiEnd)
mids[i]->callback(mids[i]);
@@ -1317,6 +1340,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
goto out_err_crypto_release;
}
+ tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
tcp_ses->noblockcnt = ctx->rootfs;
tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
tcp_ses->noautotune = ctx->noautotune;
@@ -1405,6 +1429,11 @@ smbd_connected:
tcp_ses->min_offload = ctx->min_offload;
tcp_ses->tcpStatus = CifsNeedNegotiate;
+ if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
+ tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
+ else
+ tcp_ses->max_credits = ctx->max_credits;
+
tcp_ses->nr_targets = 1;
tcp_ses->ignore_signature = ctx->ignore_signature;
/* thread spawned, put it on the list */
@@ -1838,9 +1867,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
/* new SMB session uses our server ref */
ses->server = server;
if (server->dstaddr.ss_family == AF_INET6)
- sprintf(ses->serverName, "%pI6", &addr6->sin6_addr);
+ sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
else
- sprintf(ses->serverName, "%pI4", &addr->sin_addr);
+ sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);
if (ctx->username) {
ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
@@ -2269,7 +2298,9 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
if (strcmp(old->local_nls->charset, new->local_nls->charset))
return 0;
- if (old->ctx->actimeo != new->ctx->actimeo)
+ if (old->ctx->acregmax != new->ctx->acregmax)
+ return 0;
+ if (old->ctx->acdirmax != new->ctx->acdirmax)
return 0;
return 1;
@@ -2806,11 +2837,6 @@ static int mount_get_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cif
*nserver = server;
- if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
- server->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
- else
- server->max_credits = ctx->max_credits;
-
/* get a reference to a SMB session */
ses = cifs_get_smb_ses(server, ctx);
if (IS_ERR(ses)) {
@@ -2911,7 +2937,7 @@ static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
#ifdef CONFIG_CIFS_DFS_UPCALL
/*
* cifs_build_path_to_root returns full path to root when we do not have an
- * exiting connection (tcon)
+ * existing connection (tcon)
*/
static char *
build_unc_path_to_root(const struct smb3_fs_context *ctx,
@@ -3038,96 +3064,91 @@ static int update_vol_info(const struct dfs_cache_tgt_iterator *tgt_it,
return 0;
}
-static int setup_dfs_tgt_conn(const char *path, const char *full_path,
- const struct dfs_cache_tgt_iterator *tgt_it,
- struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
- unsigned int *xid, struct TCP_Server_Info **server,
- struct cifs_ses **ses, struct cifs_tcon **tcon)
-{
- int rc;
- struct dfs_info3_param ref = {0};
- char *mdata = NULL;
- struct smb3_fs_context fake_ctx = {NULL};
- char *fake_devname = NULL;
-
- cifs_dbg(FYI, "%s: dfs path: %s\n", __func__, path);
-
- rc = dfs_cache_get_tgt_referral(path, tgt_it, &ref);
- if (rc)
- return rc;
-
- mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- full_path + 1, &ref,
- &fake_devname);
- free_dfs_info_param(&ref);
-
- if (IS_ERR(mdata)) {
- rc = PTR_ERR(mdata);
- mdata = NULL;
- } else
- rc = cifs_setup_volume_info(&fake_ctx, mdata, fake_devname);
-
- kfree(mdata);
- kfree(fake_devname);
-
- if (!rc) {
- /*
- * We use a 'fake_ctx' here because we need pass it down to the
- * mount_{get,put} functions to test connection against new DFS
- * targets.
- */
- mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
- rc = mount_get_conns(&fake_ctx, cifs_sb, xid, server, ses,
- tcon);
- if (!rc || (*server && *ses)) {
- /*
- * We were able to connect to new target server.
- * Update current context with new target server.
- */
- rc = update_vol_info(tgt_it, &fake_ctx, ctx);
- }
- }
- smb3_cleanup_fs_context_contents(&fake_ctx);
- return rc;
-}
-
static int do_dfs_failover(const char *path, const char *full_path, struct cifs_sb_info *cifs_sb,
struct smb3_fs_context *ctx, struct cifs_ses *root_ses,
unsigned int *xid, struct TCP_Server_Info **server,
struct cifs_ses **ses, struct cifs_tcon **tcon)
{
int rc;
- struct dfs_cache_tgt_list tgt_list;
+ struct dfs_cache_tgt_list tgt_list = {0};
struct dfs_cache_tgt_iterator *tgt_it = NULL;
+ struct smb3_fs_context tmp_ctx = {NULL};
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
return -EOPNOTSUPP;
+ cifs_dbg(FYI, "%s: path=%s full_path=%s\n", __func__, path, full_path);
+
rc = dfs_cache_noreq_find(path, NULL, &tgt_list);
if (rc)
return rc;
+ /*
+ * We use a 'tmp_ctx' here because we need to pass it down to the mount_{get,put} functions to
+ * test connection against new DFS targets.
+ */
+ rc = smb3_fs_context_dup(&tmp_ctx, ctx);
+ if (rc)
+ goto out;
for (;;) {
+ struct dfs_info3_param ref = {0};
+ char *fake_devname = NULL, *mdata = NULL;
+
/* Get next DFS target server - if any */
rc = get_next_dfs_tgt(path, &tgt_list, &tgt_it);
if (rc)
break;
- /* Connect to next DFS target */
- rc = setup_dfs_tgt_conn(path, full_path, tgt_it, cifs_sb, ctx, xid, server, ses,
- tcon);
- if (!rc || (*server && *ses))
+
+ rc = dfs_cache_get_tgt_referral(path, tgt_it, &ref);
+ if (rc)
break;
+
+ cifs_dbg(FYI, "%s: old ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
+ tmp_ctx.prepath);
+
+ mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, &ref,
+ &fake_devname);
+ free_dfs_info_param(&ref);
+
+ if (IS_ERR(mdata)) {
+ rc = PTR_ERR(mdata);
+ mdata = NULL;
+ } else
+ rc = cifs_setup_volume_info(&tmp_ctx, mdata, fake_devname);
+
+ kfree(mdata);
+ kfree(fake_devname);
+
+ if (rc)
+ break;
+
+ cifs_dbg(FYI, "%s: new ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
+ tmp_ctx.prepath);
+
+ mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
+ rc = mount_get_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
+ if (!rc || (*server && *ses)) {
+ /*
+ * We were able to connect to new target server. Update current context with
+ * new target server.
+ */
+ rc = update_vol_info(tgt_it, &tmp_ctx, ctx);
+ break;
+ }
}
if (!rc) {
+ cifs_dbg(FYI, "%s: final ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
+ tmp_ctx.prepath);
/*
- * Update DFS target hint in DFS referral cache with the target
- * server we successfully reconnected to.
+ * Update DFS target hint in DFS referral cache with the target server we
+ * successfully reconnected to.
*/
- rc = dfs_cache_update_tgthint(*xid, root_ses ? root_ses : *ses,
- cifs_sb->local_nls,
- cifs_remap(cifs_sb), path,
- tgt_it);
+ rc = dfs_cache_update_tgthint(*xid, root_ses ? root_ses : *ses, cifs_sb->local_nls,
+ cifs_remap(cifs_sb), path, tgt_it);
}
+
+out:
+ smb3_cleanup_fs_context_contents(&tmp_ctx);
dfs_cache_free_tgts(&tgt_list);
return rc;
}
@@ -3285,77 +3306,77 @@ static void put_root_ses(struct cifs_ses *ses)
cifs_put_smb_ses(ses);
}
-/* Check if a path component is remote and then update @dfs_path accordingly */
-static int check_dfs_prepath(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
- const unsigned int xid, struct TCP_Server_Info *server,
- struct cifs_tcon *tcon, char **dfs_path)
+/* Set up next dfs prefix path in @dfs_path */
+static int next_dfs_prepath(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
+ const unsigned int xid, struct TCP_Server_Info *server,
+ struct cifs_tcon *tcon, char **dfs_path)
{
- char *path, *s;
- char sep = CIFS_DIR_SEP(cifs_sb), tmp;
- char *npath;
- int rc = 0;
- int added_treename = tcon->Flags & SMB_SHARE_IS_IN_DFS;
- int skip = added_treename;
+ char *path, *npath;
+ int added_treename = is_tcon_dfs(tcon);
+ int rc;
path = cifs_build_path_to_root(ctx, cifs_sb, tcon, added_treename);
if (!path)
return -ENOMEM;
- /*
- * Walk through the path components in @path and check if they're accessible. In case any of
- * the components is -EREMOTE, then update @dfs_path with the next DFS referral request path
- * (NOT including the remaining components).
- */
- s = path;
- do {
- /* skip separators */
- while (*s && *s == sep)
- s++;
- if (!*s)
- break;
- /* next separator */
- while (*s && *s != sep)
- s++;
- /*
- * if the treename is added, we then have to skip the first
- * part within the separators
- */
- if (skip) {
- skip = 0;
- continue;
+ rc = is_path_remote(cifs_sb, ctx, xid, server, tcon);
+ if (rc == -EREMOTE) {
+ struct smb3_fs_context v = {NULL};
+ /* if @path contains a tree name, skip it in the prefix path */
+ if (added_treename) {
+ rc = smb3_parse_devname(path, &v);
+ if (rc)
+ goto out;
+ npath = build_unc_path_to_root(&v, cifs_sb, true);
+ smb3_cleanup_fs_context_contents(&v);
+ } else {
+ v.UNC = ctx->UNC;
+ v.prepath = path + 1;
+ npath = build_unc_path_to_root(&v, cifs_sb, true);
}
- tmp = *s;
- *s = 0;
- rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, path);
- if (rc && rc == -EREMOTE) {
- struct smb3_fs_context v = {NULL};
- /* if @path contains a tree name, skip it in the prefix path */
- if (added_treename) {
- rc = smb3_parse_devname(path, &v);
- if (rc)
- break;
- rc = -EREMOTE;
- npath = build_unc_path_to_root(&v, cifs_sb, true);
- smb3_cleanup_fs_context_contents(&v);
- } else {
- v.UNC = ctx->UNC;
- v.prepath = path + 1;
- npath = build_unc_path_to_root(&v, cifs_sb, true);
- }
- if (IS_ERR(npath)) {
- rc = PTR_ERR(npath);
- break;
- }
- kfree(*dfs_path);
- *dfs_path = npath;
+
+ if (IS_ERR(npath)) {
+ rc = PTR_ERR(npath);
+ goto out;
}
- *s = tmp;
- } while (rc == 0);
+ kfree(*dfs_path);
+ *dfs_path = npath;
+ rc = -EREMOTE;
+ }
+
+out:
kfree(path);
return rc;
}
+/* Check if resolved targets can handle any DFS referrals */
+static int is_referral_server(const char *ref_path, struct cifs_tcon *tcon, bool *ref_server)
+{
+ int rc;
+ struct dfs_info3_param ref = {0};
+
+ if (is_tcon_dfs(tcon)) {
+ *ref_server = true;
+ } else {
+ cifs_dbg(FYI, "%s: ref_path=%s\n", __func__, ref_path);
+
+ rc = dfs_cache_noreq_find(ref_path, &ref, NULL);
+ if (rc) {
+ cifs_dbg(VFS, "%s: dfs_cache_noreq_find: failed (rc=%d)\n", __func__, rc);
+ return rc;
+ }
+ cifs_dbg(FYI, "%s: ref.flags=0x%x\n", __func__, ref.flags);
+ /*
+ * Check if all targets are capable of handling DFS referrals as per
+ * MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL.
+ */
+ *ref_server = !!(ref.flags & DFSREF_REFERRAL_SERVER);
+ free_dfs_info_param(&ref);
+ }
+ return 0;
+}
+
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
int rc = 0;
@@ -3367,18 +3388,19 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
char *ref_path = NULL, *full_path = NULL;
char *oldmnt = NULL;
char *mntdata = NULL;
+ bool ref_server = false;
rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
/*
- * Unconditionally try to get an DFS referral (even cached) to determine whether it is an
- * DFS mount.
+ * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
+ * try to get a DFS referral (even cached) to determine whether it is a DFS mount.
*
* Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
* to respond with PATH_NOT_COVERED to requests that include the prefix.
*/
- if (dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), ctx->UNC + 1, NULL,
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
+ dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), ctx->UNC + 1, NULL,
NULL)) {
- /* No DFS referral was returned. Looks like a regular share. */
if (rc)
goto error;
/* Check if it is fully accessible and then mount it */
@@ -3432,13 +3454,18 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
break;
if (!tcon)
continue;
+
/* Make sure that requests go through new root servers */
- if (is_tcon_dfs(tcon)) {
+ rc = is_referral_server(ref_path + 1, tcon, &ref_server);
+ if (rc)
+ break;
+ if (ref_server) {
put_root_ses(root_ses);
set_root_ses(cifs_sb, ses, &root_ses);
}
- /* Check for remaining path components and then continue chasing them (-EREMOTE) */
- rc = check_dfs_prepath(cifs_sb, ctx, xid, server, tcon, &ref_path);
+
+ /* Get next dfs path and then continue chasing them if -EREMOTE */
+ rc = next_dfs_prepath(cifs_sb, ctx, xid, server, tcon, &ref_path);
/* Prevent recursion on broken link referrals */
if (rc == -EREMOTE && ++count > MAX_NESTED_LINKS)
rc = -ELOOP;
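
The connect.c changes above also move the max_credits clamp from mount_get_conns() into cifs_get_tcp_session(), so the limit is fixed when the TCP session is created. A minimal, self-contained sketch of that clamping follows; it is illustrative only, and the default used for out-of-range requests is an assumed stand-in for SMB2_MAX_CREDITS_AVAILABLE.

/*
 * Illustrative sketch (not the kernel code) of the max_credits clamp:
 * out-of-range mount values fall back to the server default.
 */
#include <stdio.h>

#define EXAMPLE_DEFAULT_MAX_CREDITS 32000	/* assumed stand-in for SMB2_MAX_CREDITS_AVAILABLE */

static unsigned int clamp_max_credits(unsigned int requested)
{
	/* anything below 20 or above 60000 means "use the default" */
	if (requested < 20 || requested > 60000)
		return EXAMPLE_DEFAULT_MAX_CREDITS;
	return requested;
}

int main(void)
{
	printf("%u\n", clamp_max_credits(10));	/* -> default */
	printf("%u\n", clamp_max_credits(512));	/* -> 512 */
	printf("%u\n", clamp_max_credits(70000));	/* -> default */
	return 0;
}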
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 4950ab0486ae..098b4bc8da59 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -37,11 +37,12 @@ struct cache_dfs_tgt {
struct cache_entry {
struct hlist_node hlist;
const char *path;
- int ttl;
- int srvtype;
- int flags;
+ int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
+ int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
+ int srvtype; /* DFS_REREFERRAL_V3.ServerType */
+ int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
struct timespec64 etime;
- int path_consumed;
+ int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
int numtgts;
struct list_head tlist;
struct cache_dfs_tgt *tgthint;
@@ -166,14 +167,11 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
continue;
seq_printf(m,
- "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
- "interlink=%s,path_consumed=%d,expired=%s\n",
- ce->path,
- ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
- ce->ttl, ce->etime.tv_nsec,
- IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
- ce->path_consumed,
- cache_entry_expired(ce) ? "yes" : "no");
+ "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
+ ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
+ ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
+ IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
+ ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
list_for_each_entry(t, &ce->tlist, list) {
seq_printf(m, " %s%s\n",
@@ -236,11 +234,12 @@ static inline void dump_tgts(const struct cache_entry *ce)
static inline void dump_ce(const struct cache_entry *ce)
{
- cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
+ cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
ce->path,
ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
ce->etime.tv_nsec,
- IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
+ ce->hdr_flags, ce->ref_flags,
+ IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
ce->path_consumed,
cache_entry_expired(ce) ? "yes" : "no");
dump_tgts(ce);
@@ -381,7 +380,8 @@ static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
ce->ttl = refs[0].ttl;
ce->etime = get_expire_time(ce->ttl);
ce->srvtype = refs[0].server_type;
- ce->flags = refs[0].ref_flag;
+ ce->hdr_flags = refs[0].flags;
+ ce->ref_flags = refs[0].ref_flag;
ce->path_consumed = refs[0].path_consumed;
for (i = 0; i < numrefs; i++) {
@@ -799,7 +799,8 @@ static int setup_referral(const char *path, struct cache_entry *ce,
ref->path_consumed = ce->path_consumed;
ref->ttl = ce->ttl;
ref->server_type = ce->srvtype;
- ref->ref_flag = ce->flags;
+ ref->ref_flag = ce->ref_flags;
+ ref->flags = ce->hdr_flags;
return 0;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 97ac363b5df1..a3fb81e0ba17 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -567,8 +567,8 @@ out_free_xid:
return rc;
}
-int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
- bool excl)
+int cifs_create(struct user_namespace *mnt_userns, struct inode *inode,
+ struct dentry *direntry, umode_t mode, bool excl)
{
int rc;
unsigned int xid = get_xid();
@@ -611,8 +611,8 @@ out_free_xid:
return rc;
}
-int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
- dev_t device_number)
+int cifs_mknod(struct user_namespace *mnt_userns, struct inode *inode,
+ struct dentry *direntry, umode_t mode, dev_t device_number)
{
int rc = -EPERM;
unsigned int xid;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 6d001905c8e5..26de4329d161 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -580,7 +580,7 @@ int cifs_open(struct inode *inode, struct file *file)
} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
if (tcon->ses->serverNOS)
cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
- tcon->ses->serverName,
+ tcon->ses->ip_addr,
tcon->ses->serverNOS);
tcon->broken_posix_open = true;
} else if ((rc != -EIO) && (rc != -EREMOTE) &&
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 12a5da0230b5..78889024a7ed 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -140,6 +140,8 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_u32("rsize", Opt_rsize),
fsparam_u32("wsize", Opt_wsize),
fsparam_u32("actimeo", Opt_actimeo),
+ fsparam_u32("acdirmax", Opt_acdirmax),
+ fsparam_u32("acregmax", Opt_acregmax),
fsparam_u32("echo_interval", Opt_echo_interval),
fsparam_u32("max_credits", Opt_max_credits),
fsparam_u32("handletimeout", Opt_handletimeout),
@@ -397,7 +399,7 @@ cifs_parse_smb_version(char *value, struct smb3_fs_context *ctx, bool is_smb3)
ctx->vals = &smb3any_values;
break;
case Smb_default:
- ctx->ops = &smb30_operations; /* currently identical with 3.0 */
+ ctx->ops = &smb30_operations;
ctx->vals = &smbdefault_values;
break;
default:
@@ -542,20 +544,37 @@ static int smb3_fs_context_parse_monolithic(struct fs_context *fc,
/* BB Need to add support for sep= here TBD */
while ((key = strsep(&options, ",")) != NULL) {
- if (*key) {
- size_t v_len = 0;
- char *value = strchr(key, '=');
-
- if (value) {
- if (value == key)
- continue;
- *value++ = 0;
- v_len = strlen(value);
- }
- ret = vfs_parse_fs_string(fc, key, value, v_len);
- if (ret < 0)
- break;
+ size_t len;
+ char *value;
+
+ if (*key == 0)
+ break;
+
+ /* Check if the following character is the delimiter. If yes,
+ * we have encountered a double delimiter; reset the NULL
+ * character to the delimiter
+ */
+ while (options && options[0] == ',') {
+ len = strlen(key);
+ strcpy(key + len, options);
+ options = strchr(options, ',');
+ if (options)
+ *options++ = 0;
+ }
+
+ len = 0;
+ value = strchr(key, '=');
+ if (value) {
+ if (value == key)
+ continue;
+ *value++ = 0;
+ len = strlen(value);
}
+
+ ret = vfs_parse_fs_string(fc, key, value, len);
+ if (ret < 0)
+ break;
}
return ret;
@@ -929,12 +948,31 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->wsize = result.uint_32;
ctx->got_wsize = true;
break;
+ case Opt_acregmax:
+ ctx->acregmax = HZ * result.uint_32;
+ if (ctx->acregmax > CIFS_MAX_ACTIMEO) {
+ cifs_dbg(VFS, "acregmax too large\n");
+ goto cifs_parse_mount_err;
+ }
+ break;
+ case Opt_acdirmax:
+ ctx->acdirmax = HZ * result.uint_32;
+ if (ctx->acdirmax > CIFS_MAX_ACTIMEO) {
+ cifs_dbg(VFS, "acdirmax too large\n");
+ goto cifs_parse_mount_err;
+ }
+ break;
case Opt_actimeo:
- ctx->actimeo = HZ * result.uint_32;
- if (ctx->actimeo > CIFS_MAX_ACTIMEO) {
- cifs_dbg(VFS, "attribute cache timeout too large\n");
+ if (HZ * result.uint_32 > CIFS_MAX_ACTIMEO) {
+ cifs_dbg(VFS, "timeout too large\n");
goto cifs_parse_mount_err;
}
+ if ((ctx->acdirmax != CIFS_DEF_ACTIMEO) ||
+ (ctx->acregmax != CIFS_DEF_ACTIMEO)) {
+ cifs_dbg(VFS, "actimeo ignored since acregmax or acdirmax specified\n");
+ break;
+ }
+ ctx->acdirmax = ctx->acregmax = HZ * result.uint_32;
break;
case Opt_echo_interval:
ctx->echo_interval = result.uint_32;
@@ -1158,9 +1196,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
pr_warn_once("Witness protocol support is experimental\n");
break;
case Opt_rootfs:
-#ifdef CONFIG_CIFS_ROOT
- ctx->rootfs = true;
+#ifndef CONFIG_CIFS_ROOT
+ cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n");
+ goto cifs_parse_mount_err;
#endif
+ ctx->rootfs = true;
break;
case Opt_posixpaths:
if (result.negated)
@@ -1361,7 +1401,8 @@ int smb3_init_fs_context(struct fs_context *fc)
/* default is to use strict cifs caching semantics */
ctx->strict_io = true;
- ctx->actimeo = CIFS_DEF_ACTIMEO;
+ ctx->acregmax = CIFS_DEF_ACTIMEO;
+ ctx->acdirmax = CIFS_DEF_ACTIMEO;
/* Most clients set timeout to 0, allows server to use its default */
ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
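
The fs_context.c changes above replace the single actimeo timeout with separate acregmax/acdirmax values, all stored in jiffies, with actimeo only applying when neither per-type option was given. The following self-contained sketch illustrates that interaction; HZ, the default, and the upper cap are assumed example values rather than the kernel's constants.

/*
 * Standalone sketch of the option handling added by this patch:
 * acregmax/acdirmax set per-type attribute cache timeouts, while the
 * older actimeo sets both only if neither was specified.
 */
#include <stdio.h>

#define HZ		100UL		/* assumed ticks per second */
#define DEF_ACTIMEO	(1 * HZ)	/* assumed 1s default */
#define MAX_ACTIMEO	(1UL << 30)	/* assumed upper cap, in jiffies */

struct ctx {
	unsigned long acregmax;	/* file attribute cache timeout, jiffies */
	unsigned long acdirmax;	/* directory attribute cache timeout, jiffies */
};

static int set_actimeo(struct ctx *ctx, unsigned int seconds)
{
	if (HZ * seconds > MAX_ACTIMEO)
		return -1;		/* "timeout too large" */
	if (ctx->acregmax != DEF_ACTIMEO || ctx->acdirmax != DEF_ACTIMEO)
		return 0;		/* actimeo ignored, per-type options win */
	ctx->acregmax = ctx->acdirmax = HZ * seconds;
	return 0;
}

int main(void)
{
	struct ctx ctx = { .acregmax = DEF_ACTIMEO, .acdirmax = DEF_ACTIMEO };

	set_actimeo(&ctx, 5);
	printf("acregmax=%lu acdirmax=%lu\n", ctx.acregmax, ctx.acdirmax);
	return 0;
}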
diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
index 1c44a460e2c0..87dd1f7168f2 100644
--- a/fs/cifs/fs_context.h
+++ b/fs/cifs/fs_context.h
@@ -118,6 +118,8 @@ enum cifs_param {
Opt_rsize,
Opt_wsize,
Opt_actimeo,
+ Opt_acdirmax,
+ Opt_acregmax,
Opt_echo_interval,
Opt_max_credits,
Opt_snapshot,
@@ -232,7 +234,9 @@ struct smb3_fs_context {
unsigned int wsize;
unsigned int min_offload;
bool sockopt_tcp_nodelay:1;
- unsigned long actimeo; /* attribute cache timeout (jiffies) */
+ /* attribute cache timeout for files and directories in jiffies */
+ unsigned long acregmax;
+ unsigned long acdirmax;
struct smb_version_operations *ops;
struct smb_version_values *vals;
char *prepath;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index a83b3a8ffaac..f2df4422e54a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1857,7 +1857,8 @@ posix_mkdir_get_info:
goto posix_mkdir_out;
}
-int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
+int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode,
+ struct dentry *direntry, umode_t mode)
{
int rc = 0;
unsigned int xid;
@@ -2067,9 +2068,9 @@ do_rename_exit:
}
int
-cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
- struct inode *target_dir, struct dentry *target_dentry,
- unsigned int flags)
+cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
+ struct dentry *source_dentry, struct inode *target_dir,
+ struct dentry *target_dentry, unsigned int flags)
{
char *from_name = NULL;
char *to_name = NULL;
@@ -2198,12 +2199,23 @@ cifs_inode_needs_reval(struct inode *inode)
if (!lookupCacheEnabled)
return true;
- if (!cifs_sb->ctx->actimeo)
- return true;
-
- if (!time_in_range(jiffies, cifs_i->time,
- cifs_i->time + cifs_sb->ctx->actimeo))
- return true;
+ /*
+ * depending on inode type, check if attribute caching disabled for
+ * files or directories
+ */
+ if (S_ISDIR(inode->i_mode)) {
+ if (!cifs_sb->ctx->acdirmax)
+ return true;
+ if (!time_in_range(jiffies, cifs_i->time,
+ cifs_i->time + cifs_sb->ctx->acdirmax))
+ return true;
+ } else { /* file */
+ if (!cifs_sb->ctx->acregmax)
+ return true;
+ if (!time_in_range(jiffies, cifs_i->time,
+ cifs_i->time + cifs_sb->ctx->acregmax))
+ return true;
+ }
/* hardlinked files w/ noserverino get "special" treatment */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
@@ -2370,8 +2382,8 @@ int cifs_revalidate_dentry(struct dentry *dentry)
return cifs_revalidate_mapping(inode);
}
-int cifs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int cifs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
@@ -2383,7 +2395,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
* We need to be sure that all dirty pages are written and the server
* has actual ctime, mtime and file length.
*/
- if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE)) &&
+ if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE | STATX_BLOCKS)) &&
!CIFS_CACHE_READ(CIFS_I(inode)) &&
inode->i_mapping && inode->i_mapping->nrpages != 0) {
rc = filemap_fdatawait(inode->i_mapping);
@@ -2408,7 +2420,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
return rc;
}
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
stat->blksize = cifs_sb->ctx->bsize;
stat->ino = CIFS_I(inode)->uniqueid;
@@ -2573,6 +2585,14 @@ set_size_out:
if (rc == 0) {
cifsInode->server_eof = attrs->ia_size;
cifs_setsize(inode, attrs->ia_size);
+ /*
+ * i_blocks is not related to (i_size / i_blksize); instead, a
+ * 512-byte (2**9) unit is used for calculating the number of blocks.
+ * Until we can query the server for the actual allocation size, this
+ * is the best estimate we have for the blocks allocated to a file.
+ * The number of blocks must be rounded up so a size of 1 is not 0 blocks.
+ */
+ inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9;
/*
* The man page of truncate says if the size changed,
@@ -2610,7 +2630,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
attrs->ia_valid |= ATTR_FORCE;
- rc = setattr_prepare(direntry, attrs);
+ rc = setattr_prepare(&init_user_ns, direntry, attrs);
if (rc < 0)
goto out;
@@ -2715,7 +2735,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
attrs->ia_size != i_size_read(inode))
truncate_setsize(inode, attrs->ia_size);
- setattr_copy(inode, attrs);
+ setattr_copy(&init_user_ns, inode, attrs);
mark_inode_dirty(inode);
/* force revalidate when any of these times are set since some
@@ -2757,7 +2777,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
attrs->ia_valid |= ATTR_FORCE;
- rc = setattr_prepare(direntry, attrs);
+ rc = setattr_prepare(&init_user_ns, direntry, attrs);
if (rc < 0) {
free_xid(xid);
return rc;
@@ -2913,7 +2933,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
attrs->ia_size != i_size_read(inode))
truncate_setsize(inode, attrs->ia_size);
- setattr_copy(inode, attrs);
+ setattr_copy(&init_user_ns, inode, attrs);
mark_inode_dirty(inode);
cifs_setattr_exit:
@@ -2923,7 +2943,8 @@ cifs_setattr_exit:
}
int
-cifs_setattr(struct dentry *direntry, struct iattr *attrs)
+cifs_setattr(struct user_namespace *mnt_userns, struct dentry *direntry,
+ struct iattr *attrs)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
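
The setattr size path above now estimates i_blocks from the new size because the server's real allocation size is not queried at this point. A minimal sketch of that rounding, assuming plain 512-byte units:

/*
 * Sketch of the i_blocks estimate: blocks are derived from the size in
 * 512-byte units, rounded up so a 1-byte file is not reported as 0 blocks.
 */
#include <stdio.h>

static unsigned long long estimate_blocks(unsigned long long size)
{
	return (size + 512 - 1) >> 9;	/* round up to 512-byte blocks */
}

int main(void)
{
	printf("%llu\n", estimate_blocks(0));		/* 0 */
	printf("%llu\n", estimate_blocks(1));		/* 1 */
	printf("%llu\n", estimate_blocks(4096));	/* 8 */
	return 0;
}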
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 94dab4309fbb..7c5878a645d9 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -661,7 +661,8 @@ cifs_get_link(struct dentry *direntry, struct inode *inode,
}
int
-cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
+cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode,
+ struct dentry *direntry, const char *symname)
{
int rc = -EOPNOTSUPP;
unsigned int xid;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 213465718fa8..63d517b9f2ff 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -218,7 +218,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
/* UNC and paths */
/* XXX: Use ses->server->hostname? */
- sprintf(unc, unc_fmt, ses->serverName);
+ sprintf(unc, unc_fmt, ses->ip_addr);
ctx.UNC = unc;
ctx.prepath = "";
@@ -230,6 +230,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
ctx.noautotune = ses->server->noautotune;
ctx.sockopt_tcp_nodelay = ses->server->tcp_nodelay;
ctx.echo_interval = ses->server->echo_interval / HZ;
+ ctx.max_credits = ses->server->max_credits;
/*
* This will be used for encoding/decoding user/domain/pw
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 1f900b81c34a..a718dc77e604 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -358,6 +358,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
if (cfile)
goto after_close;
/* Close */
+ flags |= CIFS_CP_CREATE_CLOSE_OP;
rqst[num_rqst].rq_iov = &vars->close_iov[0];
rqst[num_rqst].rq_nvec = 1;
rc = SMB2_close_init(tcon, server,
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 60d4bd1eae2b..b50164e2c88d 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -767,7 +767,7 @@ smb2_cancelled_close_fid(struct work_struct *work)
int rc;
if (cancelled->mid)
- cifs_tcon_dbg(VFS, "Close unmatched open for MID:%llx\n",
+ cifs_tcon_dbg(VFS, "Close unmatched open for MID:%llu\n",
cancelled->mid);
else
cifs_tcon_dbg(VFS, "Close interrupted close\n");
@@ -844,14 +844,14 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
}
int
-smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
- struct smb2_sync_hdr *sync_hdr = (struct smb2_sync_hdr *)buffer;
- struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+ struct smb2_sync_hdr *sync_hdr = mid->resp_buf;
+ struct smb2_create_rsp *rsp = mid->resp_buf;
struct cifs_tcon *tcon;
int rc;
- if (sync_hdr->Command != SMB2_CREATE ||
+ if ((mid->optype & CIFS_CP_CREATE_CLOSE_OP) || sync_hdr->Command != SMB2_CREATE ||
sync_hdr->Status != STATUS_SUCCESS)
return 0;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index f19274857292..9bae7e8deb09 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -63,17 +63,19 @@ smb2_add_credits(struct TCP_Server_Info *server,
const struct cifs_credits *credits, const int optype)
{
int *val, rc = -1;
+ int scredits, in_flight;
unsigned int add = credits->value;
unsigned int instance = credits->instance;
bool reconnect_detected = false;
+ bool reconnect_with_invalid_credits = false;
spin_lock(&server->req_lock);
val = server->ops->get_credits_field(server, optype);
/* eg found case where write overlapping reconnect messed up credits */
if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
- trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
- server->hostname, *val, add);
+ reconnect_with_invalid_credits = true;
+
if ((instance == 0) || (instance == server->reconnect_instance))
*val += add;
else
@@ -84,7 +86,9 @@ smb2_add_credits(struct TCP_Server_Info *server,
pr_warn_once("server overflowed SMB3 credits\n");
}
server->in_flight--;
- if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
+ if (server->in_flight == 0 &&
+ ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
+ ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
rc = change_conf(server);
/*
* Sometimes server returns 0 credits on oplock break ack - we need to
@@ -97,14 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server,
server->oplock_credits++;
}
}
+ scredits = *val;
+ in_flight = server->in_flight;
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
if (reconnect_detected) {
+ trace_smb3_reconnect_detected(server->CurrentMid,
+ server->conn_id, server->hostname, scredits, add, in_flight);
+
cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
add, instance);
}
+ if (reconnect_with_invalid_credits) {
+ trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
+ server->conn_id, server->hostname, scredits, add, in_flight);
+ cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
+ optype, scredits, add);
+ }
+
if (server->tcpStatus == CifsNeedReconnect
|| server->tcpStatus == CifsExiting)
return;
@@ -123,23 +139,30 @@ smb2_add_credits(struct TCP_Server_Info *server,
cifs_dbg(FYI, "disabling oplocks\n");
break;
default:
- trace_smb3_add_credits(server->CurrentMid,
- server->hostname, rc, add);
- cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, rc);
+ /* change_conf rebalanced credits for different types */
+ break;
}
+
+ trace_smb3_add_credits(server->CurrentMid,
+ server->conn_id, server->hostname, scredits, add, in_flight);
+ cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
}
static void
smb2_set_credits(struct TCP_Server_Info *server, const int val)
{
+ int scredits, in_flight;
+
spin_lock(&server->req_lock);
server->credits = val;
if (val == 1)
server->reconnect_instance++;
+ scredits = server->credits;
+ in_flight = server->in_flight;
spin_unlock(&server->req_lock);
trace_smb3_set_credits(server->CurrentMid,
- server->hostname, val, val);
+ server->conn_id, server->hostname, scredits, val, in_flight);
cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);
/* don't log while holding the lock */
@@ -171,7 +194,7 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
unsigned int *num, struct cifs_credits *credits)
{
int rc = 0;
- unsigned int scredits;
+ unsigned int scredits, in_flight;
spin_lock(&server->req_lock);
while (1) {
@@ -208,17 +231,18 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
credits->instance = server->reconnect_instance;
server->credits -= credits->value;
- scredits = server->credits;
server->in_flight++;
if (server->in_flight > server->max_in_flight)
server->max_in_flight = server->in_flight;
break;
}
}
+ scredits = server->credits;
+ in_flight = server->in_flight;
spin_unlock(&server->req_lock);
trace_smb3_add_credits(server->CurrentMid,
- server->hostname, scredits, -(credits->value));
+ server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
__func__, credits->value, scredits);
@@ -231,14 +255,14 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
const unsigned int payload_size)
{
int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
- int scredits;
+ int scredits, in_flight;
if (!credits->value || credits->value == new_val)
return 0;
if (credits->value < new_val) {
trace_smb3_too_many_credits(server->CurrentMid,
- server->hostname, 0, credits->value - new_val);
+ server->conn_id, server->hostname, 0, credits->value - new_val, 0);
cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
credits->value, new_val);
@@ -248,9 +272,13 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
spin_lock(&server->req_lock);
if (server->reconnect_instance != credits->instance) {
+ scredits = server->credits;
+ in_flight = server->in_flight;
spin_unlock(&server->req_lock);
+
trace_smb3_reconnect_detected(server->CurrentMid,
- server->hostname, 0, 0);
+ server->conn_id, server->hostname, scredits,
+ credits->value - new_val, in_flight);
cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
credits->value - new_val);
return -EAGAIN;
@@ -258,15 +286,18 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
server->credits += credits->value - new_val;
scredits = server->credits;
+ in_flight = server->in_flight;
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
- credits->value = new_val;
trace_smb3_add_credits(server->CurrentMid,
- server->hostname, scredits, credits->value - new_val);
+ server->conn_id, server->hostname, scredits,
+ credits->value - new_val, in_flight);
cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
__func__, credits->value - new_val, scredits);
+ credits->value = new_val;
+
return 0;
}
@@ -1164,7 +1195,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
struct TCP_Server_Info *server = cifs_pick_channel(ses);
__le16 *utf16_path = NULL;
int ea_name_len = strlen(ea_name);
- int flags = 0;
+ int flags = CIFS_CP_CREATE_CLOSE_OP;
int len;
struct smb_rqst rqst[3];
int resp_buftype[3];
@@ -1542,7 +1573,7 @@ smb2_ioctl_query_info(const unsigned int xid,
struct smb_query_info qi;
struct smb_query_info __user *pqi;
int rc = 0;
- int flags = 0;
+ int flags = CIFS_CP_CREATE_CLOSE_OP;
struct smb2_query_info_rsp *qi_rsp = NULL;
struct smb2_ioctl_rsp *io_rsp = NULL;
void *buffer = NULL;
@@ -2369,7 +2400,7 @@ static bool
smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
{
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
- int scredits;
+ int scredits, in_flight;
if (shdr->Status != STATUS_PENDING)
return false;
@@ -2378,11 +2409,13 @@ smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
spin_lock(&server->req_lock);
server->credits += le16_to_cpu(shdr->CreditRequest);
scredits = server->credits;
+ in_flight = server->in_flight;
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
trace_smb3_add_credits(server->CurrentMid,
- server->hostname, scredits, le16_to_cpu(shdr->CreditRequest));
+ server->conn_id, server->hostname, scredits,
+ le16_to_cpu(shdr->CreditRequest), in_flight);
cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
__func__, le16_to_cpu(shdr->CreditRequest), scredits);
}
@@ -2418,6 +2451,34 @@ smb2_is_status_io_timeout(char *buf)
return false;
}
+static void
+smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
+{
+ struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
+ struct list_head *tmp, *tmp1;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+
+ if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
+ return;
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each(tmp, &server->smb_ses_list) {
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+ list_for_each(tmp1, &ses->tcon_list) {
+ tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+ if (tcon->tid == shdr->TreeId) {
+ tcon->need_reconnect = true;
+ spin_unlock(&cifs_tcp_ses_lock);
+ pr_warn_once("Server share %s deleted.\n",
+ tcon->treeName);
+ return;
+ }
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+}
+
static int
smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
struct cifsInodeInfo *cinode)
@@ -2516,7 +2577,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
{
struct cifs_ses *ses = tcon->ses;
struct TCP_Server_Info *server = cifs_pick_channel(ses);
- int flags = 0;
+ int flags = CIFS_CP_CREATE_CLOSE_OP;
struct smb_rqst rqst[3];
int resp_buftype[3];
struct kvec rsp_iov[3];
@@ -2914,7 +2975,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
unsigned int sub_offset;
unsigned int print_len;
unsigned int print_offset;
- int flags = 0;
+ int flags = CIFS_CP_CREATE_CLOSE_OP;
struct smb_rqst rqst[3];
int resp_buftype[3];
struct kvec rsp_iov[3];
@@ -3096,7 +3157,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct cifs_fid fid;
struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
- int flags = 0;
+ int flags = CIFS_CP_CREATE_CLOSE_OP;
struct smb_rqst rqst[3];
int resp_buftype[3];
struct kvec rsp_iov[3];
@@ -4605,6 +4666,10 @@ static void smb2_decrypt_offload(struct work_struct *work)
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
+ if (dw->server->ops->is_network_name_deleted)
+ dw->server->ops->is_network_name_deleted(dw->buf,
+ dw->server);
+
mid->callback(mid);
} else {
spin_lock(&GlobalMid_Lock);
@@ -4723,6 +4788,12 @@ non_offloaded_decrypt:
rc = handle_read_data(server, *mid, buf,
server->vals->read_rsp_size,
pages, npages, len, false);
+ if (rc >= 0) {
+ if (server->ops->is_network_name_deleted) {
+ server->ops->is_network_name_deleted(buf,
+ server);
+ }
+ }
}
free_pages:
@@ -5072,6 +5143,7 @@ struct smb_version_operations smb20_operations = {
.fiemap = smb3_fiemap,
.llseek = smb3_llseek,
.is_status_io_timeout = smb2_is_status_io_timeout,
+ .is_network_name_deleted = smb2_is_network_name_deleted,
};
struct smb_version_operations smb21_operations = {
@@ -5173,6 +5245,7 @@ struct smb_version_operations smb21_operations = {
.fiemap = smb3_fiemap,
.llseek = smb3_llseek,
.is_status_io_timeout = smb2_is_status_io_timeout,
+ .is_network_name_deleted = smb2_is_network_name_deleted,
};
struct smb_version_operations smb30_operations = {
@@ -5286,6 +5359,7 @@ struct smb_version_operations smb30_operations = {
.fiemap = smb3_fiemap,
.llseek = smb3_llseek,
.is_status_io_timeout = smb2_is_status_io_timeout,
+ .is_network_name_deleted = smb2_is_network_name_deleted,
};
struct smb_version_operations smb311_operations = {
@@ -5399,6 +5473,7 @@ struct smb_version_operations smb311_operations = {
.fiemap = smb3_fiemap,
.llseek = smb3_llseek,
.is_status_io_timeout = smb2_is_status_io_timeout,
+ .is_network_name_deleted = smb2_is_network_name_deleted,
};
struct smb_version_values smb20_values = {
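
Most of the smb2ops.c credit changes above follow the same pattern: update the counters while holding req_lock, snapshot them into locals, release the lock, and only then emit the trace event and debug message so the reported credits and in_flight values are consistent with each other. A userspace sketch of that pattern, with pthreads standing in for the kernel spinlock and all names chosen for illustration:

/*
 * Sketch: mutate counters under the lock, snapshot, unlock, then log.
 */
#include <stdio.h>
#include <pthread.h>

struct server {
	pthread_mutex_t req_lock;
	int credits;
	int in_flight;
};

static void add_credits(struct server *s, int add)
{
	int scredits, in_flight;

	pthread_mutex_lock(&s->req_lock);
	s->credits += add;
	s->in_flight--;
	scredits = s->credits;		/* snapshot while still locked */
	in_flight = s->in_flight;
	pthread_mutex_unlock(&s->req_lock);

	/* trace/log outside the lock, using the consistent snapshot */
	printf("added %d credits total=%d in_flight=%d\n", add, scredits, in_flight);
}

int main(void)
{
	struct server s = {
		.req_lock = PTHREAD_MUTEX_INITIALIZER,
		.credits = 1,
		.in_flight = 1,
	};

	add_credits(&s, 16);
	return 0;
}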
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 794fc3b68b4f..2199a9bfae8f 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -814,8 +814,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
SMB3ANY_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
- req->DialectCount = cpu_to_le16(2);
- total_len += 4;
+ req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
+ req->DialectCount = cpu_to_le16(3);
+ total_len += 6;
} else if (strcmp(server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
@@ -849,6 +850,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
SMB2_CLIENT_GUID_SIZE);
if ((server->vals->protocol_id == SMB311_PROT_ID) ||
(strcmp(server->vals->version_string,
+ SMB3ANY_VERSION_STRING) == 0) ||
+ (strcmp(server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0))
assemble_neg_contexts(req, server, &total_len);
}
@@ -883,6 +886,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
cifs_server_dbg(VFS,
"SMB2.1 dialect returned but not requested\n");
return -EIO;
+ } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
+ /* ops are set to 3.0 by default for the default dialects, so update for 3.1.1 */
+ server->ops = &smb311_operations;
+ server->vals = &smb311_values;
}
} else if (strcmp(server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
@@ -1042,10 +1049,11 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
SMB3ANY_VERSION_STRING) == 0) {
pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
- pneg_inbuf->DialectCount = cpu_to_le16(2);
- /* structure is big enough for 3 dialects, sending only 2 */
+ pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
+ pneg_inbuf->DialectCount = cpu_to_le16(3);
+ /* SMB 2.1 not included so subtract one dialect from len */
inbuflen = sizeof(*pneg_inbuf) -
- (2 * sizeof(pneg_inbuf->Dialects[0]));
+ (sizeof(pneg_inbuf->Dialects[0]));
} else if (strcmp(server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
@@ -1053,7 +1061,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
pneg_inbuf->DialectCount = cpu_to_le16(4);
- /* structure is big enough for 3 dialects */
+ /* structure is big enough for 4 dialects */
inbuflen = sizeof(*pneg_inbuf);
} else {
/* otherwise specific dialect was requested */
@@ -1253,7 +1261,7 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
cifs_ses_server(sess_data->ses),
&rqst,
&sess_data->buf0_type,
- CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
+ CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
cifs_small_buf_release(sess_data->iov[0].iov_base);
memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
@@ -4033,8 +4041,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
if (rdata->credits.value > 0) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
SMB2_MAX_BUFFER_SIZE));
- shdr->CreditRequest =
- cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
+ shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
rc = adjust_credits(server, &rdata->credits, rdata->bytes);
if (rc)
@@ -4340,8 +4347,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
if (wdata->credits.value > 0) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
SMB2_MAX_BUFFER_SIZE));
- shdr->CreditRequest =
- cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
+ shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
rc = adjust_credits(server, &wdata->credits, wdata->bytes);
if (rc)
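
The negotiate changes above add SMB 3.1.1 to the SMB3ANY dialect list, which is why the request body grows by 6 bytes (three 2-byte dialect IDs) and the validate-negotiate buffer, sized for four dialects, shrinks by one slot when only three are sent. A small sketch of that size arithmetic, using an illustrative struct layout rather than the on-the-wire one:

/*
 * Sketch of the dialect-array size bookkeeping; the struct below is an
 * example layout, not the real validate-negotiate request.
 */
#include <stdio.h>
#include <stdint.h>

struct neg_inbuf {
	uint32_t capabilities;
	uint8_t  guid[16];
	uint16_t security_mode;
	uint16_t dialect_count;
	uint16_t dialects[4];		/* room for up to four dialects */
};

int main(void)
{
	size_t three_dialects = sizeof(struct neg_inbuf) - sizeof(uint16_t);
	size_t added_bytes = 3 * sizeof(uint16_t);	/* three dialect IDs */

	printf("payload for 3 dialects = %zu bytes (struct is %zu)\n",
	       three_dialects, sizeof(struct neg_inbuf));
	printf("bytes added to the negotiate request = %zu\n", added_bytes);
	return 0;
}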
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 9565e27681a5..a2eb34a8d9c9 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -246,8 +246,7 @@ extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon,
__u64 persistent_fid,
__u64 volatile_fid);
-extern int smb2_handle_cancelled_mid(char *buffer,
- struct TCP_Server_Info *server);
+extern int smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server);
void smb2_cancelled_close_fid(struct work_struct *work);
extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index c3d1a584f251..d6df908dccad 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -851,17 +851,21 @@ DEFINE_SMB3_LEASE_ERR_EVENT(lease_err);
DECLARE_EVENT_CLASS(smb3_reconnect_class,
TP_PROTO(__u64 currmid,
+ __u64 conn_id,
char *hostname),
- TP_ARGS(currmid, hostname),
+ TP_ARGS(currmid, conn_id, hostname),
TP_STRUCT__entry(
__field(__u64, currmid)
+ __field(__u64, conn_id)
__field(char *, hostname)
),
TP_fast_assign(
__entry->currmid = currmid;
+ __entry->conn_id = conn_id;
__entry->hostname = hostname;
),
- TP_printk("server=%s current_mid=0x%llx",
+ TP_printk("conn_id=0x%llx server=%s current_mid=%llu",
+ __entry->conn_id,
__entry->hostname,
__entry->currmid)
)
@@ -869,44 +873,56 @@ DECLARE_EVENT_CLASS(smb3_reconnect_class,
#define DEFINE_SMB3_RECONNECT_EVENT(name) \
DEFINE_EVENT(smb3_reconnect_class, smb3_##name, \
TP_PROTO(__u64 currmid, \
- char *hostname), \
- TP_ARGS(currmid, hostname))
+ __u64 conn_id, \
+ char *hostname), \
+ TP_ARGS(currmid, conn_id, hostname))
DEFINE_SMB3_RECONNECT_EVENT(reconnect);
DEFINE_SMB3_RECONNECT_EVENT(partial_send_reconnect);
DECLARE_EVENT_CLASS(smb3_credit_class,
TP_PROTO(__u64 currmid,
+ __u64 conn_id,
char *hostname,
int credits,
- int credits_to_add),
- TP_ARGS(currmid, hostname, credits, credits_to_add),
+ int credits_to_add,
+ int in_flight),
+ TP_ARGS(currmid, conn_id, hostname, credits, credits_to_add, in_flight),
TP_STRUCT__entry(
__field(__u64, currmid)
+ __field(__u64, conn_id)
__field(char *, hostname)
__field(int, credits)
__field(int, credits_to_add)
+ __field(int, in_flight)
),
TP_fast_assign(
__entry->currmid = currmid;
+ __entry->conn_id = conn_id;
__entry->hostname = hostname;
__entry->credits = credits;
__entry->credits_to_add = credits_to_add;
+ __entry->in_flight = in_flight;
),
- TP_printk("server=%s current_mid=0x%llx credits=%d credits_to_add=%d",
+ TP_printk("conn_id=0x%llx server=%s current_mid=%llu "
+ "credits=%d credit_change=%d in_flight=%d",
+ __entry->conn_id,
__entry->hostname,
__entry->currmid,
__entry->credits,
- __entry->credits_to_add)
+ __entry->credits_to_add,
+ __entry->in_flight)
)
#define DEFINE_SMB3_CREDIT_EVENT(name) \
DEFINE_EVENT(smb3_credit_class, smb3_##name, \
TP_PROTO(__u64 currmid, \
+ __u64 conn_id, \
char *hostname, \
int credits, \
- int credits_to_add), \
- TP_ARGS(currmid, hostname, credits, credits_to_add))
+ int credits_to_add, \
+ int in_flight), \
+ TP_ARGS(currmid, conn_id, hostname, credits, credits_to_add, in_flight))
DEFINE_SMB3_CREDIT_EVENT(reconnect_with_invalid_credits);
DEFINE_SMB3_CREDIT_EVENT(reconnect_detected);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 4a2b836eb017..c1725b55f364 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -101,7 +101,7 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
midEntry->mid_state == MID_RESPONSE_RECEIVED &&
server->ops->handle_cancelled_mid)
- server->ops->handle_cancelled_mid(midEntry->resp_buf, server);
+ server->ops->handle_cancelled_mid(midEntry, server);
midEntry->mid_state = MID_FREE;
atomic_dec(&midCount);
@@ -445,7 +445,7 @@ unmask:
*/
server->tcpStatus = CifsNeedReconnect;
trace_smb3_partial_send_reconnect(server->CurrentMid,
- server->hostname);
+ server->conn_id, server->hostname);
}
smbd_done:
if (rc < 0 && rc != -EINTR)
@@ -527,7 +527,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
int *credits;
int optype;
long int t;
- int scredits = server->credits;
+ int scredits, in_flight;
if (timeout < 0)
t = MAX_JIFFY_OFFSET;
@@ -551,23 +551,39 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
server->max_in_flight = server->in_flight;
*credits -= 1;
*instance = server->reconnect_instance;
+ scredits = *credits;
+ in_flight = server->in_flight;
spin_unlock(&server->req_lock);
+
+ trace_smb3_add_credits(server->CurrentMid,
+ server->conn_id, server->hostname, scredits, -1, in_flight);
+ cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
+ __func__, 1, scredits);
+
return 0;
}
while (1) {
if (*credits < num_credits) {
+ scredits = *credits;
spin_unlock(&server->req_lock);
+
cifs_num_waiters_inc(server);
rc = wait_event_killable_timeout(server->request_q,
has_credits(server, credits, num_credits), t);
cifs_num_waiters_dec(server);
if (!rc) {
+ spin_lock(&server->req_lock);
+ scredits = *credits;
+ in_flight = server->in_flight;
+ spin_unlock(&server->req_lock);
+
trace_smb3_credit_timeout(server->CurrentMid,
- server->hostname, num_credits, 0);
+ server->conn_id, server->hostname, scredits,
+ num_credits, in_flight);
cifs_server_dbg(VFS, "wait timed out after %d ms\n",
- timeout);
- return -ENOTSUPP;
+ timeout);
+ return -EBUSY;
}
if (rc == -ERESTARTSYS)
return -ERESTARTSYS;
@@ -595,6 +611,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
server->in_flight > 2 * MAX_COMPOUND &&
*credits <= MAX_COMPOUND) {
spin_unlock(&server->req_lock);
+
cifs_num_waiters_inc(server);
rc = wait_event_killable_timeout(
server->request_q,
@@ -603,13 +620,18 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
t);
cifs_num_waiters_dec(server);
if (!rc) {
+ spin_lock(&server->req_lock);
+ scredits = *credits;
+ in_flight = server->in_flight;
+ spin_unlock(&server->req_lock);
+
trace_smb3_credit_timeout(
- server->CurrentMid,
- server->hostname, num_credits,
- 0);
+ server->CurrentMid,
+ server->conn_id, server->hostname,
+ scredits, num_credits, in_flight);
cifs_server_dbg(VFS, "wait timed out after %d ms\n",
- timeout);
- return -ENOTSUPP;
+ timeout);
+ return -EBUSY;
}
if (rc == -ERESTARTSYS)
return -ERESTARTSYS;
@@ -625,16 +647,18 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
/* update # of requests on the wire to server */
if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
*credits -= num_credits;
- scredits = *credits;
server->in_flight += num_credits;
if (server->in_flight > server->max_in_flight)
server->max_in_flight = server->in_flight;
*instance = server->reconnect_instance;
}
+ scredits = *credits;
+ in_flight = server->in_flight;
spin_unlock(&server->req_lock);
trace_smb3_add_credits(server->CurrentMid,
- server->hostname, scredits, -(num_credits));
+ server->conn_id, server->hostname, scredits,
+ -(num_credits), in_flight);
cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
__func__, num_credits, scredits);
break;
@@ -656,13 +680,13 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num,
const int flags, unsigned int *instance)
{
int *credits;
- int scredits, sin_flight;
+ int scredits, in_flight;
credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
spin_lock(&server->req_lock);
scredits = *credits;
- sin_flight = server->in_flight;
+ in_flight = server->in_flight;
if (*credits < num) {
/*
@@ -684,10 +708,11 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num,
if (server->in_flight == 0) {
spin_unlock(&server->req_lock);
trace_smb3_insufficient_credits(server->CurrentMid,
- server->hostname, scredits, sin_flight);
+ server->conn_id, server->hostname, scredits,
+ num, in_flight);
cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
- __func__, sin_flight, num, scredits);
- return -ENOTSUPP;
+ __func__, in_flight, num, scredits);
+ return -EDEADLK;
}
}
spin_unlock(&server->req_lock);
@@ -1171,9 +1196,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/*
* Compounding is never used during session establish.
*/
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
+ mutex_lock(&server->srv_mutex);
smb311_update_preauth_hash(ses, rqst[0].rq_iov,
rqst[0].rq_nvec);
+ mutex_unlock(&server->srv_mutex);
+ }
for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(server, midQ[i]);
@@ -1182,7 +1210,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
}
if (rc != 0) {
for (; i < num_rqst; i++) {
- cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
+ cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(server, &rqst[i], midQ[i]);
spin_lock(&GlobalMid_Lock);
@@ -1236,12 +1264,14 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/*
* Compounding is never used during session establish.
*/
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
struct kvec iov = {
.iov_base = resp_iov[0].iov_base,
.iov_len = resp_iov[0].iov_len
};
+ mutex_lock(&server->srv_mutex);
smb311_update_preauth_hash(ses, &iov, 1);
+ mutex_unlock(&server->srv_mutex);
}
out:
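
The transport.c changes above wrap smb311_update_preauth_hash() in srv_mutex because the preauth integrity hash is a running digest over the whole negotiate/session-setup exchange, so concurrent updates (for example while extra channels are being established) must be serialized. A userspace sketch of that serialization, with a toy accumulator in place of the real SHA-512 state and pthreads in place of the kernel mutex:

/*
 * Sketch: serialize updates to a running digest so the final value does
 * not depend on thread interleaving.  The "hash" is not cryptographic.
 */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t srv_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long preauth_hash;	/* toy stand-in for the SHA-512 state */

static void update_preauth_hash(const unsigned char *buf, size_t len)
{
	pthread_mutex_lock(&srv_mutex);
	for (size_t i = 0; i < len; i++)
		preauth_hash = preauth_hash * 31 + buf[i];
	pthread_mutex_unlock(&srv_mutex);
}

int main(void)
{
	update_preauth_hash((const unsigned char *)"negotiate", 9);
	update_preauth_hash((const unsigned char *)"sess_setup", 10);
	printf("%lx\n", preauth_hash);
	return 0;
}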
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 6b658a1172ef..41a611e76bb7 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -101,6 +101,7 @@ static int cifs_creation_time_set(unsigned int xid, struct cifs_tcon *pTcon,
}
static int cifs_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h
index d5ebd36fb2cc..e7b27754ce78 100644
--- a/fs/coda/coda_linux.h
+++ b/fs/coda/coda_linux.h
@@ -46,10 +46,12 @@ extern const struct file_operations coda_ioctl_operations;
/* operations shared over more than one file */
int coda_open(struct inode *i, struct file *f);
int coda_release(struct inode *i, struct file *f);
-int coda_permission(struct inode *inode, int mask);
+int coda_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask);
int coda_revalidate_inode(struct inode *);
-int coda_getattr(const struct path *, struct kstat *, u32, unsigned int);
-int coda_setattr(struct dentry *, struct iattr *);
+int coda_getattr(struct user_namespace *, const struct path *, struct kstat *,
+ u32, unsigned int);
+int coda_setattr(struct user_namespace *, struct dentry *, struct iattr *);
/* this file: helpers */
char *coda_f2s(struct CodaFid *f);
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index ca40c2556ba6..d69989c1bac3 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -73,7 +73,8 @@ static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, unsig
}
-int coda_permission(struct inode *inode, int mask)
+int coda_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask)
{
int error;
@@ -132,7 +133,8 @@ static inline void coda_dir_drop_nlink(struct inode *dir)
}
/* creation routines: create, mknod, mkdir, link, symlink */
-static int coda_create(struct inode *dir, struct dentry *de, umode_t mode, bool excl)
+static int coda_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *de, umode_t mode, bool excl)
{
int error;
const char *name=de->d_name.name;
@@ -164,7 +166,8 @@ err_out:
return error;
}
-static int coda_mkdir(struct inode *dir, struct dentry *de, umode_t mode)
+static int coda_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *de, umode_t mode)
{
struct inode *inode;
struct coda_vattr attrs;
@@ -225,7 +228,8 @@ static int coda_link(struct dentry *source_de, struct inode *dir_inode,
}
-static int coda_symlink(struct inode *dir_inode, struct dentry *de,
+static int coda_symlink(struct user_namespace *mnt_userns,
+ struct inode *dir_inode, struct dentry *de,
const char *symname)
{
const char *name = de->d_name.name;
@@ -291,9 +295,9 @@ static int coda_rmdir(struct inode *dir, struct dentry *de)
}
/* rename */
-static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int coda_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
const char *old_name = old_dentry->d_name.name;
const char *new_name = new_dentry->d_name.name;
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index b1c70e2b9b1e..d9f1bd7153df 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -251,16 +251,17 @@ static void coda_evict_inode(struct inode *inode)
coda_cache_clear_inode(inode);
}
-int coda_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int coda_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
int err = coda_revalidate_inode(d_inode(path->dentry));
if (!err)
- generic_fillattr(d_inode(path->dentry), stat);
+ generic_fillattr(&init_user_ns, d_inode(path->dentry), stat);
return err;
}
-int coda_setattr(struct dentry *de, struct iattr *iattr)
+int coda_setattr(struct user_namespace *mnt_userns, struct dentry *de,
+ struct iattr *iattr)
{
struct inode *inode = d_inode(de);
struct coda_vattr vattr;
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 3aec27e5eb82..cb9fd59a688c 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -24,7 +24,8 @@
#include "coda_linux.h"
/* pioctl ops */
-static int coda_ioctl_permission(struct inode *inode, int mask);
+static int coda_ioctl_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask);
static long coda_pioctl(struct file *filp, unsigned int cmd,
unsigned long user_data);
@@ -40,7 +41,8 @@ const struct file_operations coda_ioctl_operations = {
};
/* the coda pioctl inode ops */
-static int coda_ioctl_permission(struct inode *inode, int mask)
+static int coda_ioctl_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
return (mask & MAY_EXEC) ? -EACCES : 0;
}
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index 2c557229696a..95e72d271b95 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -50,6 +50,7 @@
* which requires asm/elf.h to define compat_elf_gregset_t et al.
*/
#define elf_prstatus compat_elf_prstatus
+#define elf_prstatus_common compat_elf_prstatus_common
#define elf_prpsinfo compat_elf_prpsinfo
#undef ns_to_kernel_old_timeval
@@ -61,7 +62,6 @@
* differ from the native ones, or omitted when they match.
*/
-#undef ELF_ARCH
#undef elf_check_arch
#define elf_check_arch compat_elf_check_arch
@@ -90,11 +90,6 @@
#define ELF_ET_DYN_BASE COMPAT_ELF_ET_DYN_BASE
#endif
-#ifdef COMPAT_ELF_EXEC_PAGESIZE
-#undef ELF_EXEC_PAGESIZE
-#define ELF_EXEC_PAGESIZE COMPAT_ELF_EXEC_PAGESIZE
-#endif
-
#ifdef COMPAT_ELF_PLAT_INIT
#undef ELF_PLAT_INIT
#define ELF_PLAT_INIT COMPAT_ELF_PLAT_INIT
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 22dce2d35a4b..9a3aed249692 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -79,7 +79,8 @@ extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
extern const unsigned char * configfs_get_name(struct configfs_dirent *sd);
extern void configfs_drop_dentry(struct configfs_dirent *sd, struct dentry *parent);
-extern int configfs_setattr(struct dentry *dentry, struct iattr *iattr);
+extern int configfs_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *iattr);
extern struct dentry *configfs_pin_fs(void);
extern void configfs_release_fs(void);
@@ -92,7 +93,8 @@ extern const struct inode_operations configfs_root_inode_operations;
extern const struct inode_operations configfs_symlink_inode_operations;
extern const struct dentry_operations configfs_dentry_ops;
-extern int configfs_symlink(struct inode *dir, struct dentry *dentry,
+extern int configfs_symlink(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
const char *symname);
extern int configfs_unlink(struct inode *dir, struct dentry *dentry);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index b839dd1b459f..b6098e02e20b 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1268,7 +1268,8 @@ out_root_unlock:
}
EXPORT_SYMBOL(configfs_depend_item_unlocked);
-static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int configfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int ret = 0;
int module_got = 0;
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 1f0270229d7b..da8351d1e455 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -378,7 +378,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
attr = to_attr(dentry);
if (!attr)
- goto out_put_item;
+ goto out_free_buffer;
if (type & CONFIGFS_ITEM_BIN_ATTR) {
buffer->bin_attr = to_bin_attr(dentry);
@@ -391,7 +391,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
/* Grab the module reference for this attribute if we have one */
error = -ENODEV;
if (!try_module_get(buffer->owner))
- goto out_put_item;
+ goto out_free_buffer;
error = -EACCES;
if (!buffer->item->ci_type)
@@ -435,8 +435,6 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
out_put_module:
module_put(buffer->owner);
-out_put_item:
- config_item_put(buffer->item);
out_free_buffer:
up_read(&frag->frag_sem);
kfree(buffer);
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 8bd6a883c94c..42c348bb2903 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -40,7 +40,8 @@ static const struct inode_operations configfs_inode_operations ={
.setattr = configfs_setattr,
};
-int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
+int configfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr)
{
struct inode * inode = d_inode(dentry);
struct configfs_dirent * sd = dentry->d_fsdata;
@@ -67,7 +68,7 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
}
/* attributes were changed at least once in the past */
- error = simple_setattr(dentry, iattr);
+ error = simple_setattr(mnt_userns, dentry, iattr);
if (error)
return error;
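
The hunks above thread the mount's user namespace through ->setattr. Below is a minimal sketch (not part of this patch, usual kernel headers assumed) of the resulting convention for a simple filesystem that never appears on an idmapped mount, so it passes &init_user_ns to the generic helpers exactly as configfs does; the filesystem name and callback are hypothetical:

static int examplefs_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	/* permission checks for the attribute change */
	error = setattr_prepare(&init_user_ns, dentry, iattr);
	if (error)
		return error;

	/* copy the validated attributes into the in-core inode */
	setattr_copy(&init_user_ns, inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}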
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index cb61467478ca..77c854364e60 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -139,7 +139,8 @@ static int get_target(const char *symname, struct path *path,
}
-int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+int configfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
int ret;
struct path path;
@@ -197,7 +198,8 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna
if (dentry->d_inode || d_unhashed(dentry))
ret = -EEXIST;
else
- ret = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ ret = inode_permission(&init_user_ns, dir,
+ MAY_WRITE | MAY_EXEC);
if (!ret)
ret = type->ct_item_ops->allow_link(parent_item, target_item);
if (!ret) {
diff --git a/fs/coredump.c b/fs/coredump.c
index a2f6ecc8e345..1c0fdc1aa70b 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -703,6 +703,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
goto close_fail;
}
} else {
+ struct user_namespace *mnt_userns;
struct inode *inode;
int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
O_LARGEFILE | O_EXCL;
@@ -780,13 +781,15 @@ void do_coredump(const kernel_siginfo_t *siginfo)
* a process dumps core while its cwd is e.g. on a vfat
* filesystem.
*/
- if (!uid_eq(inode->i_uid, current_fsuid()))
+ mnt_userns = file_mnt_user_ns(cprm.file);
+ if (!uid_eq(i_uid_into_mnt(mnt_userns, inode), current_fsuid()))
goto close_fail;
if ((inode->i_mode & 0677) != 0600)
goto close_fail;
if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
goto close_fail;
- if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
+ if (do_truncate(mnt_userns, cprm.file->f_path.dentry,
+ 0, 0, cprm.file))
goto close_fail;
}
@@ -894,10 +897,10 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
*/
page = get_dump_page(addr);
if (page) {
- void *kaddr = kmap(page);
+ void *kaddr = kmap_local_page(page);
stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
- kunmap(page);
+ kunmap_local(kaddr);
put_page(page);
} else {
stop = !dump_skip(cprm, PAGE_SIZE);
@@ -931,7 +934,8 @@ void dump_truncate(struct coredump_params *cprm)
if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
offset = file->f_op->llseek(file, 0, SEEK_CUR);
if (i_size_read(file->f_mapping->host) < offset)
- do_truncate(file->f_path.dentry, offset, 0, file);
+ do_truncate(file_mnt_user_ns(file), file->f_path.dentry,
+ offset, 0, file);
}
}
EXPORT_SYMBOL(dump_truncate);
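
Besides the mnt_userns plumbing, the dump_user_range() hunk switches from kmap()/kunmap() to the cheaper, strictly nested kmap_local_page()/kunmap_local() pair. A sketch of that pattern in isolation (helper name hypothetical):

static int example_dump_one_page(struct coredump_params *cprm,
				 struct page *page)
{
	void *kaddr = kmap_local_page(page);	/* CPU-local, no global lock */
	int ok = dump_emit(cprm, kaddr, PAGE_SIZE);

	kunmap_local(kaddr);			/* must pair with kmap_local_page() */
	put_page(page);
	return ok;
}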
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 4b90cfd1ec36..2be65269a987 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -392,8 +392,7 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
/* Don't map the last page if it contains some other data */
if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
- pr_debug("mmap: %s: last page is shared\n",
- file_dentry(file)->d_name.name);
+ pr_debug("mmap: %pD: last page is shared\n", file);
pages--;
}
@@ -430,16 +429,15 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
}
if (!ret)
- pr_debug("mapped %s[%lu] at 0x%08lx (%u/%lu pages) "
- "to vma 0x%08lx, page_prot 0x%llx\n",
- file_dentry(file)->d_name.name, pgoff,
- address, pages, vma_pages(vma), vma->vm_start,
+ pr_debug("mapped %pD[%lu] at 0x%08lx (%u/%lu pages) "
+ "to vma 0x%08lx, page_prot 0x%llx\n", file,
+ pgoff, address, pages, vma_pages(vma), vma->vm_start,
(unsigned long long)pgprot_val(vma->vm_page_prot));
return ret;
bailout:
- pr_debug("%s[%lu]: direct mmap impossible: %s\n",
- file_dentry(file)->d_name.name, pgoff, bailout_reason);
+ pr_debug("%pD[%lu]: direct mmap impossible: %s\n",
+ file, pgoff, bailout_reason);
/* Didn't manage any direct map, but normal paging is still possible */
return 0;
}
@@ -469,8 +467,8 @@ static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
if (!offset || block_pages != pages)
return -ENOSYS;
addr = sbi->linear_phys_addr + offset;
- pr_debug("get_unmapped for %s ofs %#lx siz %lu at 0x%08lx\n",
- file_dentry(file)->d_name.name, pgoff*PAGE_SIZE, len, addr);
+ pr_debug("get_unmapped for %pD ofs %#lx siz %lu at 0x%08lx\n",
+ file, pgoff*PAGE_SIZE, len, addr);
return addr;
}
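
The cramfs changes above replace manual file_dentry(file)->d_name.name dereferences with the %pD printk specifier, which prints the name of the dentry backing a struct file. A one-line usage sketch:

	pr_debug("mmap request for %pD at pgoff %lu\n", file, pgoff);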
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index b048a0e38516..68a2de6b5a9b 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -52,7 +52,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
int num_pages = 0;
/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
- bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
+ bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
while (len) {
unsigned int blocks_this_page = min(len, blocks_per_page);
@@ -74,7 +74,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
len -= blocks_this_page;
lblk += blocks_this_page;
pblk += blocks_this_page;
- if (num_pages == BIO_MAX_PAGES || !len ||
+ if (num_pages == BIO_MAX_VECS || !len ||
!fscrypt_mergeable_bio(bio, inode, lblk)) {
err = submit_bio_wait(bio);
if (err)
@@ -126,7 +126,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk,
len);
- BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_PAGES);
+ BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
(len + blocks_per_page - 1) >> blocks_per_page_bits);
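
BIO_MAX_PAGES is renamed to BIO_MAX_VECS throughout this series, since the constant bounds the number of bio vectors rather than data pages, and bio_max_segs() clamps a caller-supplied count to that limit. A minimal sketch (function name hypothetical):

static struct bio *example_alloc_capped_bio(unsigned int nr_segs)
{
	/* never request more vectors than one bio can carry */
	return bio_alloc(GFP_NOFS, bio_max_segs(nr_segs));
}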
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index a51cef6bd27f..ed3d623724cd 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -465,7 +465,7 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
return -EFAULT;
policy.version = version;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
ret = mnt_want_write_file(filp);
diff --git a/fs/dax.c b/fs/dax.c
index 26d5dcd2d69e..b3d27fdc6775 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -810,11 +810,12 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
address = pgoff_address(index, vma);
/*
- * Note because we provide range to follow_pte it will call
+ * follow_invalidate_pte() will use the range to call
* mmu_notifier_invalidate_range_start() on our behalf before
* taking any lock.
*/
- if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl))
+ if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
+ &pmdp, &ptl))
continue;
/*
diff --git a/fs/dcache.c b/fs/dcache.c
index 97e81a844a96..7d24ff7eb206 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -456,23 +456,6 @@ static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
list_lru_isolate_move(lru, &dentry->d_lru, list);
}
-/**
- * d_drop - drop a dentry
- * @dentry: dentry to drop
- *
- * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
- * be found through a VFS lookup any more. Note that this is different from
- * deleting the dentry - d_delete will try to mark the dentry negative if
- * possible, giving a successful _negative_ lookup, while d_drop will
- * just make the cache lookup fail.
- *
- * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
- * reason (NFS timeouts or autofs deletes).
- *
- * __d_drop requires dentry->d_lock
- * ___d_drop doesn't mark dentry as "unhashed"
- * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
- */
static void ___d_drop(struct dentry *dentry)
{
struct hlist_bl_head *b;
@@ -501,6 +484,24 @@ void __d_drop(struct dentry *dentry)
}
EXPORT_SYMBOL(__d_drop);
+/**
+ * d_drop - drop a dentry
+ * @dentry: dentry to drop
+ *
+ * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
+ * be found through a VFS lookup any more. Note that this is different from
+ * deleting the dentry - d_delete will try to mark the dentry negative if
+ * possible, giving a successful _negative_ lookup, while d_drop will
+ * just make the cache lookup fail.
+ *
+ * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
+ * reason (NFS timeouts or autofs deletes).
+ *
+ * __d_drop requires dentry->d_lock
+ *
+ * ___d_drop doesn't mark dentry as "unhashed"
+ * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
+ */
void d_drop(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
@@ -996,20 +997,6 @@ struct dentry *d_find_any_alias(struct inode *inode)
}
EXPORT_SYMBOL(d_find_any_alias);
-/**
- * d_find_alias - grab a hashed alias of inode
- * @inode: inode in question
- *
- * If inode has a hashed alias, or is a directory and has any alias,
- * acquire the reference to alias and return it. Otherwise return NULL.
- * Notice that if inode is a directory there can be only one alias and
- * it can be unhashed only if it has no children, or if it is the root
- * of a filesystem, or if the directory was renamed and d_revalidate
- * was the first vfs operation to notice.
- *
- * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
- * any other hashed alias over that one.
- */
static struct dentry *__d_find_alias(struct inode *inode)
{
struct dentry *alias;
@@ -1029,6 +1016,20 @@ static struct dentry *__d_find_alias(struct inode *inode)
return NULL;
}
+/**
+ * d_find_alias - grab a hashed alias of inode
+ * @inode: inode in question
+ *
+ * If inode has a hashed alias, or is a directory and has any alias,
+ * acquire the reference to alias and return it. Otherwise return NULL.
+ * Notice that if inode is a directory there can be only one alias and
+ * it can be unhashed only if it has no children, or if it is the root
+ * of a filesystem, or if the directory was renamed and d_revalidate
+ * was the first vfs operation to notice.
+ *
+ * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
+ * any other hashed alias over that one.
+ */
struct dentry *d_find_alias(struct inode *inode)
{
struct dentry *de = NULL;
@@ -1043,6 +1044,31 @@ struct dentry *d_find_alias(struct inode *inode)
EXPORT_SYMBOL(d_find_alias);
/*
+ * Caller MUST be holding rcu_read_lock() and be guaranteed
+ * that inode won't get freed until rcu_read_unlock().
+ */
+struct dentry *d_find_alias_rcu(struct inode *inode)
+{
+ struct hlist_head *l = &inode->i_dentry;
+ struct dentry *de = NULL;
+
+ spin_lock(&inode->i_lock);
+ // ->i_dentry and ->i_rcu are colocated, but the latter won't be
+ // used without having I_FREEING set, which means no aliases left
+ if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
+ if (S_ISDIR(inode->i_mode)) {
+ de = hlist_entry(l->first, struct dentry, d_u.d_alias);
+ } else {
+ hlist_for_each_entry(de, l, d_u.d_alias)
+ if (!d_unhashed(de))
+ break;
+ }
+ }
+ spin_unlock(&inode->i_lock);
+ return de;
+}
+
+/*
* Try to kill dentries associated with this inode.
* WARNING: you must own a reference to inode.
*/
@@ -2150,8 +2176,8 @@ EXPORT_SYMBOL(d_obtain_root);
* same inode, only the actual correct case is stored in the dcache for
* case-insensitive filesystems.
*
- * For a case-insensitive lookup match and if the the case-exact dentry
- * already exists in in the dcache, use it and return it.
+ * For a case-insensitive lookup match and if the case-exact dentry
+ * already exists in the dcache, use it and return it.
*
* If no entry exists with the exact case name, allocate new dentry with
* the exact case, and return the spliced entry.
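
The new d_find_alias_rcu() differs from d_find_alias() in that it takes no reference on the returned dentry, so the result is only stable while rcu_read_lock() is held and the inode is known not to be freed. A hypothetical caller:

static bool example_inode_has_alias(struct inode *inode)
{
	struct dentry *de;
	bool found;

	rcu_read_lock();
	de = d_find_alias_rcu(inode);	/* no reference taken on the dentry */
	found = de != NULL;
	rcu_read_unlock();
	return found;
}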
diff --git a/fs/dcookies.c b/fs/dcookies.c
deleted file mode 100644
index 6eeb61100a09..000000000000
--- a/fs/dcookies.c
+++ /dev/null
@@ -1,356 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * dcookies.c
- *
- * Copyright 2002 John Levon <levon@movementarian.org>
- *
- * Persistent cookie-path mappings. These are used by
- * profilers to convert a per-task EIP value into something
- * non-transitory that can be processed at a later date.
- * This is done by locking the dentry/vfsmnt pair in the
- * kernel until released by the tasks needing the persistent
- * objects. The tag is simply an unsigned long that refers
- * to the pair and can be looked up from userspace.
- */
-
-#include <linux/syscalls.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/mount.h>
-#include <linux/capability.h>
-#include <linux/dcache.h>
-#include <linux/mm.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/dcookies.h>
-#include <linux/mutex.h>
-#include <linux/path.h>
-#include <linux/compat.h>
-#include <linux/uaccess.h>
-
-/* The dcookies are allocated from a kmem_cache and
- * hashed onto a small number of lists. None of the
- * code here is particularly performance critical
- */
-struct dcookie_struct {
- struct path path;
- struct list_head hash_list;
-};
-
-static LIST_HEAD(dcookie_users);
-static DEFINE_MUTEX(dcookie_mutex);
-static struct kmem_cache *dcookie_cache __read_mostly;
-static struct list_head *dcookie_hashtable __read_mostly;
-static size_t hash_size __read_mostly;
-
-static inline int is_live(void)
-{
- return !(list_empty(&dcookie_users));
-}
-
-
-/* The dentry is locked, its address will do for the cookie */
-static inline unsigned long dcookie_value(struct dcookie_struct * dcs)
-{
- return (unsigned long)dcs->path.dentry;
-}
-
-
-static size_t dcookie_hash(unsigned long dcookie)
-{
- return (dcookie >> L1_CACHE_SHIFT) & (hash_size - 1);
-}
-
-
-static struct dcookie_struct * find_dcookie(unsigned long dcookie)
-{
- struct dcookie_struct *found = NULL;
- struct dcookie_struct * dcs;
- struct list_head * pos;
- struct list_head * list;
-
- list = dcookie_hashtable + dcookie_hash(dcookie);
-
- list_for_each(pos, list) {
- dcs = list_entry(pos, struct dcookie_struct, hash_list);
- if (dcookie_value(dcs) == dcookie) {
- found = dcs;
- break;
- }
- }
-
- return found;
-}
-
-
-static void hash_dcookie(struct dcookie_struct * dcs)
-{
- struct list_head * list = dcookie_hashtable + dcookie_hash(dcookie_value(dcs));
- list_add(&dcs->hash_list, list);
-}
-
-
-static struct dcookie_struct *alloc_dcookie(const struct path *path)
-{
- struct dcookie_struct *dcs = kmem_cache_alloc(dcookie_cache,
- GFP_KERNEL);
- struct dentry *d;
- if (!dcs)
- return NULL;
-
- d = path->dentry;
- spin_lock(&d->d_lock);
- d->d_flags |= DCACHE_COOKIE;
- spin_unlock(&d->d_lock);
-
- dcs->path = *path;
- path_get(path);
- hash_dcookie(dcs);
- return dcs;
-}
-
-
-/* This is the main kernel-side routine that retrieves the cookie
- * value for a dentry/vfsmnt pair.
- */
-int get_dcookie(const struct path *path, unsigned long *cookie)
-{
- int err = 0;
- struct dcookie_struct * dcs;
-
- mutex_lock(&dcookie_mutex);
-
- if (!is_live()) {
- err = -EINVAL;
- goto out;
- }
-
- if (path->dentry->d_flags & DCACHE_COOKIE) {
- dcs = find_dcookie((unsigned long)path->dentry);
- } else {
- dcs = alloc_dcookie(path);
- if (!dcs) {
- err = -ENOMEM;
- goto out;
- }
- }
-
- *cookie = dcookie_value(dcs);
-
-out:
- mutex_unlock(&dcookie_mutex);
- return err;
-}
-
-
-/* And here is where the userspace process can look up the cookie value
- * to retrieve the path.
- */
-static int do_lookup_dcookie(u64 cookie64, char __user *buf, size_t len)
-{
- unsigned long cookie = (unsigned long)cookie64;
- int err = -EINVAL;
- char * kbuf;
- char * path;
- size_t pathlen;
- struct dcookie_struct * dcs;
-
- /* we could leak path information to users
- * without dir read permission without this
- */
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- mutex_lock(&dcookie_mutex);
-
- if (!is_live()) {
- err = -EINVAL;
- goto out;
- }
-
- if (!(dcs = find_dcookie(cookie)))
- goto out;
-
- err = -ENOMEM;
- kbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!kbuf)
- goto out;
-
- /* FIXME: (deleted) ? */
- path = d_path(&dcs->path, kbuf, PAGE_SIZE);
-
- mutex_unlock(&dcookie_mutex);
-
- if (IS_ERR(path)) {
- err = PTR_ERR(path);
- goto out_free;
- }
-
- err = -ERANGE;
-
- pathlen = kbuf + PAGE_SIZE - path;
- if (pathlen <= len) {
- err = pathlen;
- if (copy_to_user(buf, path, pathlen))
- err = -EFAULT;
- }
-
-out_free:
- kfree(kbuf);
- return err;
-out:
- mutex_unlock(&dcookie_mutex);
- return err;
-}
-
-SYSCALL_DEFINE3(lookup_dcookie, u64, cookie64, char __user *, buf, size_t, len)
-{
- return do_lookup_dcookie(cookie64, buf, len);
-}
-
-#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE4(lookup_dcookie, u32, w0, u32, w1, char __user *, buf, compat_size_t, len)
-{
-#ifdef __BIG_ENDIAN
- return do_lookup_dcookie(((u64)w0 << 32) | w1, buf, len);
-#else
- return do_lookup_dcookie(((u64)w1 << 32) | w0, buf, len);
-#endif
-}
-#endif
-
-static int dcookie_init(void)
-{
- struct list_head * d;
- unsigned int i, hash_bits;
- int err = -ENOMEM;
-
- dcookie_cache = kmem_cache_create("dcookie_cache",
- sizeof(struct dcookie_struct),
- 0, 0, NULL);
-
- if (!dcookie_cache)
- goto out;
-
- dcookie_hashtable = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!dcookie_hashtable)
- goto out_kmem;
-
- err = 0;
-
- /*
- * Find the power-of-two list-heads that can fit into the allocation..
- * We don't guarantee that "sizeof(struct list_head)" is necessarily
- * a power-of-two.
- */
- hash_size = PAGE_SIZE / sizeof(struct list_head);
- hash_bits = 0;
- do {
- hash_bits++;
- } while ((hash_size >> hash_bits) != 0);
- hash_bits--;
-
- /*
- * Re-calculate the actual number of entries and the mask
- * from the number of bits we can fit.
- */
- hash_size = 1UL << hash_bits;
-
- /* And initialize the newly allocated array */
- d = dcookie_hashtable;
- i = hash_size;
- do {
- INIT_LIST_HEAD(d);
- d++;
- i--;
- } while (i);
-
-out:
- return err;
-out_kmem:
- kmem_cache_destroy(dcookie_cache);
- goto out;
-}
-
-
-static void free_dcookie(struct dcookie_struct * dcs)
-{
- struct dentry *d = dcs->path.dentry;
-
- spin_lock(&d->d_lock);
- d->d_flags &= ~DCACHE_COOKIE;
- spin_unlock(&d->d_lock);
-
- path_put(&dcs->path);
- kmem_cache_free(dcookie_cache, dcs);
-}
-
-
-static void dcookie_exit(void)
-{
- struct list_head * list;
- struct list_head * pos;
- struct list_head * pos2;
- struct dcookie_struct * dcs;
- size_t i;
-
- for (i = 0; i < hash_size; ++i) {
- list = dcookie_hashtable + i;
- list_for_each_safe(pos, pos2, list) {
- dcs = list_entry(pos, struct dcookie_struct, hash_list);
- list_del(&dcs->hash_list);
- free_dcookie(dcs);
- }
- }
-
- kfree(dcookie_hashtable);
- kmem_cache_destroy(dcookie_cache);
-}
-
-
-struct dcookie_user {
- struct list_head next;
-};
-
-struct dcookie_user * dcookie_register(void)
-{
- struct dcookie_user * user;
-
- mutex_lock(&dcookie_mutex);
-
- user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL);
- if (!user)
- goto out;
-
- if (!is_live() && dcookie_init())
- goto out_free;
-
- list_add(&user->next, &dcookie_users);
-
-out:
- mutex_unlock(&dcookie_mutex);
- return user;
-out_free:
- kfree(user);
- user = NULL;
- goto out;
-}
-
-
-void dcookie_unregister(struct dcookie_user * user)
-{
- mutex_lock(&dcookie_mutex);
-
- list_del(&user->next);
- kfree(user);
-
- if (!is_live())
- dcookie_exit();
-
- mutex_unlock(&dcookie_mutex);
-}
-
-EXPORT_SYMBOL_GPL(dcookie_register);
-EXPORT_SYMBOL_GPL(dcookie_unregister);
-EXPORT_SYMBOL_GPL(get_dcookie);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 2fcf66473436..22e86ae4dd5a 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -42,13 +42,14 @@ static unsigned int debugfs_allow = DEFAULT_DEBUGFS_ALLOW_BITS;
* so that we can use the file mode as part of a heuristic to determine whether
* to lock down individual files.
*/
-static int debugfs_setattr(struct dentry *dentry, struct iattr *ia)
+static int debugfs_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *ia)
{
int ret = security_locked_down(LOCKDOWN_DEBUGFS);
if (ret && (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
return ret;
- return simple_setattr(dentry, ia);
+ return simple_setattr(&init_user_ns, dentry, ia);
}
static const struct inode_operations debugfs_file_inode_operations = {
@@ -297,7 +298,7 @@ struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
{
struct dentry *dentry;
- if (IS_ERR(parent))
+ if (!debugfs_initialized() || IS_ERR_OR_NULL(name) || IS_ERR(parent))
return NULL;
if (!parent)
@@ -318,6 +319,9 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
if (!(debugfs_allow & DEBUGFS_ALLOW_API))
return ERR_PTR(-EPERM);
+ if (!debugfs_initialized())
+ return ERR_PTR(-ENOENT);
+
pr_debug("creating file '%s'\n", name);
if (IS_ERR(parent))
@@ -775,8 +779,8 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
take_dentry_name_snapshot(&old_name, old_dentry);
- error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
- dentry, 0);
+ error = simple_rename(&init_user_ns, d_inode(old_dir), old_dentry,
+ d_inode(new_dir), dentry, 0);
if (error) {
release_dentry_name_snapshot(&old_name);
goto exit;
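
With the extra checks added to debugfs_lookup() above, callers no longer need to guard against an uninitialised debugfs or a bad name themselves. A small sketch of probing for an existing entry (entry name hypothetical); note that a successful lookup still returns a referenced dentry that must be dropped with dput():

static bool example_entry_exists(struct dentry *parent)
{
	struct dentry *d = debugfs_lookup("stats", parent);

	if (!d)
		return false;
	dput(d);	/* drop the reference taken by the lookup */
	return true;
}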
diff --git a/fs/direct-io.c b/fs/direct-io.c
index d53fa92a1ab6..b61491bf3166 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -426,6 +426,8 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
unsigned long flags;
bio->bi_private = dio;
+ /* don't account direct I/O as memory stall */
+ bio_clear_flag(bio, BIO_WORKINGSET);
spin_lock_irqsave(&dio->bio_lock, flags);
dio->refcount++;
@@ -434,7 +436,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
bio_set_pages_dirty(bio);
- dio->bio_disk = bio->bi_disk;
+ dio->bio_disk = bio->bi_bdev->bd_disk;
if (sdio->submit_io) {
sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
@@ -460,7 +462,7 @@ static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
* Wait for the next BIO to complete. Remove it and return it. NULL is
* returned once all BIOs have been completed. This must only be called once
* all bios have been issued so that dio->refcount can only decrease. This
- * requires that that the caller hold a reference on the dio.
+ * requires that the caller hold a reference on the dio.
*/
static struct bio *dio_await_one(struct dio *dio)
{
@@ -693,7 +695,7 @@ static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
if (ret)
goto out;
sector = start_sector << (sdio->blkbits - 9);
- nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
+ nr_pages = bio_max_segs(sdio->pages_in_io);
BUG_ON(nr_pages <= 0);
dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
sdio->boundary = 0;
@@ -1277,7 +1279,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
if (retval == -ENOTBLK) {
/*
* The remaining part of the request will be
- * be handled by buffered I/O when we return
+ * handled by buffered I/O when we return
*/
retval = 0;
}
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 0681540c48d9..943e523f4c9d 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1110,8 +1110,8 @@ ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry,
}
inode_lock(lower_inode);
- rc = __vfs_setxattr(lower_dentry, lower_inode, ECRYPTFS_XATTR_NAME,
- page_virt, size, 0);
+ rc = __vfs_setxattr(&init_user_ns, lower_dentry, lower_inode,
+ ECRYPTFS_XATTR_NAME, page_virt, size, 0);
if (!rc && ecryptfs_inode)
fsstack_copy_attr_all(ecryptfs_inode, lower_inode);
inode_unlock(lower_inode);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 58d0f7187997..18e9285fbb4c 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -141,7 +141,8 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
else if (d_unhashed(lower_dentry))
rc = -EINVAL;
else
- rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
+ rc = vfs_unlink(&init_user_ns, lower_dir_inode, lower_dentry,
+ NULL);
if (rc) {
printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
goto out_unlock;
@@ -180,7 +181,8 @@ ecryptfs_do_create(struct inode *directory_inode,
lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_create(d_inode(lower_dir_dentry), lower_dentry, mode, true);
+ rc = vfs_create(&init_user_ns, d_inode(lower_dir_dentry), lower_dentry,
+ mode, true);
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
"rc = [%d]\n", __func__, rc);
@@ -190,7 +192,8 @@ ecryptfs_do_create(struct inode *directory_inode,
inode = __ecryptfs_get_inode(d_inode(lower_dentry),
directory_inode->i_sb);
if (IS_ERR(inode)) {
- vfs_unlink(d_inode(lower_dir_dentry), lower_dentry, NULL);
+ vfs_unlink(&init_user_ns, d_inode(lower_dir_dentry),
+ lower_dentry, NULL);
goto out_lock;
}
fsstack_copy_attr_times(directory_inode, d_inode(lower_dir_dentry));
@@ -254,7 +257,8 @@ out:
* Returns zero on success; non-zero on error condition
*/
static int
-ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
+ecryptfs_create(struct user_namespace *mnt_userns,
+ struct inode *directory_inode, struct dentry *ecryptfs_dentry,
umode_t mode, bool excl)
{
struct inode *ecryptfs_inode;
@@ -436,8 +440,8 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
dget(lower_old_dentry);
dget(lower_new_dentry);
lower_dir_dentry = lock_parent(lower_new_dentry);
- rc = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry),
- lower_new_dentry, NULL);
+ rc = vfs_link(lower_old_dentry, &init_user_ns,
+ d_inode(lower_dir_dentry), lower_new_dentry, NULL);
if (rc || d_really_is_negative(lower_new_dentry))
goto out_lock;
rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb);
@@ -460,7 +464,8 @@ static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
return ecryptfs_do_unlink(dir, dentry, d_inode(dentry));
}
-static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
+static int ecryptfs_symlink(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
const char *symname)
{
int rc;
@@ -481,7 +486,7 @@ static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
strlen(symname));
if (rc)
goto out_lock;
- rc = vfs_symlink(d_inode(lower_dir_dentry), lower_dentry,
+ rc = vfs_symlink(&init_user_ns, d_inode(lower_dir_dentry), lower_dentry,
encoded_symname);
kfree(encoded_symname);
if (rc || d_really_is_negative(lower_dentry))
@@ -499,7 +504,8 @@ out_lock:
return rc;
}
-static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int ecryptfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int rc;
struct dentry *lower_dentry;
@@ -507,7 +513,8 @@ static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_mkdir(d_inode(lower_dir_dentry), lower_dentry, mode);
+ rc = vfs_mkdir(&init_user_ns, d_inode(lower_dir_dentry), lower_dentry,
+ mode);
if (rc || d_really_is_negative(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
@@ -541,7 +548,7 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
else if (d_unhashed(lower_dentry))
rc = -EINVAL;
else
- rc = vfs_rmdir(lower_dir_inode, lower_dentry);
+ rc = vfs_rmdir(&init_user_ns, lower_dir_inode, lower_dentry);
if (!rc) {
clear_nlink(d_inode(dentry));
fsstack_copy_attr_times(dir, lower_dir_inode);
@@ -555,7 +562,8 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
}
static int
-ecryptfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+ecryptfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t dev)
{
int rc;
struct dentry *lower_dentry;
@@ -563,7 +571,8 @@ ecryptfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_mknod(d_inode(lower_dir_dentry), lower_dentry, mode, dev);
+ rc = vfs_mknod(&init_user_ns, d_inode(lower_dir_dentry), lower_dentry,
+ mode, dev);
if (rc || d_really_is_negative(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
@@ -579,9 +588,9 @@ out:
}
static int
-ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+ecryptfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
int rc;
struct dentry *lower_old_dentry;
@@ -590,6 +599,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_new_dir_dentry;
struct dentry *trap;
struct inode *target_inode;
+ struct renamedata rd = {};
if (flags)
return -EINVAL;
@@ -619,9 +629,14 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
rc = -ENOTEMPTY;
goto out_lock;
}
- rc = vfs_rename(d_inode(lower_old_dir_dentry), lower_old_dentry,
- d_inode(lower_new_dir_dentry), lower_new_dentry,
- NULL, 0);
+
+ rd.old_mnt_userns = &init_user_ns;
+ rd.old_dir = d_inode(lower_old_dir_dentry);
+ rd.old_dentry = lower_old_dentry;
+ rd.new_mnt_userns = &init_user_ns;
+ rd.new_dir = d_inode(lower_new_dir_dentry);
+ rd.new_dentry = lower_new_dentry;
+ rc = vfs_rename(&rd);
if (rc)
goto out_lock;
if (target_inode)
@@ -855,16 +870,19 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
inode_lock(d_inode(lower_dentry));
- rc = notify_change(lower_dentry, &lower_ia, NULL);
+ rc = notify_change(&init_user_ns, lower_dentry,
+ &lower_ia, NULL);
inode_unlock(d_inode(lower_dentry));
}
return rc;
}
static int
-ecryptfs_permission(struct inode *inode, int mask)
+ecryptfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask)
{
- return inode_permission(ecryptfs_inode_to_lower(inode), mask);
+ return inode_permission(&init_user_ns,
+ ecryptfs_inode_to_lower(inode), mask);
}
/**
@@ -879,7 +897,8 @@ ecryptfs_permission(struct inode *inode, int mask)
* All other metadata changes will be passed right to the lower filesystem,
* and we will just update our inode to look like the lower.
*/
-static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
+static int ecryptfs_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *ia)
{
int rc = 0;
struct dentry *lower_dentry;
@@ -933,7 +952,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
}
mutex_unlock(&crypt_stat->cs_mutex);
- rc = setattr_prepare(dentry, ia);
+ rc = setattr_prepare(&init_user_ns, dentry, ia);
if (rc)
goto out;
if (ia->ia_valid & ATTR_SIZE) {
@@ -959,14 +978,15 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
lower_ia.ia_valid &= ~ATTR_MODE;
inode_lock(d_inode(lower_dentry));
- rc = notify_change(lower_dentry, &lower_ia, NULL);
+ rc = notify_change(&init_user_ns, lower_dentry, &lower_ia, NULL);
inode_unlock(d_inode(lower_dentry));
out:
fsstack_copy_attr_all(inode, lower_inode);
return rc;
}
-static int ecryptfs_getattr_link(const struct path *path, struct kstat *stat,
+static int ecryptfs_getattr_link(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
@@ -975,7 +995,7 @@ static int ecryptfs_getattr_link(const struct path *path, struct kstat *stat,
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
- generic_fillattr(d_inode(dentry), stat);
+ generic_fillattr(&init_user_ns, d_inode(dentry), stat);
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
char *target;
size_t targetsiz;
@@ -991,7 +1011,8 @@ static int ecryptfs_getattr_link(const struct path *path, struct kstat *stat,
return rc;
}
-static int ecryptfs_getattr(const struct path *path, struct kstat *stat,
+static int ecryptfs_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
@@ -1003,7 +1024,7 @@ static int ecryptfs_getattr(const struct path *path, struct kstat *stat,
if (!rc) {
fsstack_copy_attr_all(d_inode(dentry),
ecryptfs_inode_to_lower(d_inode(dentry)));
- generic_fillattr(d_inode(dentry), stat);
+ generic_fillattr(&init_user_ns, d_inode(dentry), stat);
stat->blocks = lower_stat.blocks;
}
return rc;
@@ -1025,7 +1046,7 @@ ecryptfs_setxattr(struct dentry *dentry, struct inode *inode,
goto out;
}
inode_lock(lower_inode);
- rc = __vfs_setxattr_locked(lower_dentry, name, value, size, flags, NULL);
+ rc = __vfs_setxattr_locked(&init_user_ns, lower_dentry, name, value, size, flags, NULL);
inode_unlock(lower_inode);
if (!rc && inode)
fsstack_copy_attr_all(inode, lower_inode);
@@ -1091,7 +1112,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, struct inode *inode,
goto out;
}
inode_lock(lower_inode);
- rc = __vfs_removexattr(lower_dentry, name);
+ rc = __vfs_removexattr(&init_user_ns, lower_dentry, name);
inode_unlock(lower_inode);
out:
return rc;
@@ -1135,6 +1156,7 @@ static int ecryptfs_xattr_get(const struct xattr_handler *handler,
}
static int ecryptfs_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value, size_t size,
int flags)
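
The ecryptfs rename hunk shows the new vfs_rename() calling convention: all parameters are bundled into struct renamedata, and a stacking filesystem that is never idmapped passes &init_user_ns for both directions. A minimal sketch of the same pattern (function name hypothetical):

static int example_rename_on_lower(struct dentry *old_dir_dentry,
				   struct dentry *old_dentry,
				   struct dentry *new_dir_dentry,
				   struct dentry *new_dentry)
{
	struct renamedata rd = {
		.old_mnt_userns	= &init_user_ns,
		.old_dir	= d_inode(old_dir_dentry),
		.old_dentry	= old_dentry,
		.new_mnt_userns	= &init_user_ns,
		.new_dir	= d_inode(new_dir_dentry),
		.new_dentry	= new_dentry,
	};

	return vfs_rename(&rd);
}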
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index e63259fdef28..cdf40a54a35d 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -531,6 +531,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
goto out_free;
}
+ if (mnt_user_ns(path.mnt) != &init_user_ns) {
+ rc = -EINVAL;
+ printk(KERN_ERR "Mounting on idmapped mounts currently disallowed\n");
+ goto out_free;
+ }
+
if (check_ruid && !uid_eq(d_inode(path.dentry)->i_uid, current_uid())) {
rc = -EPERM;
printk(KERN_ERR "Mount of device (uid: %d) not owned by "
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 019572c6b39a..2f333a40ff4d 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -426,8 +426,8 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
if (size < 0)
size = 8;
put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
- rc = __vfs_setxattr(lower_dentry, lower_inode, ECRYPTFS_XATTR_NAME,
- xattr_virt, size, 0);
+ rc = __vfs_setxattr(&init_user_ns, lower_dentry, lower_inode,
+ ECRYPTFS_XATTR_NAME, xattr_virt, size, 0);
inode_unlock(lower_inode);
if (rc)
printk(KERN_ERR "Error whilst attempting to write inode size "
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index feaa5e182b7b..e6bc0302643b 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -137,7 +137,7 @@ efivarfs_ioc_setxflags(struct file *file, void __user *arg)
unsigned int oldflags = efivarfs_getflags(inode);
int error;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
if (copy_from_user(&flags, arg, sizeof(flags)))
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 0297ad95eb5c..14e2947975fd 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -66,8 +66,8 @@ bool efivarfs_valid_name(const char *str, int len)
return uuid_is_valid(s);
}
-static int efivarfs_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, bool excl)
+static int efivarfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode = NULL;
struct efivar_entry *var;
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index ea4f693bee22..1249e74b3bf0 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -129,6 +129,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
struct page *page,
erofs_off_t *last_block,
unsigned int nblocks,
+ unsigned int *eblks,
bool ra)
{
struct inode *const inode = mapping->host;
@@ -145,8 +146,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
/* note that for readpage case, bio also equals to NULL */
if (bio &&
- /* not continuous */
- *last_block + 1 != current_block) {
+ (*last_block + 1 != current_block || !*eblks)) {
submit_bio_retry:
submit_bio(bio);
bio = NULL;
@@ -215,10 +215,9 @@ submit_bio_retry:
/* max # of continuous pages */
if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
- if (nblocks > BIO_MAX_PAGES)
- nblocks = BIO_MAX_PAGES;
- bio = bio_alloc(GFP_NOIO, nblocks);
+ *eblks = bio_max_segs(nblocks);
+ bio = bio_alloc(GFP_NOIO, *eblks);
bio->bi_end_io = erofs_readendio;
bio_set_dev(bio, sb->s_bdev);
@@ -231,16 +230,8 @@ submit_bio_retry:
/* out of the extent or bio is full */
if (err < PAGE_SIZE)
goto submit_bio_retry;
-
+ --*eblks;
*last_block = current_block;
-
- /* shift in advance in case of it followed by too many gaps */
- if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
- /* err should reassign to 0 after submitting */
- err = 0;
- goto submit_bio_out;
- }
-
return bio;
err_out:
@@ -254,7 +245,6 @@ has_updated:
/* if updated manually, continuous pages has a gap */
if (bio)
-submit_bio_out:
submit_bio(bio);
return err ? ERR_PTR(err) : NULL;
}
@@ -266,23 +256,26 @@ submit_bio_out:
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
erofs_off_t last_block;
+ unsigned int eblks;
struct bio *bio;
trace_erofs_readpage(page, true);
bio = erofs_read_raw_page(NULL, page->mapping,
- page, &last_block, 1, false);
+ page, &last_block, 1, &eblks, false);
if (IS_ERR(bio))
return PTR_ERR(bio);
- DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
+ if (bio)
+ submit_bio(bio);
return 0;
}
static void erofs_raw_access_readahead(struct readahead_control *rac)
{
erofs_off_t last_block;
+ unsigned int eblks;
struct bio *bio = NULL;
struct page *page;
@@ -293,7 +286,7 @@ static void erofs_raw_access_readahead(struct readahead_control *rac)
prefetchw(&page->flags);
bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
- readahead_count(rac), true);
+ readahead_count(rac), &eblks, true);
/* all the page errors are ignored when readahead */
if (IS_ERR(bio)) {
@@ -307,7 +300,6 @@ static void erofs_raw_access_readahead(struct readahead_control *rac)
put_page(page);
}
- /* the rare case (end in gaps) */
if (bio)
submit_bio(bio);
}
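
The erofs raw read path now tracks the remaining bio capacity in *eblks instead of comparing bi_size against bi_max_vecs after the fact, and submits the bio when it is either full or discontiguous. The submission rule in isolation (names hypothetical):

static bool example_must_submit(u64 last_block, u64 cur_block,
				unsigned int segs_left)
{
	/* flush on a gap in the block range or when no segment is left */
	return last_block + 1 != cur_block || segs_left == 0;
}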
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 3e21c0e8adae..119fdce1b520 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -331,8 +331,9 @@ struct inode *erofs_iget(struct super_block *sb,
return inode;
}
-int erofs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
+int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags)
{
struct inode *const inode = d_inode(path->dentry);
@@ -343,7 +344,7 @@ int erofs_getattr(const struct path *path, struct kstat *stat,
stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
STATX_ATTR_IMMUTABLE);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
return 0;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 67a7ec945686..351dae524a0c 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -373,8 +373,9 @@ extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
-int erofs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags);
+int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags);
/* namei.c */
extern const struct inode_operations erofs_dir_iops;
diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
index 5f8cc7346c69..3a81e1f7fc06 100644
--- a/fs/erofs/namei.c
+++ b/fs/erofs/namei.c
@@ -234,8 +234,8 @@ static struct dentry *erofs_lookup(struct inode *dir,
} else if (err) {
inode = ERR_PTR(err);
} else {
- erofs_dbg("%s, %s (nid %llu) found, d_type %u", __func__,
- dentry->d_name.name, nid, d_type);
+ erofs_dbg("%s, %pd (nid %llu) found, d_type %u", __func__,
+ dentry, nid, d_type);
inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR);
}
return d_splice_alias(inode, dentry);
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index be10b16ea66e..d5a6b9b888a5 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -158,8 +158,8 @@ static int erofs_read_superblock(struct super_block *sb)
blkszbits = dsb->blkszbits;
/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
if (blkszbits != LOG_BLOCK_SIZE) {
- erofs_err(sb, "blksize %u isn't supported on this platform",
- 1 << blkszbits);
+ erofs_err(sb, "blkszbits %u isn't supported on this platform",
+ blkszbits);
goto out;
}
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index 5bde77d70852..47314a26767a 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -48,8 +48,14 @@ static int init_inode_xattrs(struct inode *inode)
int ret = 0;
/* the most case is that xattrs of this inode are initialized. */
- if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
+ if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
+ /*
+ * paired with smp_mb() at the end of the function to ensure
+ * fields will only be observed after the bit is set.
+ */
+ smp_mb();
return 0;
+ }
if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
return -ERESTARTSYS;
@@ -137,6 +143,8 @@ static int init_inode_xattrs(struct inode *inode)
}
xattr_iter_end(&it, atomic_map);
+ /* paired with smp_mb() at the beginning of the function. */
+ smp_mb();
set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
out_unlock:
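
The barrier pairing added to init_inode_xattrs() (and to z_erofs_fill_inode_lazy() below) is the usual lazy-initialisation idiom: the writer publishes the fields with smp_mb() before setting the INITED bit, and a reader that sees the bit issues a matching smp_mb() before touching those fields. A generic sketch with a hypothetical flag bit:

static int example_init_once(unsigned long *flags)
{
	if (test_bit(EXAMPLE_INITED_BIT, flags)) {
		smp_mb();	/* pairs with the barrier before set_bit() */
		return 0;	/* initialised fields are now visible */
	}

	/* slow path: populate the object's fields under the usual lock */

	smp_mb();		/* publish the fields before the bit */
	set_bit(EXAMPLE_INITED_BIT, flags);
	return 0;
}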
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 6cb356c4217b..3851e1a64f73 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1235,7 +1235,7 @@ submit_bio_retry:
}
if (!bio) {
- bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+ bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
bio->bi_end_io = z_erofs_decompressqueue_endio;
bio_set_dev(bio, sb->s_bdev);
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index ae325541884e..14d2de35110c 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -36,8 +36,14 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
void *kaddr;
struct z_erofs_map_header *h;
- if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
+ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
+ /*
+ * paired with smp_mb() at the end of the function to ensure
+ * fields will only be observed after the bit is set.
+ */
+ smp_mb();
return 0;
+ }
if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
return -ERESTARTSYS;
@@ -83,6 +89,8 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
((h->h_clusterbits >> 5) & 7);
+ /* paired with smp_mb() at the beginning of the function */
+ smp_mb();
set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
unmap_done:
kunmap_atomic(kaddr);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a829af074eb5..3196474cbe24 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -979,7 +979,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
return epir;
}
-#ifdef CONFIG_CHECKPOINT_RESTORE
+#ifdef CONFIG_KCMP
static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
{
struct rb_node *rbp;
@@ -1021,7 +1021,7 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
return file_raw;
}
-#endif /* CONFIG_CHECKPOINT_RESTORE */
+#endif /* CONFIG_KCMP */
/**
* Adds a new entry to the tail of the list in a lockless way, i.e.
diff --git a/fs/exec.c b/fs/exec.c
index 5d4d52039105..18594f11c31f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -708,7 +708,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
return -ENOMEM;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, old_start, old_end);
+ tlb_gather_mmu(&tlb, mm);
if (new_end > old_start) {
/*
* when the old and new regions overlap clear from new_end.
@@ -725,7 +725,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
free_pgd_range(&tlb, old_start, old_end, new_end,
vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
}
- tlb_finish_mmu(&tlb, old_start, old_end);
+ tlb_finish_mmu(&tlb);
/*
* Shrink the vma to just the new range. Always succeeds.
@@ -1404,14 +1404,15 @@ EXPORT_SYMBOL(begin_new_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
struct inode *inode = file_inode(file);
- if (inode_permission(inode, MAY_READ) < 0) {
+ struct user_namespace *mnt_userns = file_mnt_user_ns(file);
+ if (inode_permission(mnt_userns, inode, MAY_READ) < 0) {
struct user_namespace *old, *user_ns;
bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
/* Ensure mm->user_ns contains the executable */
user_ns = old = bprm->mm->user_ns;
while ((user_ns != &init_user_ns) &&
- !privileged_wrt_inode_uidgid(user_ns, inode))
+ !privileged_wrt_inode_uidgid(user_ns, mnt_userns, inode))
user_ns = user_ns->parent;
if (old != user_ns) {
@@ -1454,7 +1455,7 @@ EXPORT_SYMBOL(finalize_exec);
/*
* Prepare credentials and lock ->cred_guard_mutex.
* setup_new_exec() commits the new creds and drops the lock.
- * Or, if exec fails before, free_bprm() should release ->cred and
+ * Or, if exec fails before, free_bprm() should release ->cred
* and unlock.
*/
static int prepare_bprm_creds(struct linux_binprm *bprm)
@@ -1579,6 +1580,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
{
/* Handle suid and sgid on files */
+ struct user_namespace *mnt_userns;
struct inode *inode;
unsigned int mode;
kuid_t uid;
@@ -1595,13 +1597,15 @@ static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
if (!(mode & (S_ISUID|S_ISGID)))
return;
+ mnt_userns = file_mnt_user_ns(file);
+
/* Be careful if suid/sgid is set */
inode_lock(inode);
/* reload atomically mode/uid/gid now that lock held */
mode = inode->i_mode;
- uid = inode->i_uid;
- gid = inode->i_gid;
+ uid = i_uid_into_mnt(mnt_userns, inode);
+ gid = i_gid_into_mnt(mnt_userns, inode);
inode_unlock(inode);
/* We ignore suid/sgid if there are no mappings for them in the ns */
@@ -1837,7 +1841,7 @@ static int bprm_execve(struct linux_binprm *bprm,
out:
/*
- * If past the point of no return ensure the the code never
+ * If past the point of no return ensure the code never
* returns to the userspace process. Use an existing fatal
* signal if present otherwise terminate the process with
* SIGSEGV.
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index a987919686c0..761c79c3a4ba 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -166,7 +166,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
* If the value of "clu" is 0, it means cluster 2 which is the first cluster of
* the cluster heap.
*/
-void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
+void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
{
int i, b;
unsigned int ent_idx;
@@ -180,7 +180,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
clear_bit_le(b, sbi->vol_amap[i]->b_data);
- exfat_update_bh(sbi->vol_amap[i], IS_DIRSYNC(inode));
+ exfat_update_bh(sbi->vol_amap[i], sync);
if (opts->discard) {
int ret_discard;
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index b8f0e829ecbd..fa21421a14d9 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -408,7 +408,7 @@ int exfat_count_num_clusters(struct super_block *sb,
int exfat_load_bitmap(struct super_block *sb);
void exfat_free_bitmap(struct exfat_sb_info *sbi);
int exfat_set_bitmap(struct inode *inode, unsigned int clu);
-void exfat_clear_bitmap(struct inode *inode, unsigned int clu);
+void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync);
unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu);
int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count);
@@ -416,9 +416,11 @@ int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count);
extern const struct file_operations exfat_file_operations;
int __exfat_truncate(struct inode *inode, loff_t new_size);
void exfat_truncate(struct inode *inode, loff_t size);
-int exfat_setattr(struct dentry *dentry, struct iattr *attr);
-int exfat_getattr(const struct path *path, struct kstat *stat,
- unsigned int request_mask, unsigned int query_flags);
+int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr);
+int exfat_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, unsigned int request_mask,
+ unsigned int query_flags);
int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
/* namei.c */
diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h
index 6aec6288e1f2..7f39b1c6469c 100644
--- a/fs/exfat/exfat_raw.h
+++ b/fs/exfat/exfat_raw.h
@@ -77,6 +77,10 @@
#define EXFAT_FILE_NAME_LEN 15
+#define EXFAT_MIN_SECT_SIZE_BITS 9
+#define EXFAT_MAX_SECT_SIZE_BITS 12
+#define EXFAT_MAX_SECT_PER_CLUS_BITS(x) (25 - (x)->sect_size_bits)
+
/* EXFAT: Main and Backup Boot Sector (512 bytes) */
struct boot_sector {
__u8 jmp_boot[BOOTSEC_JUMP_BOOT_LEN];
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index c3c9afee7418..7b2e8af17193 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -157,6 +157,7 @@ int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain)
unsigned int clu;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ int cur_cmap_i, next_cmap_i;
/* invalid cluster number */
if (p_chain->dir == EXFAT_FREE_CLUSTER ||
@@ -176,21 +177,51 @@ int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain)
clu = p_chain->dir;
+ cur_cmap_i = next_cmap_i =
+ BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu));
+
if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
+ unsigned int last_cluster = p_chain->dir + p_chain->size - 1;
do {
- exfat_clear_bitmap(inode, clu);
- clu++;
+ bool sync = false;
+
+ if (clu < last_cluster)
+ next_cmap_i =
+ BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu+1));
+ /* flush bitmap only if index would be changed or for last cluster */
+ if (clu == last_cluster || cur_cmap_i != next_cmap_i) {
+ sync = true;
+ cur_cmap_i = next_cmap_i;
+ }
+
+ exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
+ clu++;
num_clusters++;
} while (num_clusters < p_chain->size);
} else {
do {
- exfat_clear_bitmap(inode, clu);
-
- if (exfat_get_next_cluster(sb, &clu))
- goto dec_used_clus;
+ bool sync = false;
+ unsigned int n_clu = clu;
+ int err = exfat_get_next_cluster(sb, &n_clu);
+
+ if (err || n_clu == EXFAT_EOF_CLUSTER)
+ sync = true;
+ else
+ next_cmap_i =
+ BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(n_clu));
+
+ if (cur_cmap_i != next_cmap_i) {
+ sync = true;
+ cur_cmap_i = next_cmap_i;
+ }
+ exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)));
+ clu = n_clu;
num_clusters++;
+
+ if (err)
+ goto dec_used_clus;
} while (clu != EXFAT_EOF_CLUSTER);
}
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index a92478eabfa4..f783cf38dd8e 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -267,13 +267,14 @@ write_size:
mutex_unlock(&sbi->s_lock);
}
-int exfat_getattr(const struct path *path, struct kstat *stat,
- unsigned int request_mask, unsigned int query_flags)
+int exfat_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, unsigned int request_mask,
+ unsigned int query_flags)
{
struct inode *inode = d_backing_inode(path->dentry);
struct exfat_inode_info *ei = EXFAT_I(inode);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
exfat_truncate_atime(&stat->atime);
stat->result_mask |= STATX_BTIME;
stat->btime.tv_sec = ei->i_crtime.tv_sec;
@@ -282,7 +283,8 @@ int exfat_getattr(const struct path *path, struct kstat *stat,
return 0;
}
-int exfat_setattr(struct dentry *dentry, struct iattr *attr)
+int exfat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct exfat_sb_info *sbi = EXFAT_SB(dentry->d_sb);
struct inode *inode = dentry->d_inode;
@@ -305,7 +307,7 @@ int exfat_setattr(struct dentry *dentry, struct iattr *attr)
ATTR_TIMES_SET);
}
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
attr->ia_valid = ia_valid;
if (error)
goto out;
@@ -340,7 +342,7 @@ int exfat_setattr(struct dentry *dentry, struct iattr *attr)
up_write(&EXFAT_I(inode)->truncate_lock);
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
exfat_truncate_atime(&inode->i_atime);
mark_inode_dirty(inode);
@@ -361,7 +363,7 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
if (err)
return err;
- return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ return blkdev_issue_flush(inode->i_sb->s_bdev);
}
const struct file_operations exfat_file_operations = {
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 2932b23a3b6c..d9e8ec689c55 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -541,8 +541,8 @@ out:
return ret;
}
-static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int exfat_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
@@ -827,7 +827,8 @@ unlock:
return err;
}
-static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int exfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
@@ -1318,9 +1319,10 @@ out:
return ret;
}
-static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int exfat_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
{
struct inode *old_inode, *new_inode;
struct super_block *sb = old_dir->i_sb;
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 87be5bfc31eb..c6d8d2e53486 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -381,8 +381,7 @@ static int exfat_calibrate_blocksize(struct super_block *sb, int logical_sect)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- if (!is_power_of_2(logical_sect) ||
- logical_sect < 512 || logical_sect > 4096) {
+ if (!is_power_of_2(logical_sect)) {
exfat_err(sb, "bogus logical sector size %u", logical_sect);
return -EIO;
}
@@ -451,6 +450,25 @@ static int exfat_read_boot_sector(struct super_block *sb)
return -EINVAL;
}
+ /*
+ * sect_size_bits must be at least 9 (512-byte sectors) and at most 12 (4096-byte sectors).
+ */
+ if (p_boot->sect_size_bits < EXFAT_MIN_SECT_SIZE_BITS ||
+ p_boot->sect_size_bits > EXFAT_MAX_SECT_SIZE_BITS) {
+ exfat_err(sb, "bogus sector size bits : %u\n",
+ p_boot->sect_size_bits);
+ return -EINVAL;
+ }
+
+ /*
+ * sect_per_clus_bits must be at least 0 and at most 25 - sect_size_bits.
+ */
+ if (p_boot->sect_per_clus_bits > EXFAT_MAX_SECT_PER_CLUS_BITS(p_boot)) {
+ exfat_err(sb, "bogus sectors bits per cluster : %u\n",
+ p_boot->sect_per_clus_bits);
+ return -EINVAL;
+ }
+
sbi->sect_per_clus = 1 << p_boot->sect_per_clus_bits;
sbi->sect_per_clus_bits = p_boot->sect_per_clus_bits;
sbi->cluster_size_bits = p_boot->sect_per_clus_bits +
@@ -477,16 +495,19 @@ static int exfat_read_boot_sector(struct super_block *sb)
sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED;
/* check consistencies */
- if (sbi->num_FAT_sectors << p_boot->sect_size_bits <
- sbi->num_clusters * 4) {
+ if ((u64)sbi->num_FAT_sectors << p_boot->sect_size_bits <
+ (u64)sbi->num_clusters * 4) {
exfat_err(sb, "bogus fat length");
return -EINVAL;
}
+
if (sbi->data_start_sector <
- sbi->FAT1_start_sector + sbi->num_FAT_sectors * p_boot->num_fats) {
+ (u64)sbi->FAT1_start_sector +
+ (u64)sbi->num_FAT_sectors * p_boot->num_fats) {
exfat_err(sb, "bogus data start sector");
return -EINVAL;
}
+
if (sbi->vol_flags & VOLUME_DIRTY)
exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck.");
if (sbi->vol_flags & MEDIA_FAILURE)
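The new boot-sector checks encode exFAT's on-disk limits: sector sizes of 512..4096 bytes (sect_size_bits 9..12) and a cluster size of at most 32 MiB, hence at most 25 - sect_size_bits sectors-per-cluster bits. The macros referenced above are assumed to expand roughly as follows (a sketch of the presumed definitions, not quoted from the exfat headers):

    #define EXFAT_MIN_SECT_SIZE_BITS            9       /* 512-byte sectors  */
    #define EXFAT_MAX_SECT_SIZE_BITS            12      /* 4096-byte sectors */
    /* cluster size is capped at 2^25 bytes (32 MiB) */
    #define EXFAT_MAX_SECT_PER_CLUS_BITS(pbr)   (25 - (pbr)->sect_size_bits)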
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index cf4c77f8dd08..b9a9db98e94b 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -216,14 +216,16 @@ __ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
* inode->i_mutex: down
*/
int
-ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ext2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
int error;
int update_mode = 0;
umode_t mode = inode->i_mode;
if (type == ACL_TYPE_ACCESS && acl) {
- error = posix_acl_update_mode(inode, &mode, &acl);
+ error = posix_acl_update_mode(&init_user_ns, inode, &mode,
+ &acl);
if (error)
return error;
update_mode = 1;
diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h
index 0f01c759daac..917db5f6630a 100644
--- a/fs/ext2/acl.h
+++ b/fs/ext2/acl.h
@@ -56,7 +56,8 @@ static inline int ext2_acl_count(size_t size)
/* acl.c */
extern struct posix_acl *ext2_get_acl(struct inode *inode, int type);
-extern int ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int ext2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
extern int ext2_init_acl (struct inode *, struct inode *);
#else
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 2a4175fbaf5e..3309fb2d327a 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -764,8 +764,9 @@ extern struct inode *ext2_iget (struct super_block *, unsigned long);
extern int ext2_write_inode (struct inode *, struct writeback_control *);
extern void ext2_evict_inode(struct inode *);
extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
-extern int ext2_setattr (struct dentry *, struct iattr *);
-extern int ext2_getattr (const struct path *, struct kstat *, u32, unsigned int);
+extern int ext2_setattr (struct user_namespace *, struct dentry *, struct iattr *);
+extern int ext2_getattr (struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern void ext2_set_inode_flags(struct inode *inode);
extern int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 432c3febea6d..df14e750e9fe 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -551,7 +551,7 @@ got:
inode->i_uid = current_fsuid();
inode->i_gid = dir->i_gid;
} else
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
inode->i_ino = ino;
inode->i_blocks = 0;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 78c417d3c898..68178b2234bd 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1638,8 +1638,8 @@ int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
-int ext2_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
+int ext2_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct ext2_inode_info *ei = EXT2_I(inode);
@@ -1660,16 +1660,17 @@ int ext2_getattr(const struct path *path, struct kstat *stat,
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
return 0;
}
-int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
+int ext2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int error;
- error = setattr_prepare(dentry, iattr);
+ error = setattr_prepare(&init_user_ns, dentry, iattr);
if (error)
return error;
@@ -1689,9 +1690,9 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
if (error)
return error;
}
- setattr_copy(inode, iattr);
+ setattr_copy(&init_user_ns, inode, iattr);
if (iattr->ia_valid & ATTR_MODE)
- error = posix_acl_chmod(inode, inode->i_mode);
+ error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
mark_inode_dirty(inode);
return error;
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 32a8d10b579d..b399cbb7022d 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -39,7 +39,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
return ret;
- if (!inode_owner_or_capable(inode)) {
+ if (!inode_owner_or_capable(&init_user_ns, inode)) {
ret = -EACCES;
goto setflags_out;
}
@@ -84,7 +84,7 @@ setflags_out:
case EXT2_IOC_SETVERSION: {
__u32 generation;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EPERM;
ret = mnt_want_write_file(filp);
if (ret)
@@ -117,7 +117,7 @@ setversion_out:
if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
return -ENOTTY;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
if (get_user(rsv_window_size, (int __user *)arg))
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index ea980f1e2e99..3367384d344d 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -100,7 +100,9 @@ struct dentry *ext2_get_parent(struct dentry *child)
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, bool excl)
+static int ext2_create (struct user_namespace * mnt_userns,
+ struct inode * dir, struct dentry * dentry,
+ umode_t mode, bool excl)
{
struct inode *inode;
int err;
@@ -118,7 +120,8 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode
return ext2_add_nondir(dentry, inode);
}
-static int ext2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int ext2_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode = ext2_new_inode(dir, mode, NULL);
if (IS_ERR(inode))
@@ -131,7 +134,8 @@ static int ext2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
return 0;
}
-static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode, dev_t rdev)
+static int ext2_mknod (struct user_namespace * mnt_userns, struct inode * dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode * inode;
int err;
@@ -151,8 +155,8 @@ static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode,
return err;
}
-static int ext2_symlink (struct inode * dir, struct dentry * dentry,
- const char * symname)
+static int ext2_symlink (struct user_namespace * mnt_userns, struct inode * dir,
+ struct dentry * dentry, const char * symname)
{
struct super_block * sb = dir->i_sb;
int err = -ENAMETOOLONG;
@@ -225,7 +229,8 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
return err;
}
-static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+static int ext2_mkdir(struct user_namespace * mnt_userns,
+ struct inode * dir, struct dentry * dentry, umode_t mode)
{
struct inode * inode;
int err;
@@ -315,8 +320,9 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
return err;
}
-static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
- struct inode * new_dir, struct dentry * new_dentry,
+static int ext2_rename (struct user_namespace * mnt_userns,
+ struct inode * old_dir, struct dentry * old_dentry,
+ struct inode * new_dir, struct dentry * new_dentry,
unsigned int flags)
{
struct inode * old_inode = d_inode(old_dentry);
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 9a682e440acb..ebade1f52451 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -19,6 +19,7 @@ ext2_xattr_security_get(const struct xattr_handler *handler,
static int
ext2_xattr_security_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 49add1107850..18a87d5dd1ab 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -26,6 +26,7 @@ ext2_xattr_trusted_get(const struct xattr_handler *handler,
static int
ext2_xattr_trusted_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index c243a3b4d69d..58092449f8ff 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -30,6 +30,7 @@ ext2_xattr_user_get(const struct xattr_handler *handler,
static int
ext2_xattr_user_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext4/.kunitconfig b/fs/ext4/.kunitconfig
new file mode 100644
index 000000000000..bf51da7cd9fc
--- /dev/null
+++ b/fs/ext4/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_KUNIT_TESTS=y
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 619dd35ddd48..86699c8cab28 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -103,8 +103,7 @@ config EXT4_DEBUG
config EXT4_KUNIT_TESTS
tristate "KUnit tests for ext4" if !KUNIT_ALL_TESTS
- select EXT4_FS
- depends on KUNIT
+ depends on EXT4_FS && KUNIT
default KUNIT_ALL_TESTS
help
This builds the ext4 KUnit tests.
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 68aaed48315f..c5eaffccecc3 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -222,7 +222,8 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
}
int
-ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ext4_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
handle_t *handle;
int error, credits, retries = 0;
@@ -245,7 +246,7 @@ retry:
ext4_fc_start_update(inode);
if ((type == ACL_TYPE_ACCESS) && acl) {
- error = posix_acl_update_mode(inode, &mode, &acl);
+ error = posix_acl_update_mode(mnt_userns, inode, &mode, &acl);
if (error)
goto out_stop;
if (mode != inode->i_mode)
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index 9b63f5416a2f..84b8942a57f2 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -56,7 +56,8 @@ static inline int ext4_acl_count(size_t size)
/* acl.c */
struct posix_acl *ext4_get_acl(struct inode *inode, int type);
-int ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+int ext4_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
#else /* CONFIG_EXT4_FS_POSIX_ACL */
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index f45f9feebe59..74a5172c2d83 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -626,27 +626,41 @@ int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
/**
* ext4_should_retry_alloc() - check if a block allocation should be retried
- * @sb: super block
- * @retries: number of attemps has been made
+ * @sb: superblock
+ * @retries: number of retry attempts made so far
*
- * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
- * it is profitable to retry the operation, this function will wait
- * for the current or committing transaction to complete, and then
- * return TRUE. We will only retry once.
+ * ext4_should_retry_alloc() is called when ENOSPC is returned while
+ * attempting to allocate blocks. If there's an indication that a pending
+ * journal transaction might free some space and allow another attempt to
+ * succeed, this function will wait for the current or committing transaction
+ * to complete and then return TRUE.
*/
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
- if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
- (*retries)++ > 1 ||
- !EXT4_SB(sb)->s_journal)
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (!sbi->s_journal)
return 0;
- smp_mb();
- if (EXT4_SB(sb)->s_mb_free_pending == 0)
+ if (++(*retries) > 3) {
+ percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit);
return 0;
+ }
+ /*
+ * if there's no indication that blocks are about to be freed it's
+ * possible we just missed a transaction commit that did so
+ */
+ smp_mb();
+ if (sbi->s_mb_free_pending == 0)
+ return ext4_has_free_clusters(sbi, 1, 0);
+
+ /*
+ * it's possible we've just missed a transaction commit here,
+ * so ignore the returned status
+ */
jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
- jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
+ (void) jbd2_journal_force_commit_nested(sbi->s_journal);
return 1;
}
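Callers of ext4_should_retry_alloc() loop on ENOSPC; with this change the helper gives up after three journal-forced retries and accounts the overrun in the new s_sra_exceeded_retry_limit percpu counter (wired up in super.c and sysfs.c later in this series). The usual caller pattern, sketched with a placeholder do_allocation():

    int retries = 0;
    int err;

    do {
            err = do_allocation(handle, inode);     /* placeholder for the real allocation */
    } while (err == -ENOSPC &&
             ext4_should_retry_alloc(inode->i_sb, &retries));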
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 2866d249f3d2..826a56e3bbd2 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1484,6 +1484,7 @@ struct ext4_sb_info {
struct percpu_counter s_freeinodes_counter;
struct percpu_counter s_dirs_counter;
struct percpu_counter s_dirtyclusters_counter;
+ struct percpu_counter s_sra_exceeded_retry_limit;
struct blockgroup_lock *s_blockgroup_lock;
struct proc_dir_entry *s_proc;
struct kobject s_kobj;
@@ -2755,18 +2756,19 @@ extern int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
/* ialloc.c */
extern int ext4_mark_inode_used(struct super_block *sb, int ino);
-extern struct inode *__ext4_new_inode(handle_t *, struct inode *, umode_t,
+extern struct inode *__ext4_new_inode(struct user_namespace *, handle_t *,
+ struct inode *, umode_t,
const struct qstr *qstr, __u32 goal,
uid_t *owner, __u32 i_flags,
int handle_type, unsigned int line_no,
int nblocks);
-#define ext4_new_inode(handle, dir, mode, qstr, goal, owner, i_flags) \
- __ext4_new_inode((handle), (dir), (mode), (qstr), (goal), (owner), \
- i_flags, 0, 0, 0)
-#define ext4_new_inode_start_handle(dir, mode, qstr, goal, owner, \
+#define ext4_new_inode(handle, dir, mode, qstr, goal, owner, i_flags) \
+ __ext4_new_inode(&init_user_ns, (handle), (dir), (mode), (qstr), \
+ (goal), (owner), i_flags, 0, 0, 0)
+#define ext4_new_inode_start_handle(mnt_userns, dir, mode, qstr, goal, owner, \
type, nblocks) \
- __ext4_new_inode(NULL, (dir), (mode), (qstr), (goal), (owner), \
+ __ext4_new_inode((mnt_userns), NULL, (dir), (mode), (qstr), (goal), (owner), \
0, (type), __LINE__, (nblocks))
@@ -2792,6 +2794,8 @@ void __ext4_fc_track_link(handle_t *handle, struct inode *inode,
struct dentry *dentry);
void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry);
void ext4_fc_track_link(handle_t *handle, struct dentry *dentry);
+void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
+ struct dentry *dentry);
void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
@@ -2877,11 +2881,14 @@ extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
__ext4_iget((sb), (ino), (flags), __func__, __LINE__)
extern int ext4_write_inode(struct inode *, struct writeback_control *);
-extern int ext4_setattr(struct dentry *, struct iattr *);
-extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
+extern int ext4_setattr(struct user_namespace *, struct dentry *,
+ struct iattr *);
+extern int ext4_getattr(struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern void ext4_evict_inode(struct inode *);
extern void ext4_clear_inode(struct inode *);
-extern int ext4_file_getattr(const struct path *, struct kstat *, u32, unsigned int);
+extern int ext4_file_getattr(struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern int ext4_sync_inode(handle_t *, struct inode *);
extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 3960b7ec3ab7..77c84d6f1af6 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4382,8 +4382,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
{
struct inode *inode = file_inode(file);
handle_t *handle;
- int ret = 0;
- int ret2 = 0, ret3 = 0;
+ int ret = 0, ret2 = 0, ret3 = 0;
int retries = 0;
int depth = 0;
struct ext4_map_blocks map;
@@ -4408,7 +4407,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
depth = ext_depth(inode);
retry:
- while (ret >= 0 && len) {
+ while (len) {
/*
* Recalculate credits when extent tree depth changes.
*/
@@ -4430,9 +4429,13 @@ retry:
inode->i_ino, map.m_lblk,
map.m_len, ret);
ext4_mark_inode_dirty(handle, inode);
- ret2 = ext4_journal_stop(handle);
+ ext4_journal_stop(handle);
break;
}
+ /*
+ * allow a full retry cycle for any remaining allocations
+ */
+ retries = 0;
map.m_lblk += ret;
map.m_len = len = len - ret;
epos = (loff_t)map.m_lblk << inode->i_blkbits;
@@ -4450,11 +4453,8 @@ retry:
if (unlikely(ret2))
break;
}
- if (ret == -ENOSPC &&
- ext4_should_retry_alloc(inode->i_sb, &retries)) {
- ret = 0;
+ if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
- }
return ret > 0 ? ret2 : ret;
}
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 0a14a7c87bf8..7541d0b5d706 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -513,10 +513,10 @@ void ext4_fc_track_link(handle_t *handle, struct dentry *dentry)
__ext4_fc_track_link(handle, d_inode(dentry), dentry);
}
-void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
+void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
+ struct dentry *dentry)
{
struct __track_dentry_update_args args;
- struct inode *inode = d_inode(dentry);
int ret;
args.dentry = dentry;
@@ -527,6 +527,11 @@ void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
trace_ext4_fc_track_create(inode, dentry, ret);
}
+void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
+{
+ __ext4_fc_track_create(handle, d_inode(dentry), dentry);
+}
+
/* __track_fn for inode tracking */
static int __track_inode(struct inode *inode, void *arg, bool update)
{
@@ -915,13 +920,11 @@ static int ext4_fc_submit_inode_data_all(journal_t *journal)
struct super_block *sb = (struct super_block *)(journal->j_private);
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_inode_info *ei;
- struct list_head *pos;
int ret = 0;
spin_lock(&sbi->s_fc_lock);
ext4_set_mount_flag(sb, EXT4_MF_FC_COMMITTING);
- list_for_each(pos, &sbi->s_fc_q[FC_Q_MAIN]) {
- ei = list_entry(pos, struct ext4_inode_info, i_fc_list);
+ list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
ext4_set_inode_state(&ei->vfs_inode, EXT4_STATE_FC_COMMITTING);
while (atomic_read(&ei->i_fc_updates)) {
DEFINE_WAIT(wait);
@@ -978,17 +981,15 @@ __releases(&sbi->s_fc_lock)
{
struct super_block *sb = (struct super_block *)(journal->j_private);
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_fc_dentry_update *fc_dentry;
+ struct ext4_fc_dentry_update *fc_dentry, *fc_dentry_n;
struct inode *inode;
- struct list_head *pos, *n, *fcd_pos, *fcd_n;
- struct ext4_inode_info *ei;
+ struct ext4_inode_info *ei, *ei_n;
int ret;
if (list_empty(&sbi->s_fc_dentry_q[FC_Q_MAIN]))
return 0;
- list_for_each_safe(fcd_pos, fcd_n, &sbi->s_fc_dentry_q[FC_Q_MAIN]) {
- fc_dentry = list_entry(fcd_pos, struct ext4_fc_dentry_update,
- fcd_list);
+ list_for_each_entry_safe(fc_dentry, fc_dentry_n,
+ &sbi->s_fc_dentry_q[FC_Q_MAIN], fcd_list) {
if (fc_dentry->fcd_op != EXT4_FC_TAG_CREAT) {
spin_unlock(&sbi->s_fc_lock);
if (!ext4_fc_add_dentry_tlv(
@@ -1004,8 +1005,8 @@ __releases(&sbi->s_fc_lock)
}
inode = NULL;
- list_for_each_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN]) {
- ei = list_entry(pos, struct ext4_inode_info, i_fc_list);
+ list_for_each_entry_safe(ei, ei_n, &sbi->s_fc_q[FC_Q_MAIN],
+ i_fc_list) {
if (ei->vfs_inode.i_ino == fc_dentry->fcd_ino) {
inode = &ei->vfs_inode;
break;
@@ -1057,7 +1058,6 @@ static int ext4_fc_perform_commit(journal_t *journal)
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_inode_info *iter;
struct ext4_fc_head head;
- struct list_head *pos;
struct inode *inode;
struct blk_plug plug;
int ret = 0;
@@ -1076,7 +1076,7 @@ static int ext4_fc_perform_commit(journal_t *journal)
* flush before we start writing fast commit blocks.
*/
if (journal->j_fs_dev != journal->j_dev)
- blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
+ blkdev_issue_flush(journal->j_fs_dev);
blk_start_plug(&plug);
if (sbi->s_fc_bytes == 0) {
@@ -1099,8 +1099,7 @@ static int ext4_fc_perform_commit(journal_t *journal)
goto out;
}
- list_for_each(pos, &sbi->s_fc_q[FC_Q_MAIN]) {
- iter = list_entry(pos, struct ext4_inode_info, i_fc_list);
+ list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
inode = &iter->vfs_inode;
if (!ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING))
continue;
@@ -1226,9 +1225,8 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_inode_info *iter;
+ struct ext4_inode_info *iter, *iter_n;
struct ext4_fc_dentry_update *fc_dentry;
- struct list_head *pos, *n;
if (full && sbi->s_fc_bh)
sbi->s_fc_bh = NULL;
@@ -1236,8 +1234,8 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
jbd2_fc_release_bufs(journal);
spin_lock(&sbi->s_fc_lock);
- list_for_each_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN]) {
- iter = list_entry(pos, struct ext4_inode_info, i_fc_list);
+ list_for_each_entry_safe(iter, iter_n, &sbi->s_fc_q[FC_Q_MAIN],
+ i_fc_list) {
list_del_init(&iter->i_fc_list);
ext4_clear_inode_state(&iter->vfs_inode,
EXT4_STATE_FC_COMMITTING);
@@ -1535,7 +1533,7 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
out:
iput(inode);
if (!ret)
- blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
+ blkdev_issue_flush(sb->s_bdev);
return 0;
}
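The fast_commit.c changes are a mechanical conversion from list_for_each()/list_entry() to list_for_each_entry(); the _safe variants additionally cache the next element so the current one may be unlinked inside the loop body. A minimal sketch of the two styles over the same list, with process() as a placeholder:

    LIST_HEAD(head);
    struct list_head *pos;
    struct ext4_inode_info *ei, *next;

    /* open-coded form being removed */
    list_for_each(pos, &head) {
            ei = list_entry(pos, struct ext4_inode_info, i_fc_list);
            process(ei);                            /* placeholder */
    }

    /* preferred form; the safe variant allows deletion while iterating */
    list_for_each_entry_safe(ei, next, &head, i_fc_list) {
            process(ei);                            /* placeholder */
            list_del_init(&ei->i_fc_list);
    }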
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 349b27f0dda0..194f5d00fa32 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -74,8 +74,7 @@ static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
return generic_file_read_iter(iocb, to);
}
- ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
- is_sync_kiocb(iocb));
+ ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0);
inode_unlock_shared(inode);
file_accessed(iocb->ki_filp);
@@ -550,7 +549,7 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ilock_shared)
iomap_ops = &ext4_iomap_overwrite_ops;
ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
- is_sync_kiocb(iocb) || unaligned_io || extend);
+ (unaligned_io || extend) ? IOMAP_DIO_FORCE_WAIT : 0);
if (ret == -ENOTBLK)
ret = 0;
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 113bfb023a4a..027a7d7037a0 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -174,7 +174,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
if (needs_barrier) {
- err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ err = blkdev_issue_flush(inode->i_sb->s_bdev);
if (!ret)
ret = err;
}
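The blkdev_issue_flush() calls throughout this series lose their gfp_t argument; the helper's signature was reduced to taking just the block device, so a cache flush in an fsync path now reads as below (a sketch of the new call, with the surrounding variables as in the hunk above):

    /* new prototype: int blkdev_issue_flush(struct block_device *bdev); */
    if (needs_barrier) {
            err = blkdev_issue_flush(inode->i_sb->s_bdev);
            if (!ret)
                    ret = err;
    }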
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index b215c564bc31..633ae7becd61 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -919,7 +919,8 @@ static int ext4_xattr_credits_for_new_inode(struct inode *dir, mode_t mode,
* For other inodes, search forward from the parent directory's block
* group to find a free inode.
*/
-struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
+struct inode *__ext4_new_inode(struct user_namespace *mnt_userns,
+ handle_t *handle, struct inode *dir,
umode_t mode, const struct qstr *qstr,
__u32 goal, uid_t *owner, __u32 i_flags,
int handle_type, unsigned int line_no,
@@ -969,10 +970,10 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
i_gid_write(inode, owner[1]);
} else if (test_opt(sb, GRPID)) {
inode->i_mode = mode;
- inode->i_uid = current_fsuid();
+ inode->i_uid = fsuid_into_mnt(mnt_userns);
inode->i_gid = dir->i_gid;
} else
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(mnt_userns, inode, dir, mode);
if (ext4_has_feature_project(sb) &&
ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
@@ -1583,7 +1584,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
if (ret < 0)
goto err_out;
if (barrier)
- blkdev_issue_flush(sb->s_bdev, GFP_NOFS);
+ blkdev_issue_flush(sb->s_bdev);
skip_zeroout:
ext4_lock_group(sb, group);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c173c8405856..0948a43f1b3d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -20,6 +20,7 @@
*/
#include <linux/fs.h>
+#include <linux/mount.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
@@ -1937,13 +1938,13 @@ static int __ext4_journalled_writepage(struct page *page,
if (!ret)
ret = err;
- if (!ext4_has_inline_data(inode))
- ext4_walk_page_buffers(NULL, page_bufs, 0, len,
- NULL, bput_one);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
unlock_page(page);
out_no_pagelock:
+ if (!inline_data && page_bufs)
+ ext4_walk_page_buffers(NULL, page_bufs, 0, len,
+ NULL, bput_one);
brelse(inode_bh);
return ret;
}
@@ -4961,15 +4962,11 @@ static void __ext4_update_other_inode_time(struct super_block *sb,
if (!inode)
return;
- if ((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
- I_DIRTY_INODE)) ||
- ((inode->i_state & I_DIRTY_TIME) == 0))
+ if (!inode_is_dirtytime_only(inode))
return;
spin_lock(&inode->i_lock);
- if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
- I_DIRTY_INODE)) == 0) &&
- (inode->i_state & I_DIRTY_TIME)) {
+ if (inode_is_dirtytime_only(inode)) {
struct ext4_inode_info *ei = EXT4_I(inode);
inode->i_state &= ~I_DIRTY_TIME;
@@ -5029,7 +5026,7 @@ static int ext4_do_update_inode(handle_t *handle,
struct ext4_inode_info *ei = EXT4_I(inode);
struct buffer_head *bh = iloc->bh;
struct super_block *sb = inode->i_sb;
- int err = 0, rc, block;
+ int err = 0, block;
int need_datasync = 0, set_large_file = 0;
uid_t i_uid;
gid_t i_gid;
@@ -5141,9 +5138,9 @@ static int ext4_do_update_inode(handle_t *handle,
bh->b_data);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- rc = ext4_handle_dirty_metadata(handle, NULL, bh);
- if (!err)
- err = rc;
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (err)
+ goto out_brelse;
ext4_clear_inode_state(inode, EXT4_STATE_NEW);
if (set_large_file) {
BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
@@ -5319,7 +5316,8 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
*
* Called with inode->i_mutex down.
*/
-int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error, rc = 0;
@@ -5337,7 +5335,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
ATTR_GID | ATTR_TIMES_SET))))
return -EPERM;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(mnt_userns, dentry, attr);
if (error)
return error;
@@ -5389,8 +5387,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
inode->i_gid = attr->ia_gid;
error = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
- if (unlikely(error))
+ if (unlikely(error)) {
+ ext4_fc_stop_update(inode);
return error;
+ }
}
if (attr->ia_valid & ATTR_SIZE) {
@@ -5512,7 +5512,7 @@ out_mmap_sem:
}
if (!error) {
- setattr_copy(inode, attr);
+ setattr_copy(mnt_userns, inode, attr);
mark_inode_dirty(inode);
}
@@ -5524,7 +5524,7 @@ out_mmap_sem:
ext4_orphan_del(NULL, inode);
if (!error && (ia_valid & ATTR_MODE))
- rc = posix_acl_chmod(inode, inode->i_mode);
+ rc = posix_acl_chmod(mnt_userns, inode, inode->i_mode);
err_out:
if (error)
@@ -5535,8 +5535,8 @@ err_out:
return error;
}
-int ext4_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
+int ext4_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct ext4_inode *raw_inode;
@@ -5571,17 +5571,18 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
STATX_ATTR_NODUMP |
STATX_ATTR_VERITY);
- generic_fillattr(inode, stat);
+ generic_fillattr(mnt_userns, inode, stat);
return 0;
}
-int ext4_file_getattr(const struct path *path, struct kstat *stat,
+int ext4_file_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
u64 delalloc_blocks;
- ext4_getattr(path, stat, request_mask, query_flags);
+ ext4_getattr(mnt_userns, path, stat, request_mask, query_flags);
/*
* If there is inline data in the inode, the inode will normally not
@@ -5937,26 +5938,16 @@ out:
* If the inode is marked synchronous, we don't honour that here - doing
* so would cause a commit on atime updates, which we don't bother doing.
* We handle synchronous inodes at the highest possible level.
- *
- * If only the I_DIRTY_TIME flag is set, we can skip everything. If
- * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need
- * to copy into the on-disk inode structure are the timestamp files.
*/
void ext4_dirty_inode(struct inode *inode, int flags)
{
handle_t *handle;
- if (flags == I_DIRTY_TIME)
- return;
handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle))
- goto out;
-
+ return;
ext4_mark_inode_dirty(handle, inode);
-
ext4_journal_stop(handle);
-out:
- return;
}
int ext4_change_inode_journal_flag(struct inode *inode, int val)
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index d9665d2f82db..a2cf35066f46 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -107,10 +107,12 @@ void ext4_reset_inode_seed(struct inode *inode)
* important fields of the inodes.
*
* @sb: the super block of the filesystem
+ * @mnt_userns: user namespace of the mount the inode was found from
* @inode: the inode to swap with EXT4_BOOT_LOADER_INO
*
*/
static long swap_inode_boot_loader(struct super_block *sb,
+ struct user_namespace *mnt_userns,
struct inode *inode)
{
handle_t *handle;
@@ -139,7 +141,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
}
if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
- !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN)) {
+ !inode_owner_or_capable(mnt_userns, inode) ||
+ !capable(CAP_SYS_ADMIN)) {
err = -EPERM;
goto journal_err_out;
}
@@ -814,6 +817,7 @@ static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
struct ext4_inode_info *ei = EXT4_I(inode);
+ struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
unsigned int flags;
ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);
@@ -829,7 +833,7 @@ static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case FS_IOC_SETFLAGS: {
int err;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(mnt_userns, inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
@@ -871,7 +875,7 @@ static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
__u32 generation;
int err;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(mnt_userns, inode))
return -EPERM;
if (ext4_has_metadata_csum(inode->i_sb)) {
@@ -1010,7 +1014,7 @@ mext_out:
case EXT4_IOC_MIGRATE:
{
int err;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(mnt_userns, inode))
return -EACCES;
err = mnt_want_write_file(filp);
@@ -1032,7 +1036,7 @@ mext_out:
case EXT4_IOC_ALLOC_DA_BLKS:
{
int err;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(mnt_userns, inode))
return -EACCES;
err = mnt_want_write_file(filp);
@@ -1051,7 +1055,7 @@ mext_out:
err = mnt_want_write_file(filp);
if (err)
return err;
- err = swap_inode_boot_loader(sb, inode);
+ err = swap_inode_boot_loader(sb, mnt_userns, inode);
mnt_drop_write_file(filp);
return err;
}
@@ -1217,7 +1221,7 @@ resizefs_out:
case EXT4_IOC_CLEAR_ES_CACHE:
{
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(mnt_userns, inode))
return -EACCES;
ext4_clear_inode_es(inode);
return 0;
@@ -1263,7 +1267,7 @@ resizefs_out:
return -EFAULT;
/* Make sure caller has proper permission */
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(mnt_userns, inode))
return -EACCES;
if (fa.fsx_xflags & ~EXT4_SUPPORTED_FS_XFLAGS)
@@ -1309,6 +1313,12 @@ out:
return -EOPNOTSUPP;
return fsverity_ioctl_measure(filp, (void __user *)arg);
+ case FS_IOC_READ_VERITY_METADATA:
+ if (!ext4_has_feature_verity(sb))
+ return -EOPNOTSUPP;
+ return fsverity_ioctl_read_metadata(filp,
+ (const void __user *)arg);
+
default:
return -ENOTTY;
}
@@ -1391,6 +1401,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FS_IOC_GETFSMAP:
case FS_IOC_ENABLE_VERITY:
case FS_IOC_MEASURE_VERITY:
+ case FS_IOC_READ_VERITY_METADATA:
case EXT4_IOC_CLEAR_ES_CACHE:
case EXT4_IOC_GETSTATE:
case EXT4_IOC_GET_ES_CACHE:
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 99bf091fee10..a02fadf4fc84 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2709,8 +2709,15 @@ static int ext4_mb_init_backend(struct super_block *sb)
}
if (ext4_has_feature_flex_bg(sb)) {
- /* a single flex group is supposed to be read by a single IO */
- sbi->s_mb_prefetch = min(1 << sbi->s_es->s_log_groups_per_flex,
+ /* a single flex group is supposed to be read by a single IO.
+ * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
+ * unsigned integer, so the maximum shift is 32.
+ */
+ if (sbi->s_es->s_log_groups_per_flex >= 32) {
+ ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
+ goto err_freesgi;
+ }
+ sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
} else {
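The new bound on s_log_groups_per_flex exists because the prefetch width is computed with a left shift on a 32-bit quantity: shifting by 32 or more is undefined in C, so the on-disk value has to be rejected before it reaches the shift, and min_t(uint, ...) keeps the following comparison in unsigned arithmetic. A sketch of the guard in isolation, with hypothetical variable names (the error path in the real code differs):

    unsigned int log_gpf = sb_log_groups_per_flex;  /* hypothetical local copy of the superblock field */
    unsigned int prefetch;

    if (log_gpf >= 32)                      /* 1 << 32 would be undefined for a 32-bit type */
            return -EFSCORRUPTED;           /* reject the corrupt on-disk value */
    prefetch = min_t(unsigned int, 1U << log_gpf, max_segment_blocks);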
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index cf652ba3e74d..883e2a7cd4ab 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -731,6 +731,29 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
(space/bcount)*100/blocksize);
return (struct stats) { names, space, bcount};
}
+
+/*
+ * Linear search cross check
+ */
+static inline void htree_rep_invariant_check(struct dx_entry *at,
+ struct dx_entry *target,
+ u32 hash, unsigned int n)
+{
+ while (n--) {
+ dxtrace(printk(KERN_CONT ","));
+ if (dx_get_hash(++at) > hash) {
+ at--;
+ break;
+ }
+ }
+ ASSERT(at == target - 1);
+}
+#else /* DX_DEBUG */
+static inline void htree_rep_invariant_check(struct dx_entry *at,
+ struct dx_entry *target,
+ u32 hash, unsigned int n)
+{
+}
#endif /* DX_DEBUG */
/*
@@ -827,20 +850,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
p = m + 1;
}
- if (0) { // linear search cross check
- unsigned n = count - 1;
- at = entries;
- while (n--)
- {
- dxtrace(printk(KERN_CONT ","));
- if (dx_get_hash(++at) > hash)
- {
- at--;
- break;
- }
- }
- ASSERT(at == p - 1);
- }
+ htree_rep_invariant_check(entries, p, hash, count - 1);
at = p - 1;
dxtrace(printk(KERN_CONT " %x->%u\n",
@@ -2401,11 +2411,10 @@ again:
(frame - 1)->bh);
if (err)
goto journal_error;
- if (restart) {
- err = ext4_handle_dirty_dx_node(handle, dir,
- frame->bh);
+ err = ext4_handle_dirty_dx_node(handle, dir,
+ frame->bh);
+ if (err)
goto journal_error;
- }
} else {
struct dx_root *dxroot;
memcpy((char *) entries2, (char *) entries,
@@ -2596,8 +2605,8 @@ static int ext4_add_nondir(handle_t *handle,
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int ext4_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
handle_t *handle;
struct inode *inode;
@@ -2610,8 +2619,8 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
- inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
- NULL, EXT4_HT_DIR, credits);
+ inode = ext4_new_inode_start_handle(mnt_userns, dir, mode, &dentry->d_name,
+ 0, NULL, EXT4_HT_DIR, credits);
handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
@@ -2631,8 +2640,8 @@ retry:
return err;
}
-static int ext4_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
+static int ext4_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
handle_t *handle;
struct inode *inode;
@@ -2645,8 +2654,8 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry,
credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
- inode = ext4_new_inode_start_handle(dir, mode, &dentry->d_name, 0,
- NULL, EXT4_HT_DIR, credits);
+ inode = ext4_new_inode_start_handle(mnt_userns, dir, mode, &dentry->d_name,
+ 0, NULL, EXT4_HT_DIR, credits);
handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
@@ -2665,7 +2674,8 @@ retry:
return err;
}
-static int ext4_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int ext4_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
handle_t *handle;
struct inode *inode;
@@ -2676,7 +2686,7 @@ static int ext4_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
return err;
retry:
- inode = ext4_new_inode_start_handle(dir, mode,
+ inode = ext4_new_inode_start_handle(mnt_userns, dir, mode,
NULL, 0, NULL,
EXT4_HT_DIR,
EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
@@ -2774,7 +2784,8 @@ out:
return err;
}
-static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int ext4_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
handle_t *handle;
struct inode *inode;
@@ -2790,7 +2801,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
retry:
- inode = ext4_new_inode_start_handle(dir, S_IFDIR | mode,
+ inode = ext4_new_inode_start_handle(mnt_userns, dir, S_IFDIR | mode,
&dentry->d_name,
0, NULL, EXT4_HT_DIR, credits);
handle = ext4_journal_current_handle();
@@ -3292,7 +3303,7 @@ out_trace:
return retval;
}
-static int ext4_symlink(struct inode *dir,
+static int ext4_symlink(struct user_namespace *mnt_userns, struct inode *dir,
struct dentry *dentry, const char *symname)
{
handle_t *handle;
@@ -3333,7 +3344,7 @@ static int ext4_symlink(struct inode *dir,
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3;
}
- inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
+ inode = ext4_new_inode_start_handle(mnt_userns, dir, S_IFLNK|S_IRWXUGO,
&dentry->d_name, 0, NULL,
EXT4_HT_DIR, credits);
handle = ext4_journal_current_handle();
@@ -3602,6 +3613,31 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
return retval;
}
+static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
+ unsigned ino, unsigned file_type)
+{
+ struct ext4_renament old = *ent;
+ int retval = 0;
+
+ /*
+ * old->de could have moved from under us during make_indexed_dir(),
+ * so old->de may no longer be valid and we need to look it up again
+ * before resetting the old inode's info.
+ */
+ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+ if (IS_ERR(old.bh))
+ retval = PTR_ERR(old.bh);
+ if (!old.bh)
+ retval = -ENOENT;
+ if (retval) {
+ ext4_std_error(old.dir->i_sb, retval);
+ return;
+ }
+
+ ext4_setent(handle, &old, ino, file_type);
+ brelse(old.bh);
+}
+
static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
const struct qstr *d_name)
{
@@ -3662,7 +3698,8 @@ static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent)
}
}
-static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent,
+static struct inode *ext4_whiteout_for_rename(struct user_namespace *mnt_userns,
+ struct ext4_renament *ent,
int credits, handle_t **h)
{
struct inode *wh;
@@ -3676,7 +3713,8 @@ static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent,
credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) +
EXT4_XATTR_TRANS_BLOCKS + 4);
retry:
- wh = ext4_new_inode_start_handle(ent->dir, S_IFCHR | WHITEOUT_MODE,
+ wh = ext4_new_inode_start_handle(mnt_userns, ent->dir,
+ S_IFCHR | WHITEOUT_MODE,
&ent->dentry->d_name, 0, NULL,
EXT4_HT_DIR, credits);
@@ -3703,9 +3741,9 @@ retry:
* while new_{dentry,inode) refers to the destination dentry/inode
* This comes from rename(const char *oldpath, const char *newpath)
*/
-static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
handle_t *handle = NULL;
struct ext4_renament old = {
@@ -3761,14 +3799,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
retval = -ENOENT;
if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
- goto end_rename;
+ goto release_bh;
new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
retval = PTR_ERR(new.bh);
new.bh = NULL;
- goto end_rename;
+ goto release_bh;
}
if (new.bh) {
if (!new.inode) {
@@ -3785,15 +3823,13 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
- handle = NULL;
- goto end_rename;
+ goto release_bh;
}
} else {
- whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
+ whiteout = ext4_whiteout_for_rename(mnt_userns, &old, credits, &handle);
if (IS_ERR(whiteout)) {
retval = PTR_ERR(whiteout);
- whiteout = NULL;
- goto end_rename;
+ goto release_bh;
}
}
@@ -3837,6 +3873,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
retval = ext4_mark_inode_dirty(handle, whiteout);
if (unlikely(retval))
goto end_rename;
+
}
if (!new.bh) {
retval = ext4_add_entry(handle, new.dentry, old.inode);
@@ -3910,6 +3947,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
ext4_fc_track_unlink(handle, new.dentry);
__ext4_fc_track_link(handle, old.inode, new.dentry);
__ext4_fc_track_unlink(handle, old.inode, old.dentry);
+ if (whiteout)
+ __ext4_fc_track_create(handle, whiteout, old.dentry);
}
if (new.inode) {
@@ -3924,19 +3963,21 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
end_rename:
if (whiteout) {
if (retval) {
- ext4_setent(handle, &old,
- old.inode->i_ino, old_file_type);
+ ext4_resetent(handle, &old,
+ old.inode->i_ino, old_file_type);
drop_nlink(whiteout);
+ ext4_orphan_add(handle, whiteout);
}
unlock_new_inode(whiteout);
+ ext4_journal_stop(handle);
iput(whiteout);
-
+ } else {
+ ext4_journal_stop(handle);
}
+release_bh:
brelse(old.dir_bh);
brelse(old.bh);
brelse(new.bh);
- if (handle)
- ext4_journal_stop(handle);
return retval;
}
@@ -4085,7 +4126,8 @@ end_rename:
return retval;
}
-static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry,
+static int ext4_rename2(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
@@ -4107,7 +4149,7 @@ static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry,
new_dir, new_dentry);
}
- return ext4_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+ return ext4_rename(mnt_userns, old_dir, old_dentry, new_dir, new_dentry, flags);
}
/*
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 03a44a0de86a..f038d578d8d8 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -398,7 +398,7 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
*/
- bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+ bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index f014c5e473a9..3db923403505 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -371,8 +371,7 @@ int ext4_mpage_readpages(struct inode *inode,
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
*/
- bio = bio_alloc(GFP_KERNEL,
- min_t(int, nr_pages, BIO_MAX_PAGES));
+ bio = bio_alloc(GFP_KERNEL, bio_max_segs(nr_pages));
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
GFP_KERNEL);
ext4_set_bio_post_read_ctx(bio, inode, page->index);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9a6f9875aa34..b9693680463a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -59,7 +59,7 @@
#include <trace/events/ext4.h>
static struct ext4_lazy_init *ext4_li_info;
-static struct mutex ext4_li_mtx;
+static DEFINE_MUTEX(ext4_li_mtx);
static struct ratelimit_state ext4_mount_msg_ratelimit;
static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
@@ -1210,6 +1210,7 @@ static void ext4_put_super(struct super_block *sb)
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
percpu_free_rwsem(&sbi->s_writepages_rwsem);
#ifdef CONFIG_QUOTA
for (i = 0; i < EXT4_MAXQUOTAS; i++)
@@ -4875,7 +4876,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
- sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
sbi->s_journal->j_submit_inode_data_buffers =
ext4_journal_submit_inode_data_buffers;
sbi->s_journal->j_finish_inode_data_buffers =
@@ -4987,6 +4987,14 @@ no_journal:
goto failed_mount5;
}
+ /*
+ * We can only set up the journal commit callback once
+ * mballoc is initialized
+ */
+ if (sbi->s_journal)
+ sbi->s_journal->j_commit_callback =
+ ext4_journal_commit_callback;
+
block = ext4_count_free_clusters(sb);
ext4_free_blocks_count_set(sbi->s_es,
EXT4_C2B(sbi, block));
@@ -5005,6 +5013,9 @@ no_journal:
err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
GFP_KERNEL);
if (!err)
+ err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
+ GFP_KERNEL);
+ if (!err)
err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
if (err) {
@@ -5117,6 +5128,7 @@ failed_mount6:
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
percpu_free_rwsem(&sbi->s_writepages_rwsem);
failed_mount5:
ext4_ext_release(sb);
@@ -5142,8 +5154,8 @@ failed_mount_wq:
failed_mount3a:
ext4_es_unregister_shrinker(sbi);
failed_mount3:
- del_timer_sync(&sbi->s_err_report);
flush_work(&sbi->s_error_work);
+ del_timer_sync(&sbi->s_err_report);
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
@@ -5709,7 +5721,7 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
needs_barrier = true;
if (needs_barrier) {
int err;
- err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
+ err = blkdev_issue_flush(sb->s_bdev);
if (!ret)
ret = err;
}
@@ -6654,7 +6666,7 @@ static struct file_system_type ext4_fs_type = {
.name = "ext4",
.mount = ext4_mount,
.kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("ext4");
@@ -6667,7 +6679,6 @@ static int __init ext4_init_fs(void)
ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
ext4_li_info = NULL;
- mutex_init(&ext4_li_mtx);
/* Build-time check for flags consistency */
ext4_check_flag_values();
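Two small cleanups in super.c are worth noting: the lazyinit mutex is now initialized at compile time, so the explicit mutex_init() in ext4_init_fs() goes away, and the error work is flushed before the error-report timer is deleted on the failed-mount path. DEFINE_MUTEX() yields a mutex that is valid from the moment the module is loaded; a minimal sketch of the pattern:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(my_lock);           /* statically initialized, no runtime init needed */

    static void touch_state(void)
    {
            mutex_lock(&my_lock);
            /* ... section protected by my_lock ... */
            mutex_unlock(&my_lock);
    }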
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 075aa3a19ff5..a3d08276d441 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -24,6 +24,7 @@ typedef enum {
attr_session_write_kbytes,
attr_lifetime_write_kbytes,
attr_reserved_clusters,
+ attr_sra_exceeded_retry_limit,
attr_inode_readahead,
attr_trigger_test_error,
attr_first_error_time,
@@ -202,6 +203,7 @@ EXT4_ATTR_FUNC(delayed_allocation_blocks, 0444);
EXT4_ATTR_FUNC(session_write_kbytes, 0444);
EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444);
EXT4_ATTR_FUNC(reserved_clusters, 0644);
+EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
ext4_sb_info, s_inode_readahead_blks);
@@ -251,6 +253,7 @@ static struct attribute *ext4_attrs[] = {
ATTR_LIST(session_write_kbytes),
ATTR_LIST(lifetime_write_kbytes),
ATTR_LIST(reserved_clusters),
+ ATTR_LIST(sra_exceeded_retry_limit),
ATTR_LIST(inode_readahead_blks),
ATTR_LIST(inode_goal),
ATTR_LIST(mb_stats),
@@ -374,6 +377,10 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)
atomic64_read(&sbi->s_resv_clusters));
+ case attr_sra_exceeded_retry_limit:
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)
+ percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
case attr_inode_readahead:
case attr_pointer_ui:
if (!ptr)
diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
index 5b7ba8f71153..00e3cbde472e 100644
--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -201,55 +201,76 @@ static int ext4_end_enable_verity(struct file *filp, const void *desc,
struct inode *inode = file_inode(filp);
const int credits = 2; /* superblock and inode for ext4_orphan_del() */
handle_t *handle;
+ struct ext4_iloc iloc;
int err = 0;
- int err2;
- if (desc != NULL) {
- /* Succeeded; write the verity descriptor. */
- err = ext4_write_verity_descriptor(inode, desc, desc_size,
- merkle_tree_size);
-
- /* Write all pages before clearing VERITY_IN_PROGRESS. */
- if (!err)
- err = filemap_write_and_wait(inode->i_mapping);
- }
+ /*
+ * If an error already occurred (which fs/verity/ signals by passing
+ * desc == NULL), then only clean-up is needed.
+ */
+ if (desc == NULL)
+ goto cleanup;
- /* If we failed, truncate anything we wrote past i_size. */
- if (desc == NULL || err)
- ext4_truncate(inode);
+ /* Append the verity descriptor. */
+ err = ext4_write_verity_descriptor(inode, desc, desc_size,
+ merkle_tree_size);
+ if (err)
+ goto cleanup;
/*
- * We must always clean up by clearing EXT4_STATE_VERITY_IN_PROGRESS and
- * deleting the inode from the orphan list, even if something failed.
- * If everything succeeded, we'll also set the verity bit in the same
- * transaction.
+ * Write all pages (both data and verity metadata). Note that this must
+ * happen before clearing EXT4_STATE_VERITY_IN_PROGRESS; otherwise pages
+ * beyond i_size won't be written properly. For crash consistency, this
+ * also must happen before the verity inode flag gets persisted.
*/
+ err = filemap_write_and_wait(inode->i_mapping);
+ if (err)
+ goto cleanup;
- ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
+ /*
+ * Finally, set the verity inode flag and remove the inode from the
+ * orphan list (in a single transaction).
+ */
handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
if (IS_ERR(handle)) {
- ext4_orphan_del(NULL, inode);
- return PTR_ERR(handle);
+ err = PTR_ERR(handle);
+ goto cleanup;
}
- err2 = ext4_orphan_del(handle, inode);
- if (err2)
- goto out_stop;
+ err = ext4_orphan_del(handle, inode);
+ if (err)
+ goto stop_and_cleanup;
- if (desc != NULL && !err) {
- struct ext4_iloc iloc;
+ err = ext4_reserve_inode_write(handle, inode, &iloc);
+ if (err)
+ goto stop_and_cleanup;
- err = ext4_reserve_inode_write(handle, inode, &iloc);
- if (err)
- goto out_stop;
- ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
- ext4_set_inode_flags(inode, false);
- err = ext4_mark_iloc_dirty(handle, inode, &iloc);
- }
-out_stop:
+ ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
+ ext4_set_inode_flags(inode, false);
+ err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+ if (err)
+ goto stop_and_cleanup;
+
+ ext4_journal_stop(handle);
+
+ ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
+ return 0;
+
+stop_and_cleanup:
ext4_journal_stop(handle);
- return err ?: err2;
+cleanup:
+ /*
+ * Verity failed to be enabled, so clean up by truncating any verity
+ * metadata that was written beyond i_size (both from cache and from
+ * disk), removing the inode from the orphan list (if it wasn't done
+ * already), and clearing EXT4_STATE_VERITY_IN_PROGRESS.
+ */
+ truncate_inode_pages(inode->i_mapping, inode->i_size);
+ ext4_truncate(inode);
+ ext4_orphan_del(NULL, inode);
+ ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
+ return err;
}
static int ext4_get_verity_descriptor_location(struct inode *inode,
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 372208500f4e..6c1018223c54 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1462,6 +1462,9 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
if (!ce)
return NULL;
+ WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
+ !(current->flags & PF_MEMALLOC_NOFS));
+
ea_data = kvmalloc(value_len, GFP_KERNEL);
if (!ea_data) {
mb_cache_entry_put(ea_inode_cache, ce);
@@ -2327,6 +2330,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
error = -ENOSPC;
goto cleanup;
}
+ WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
}
error = ext4_reserve_inode_write(handle, inode, &is.iloc);
@@ -2400,7 +2404,7 @@ retry_inode:
* external inode if possible.
*/
if (ext4_has_feature_ea_inode(inode->i_sb) &&
- !i.in_inode) {
+ i.value_len && !i.in_inode) {
i.in_inode = 1;
goto retry_inode;
}
diff --git a/fs/ext4/xattr_hurd.c b/fs/ext4/xattr_hurd.c
index 8cfa74a56361..c78df5790377 100644
--- a/fs/ext4/xattr_hurd.c
+++ b/fs/ext4/xattr_hurd.c
@@ -32,6 +32,7 @@ ext4_xattr_hurd_get(const struct xattr_handler *handler,
static int
ext4_xattr_hurd_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 197a9d8a15ef..8213f66f7b2d 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -23,6 +23,7 @@ ext4_xattr_security_get(const struct xattr_handler *handler,
static int
ext4_xattr_security_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
index e9389e5d75c3..7c21ffb26d25 100644
--- a/fs/ext4/xattr_trusted.c
+++ b/fs/ext4/xattr_trusted.c
@@ -30,6 +30,7 @@ ext4_xattr_trusted_get(const struct xattr_handler *handler,
static int
ext4_xattr_trusted_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
index d4546184b34b..2fe7ff0a479c 100644
--- a/fs/ext4/xattr_user.c
+++ b/fs/ext4/xattr_user.c
@@ -31,6 +31,7 @@ ext4_xattr_user_get(const struct xattr_handler *handler,
static int
ext4_xattr_user_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index d13c5c6a9787..62e638a49bbf 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -76,16 +76,6 @@ config F2FS_CHECK_FS
If you want to improve the performance, say N.
-config F2FS_IO_TRACE
- bool "F2FS IO tracer"
- depends on F2FS_FS
- depends on FUNCTION_TRACER
- help
- F2FS IO trace is based on a function trace, which gathers process
- information and block IO patterns in the filesystem level.
-
- If unsure, say N.
-
config F2FS_FAULT_INJECTION
bool "F2FS fault injection facility"
depends on F2FS_FS
@@ -119,6 +109,16 @@ config F2FS_FS_LZ4
help
Support LZ4 compress algorithm, if unsure, say Y.
+config F2FS_FS_LZ4HC
+ bool "LZ4HC compression support"
+ depends on F2FS_FS_COMPRESSION
+ depends on F2FS_FS_LZ4
+ select LZ4HC_COMPRESS
+ default y
+ help
+	  Support LZ4HC compress algorithm. LZ4HC has a compatible on-disk
+	  layout with LZ4; if unsure, say Y.
+
config F2FS_FS_ZSTD
bool "ZSTD compression support"
depends on F2FS_FS_COMPRESSION
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
index ee7316b42f69..e5295746208b 100644
--- a/fs/f2fs/Makefile
+++ b/fs/f2fs/Makefile
@@ -7,6 +7,5 @@ f2fs-y += shrinker.o extent_cache.o sysfs.o
f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
-f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
f2fs-$(CONFIG_FS_VERITY) += verity.o
f2fs-$(CONFIG_F2FS_FS_COMPRESSION) += compress.o
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 1e5e9b1136ee..965037a9c205 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -200,6 +200,27 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
return __f2fs_get_acl(inode, type, NULL);
}
+static int f2fs_acl_update_mode(struct inode *inode, umode_t *mode_p,
+ struct posix_acl **acl)
+{
+ umode_t mode = inode->i_mode;
+ int error;
+
+ if (is_inode_flag_set(inode, FI_ACL_MODE))
+ mode = F2FS_I(inode)->i_acl_mode;
+
+ error = posix_acl_equiv_mode(*acl, &mode);
+ if (error < 0)
+ return error;
+ if (error == 0)
+ *acl = NULL;
+ if (!in_group_p(i_gid_into_mnt(&init_user_ns, inode)) &&
+ !capable_wrt_inode_uidgid(&init_user_ns, inode, CAP_FSETID))
+ mode &= ~S_ISGID;
+ *mode_p = mode;
+ return 0;
+}
+
static int __f2fs_set_acl(struct inode *inode, int type,
struct posix_acl *acl, struct page *ipage)
{
@@ -213,7 +234,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl && !ipage) {
- error = posix_acl_update_mode(inode, &mode, &acl);
+ error = f2fs_acl_update_mode(inode, &mode, &acl);
if (error)
return error;
set_acl_inode(inode, mode);
@@ -248,7 +269,8 @@ static int __f2fs_set_acl(struct inode *inode, int type,
return error;
}
-int f2fs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int f2fs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index 124868c13f80..986fd1bc780b 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -34,7 +34,8 @@ struct f2fs_acl_header {
#ifdef CONFIG_F2FS_FS_POSIX_ACL
extern struct posix_acl *f2fs_get_acl(struct inode *, int);
-extern int f2fs_set_acl(struct inode *, struct posix_acl *, int);
+extern int f2fs_set_acl(struct user_namespace *, struct inode *,
+ struct posix_acl *, int);
extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
struct page *);
#else
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 897edb7c951a..be5415a0dbbc 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -13,13 +13,15 @@
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
+#include <linux/kthread.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
-#include "trace.h"
#include <trace/events/f2fs.h>
+#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
+
static struct kmem_cache *ino_entry_slab;
struct kmem_cache *f2fs_inode_entry_slab;
@@ -290,7 +292,7 @@ void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
f2fs_put_page(page, 0);
if (readahead)
- f2fs_ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
+ f2fs_ra_meta_pages(sbi, index, BIO_MAX_VECS, META_POR, true);
}
static int __f2fs_write_meta_page(struct page *page,
@@ -443,7 +445,6 @@ static int f2fs_set_meta_page_dirty(struct page *page)
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
f2fs_set_page_private(page, 0);
- f2fs_trace_pid(page);
return 1;
}
return 0;
@@ -1017,7 +1018,6 @@ void f2fs_update_dirty_page(struct inode *inode, struct page *page)
spin_unlock(&sbi->inode_lock[type]);
f2fs_set_page_private(page, 0);
- f2fs_trace_pid(page);
}
void f2fs_remove_dirty_inode(struct inode *inode)
@@ -1707,3 +1707,174 @@ void f2fs_destroy_checkpoint_caches(void)
kmem_cache_destroy(ino_entry_slab);
kmem_cache_destroy(f2fs_inode_entry_slab);
}
+
+static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
+{
+ struct cp_control cpc = { .reason = CP_SYNC, };
+ int err;
+
+ down_write(&sbi->gc_lock);
+ err = f2fs_write_checkpoint(sbi, &cpc);
+ up_write(&sbi->gc_lock);
+
+ return err;
+}
+
+static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+ struct ckpt_req *req, *next;
+ struct llist_node *dispatch_list;
+ u64 sum_diff = 0, diff, count = 0;
+ int ret;
+
+ dispatch_list = llist_del_all(&cprc->issue_list);
+ if (!dispatch_list)
+ return;
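+	/* llist_del_all() returns entries newest-first; reverse to process requests in FIFO order */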
+ dispatch_list = llist_reverse_order(dispatch_list);
+
+ ret = __write_checkpoint_sync(sbi);
+ atomic_inc(&cprc->issued_ckpt);
+
+ llist_for_each_entry_safe(req, next, dispatch_list, llnode) {
+ diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time);
+ req->ret = ret;
+ complete(&req->wait);
+
+ sum_diff += diff;
+ count++;
+ }
+ atomic_sub(count, &cprc->queued_ckpt);
+ atomic_add(count, &cprc->total_ckpt);
+
+ spin_lock(&cprc->stat_lock);
+ cprc->cur_time = (unsigned int)div64_u64(sum_diff, count);
+ if (cprc->peak_time < cprc->cur_time)
+ cprc->peak_time = cprc->cur_time;
+ spin_unlock(&cprc->stat_lock);
+}
+
+static int issue_checkpoint_thread(void *data)
+{
+ struct f2fs_sb_info *sbi = data;
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+ wait_queue_head_t *q = &cprc->ckpt_wait_queue;
+repeat:
+ if (kthread_should_stop())
+ return 0;
+
+ if (!llist_empty(&cprc->issue_list))
+ __checkpoint_and_complete_reqs(sbi);
+
+ wait_event_interruptible(*q,
+ kthread_should_stop() || !llist_empty(&cprc->issue_list));
+ goto repeat;
+}
+
+static void flush_remained_ckpt_reqs(struct f2fs_sb_info *sbi,
+ struct ckpt_req *wait_req)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+
+ if (!llist_empty(&cprc->issue_list)) {
+ __checkpoint_and_complete_reqs(sbi);
+ } else {
+ /* already dispatched by issue_checkpoint_thread */
+ if (wait_req)
+ wait_for_completion(&wait_req->wait);
+ }
+}
+
+static void init_ckpt_req(struct ckpt_req *req)
+{
+ memset(req, 0, sizeof(struct ckpt_req));
+
+ init_completion(&req->wait);
+ req->queue_time = ktime_get();
+}
+
+int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+ struct ckpt_req req;
+ struct cp_control cpc;
+
+ cpc.reason = __get_cp_reason(sbi);
+ if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
+ int ret;
+
+ down_write(&sbi->gc_lock);
+ ret = f2fs_write_checkpoint(sbi, &cpc);
+ up_write(&sbi->gc_lock);
+
+ return ret;
+ }
+
+ if (!cprc->f2fs_issue_ckpt)
+ return __write_checkpoint_sync(sbi);
+
+ init_ckpt_req(&req);
+
+ llist_add(&req.llnode, &cprc->issue_list);
+ atomic_inc(&cprc->queued_ckpt);
+
+ /* update issue_list before we wake up issue_checkpoint thread */
+ smp_mb();
+
+ if (waitqueue_active(&cprc->ckpt_wait_queue))
+ wake_up(&cprc->ckpt_wait_queue);
+
+ if (cprc->f2fs_issue_ckpt)
+ wait_for_completion(&req.wait);
+ else
+ flush_remained_ckpt_reqs(sbi, &req);
+
+ return req.ret;
+}
+
+int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
+{
+ dev_t dev = sbi->sb->s_bdev->bd_dev;
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+
+ if (cprc->f2fs_issue_ckpt)
+ return 0;
+
+ cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi,
+ "f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev));
+ if (IS_ERR(cprc->f2fs_issue_ckpt)) {
+ cprc->f2fs_issue_ckpt = NULL;
+ return -ENOMEM;
+ }
+
+ set_task_ioprio(cprc->f2fs_issue_ckpt, cprc->ckpt_thread_ioprio);
+
+ return 0;
+}
+
+void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+
+ if (cprc->f2fs_issue_ckpt) {
+ struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt;
+
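+		/* clear the task pointer first so new requests fall back to the synchronous path */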
+ cprc->f2fs_issue_ckpt = NULL;
+ kthread_stop(ckpt_task);
+
+ flush_remained_ckpt_reqs(sbi, NULL);
+ }
+}
+
+void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)
+{
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+
+ atomic_set(&cprc->issued_ckpt, 0);
+ atomic_set(&cprc->total_ckpt, 0);
+ atomic_set(&cprc->queued_ckpt, 0);
+ cprc->ckpt_thread_ioprio = DEFAULT_CHECKPOINT_IOPRIO;
+ init_waitqueue_head(&cprc->ckpt_wait_queue);
+ init_llist_head(&cprc->issue_list);
+ spin_lock_init(&cprc->stat_lock);
+}
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 4bcbacfe3325..77fa342de38f 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -252,8 +252,14 @@ static const struct f2fs_compress_ops f2fs_lzo_ops = {
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
- cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
- LZ4_MEM_COMPRESS, GFP_NOFS);
+ unsigned int size = LZ4_MEM_COMPRESS;
+
+#ifdef CONFIG_F2FS_FS_LZ4HC
+ if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
+ size = LZ4HC_MEM_COMPRESS;
+#endif
+
+ cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
if (!cc->private)
return -ENOMEM;
@@ -272,10 +278,34 @@ static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
cc->private = NULL;
}
+#ifdef CONFIG_F2FS_FS_LZ4HC
+static int lz4hc_compress_pages(struct compress_ctx *cc)
+{
+ unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
+ COMPRESS_LEVEL_OFFSET;
+ int len;
+
+ if (level)
+ len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
+ cc->clen, level, cc->private);
+ else
+ len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
+ cc->clen, cc->private);
+ if (!len)
+ return -EAGAIN;
+
+ cc->clen = len;
+ return 0;
+}
+#endif
+
static int lz4_compress_pages(struct compress_ctx *cc)
{
int len;
+#ifdef CONFIG_F2FS_FS_LZ4HC
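+	/* lz4hc_compress_pages() falls back to plain LZ4 when no compress level is set */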
+ return lz4hc_compress_pages(cc);
+#endif
len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
cc->clen, cc->private);
if (!len)
@@ -325,8 +355,13 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
ZSTD_CStream *stream;
void *workspace;
unsigned int workspace_size;
+ unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
+ COMPRESS_LEVEL_OFFSET;
- params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
+ if (!level)
+ level = F2FS_ZSTD_DEFAULT_CLEVEL;
+
+ params = ZSTD_getParams(level, cc->rlen, 0);
workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
@@ -721,38 +756,27 @@ out:
return ret;
}
-void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
+static void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
- struct decompress_io_ctx *dic =
- (struct decompress_io_ctx *)page_private(page);
struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
- struct f2fs_inode_info *fi= F2FS_I(dic->inode);
+ struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
int ret;
int i;
- dec_page_count(sbi, F2FS_RD_DATA);
-
- if (bio->bi_status || PageError(page))
- dic->failed = true;
-
- if (atomic_dec_return(&dic->pending_pages))
- return;
-
trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
dic->cluster_size, fi->i_compress_algorithm);
- /* submit partial compressed pages */
if (dic->failed) {
ret = -EIO;
- goto out_free_dic;
+ goto out_end_io;
}
dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
if (!dic->tpages) {
ret = -ENOMEM;
- goto out_free_dic;
+ goto out_end_io;
}
for (i = 0; i < dic->cluster_size; i++) {
@@ -764,20 +788,20 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
dic->tpages[i] = f2fs_compress_alloc_page();
if (!dic->tpages[i]) {
ret = -ENOMEM;
- goto out_free_dic;
+ goto out_end_io;
}
}
if (cops->init_decompress_ctx) {
ret = cops->init_decompress_ctx(dic);
if (ret)
- goto out_free_dic;
+ goto out_end_io;
}
dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
if (!dic->rbuf) {
ret = -ENOMEM;
- goto destroy_decompress_ctx;
+ goto out_destroy_decompress_ctx;
}
dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
@@ -816,18 +840,34 @@ out_vunmap_cbuf:
vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
vm_unmap_ram(dic->rbuf, dic->cluster_size);
-destroy_decompress_ctx:
+out_destroy_decompress_ctx:
if (cops->destroy_decompress_ctx)
cops->destroy_decompress_ctx(dic);
-out_free_dic:
- if (!verity)
- f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
- ret, false);
-
+out_end_io:
trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
dic->clen, ret);
- if (!verity)
- f2fs_free_dic(dic);
+ f2fs_decompress_end_io(dic, ret);
+}
+
+/*
+ * This is called when a page of a compressed cluster has been read from disk
+ * (or failed to be read from disk). It checks whether this page was the last
+ * page being waited on in the cluster, and if so, it decompresses the cluster
+ * (or in the case of a failure, cleans up without actually decompressing).
+ */
+void f2fs_end_read_compressed_page(struct page *page, bool failed)
+{
+ struct decompress_io_ctx *dic =
+ (struct decompress_io_ctx *)page_private(page);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+
+ dec_page_count(sbi, F2FS_RD_DATA);
+
+ if (failed)
+ WRITE_ONCE(dic->failed, true);
+
+ if (atomic_dec_and_test(&dic->remaining_pages))
+ f2fs_decompress_cluster(dic);
}
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -1415,7 +1455,7 @@ retry_write:
ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
NULL, NULL, wbc, io_type,
- compr_blocks);
+ compr_blocks, false);
if (ret) {
if (ret == AOP_WRITEPAGE_ACTIVATE) {
unlock_page(cc->rpages[i]);
@@ -1450,6 +1490,9 @@ retry_write:
*submitted += _submitted;
}
+
+ f2fs_balance_fs(F2FS_M_SB(mapping), true);
+
return 0;
out_err:
for (++i; i < cc->cluster_size; i++) {
@@ -1494,6 +1537,8 @@ destroy_out:
return err;
}
+static void f2fs_free_dic(struct decompress_io_ctx *dic);
+
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
struct decompress_io_ctx *dic;
@@ -1512,12 +1557,14 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
dic->inode = cc->inode;
- atomic_set(&dic->pending_pages, cc->nr_cpages);
+ atomic_set(&dic->remaining_pages, cc->nr_cpages);
dic->cluster_idx = cc->cluster_idx;
dic->cluster_size = cc->cluster_size;
dic->log_cluster_size = cc->log_cluster_size;
dic->nr_cpages = cc->nr_cpages;
+ refcount_set(&dic->refcnt, 1);
dic->failed = false;
+ dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
for (i = 0; i < dic->cluster_size; i++)
dic->rpages[i] = cc->rpages[i];
@@ -1546,7 +1593,7 @@ out_free:
return ERR_PTR(-ENOMEM);
}
-void f2fs_free_dic(struct decompress_io_ctx *dic)
+static void f2fs_free_dic(struct decompress_io_ctx *dic)
{
int i;
@@ -1574,30 +1621,88 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
kmem_cache_free(dic_entry_slab, dic);
}
-void f2fs_decompress_end_io(struct page **rpages,
- unsigned int cluster_size, bool err, bool verity)
+static void f2fs_put_dic(struct decompress_io_ctx *dic)
+{
+ if (refcount_dec_and_test(&dic->refcnt))
+ f2fs_free_dic(dic);
+}
+
+/*
+ * Update and unlock the cluster's pagecache pages, and release the reference to
+ * the decompress_io_ctx that was being held for I/O completion.
+ */
+static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
int i;
- for (i = 0; i < cluster_size; i++) {
- struct page *rpage = rpages[i];
+ for (i = 0; i < dic->cluster_size; i++) {
+ struct page *rpage = dic->rpages[i];
if (!rpage)
continue;
- if (err || PageError(rpage))
- goto clear_uptodate;
-
- if (!verity || fsverity_verify_page(rpage)) {
+ /* PG_error was set if verity failed. */
+ if (failed || PageError(rpage)) {
+ ClearPageUptodate(rpage);
+ /* will re-read again later */
+ ClearPageError(rpage);
+ } else {
SetPageUptodate(rpage);
- goto unlock;
}
-clear_uptodate:
- ClearPageUptodate(rpage);
- ClearPageError(rpage);
-unlock:
unlock_page(rpage);
}
+
+ f2fs_put_dic(dic);
+}
+
+static void f2fs_verify_cluster(struct work_struct *work)
+{
+ struct decompress_io_ctx *dic =
+ container_of(work, struct decompress_io_ctx, verity_work);
+ int i;
+
+ /* Verify the cluster's decompressed pages with fs-verity. */
+ for (i = 0; i < dic->cluster_size; i++) {
+ struct page *rpage = dic->rpages[i];
+
+ if (rpage && !fsverity_verify_page(rpage))
+ SetPageError(rpage);
+ }
+
+ __f2fs_decompress_end_io(dic, false);
+}
+
+/*
+ * This is called when a compressed cluster has been decompressed
+ * (or failed to be read and/or decompressed).
+ */
+void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
+{
+ if (!failed && dic->need_verity) {
+ /*
+ * Note that to avoid deadlocks, the verity work can't be done
+ * on the decompression workqueue. This is because verifying
+ * the data pages can involve reading metadata pages from the
+ * file, and these metadata pages may be compressed.
+ */
+ INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
+ fsverity_enqueue_verify_work(&dic->verity_work);
+ } else {
+ __f2fs_decompress_end_io(dic, failed);
+ }
+}
+
+/*
+ * Put a reference to a compressed page's decompress_io_ctx.
+ *
+ * This is called when the page is no longer needed and can be freed.
+ */
+void f2fs_put_page_dic(struct page *page)
+{
+ struct decompress_io_ctx *dic =
+ (struct decompress_io_ctx *)page_private(page);
+
+ f2fs_put_dic(dic);
}
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index aa34d620bec9..4e5257c763d0 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -25,7 +25,6 @@
#include "f2fs.h"
#include "node.h"
#include "segment.h"
-#include "trace.h"
#include <trace/events/f2fs.h>
#define NUM_PREALLOC_POST_READ_CTXS 128
@@ -50,27 +49,6 @@ void f2fs_destroy_bioset(void)
bioset_exit(&f2fs_bioset);
}
-static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
- unsigned int nr_iovecs)
-{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
-}
-
-struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio)
-{
- if (noio) {
- /* No failure on bio allocation */
- return __f2fs_bio_alloc(GFP_NOIO, npages);
- }
-
- if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
- f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
- return NULL;
- }
-
- return __f2fs_bio_alloc(GFP_KERNEL, npages);
-}
-
static bool __is_cp_guaranteed(struct page *page)
{
struct address_space *mapping = page->mapping;
@@ -115,10 +93,21 @@ static enum count_type __read_io_type(struct page *page)
/* postprocessing steps for read bios */
enum bio_post_read_step {
- STEP_DECRYPT,
- STEP_DECOMPRESS_NOWQ, /* handle normal cluster data inplace */
- STEP_DECOMPRESS, /* handle compressed cluster data in workqueue */
- STEP_VERITY,
+#ifdef CONFIG_FS_ENCRYPTION
+ STEP_DECRYPT = 1 << 0,
+#else
+ STEP_DECRYPT = 0, /* compile out the decryption-related code */
+#endif
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ STEP_DECOMPRESS = 1 << 1,
+#else
+ STEP_DECOMPRESS = 0, /* compile out the decompression-related code */
+#endif
+#ifdef CONFIG_FS_VERITY
+ STEP_VERITY = 1 << 2,
+#else
+ STEP_VERITY = 0, /* compile out the verity-related code */
+#endif
};
struct bio_post_read_ctx {
@@ -128,25 +117,26 @@ struct bio_post_read_ctx {
unsigned int enabled_steps;
};
-static void __read_end_io(struct bio *bio, bool compr, bool verity)
+static void f2fs_finish_read_bio(struct bio *bio)
{
- struct page *page;
struct bio_vec *bv;
struct bvec_iter_all iter_all;
+ /*
+ * Update and unlock the bio's pagecache pages, and put the
+ * decompression context for any compressed pages.
+ */
bio_for_each_segment_all(bv, bio, iter_all) {
- page = bv->bv_page;
+ struct page *page = bv->bv_page;
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- if (compr && f2fs_is_compressed_page(page)) {
- f2fs_decompress_pages(bio, page, verity);
+ if (f2fs_is_compressed_page(page)) {
+ if (bio->bi_status)
+ f2fs_end_read_compressed_page(page, true);
+ f2fs_put_page_dic(page);
continue;
}
- if (verity)
- continue;
-#endif
- /* PG_error was set if any post_read step failed */
+ /* PG_error was set if decryption or verity failed. */
if (bio->bi_status || PageError(page)) {
ClearPageUptodate(page);
/* will re-read again later */
@@ -157,181 +147,141 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
dec_page_count(F2FS_P_SB(page), __read_io_type(page));
unlock_page(page);
}
-}
-
-static void f2fs_release_read_bio(struct bio *bio);
-static void __f2fs_read_end_io(struct bio *bio, bool compr, bool verity)
-{
- if (!compr)
- __read_end_io(bio, false, verity);
- f2fs_release_read_bio(bio);
-}
-
-static void f2fs_decompress_bio(struct bio *bio, bool verity)
-{
- __read_end_io(bio, true, verity);
-}
-
-static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
-
-static void f2fs_decrypt_work(struct bio_post_read_ctx *ctx)
-{
- fscrypt_decrypt_bio(ctx->bio);
-}
-
-static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
-{
- f2fs_decompress_bio(ctx->bio, ctx->enabled_steps & (1 << STEP_VERITY));
-}
-
-#ifdef CONFIG_F2FS_FS_COMPRESSION
-static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
-{
- f2fs_decompress_end_io(rpages, cluster_size, false, true);
-}
-
-static void f2fs_verify_bio(struct bio *bio)
-{
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bv, bio, iter_all) {
- struct page *page = bv->bv_page;
- struct decompress_io_ctx *dic;
-
- dic = (struct decompress_io_ctx *)page_private(page);
-
- if (dic) {
- if (atomic_dec_return(&dic->verity_pages))
- continue;
- f2fs_verify_pages(dic->rpages,
- dic->cluster_size);
- f2fs_free_dic(dic);
- continue;
- }
-
- if (bio->bi_status || PageError(page))
- goto clear_uptodate;
-
- if (fsverity_verify_page(page)) {
- SetPageUptodate(page);
- goto unlock;
- }
-clear_uptodate:
- ClearPageUptodate(page);
- ClearPageError(page);
-unlock:
- dec_page_count(F2FS_P_SB(page), __read_io_type(page));
- unlock_page(page);
- }
+ if (bio->bi_private)
+ mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+ bio_put(bio);
}
-#endif
-static void f2fs_verity_work(struct work_struct *work)
+static void f2fs_verify_bio(struct work_struct *work)
{
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
struct bio *bio = ctx->bio;
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- unsigned int enabled_steps = ctx->enabled_steps;
-#endif
+ bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
/*
* fsverity_verify_bio() may call readpages() again, and while verity
- * will be disabled for this, decryption may still be needed, resulting
- * in another bio_post_read_ctx being allocated. So to prevent
- * deadlocks we need to release the current ctx to the mempool first.
- * This assumes that verity is the last post-read step.
+ * will be disabled for this, decryption and/or decompression may still
+ * be needed, resulting in another bio_post_read_ctx being allocated.
+ * So to prevent deadlocks we need to release the current ctx to the
+ * mempool first. This assumes that verity is the last post-read step.
*/
mempool_free(ctx, bio_post_read_ctx_pool);
bio->bi_private = NULL;
-#ifdef CONFIG_F2FS_FS_COMPRESSION
- /* previous step is decompression */
- if (enabled_steps & (1 << STEP_DECOMPRESS)) {
- f2fs_verify_bio(bio);
- f2fs_release_read_bio(bio);
- return;
+ /*
+ * Verify the bio's pages with fs-verity. Exclude compressed pages,
+ * as those were handled separately by f2fs_end_read_compressed_page().
+ */
+ if (may_have_compressed_pages) {
+ struct bio_vec *bv;
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bv, bio, iter_all) {
+ struct page *page = bv->bv_page;
+
+ if (!f2fs_is_compressed_page(page) &&
+ !PageError(page) && !fsverity_verify_page(page))
+ SetPageError(page);
+ }
+ } else {
+ fsverity_verify_bio(bio);
}
-#endif
- fsverity_verify_bio(bio);
- __f2fs_read_end_io(bio, false, false);
+ f2fs_finish_read_bio(bio);
}
-static void f2fs_post_read_work(struct work_struct *work)
+/*
+ * If the bio's data needs to be verified with fs-verity, then enqueue the
+ * verity work for the bio. Otherwise finish the bio now.
+ *
+ * Note that to avoid deadlocks, the verity work can't be done on the
+ * decryption/decompression workqueue. This is because verifying the data pages
+ * can involve reading verity metadata pages from the file, and these verity
+ * metadata pages may be encrypted and/or compressed.
+ */
+static void f2fs_verify_and_finish_bio(struct bio *bio)
{
- struct bio_post_read_ctx *ctx =
- container_of(work, struct bio_post_read_ctx, work);
+ struct bio_post_read_ctx *ctx = bio->bi_private;
- if (ctx->enabled_steps & (1 << STEP_DECRYPT))
- f2fs_decrypt_work(ctx);
-
- if (ctx->enabled_steps & (1 << STEP_DECOMPRESS))
- f2fs_decompress_work(ctx);
-
- if (ctx->enabled_steps & (1 << STEP_VERITY)) {
- INIT_WORK(&ctx->work, f2fs_verity_work);
+ if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
+ INIT_WORK(&ctx->work, f2fs_verify_bio);
fsverity_enqueue_verify_work(&ctx->work);
- return;
+ } else {
+ f2fs_finish_read_bio(bio);
}
-
- __f2fs_read_end_io(ctx->bio,
- ctx->enabled_steps & (1 << STEP_DECOMPRESS), false);
}
-static void f2fs_enqueue_post_read_work(struct f2fs_sb_info *sbi,
- struct work_struct *work)
-{
- queue_work(sbi->post_read_wq, work);
-}
-
-static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
+/*
+ * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
+ * remaining page was read by @ctx->bio.
+ *
+ * Note that a bio may span clusters (even a mix of compressed and uncompressed
+ * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
+ * that the bio includes at least one compressed page. The actual decompression
+ * is done on a per-cluster basis, not a per-bio basis.
+ */
+static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
- /*
- * We use different work queues for decryption and for verity because
- * verity may require reading metadata pages that need decryption, and
- * we shouldn't recurse to the same workqueue.
- */
+ struct bio_vec *bv;
+ struct bvec_iter_all iter_all;
+ bool all_compressed = true;
- if (ctx->enabled_steps & (1 << STEP_DECRYPT) ||
- ctx->enabled_steps & (1 << STEP_DECOMPRESS)) {
- INIT_WORK(&ctx->work, f2fs_post_read_work);
- f2fs_enqueue_post_read_work(ctx->sbi, &ctx->work);
- return;
- }
+ bio_for_each_segment_all(bv, ctx->bio, iter_all) {
+ struct page *page = bv->bv_page;
- if (ctx->enabled_steps & (1 << STEP_VERITY)) {
- INIT_WORK(&ctx->work, f2fs_verity_work);
- fsverity_enqueue_verify_work(&ctx->work);
- return;
+ /* PG_error was set if decryption failed. */
+ if (f2fs_is_compressed_page(page))
+ f2fs_end_read_compressed_page(page, PageError(page));
+ else
+ all_compressed = false;
}
- __f2fs_read_end_io(ctx->bio, false, false);
+ /*
+ * Optimization: if all the bio's pages are compressed, then scheduling
+ * the per-bio verity work is unnecessary, as verity will be fully
+ * handled at the compression cluster level.
+ */
+ if (all_compressed)
+ ctx->enabled_steps &= ~STEP_VERITY;
}
-static bool f2fs_bio_post_read_required(struct bio *bio)
+static void f2fs_post_read_work(struct work_struct *work)
{
- return bio->bi_private;
+ struct bio_post_read_ctx *ctx =
+ container_of(work, struct bio_post_read_ctx, work);
+
+ if (ctx->enabled_steps & STEP_DECRYPT)
+ fscrypt_decrypt_bio(ctx->bio);
+
+ if (ctx->enabled_steps & STEP_DECOMPRESS)
+ f2fs_handle_step_decompress(ctx);
+
+ f2fs_verify_and_finish_bio(ctx->bio);
}
static void f2fs_read_end_io(struct bio *bio)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
+ struct bio_post_read_ctx *ctx = bio->bi_private;
if (time_to_inject(sbi, FAULT_READ_IO)) {
f2fs_show_injection_info(sbi, FAULT_READ_IO);
bio->bi_status = BLK_STS_IOERR;
}
- if (f2fs_bio_post_read_required(bio)) {
- struct bio_post_read_ctx *ctx = bio->bi_private;
-
- bio_post_read_processing(ctx);
+ if (bio->bi_status) {
+ f2fs_finish_read_bio(bio);
return;
}
- __f2fs_read_end_io(bio, false, false);
+ if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
+ INIT_WORK(&ctx->work, f2fs_post_read_work);
+ queue_work(ctx->sbi->post_read_wq, &ctx->work);
+ } else {
+ f2fs_verify_and_finish_bio(bio);
+ }
}
static void f2fs_write_end_io(struct bio *bio)
@@ -427,22 +377,12 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
return 0;
}
-/*
- * Return true, if pre_bio's bdev is same as its target device.
- */
-static bool __same_bdev(struct f2fs_sb_info *sbi,
- block_t blk_addr, struct bio *bio)
-{
- struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
- return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
-}
-
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
struct f2fs_sb_info *sbi = fio->sbi;
struct bio *bio;
- bio = f2fs_bio_alloc(sbi, npages, true);
+ bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);
f2fs_target_device(sbi, fio->new_blkaddr, bio);
if (is_read_io(fio->op)) {
@@ -499,7 +439,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
if (f2fs_lfs_mode(sbi) && current->plug)
blk_finish_plug(current->plug);
- if (F2FS_IO_ALIGNED(sbi))
+ if (!F2FS_IO_ALIGNED(sbi))
goto submit_io;
start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
@@ -707,7 +647,6 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
return -EFSCORRUPTED;
trace_f2fs_submit_page_bio(page, fio);
- f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
bio = __bio_alloc(fio, 1);
@@ -741,7 +680,7 @@ static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
return false;
if (last_blkaddr + 1 != cur_blkaddr)
return false;
- return __same_bdev(sbi, cur_blkaddr, bio);
+ return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}
static bool io_type_is_mergeable(struct f2fs_bio_info *io,
@@ -912,14 +851,13 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
return -EFSCORRUPTED;
trace_f2fs_submit_page_bio(page, fio);
- f2fs_trace_ios(fio, 0);
if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
fio->new_blkaddr))
f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
if (!bio) {
- bio = __bio_alloc(fio, BIO_MAX_PAGES);
+ bio = __bio_alloc(fio, BIO_MAX_VECS);
__attach_io_flag(fio);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
fio->page->index, fio, GFP_NOIO);
@@ -994,7 +932,7 @@ alloc_new:
fio->retry = true;
goto skip;
}
- io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
+ io->bio = __bio_alloc(fio, BIO_MAX_VECS);
f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
bio_page->index, fio, GFP_NOIO);
io->fio = *fio;
@@ -1009,7 +947,6 @@ alloc_new:
wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
io->last_block_in_bio = fio->new_blkaddr;
- f2fs_trace_ios(fio, 0);
trace_f2fs_submit_page_write(fio->page, fio);
skip:
@@ -1022,24 +959,17 @@ out:
up_write(&io->io_rwsem);
}
-static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
-{
- return fsverity_active(inode) &&
- idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
-}
-
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
unsigned nr_pages, unsigned op_flag,
- pgoff_t first_idx, bool for_write,
- bool for_verity)
+ pgoff_t first_idx, bool for_write)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
struct bio_post_read_ctx *ctx;
unsigned int post_read_steps = 0;
- bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES),
- for_write);
+ bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
+ bio_max_segs(nr_pages), &f2fs_bioset);
if (!bio)
return ERR_PTR(-ENOMEM);
@@ -1050,13 +980,19 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
if (fscrypt_inode_uses_fs_layer_crypto(inode))
- post_read_steps |= 1 << STEP_DECRYPT;
- if (f2fs_compressed_file(inode))
- post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
- if (for_verity && f2fs_need_verity(inode, first_idx))
- post_read_steps |= 1 << STEP_VERITY;
+ post_read_steps |= STEP_DECRYPT;
+
+ if (f2fs_need_verity(inode, first_idx))
+ post_read_steps |= STEP_VERITY;
+
+ /*
+ * STEP_DECOMPRESS is handled specially, since a compressed file might
+ * contain both compressed and uncompressed clusters. We'll allocate a
+ * bio_post_read_ctx if the file is compressed, but the caller is
+ * responsible for enabling STEP_DECOMPRESS if it's actually needed.
+ */
- if (post_read_steps) {
+ if (post_read_steps || f2fs_compressed_file(inode)) {
/* Due to the mempool, this never fails. */
ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
ctx->bio = bio;
@@ -1068,13 +1004,6 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
return bio;
}
-static void f2fs_release_read_bio(struct bio *bio)
-{
- if (bio->bi_private)
- mempool_free(bio->bi_private, bio_post_read_ctx_pool);
- bio_put(bio);
-}
-
/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
block_t blkaddr, int op_flags, bool for_write)
@@ -1083,7 +1012,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
struct bio *bio;
bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
- page->index, for_write, true);
+ page->index, for_write);
if (IS_ERR(bio))
return PTR_ERR(bio);
@@ -1964,6 +1893,7 @@ next:
}
if (size) {
+ flags |= FIEMAP_EXTENT_MERGED;
if (IS_ENCRYPTED(inode))
flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
@@ -2121,7 +2051,7 @@ submit_and_realloc:
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
is_readahead ? REQ_RAHEAD : 0, page->index,
- false, true);
+ false);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
bio = NULL;
@@ -2167,8 +2097,6 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
sector_t last_block_in_file;
const unsigned blocksize = blks_to_bytes(inode, 1);
struct decompress_io_ctx *dic = NULL;
- struct bio_post_read_ctx *ctx;
- bool for_verity = false;
int i;
int ret = 0;
@@ -2234,29 +2162,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
goto out_put_dnode;
}
- /*
- * It's possible to enable fsverity on the fly when handling a cluster,
- * which requires complicated error handling. Instead of adding more
- * complexity, let's give a rule where end_io post-processes fsverity
- * per cluster. In order to do that, we need to submit bio, if previous
- * bio sets a different post-process policy.
- */
- if (fsverity_active(cc->inode)) {
- atomic_set(&dic->verity_pages, cc->nr_cpages);
- for_verity = true;
-
- if (bio) {
- ctx = bio->bi_private;
- if (!(ctx->enabled_steps & (1 << STEP_VERITY))) {
- __submit_bio(sbi, bio, DATA);
- bio = NULL;
- }
- }
- }
-
for (i = 0; i < dic->nr_cpages; i++) {
struct page *page = dic->cpages[i];
block_t blkaddr;
+ struct bio_post_read_ctx *ctx;
blkaddr = data_blkaddr(dn.inode, dn.node_page,
dn.ofs_in_node + i + 1);
@@ -2272,31 +2181,10 @@ submit_and_realloc:
if (!bio) {
bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
is_readahead ? REQ_RAHEAD : 0,
- page->index, for_write, for_verity);
+ page->index, for_write);
if (IS_ERR(bio)) {
- unsigned int remained = dic->nr_cpages - i;
- bool release = false;
-
ret = PTR_ERR(bio);
- dic->failed = true;
-
- if (for_verity) {
- if (!atomic_sub_return(remained,
- &dic->verity_pages))
- release = true;
- } else {
- if (!atomic_sub_return(remained,
- &dic->pending_pages))
- release = true;
- }
-
- if (release) {
- f2fs_decompress_end_io(dic->rpages,
- cc->cluster_size, true,
- false);
- f2fs_free_dic(dic);
- }
-
+ f2fs_decompress_end_io(dic, ret);
f2fs_put_dnode(&dn);
*bio_ret = NULL;
return ret;
@@ -2308,10 +2196,9 @@ submit_and_realloc:
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto submit_and_realloc;
- /* tag STEP_DECOMPRESS to handle IO in wq */
ctx = bio->bi_private;
- if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS)))
- ctx->enabled_steps |= 1 << STEP_DECOMPRESS;
+ ctx->enabled_steps |= STEP_DECOMPRESS;
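+		/* each compressed page in the bio holds a dic reference, dropped via f2fs_put_page_dic() */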
+ refcount_inc(&dic->refcnt);
inc_page_count(sbi, F2FS_RD_DATA);
f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
@@ -2328,7 +2215,13 @@ submit_and_realloc:
out_put_dnode:
f2fs_put_dnode(&dn);
out:
- f2fs_decompress_end_io(cc->rpages, cc->cluster_size, true, false);
+ for (i = 0; i < cc->cluster_size; i++) {
+ if (cc->rpages[i]) {
+ ClearPageUptodate(cc->rpages[i]);
+ ClearPageError(cc->rpages[i]);
+ unlock_page(cc->rpages[i]);
+ }
+ }
*bio_ret = bio;
return ret;
}
@@ -2337,11 +2230,6 @@ out:
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
- *
- * Note that the aops->readpages() function is ONLY used for read-ahead. If
- * this function ever deviates from doing just read-ahead, it should either
- * use ->readpage() or do the necessary surgery to decouple ->readpages()
- * from read-ahead.
*/
static int f2fs_mpage_readpages(struct inode *inode,
struct readahead_control *rac, struct page *page)
@@ -2364,7 +2252,6 @@ static int f2fs_mpage_readpages(struct inode *inode,
unsigned nr_pages = rac ? readahead_count(rac) : 1;
unsigned max_nr_pages = nr_pages;
int ret = 0;
- bool drop_ra = false;
map.m_pblk = 0;
map.m_lblk = 0;
@@ -2375,26 +2262,10 @@ static int f2fs_mpage_readpages(struct inode *inode,
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
- /*
- * Two readahead threads for same address range can cause race condition
- * which fragments sequential read IOs. So let's avoid each other.
- */
- if (rac && readahead_count(rac)) {
- if (READ_ONCE(F2FS_I(inode)->ra_offset) == readahead_index(rac))
- drop_ra = true;
- else
- WRITE_ONCE(F2FS_I(inode)->ra_offset,
- readahead_index(rac));
- }
-
for (; nr_pages; nr_pages--) {
if (rac) {
page = readahead_page(rac);
prefetchw(&page->flags);
- if (drop_ra) {
- f2fs_put_page(page, 1);
- continue;
- }
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -2457,9 +2328,6 @@ next_page:
}
if (bio)
__submit_bio(F2FS_I_SB(inode), bio, DATA);
-
- if (rac && readahead_count(rac) && !drop_ra)
- WRITE_ONCE(F2FS_I(inode)->ra_offset, -1);
return ret;
}
@@ -2743,7 +2611,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
sector_t *last_block,
struct writeback_control *wbc,
enum iostat_type io_type,
- int compr_blocks)
+ int compr_blocks,
+ bool allow_balance)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -2881,7 +2750,7 @@ out:
}
unlock_page(page);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
- !F2FS_I(inode)->cp_task)
+ !F2FS_I(inode)->cp_task && allow_balance)
f2fs_balance_fs(sbi, need_balance_fs);
if (unlikely(f2fs_cp_error(sbi))) {
@@ -2928,7 +2797,7 @@ out:
#endif
return f2fs_write_single_data_page(page, NULL, NULL, NULL,
- wbc, FS_DATA_IO, 0);
+ wbc, FS_DATA_IO, 0, true);
}
/*
@@ -3096,7 +2965,8 @@ continue_unlock:
}
#endif
ret = f2fs_write_single_data_page(page, &submitted,
- &bio, &last_block, wbc, io_type, 0);
+ &bio, &last_block, wbc, io_type,
+ 0, true);
if (ret == AOP_WRITEPAGE_ACTIVATE)
unlock_page(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -3831,7 +3701,7 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
filemap_write_and_wait(mapping);
/* Block number less than F2FS MAX BLOCKS */
- if (unlikely(block >= F2FS_I_SB(inode)->max_file_blocks))
+ if (unlikely(block >= max_file_blocks(inode)))
goto out;
if (f2fs_compressed_file(inode)) {
@@ -4108,12 +3978,13 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (!f2fs_disable_compressed_file(inode))
return -EINVAL;
+ f2fs_precache_extents(inode);
+
ret = check_swap_activate(sis, file, span);
if (ret < 0)
return ret;
set_inode_flag(inode, FI_PIN_FILE);
- f2fs_precache_extents(inode);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return ret;
}
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 197c914119da..91855d5721cd 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -120,6 +120,13 @@ static void update_general_status(struct f2fs_sb_info *sbi)
atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
}
+ si->nr_issued_ckpt = atomic_read(&sbi->cprc_info.issued_ckpt);
+ si->nr_total_ckpt = atomic_read(&sbi->cprc_info.total_ckpt);
+ si->nr_queued_ckpt = atomic_read(&sbi->cprc_info.queued_ckpt);
+ spin_lock(&sbi->cprc_info.stat_lock);
+ si->cur_ckpt_time = sbi->cprc_info.cur_time;
+ si->peak_ckpt_time = sbi->cprc_info.peak_time;
+ spin_unlock(&sbi->cprc_info.stat_lock);
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
@@ -417,6 +424,11 @@ static int stat_show(struct seq_file *s, void *v)
si->meta_count[META_NAT]);
seq_printf(s, " - ssa blocks : %u\n",
si->meta_count[META_SSA]);
+ seq_printf(s, "CP merge (Queued: %4d, Issued: %4d, Total: %4d, "
+ "Cur time: %4d(ms), Peak time: %4d(ms))\n",
+ si->nr_queued_ckpt, si->nr_issued_ckpt,
+ si->nr_total_ckpt, si->cur_ckpt_time,
+ si->peak_ckpt_time);
seq_printf(s, "GC calls: %d (BG: %d)\n",
si->call_count, si->bg_gc);
seq_printf(s, " - data segments : %d (%d)\n",
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index bb11759191dc..e2d302ae3a46 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -43,7 +43,6 @@ enum {
FAULT_KVMALLOC,
FAULT_PAGE_ALLOC,
FAULT_PAGE_GET,
- FAULT_ALLOC_BIO,
FAULT_ALLOC_NID,
FAULT_ORPHAN,
FAULT_BLOCK,
@@ -97,6 +96,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
#define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000
#define F2FS_MOUNT_NORECOVERY 0x04000000
#define F2FS_MOUNT_ATGC 0x08000000
+#define F2FS_MOUNT_MERGE_CHECKPOINT 0x10000000
#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -146,6 +146,7 @@ struct f2fs_mount_info {
/* For compression */
unsigned char compress_algorithm; /* algorithm type */
unsigned char compress_log_size; /* cluster log size */
+ unsigned char compress_level; /* compress level */
bool compress_chksum; /* compressed data chksum */
unsigned char compress_ext_cnt; /* extension count */
int compress_mode; /* compression mode */
@@ -266,6 +267,26 @@ struct fsync_node_entry {
unsigned int seq_id; /* sequence id */
};
+struct ckpt_req {
+ struct completion wait; /* completion for checkpoint done */
+ struct llist_node llnode; /* llist_node to be linked in wait queue */
+ int ret; /* return code of checkpoint */
+ ktime_t queue_time; /* request queued time */
+};
+
+struct ckpt_req_control {
+ struct task_struct *f2fs_issue_ckpt; /* checkpoint task */
+ int ckpt_thread_ioprio; /* checkpoint merge thread ioprio */
+ wait_queue_head_t ckpt_wait_queue; /* waiting queue for wake-up */
+ atomic_t issued_ckpt; /* # of actually issued ckpts */
+ atomic_t total_ckpt; /* # of total ckpts */
+ atomic_t queued_ckpt; /* # of queued ckpts */
+ struct llist_head issue_list; /* list for command issue */
+ spinlock_t stat_lock; /* lock for below checkpoint time stats */
+ unsigned int cur_time; /* cur wait time in msec for currently issued checkpoint */
+ unsigned int peak_time; /* peak wait time in msec until now */
+};
+
/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
struct list_head list; /* list head */
@@ -717,7 +738,6 @@ struct f2fs_inode_info {
struct list_head inmem_pages; /* inmemory pages managed by f2fs */
struct task_struct *inmem_task; /* store inmemory task */
struct mutex inmem_lock; /* lock for inmemory pages */
- pgoff_t ra_offset; /* ongoing readahead offset */
struct extent_tree *extent_tree; /* cached extent_tree entry */
/* avoid racing between foreground op and gc */
@@ -735,6 +755,7 @@ struct f2fs_inode_info {
atomic_t i_compr_blocks; /* # of compressed blocks */
unsigned char i_compress_algorithm; /* algorithm type */
unsigned char i_log_cluster_size; /* log of cluster size */
+ unsigned char i_compress_level; /* compress level (lz4hc,zstd) */
unsigned short i_compress_flag; /* compress flag */
unsigned int i_cluster_size; /* cluster size */
};
@@ -1310,6 +1331,8 @@ struct compress_data {
#define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000
+#define COMPRESS_LEVEL_OFFSET 8
+
/* compress context */
struct compress_ctx {
struct inode *inode; /* inode the context belong to */
@@ -1337,7 +1360,7 @@ struct compress_io_ctx {
atomic_t pending_pages; /* in-flight compressed page count */
};
-/* decompress io context for read IO path */
+/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
u32 magic; /* magic number to indicate page is compressed */
struct inode *inode; /* inode the context belong to */
@@ -1353,11 +1376,37 @@ struct decompress_io_ctx {
struct compress_data *cbuf; /* virtual mapped address on cpages */
size_t rlen; /* valid data length in rbuf */
size_t clen; /* valid data length in cbuf */
- atomic_t pending_pages; /* in-flight compressed page count */
- atomic_t verity_pages; /* in-flight page count for verity */
- bool failed; /* indicate IO error during decompression */
+
+ /*
+ * The number of compressed pages remaining to be read in this cluster.
+ * This is initially nr_cpages. It is decremented by 1 each time a page
+ * has been read (or failed to be read). When it reaches 0, the cluster
+ * is decompressed (or an error is reported).
+ *
+ * If an error occurs before all the pages have been submitted for I/O,
+ * then this will never reach 0. In this case the I/O submitter is
+ * responsible for calling f2fs_decompress_end_io() instead.
+ */
+ atomic_t remaining_pages;
+
+ /*
+ * Number of references to this decompress_io_ctx.
+ *
+ * One reference is held for I/O completion. This reference is dropped
+ * after the pagecache pages are updated and unlocked -- either after
+ * decompression (and verity if enabled), or after an error.
+ *
+ * In addition, each compressed page holds a reference while it is in a
+	 * bio.  These references are necessary to prevent compressed pages from
+ * being freed while they are still in a bio.
+ */
+ refcount_t refcnt;
+
+ bool failed; /* IO error occurred before decompression? */
+ bool need_verity; /* need fs-verity verification after decompression? */
void *private; /* payload buffer for specified decompression algorithm */
void *private2; /* extra payload buffer */
+ struct work_struct verity_work; /* work to verify the decompressed pages */
};
#define NULL_CLUSTER ((unsigned int)(~0))
@@ -1404,6 +1453,7 @@ struct f2fs_sb_info {
wait_queue_head_t cp_wait;
unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
long interval_time[MAX_TIME]; /* to store thresholds */
+ struct ckpt_req_control cprc_info; /* for checkpoint request control */
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
@@ -1444,7 +1494,6 @@ struct f2fs_sb_info {
unsigned int total_sections; /* total section count */
unsigned int total_node_count; /* total node block count */
unsigned int total_valid_node_count; /* valid node block count */
- loff_t max_file_blocks; /* max block index of file */
int dir_level; /* directory level */
int readdir_ra; /* readahead inode in readdir */
u64 max_io_bytes; /* max io bytes to merge IOs */
@@ -1541,9 +1590,12 @@ struct f2fs_sb_info {
unsigned int node_io_flag;
/* For sysfs suppport */
- struct kobject s_kobj;
+ struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */
struct completion s_kobj_unregister;
+ struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */
+ struct completion s_stat_kobj_unregister;
+
/* For shrinker support */
struct list_head s_list;
int s_ndevs; /* number of devices */
@@ -3135,9 +3187,10 @@ void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
-int f2fs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags);
-int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
+int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags);
+int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
@@ -3232,6 +3285,7 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
+loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
@@ -3418,13 +3472,16 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
+int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
+int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
+void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
+void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
/*
* data.c
*/
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
-struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
@@ -3469,7 +3526,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
struct bio **bio, sector_t *last_block,
struct writeback_control *wbc,
enum iostat_type io_type,
- int compr_blocks);
+ int compr_blocks, bool allow_balance);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
@@ -3530,6 +3587,8 @@ struct f2fs_stat_info {
int nr_discarding, nr_discarded;
int nr_discard_cmd;
unsigned int undiscard_blks;
+ int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
+ unsigned int cur_ckpt_time, peak_ckpt_time;
int inline_xattr, inline_inode, inline_dir, append, update, orphans;
int compr_inode;
unsigned long long compr_blocks;
@@ -3715,8 +3774,6 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#define stat_dec_compr_inode(inode) do { } while (0)
#define stat_add_compr_blocks(inode, blocks) do { } while (0)
#define stat_sub_compr_blocks(inode, blocks) do { } while (0)
-#define stat_inc_atomic_write(inode) do { } while (0)
-#define stat_dec_atomic_write(inode) do { } while (0)
#define stat_update_max_atomic_write(inode) do { } while (0)
#define stat_inc_volatile_write(inode) do { } while (0)
#define stat_dec_volatile_write(inode) do { } while (0)
@@ -3876,7 +3933,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
-void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
+void f2fs_end_read_compressed_page(struct page *page, bool failed);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
@@ -3889,9 +3946,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio,
bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
-void f2fs_free_dic(struct decompress_io_ctx *dic);
-void f2fs_decompress_end_io(struct page **rpages,
- unsigned int cluster_size, bool err, bool verity);
+void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
+void f2fs_put_page_dic(struct page *page);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
@@ -3915,6 +3971,14 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
+static inline void f2fs_end_read_compressed_page(struct page *page, bool failed)
+{
+ WARN_ON_ONCE(1);
+}
+static inline void f2fs_put_page_dic(struct page *page)
+{
+ WARN_ON_ONCE(1);
+}
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
@@ -3934,6 +3998,11 @@ static inline void set_compress_context(struct inode *inode)
1 << COMPRESS_CHKSUM : 0;
F2FS_I(inode)->i_cluster_size =
1 << F2FS_I(inode)->i_log_cluster_size;
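+	/* record the LZ4HC compress level in the upper bits of i_compress_flag */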
+ if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
+ F2FS_OPTION(sbi).compress_level)
+ F2FS_I(inode)->i_compress_flag |=
+ F2FS_OPTION(sbi).compress_level <<
+ COMPRESS_LEVEL_OFFSET;
F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
set_inode_flag(inode, FI_COMPRESSED_FILE);
stat_inc_compr_inode(inode);
@@ -4114,6 +4183,12 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
return false;
}
+static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
+{
+ return fsverity_active(inode) &&
+ idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
+}
+
#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
unsigned int type);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f585545277d7..d26ff2ae3f5e 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -29,7 +29,6 @@
#include "xattr.h"
#include "acl.h"
#include "gc.h"
-#include "trace.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>
@@ -60,6 +59,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
bool need_alloc = true;
int err = 0;
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return VM_FAULT_SIGBUS;
+
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
goto err;
@@ -70,6 +72,10 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
goto err;
}
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ goto err;
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
int ret = f2fs_is_compressed_cluster(inode, page->index);
@@ -366,7 +372,6 @@ flush_out:
f2fs_update_time(sbi, REQ_TIME);
out:
trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
- f2fs_trace_ios(NULL, 1);
return ret;
}
@@ -483,6 +488,9 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
struct inode *inode = file->f_mapping->host;
loff_t maxbytes = inode->i_sb->s_maxbytes;
+ if (f2fs_compressed_file(inode))
+ maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
+
switch (whence) {
case SEEK_SET:
case SEEK_CUR:
@@ -502,7 +510,6 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
- int err;
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
@@ -510,11 +517,6 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
- /* we don't need to use inline_data strictly */
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
-
file_accessed(file);
vma->vm_ops = &f2fs_file_vm_ops;
set_inode_flag(inode, FI_MMAP_FILE);
@@ -667,7 +669,7 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
- if (free_from >= sbi->max_file_blocks)
+ if (free_from >= max_file_blocks(inode))
goto free_partial;
if (lock)
@@ -767,6 +769,10 @@ int f2fs_truncate(struct inode *inode)
return -EIO;
}
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
+
/* we should check inline_data size */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
@@ -783,8 +789,8 @@ int f2fs_truncate(struct inode *inode)
return 0;
}
-int f2fs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
+int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -820,7 +826,7 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
STATX_ATTR_NODUMP |
STATX_ATTR_VERITY);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
/* we need to show initial sectors used for inline_data/dentries */
if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
@@ -831,7 +837,8 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
-static void __setattr_copy(struct inode *inode, const struct iattr *attr)
+static void __setattr_copy(struct user_namespace *mnt_userns,
+ struct inode *inode, const struct iattr *attr)
{
unsigned int ia_valid = attr->ia_valid;
@@ -847,8 +854,9 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
inode->i_ctime = attr->ia_ctime;
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
+ kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+ if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
mode &= ~S_ISGID;
set_acl_inode(inode, mode);
}
@@ -857,7 +865,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
#define __setattr_copy setattr_copy
#endif
-int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
+int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int err;
@@ -865,11 +874,19 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ if (unlikely(IS_APPEND(inode) &&
+ (attr->ia_valid & (ATTR_MODE | ATTR_UID |
+ ATTR_GID | ATTR_TIMES_SET))))
+ return -EPERM;
+
if ((attr->ia_valid & ATTR_SIZE) &&
!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
- err = setattr_prepare(dentry, attr);
+ err = setattr_prepare(&init_user_ns, dentry, attr);
if (err)
return err;
@@ -945,12 +962,14 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
spin_unlock(&F2FS_I(inode)->i_size_lock);
}
- __setattr_copy(inode, attr);
+ __setattr_copy(&init_user_ns, inode, attr);
if (attr->ia_valid & ATTR_MODE) {
- err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
- if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
- inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ err = posix_acl_chmod(&init_user_ns, inode, f2fs_get_inode_mode(inode));
+
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+ if (!err)
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
clear_inode_flag(inode, FI_ACL_MODE);
}
}
@@ -1961,7 +1980,7 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
u32 iflags;
int ret;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
if (get_user(fsflags, (int __user *)arg))
@@ -2008,7 +2027,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int ret;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
if (!S_ISREG(inode->i_mode))
@@ -2075,7 +2094,7 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
struct inode *inode = file_inode(filp);
int ret;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
ret = mnt_want_write_file(filp);
@@ -2117,7 +2136,7 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
struct inode *inode = file_inode(filp);
int ret;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
if (!S_ISREG(inode->i_mode))
@@ -2152,7 +2171,7 @@ static int f2fs_ioc_release_volatile_write(struct file *filp)
struct inode *inode = file_inode(filp);
int ret;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
ret = mnt_want_write_file(filp);
@@ -2181,7 +2200,7 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
struct inode *inode = file_inode(filp);
int ret;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
ret = mnt_want_write_file(filp);
@@ -2730,7 +2749,7 @@ static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
return -EINVAL;
if (unlikely((range.start + range.len) >> PAGE_SHIFT >
- sbi->max_file_blocks))
+ max_file_blocks(inode)))
return -EINVAL;
err = mnt_want_write_file(filp);
@@ -3158,7 +3177,7 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
return -EFAULT;
/* Make sure caller has proper permission */
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
@@ -3293,7 +3312,7 @@ int f2fs_precache_extents(struct inode *inode)
map.m_next_extent = &m_next_extent;
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
- end = F2FS_I_SB(inode)->max_file_blocks;
+ end = max_file_blocks(inode);
while (map.m_lblk < end) {
map.m_len = end - map.m_lblk;
@@ -3357,6 +3376,14 @@ static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
return fsverity_ioctl_measure(filp, (void __user *)arg);
}
+static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
+{
+ if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
+ return -EOPNOTSUPP;
+
+ return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
+}
+
static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -4043,8 +4070,10 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
for (i = 0; i < page_len; i++, redirty_idx++) {
page = find_lock_page(mapping, redirty_idx);
- if (!page)
- ret = -ENOENT;
+ if (!page) {
+ ret = -ENOMEM;
+ break;
+ }
set_page_dirty(page);
f2fs_put_page(page, 1);
f2fs_put_page(page, 0);
@@ -4272,6 +4301,8 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_enable_verity(filp, arg);
case FS_IOC_MEASURE_VERITY:
return f2fs_ioc_measure_verity(filp, arg);
+ case FS_IOC_READ_VERITY_METADATA:
+ return f2fs_ioc_read_verity_metadata(filp, arg);
case FS_IOC_GETFSLABEL:
return f2fs_ioc_getfslabel(filp, arg);
case FS_IOC_SETFSLABEL:
@@ -4349,6 +4380,11 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
inode_lock(inode);
}
+ if (unlikely(IS_IMMUTABLE(inode))) {
+ ret = -EPERM;
+ goto unlock;
+ }
+
ret = generic_write_checks(iocb, from);
if (ret > 0) {
bool preallocated = false;
@@ -4413,6 +4449,7 @@ write:
if (ret > 0)
f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
}
+unlock:
inode_unlock(inode);
out:
trace_f2fs_file_write_iter(inode, iocb->ki_pos,
@@ -4523,6 +4560,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_RESIZE_FS:
case FS_IOC_ENABLE_VERITY:
case FS_IOC_MEASURE_VERITY:
+ case FS_IOC_READ_VERITY_METADATA:
case FS_IOC_GETFSLABEL:
case FS_IOC_SETFSLABEL:
case F2FS_IOC_GET_COMPRESS_BLOCKS:
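
The hunks above expose fs-verity metadata reads through the new FS_IOC_READ_VERITY_METADATA ioctl and wire it into the compat-ioctl table. A hedged user-space sketch follows, not part of the patch: the file path is hypothetical, and the struct and macro names are assumed to match the <linux/fsverity.h> UAPI header that accompanies this ioctl.

/* sketch: read the fs-verity descriptor of a verity-enabled f2fs file */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>

int main(void)
{
	char buf[4096];
	struct fsverity_read_metadata_arg arg;
	int fd, n;

	fd = open("/mnt/f2fs/file", O_RDONLY);	/* hypothetical path */
	if (fd < 0)
		return 1;

	memset(&arg, 0, sizeof(arg));
	arg.metadata_type = FS_VERITY_METADATA_TYPE_DESCRIPTOR;
	arg.offset = 0;
	arg.length = sizeof(buf);
	arg.buf_ptr = (__u64)(unsigned long)buf;

	n = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &arg);
	if (n < 0)
		perror("FS_IOC_READ_VERITY_METADATA");
	else
		printf("read %d bytes of verity metadata\n", n);
	return 0;
}

The ioctl returns the number of bytes copied, so a caller can loop with an advancing offset until it returns 0.
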
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 3ef84e6ded41..39330ad3c44e 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1169,8 +1169,6 @@ static int move_data_block(struct inode *inode, block_t bidx,
if (err)
goto put_out;
- set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
-
/* read page */
fio.page = page;
fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
@@ -1207,6 +1205,9 @@ static int move_data_block(struct inode *inode, block_t bidx,
}
}
+ set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
+
+ /* allocate block address */
f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
&sum, type, NULL);
@@ -1233,9 +1234,6 @@ static int move_data_block(struct inode *inode, block_t bidx,
set_page_writeback(fio.encrypted_page);
ClearPageError(page);
- /* allocate block address */
- f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
-
fio.op = REQ_OP_WRITE;
fio.op_flags = REQ_SYNC;
fio.new_blkaddr = newaddr;
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 806ebabf5870..993caefcd2bb 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -192,6 +192,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
return 0;
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
+
page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
if (!page)
return -ENOMEM;
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 6edb1ab579a1..17bd072a5d39 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -46,7 +46,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
nid_free = true;
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
inode->i_ino = ino;
inode->i_blocks = 0;
@@ -314,8 +314,8 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
}
}
-static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int f2fs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
@@ -637,8 +637,8 @@ static const char *f2fs_get_link(struct dentry *dentry,
return link;
}
-static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
+static int f2fs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
@@ -717,7 +717,8 @@ out_free_encrypted_link:
return err;
}
-static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int f2fs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
@@ -770,8 +771,8 @@ static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
return -ENOTEMPTY;
}
-static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
+static int f2fs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
@@ -855,7 +856,11 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
if (whiteout) {
f2fs_i_links_write(inode, false);
+
+ spin_lock(&inode->i_lock);
inode->i_state |= I_LINKABLE;
+ spin_unlock(&inode->i_lock);
+
*whiteout = inode;
} else {
d_tmpfile(dentry, inode);
@@ -874,7 +879,8 @@ out:
return err;
}
-static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int f2fs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
@@ -1041,7 +1047,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
err = f2fs_add_link(old_dentry, whiteout);
if (err)
goto put_out_dir;
+
+ spin_lock(&whiteout->i_lock);
whiteout->i_state &= ~I_LINKABLE;
+ spin_unlock(&whiteout->i_lock);
+
iput(whiteout);
}
@@ -1247,7 +1257,8 @@ out:
return err;
}
-static int f2fs_rename2(struct inode *old_dir, struct dentry *old_dentry,
+static int f2fs_rename2(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3a24423ac65f..4b0e2e3c2c88 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -17,7 +17,6 @@
#include "node.h"
#include "segment.h"
#include "xattr.h"
-#include "trace.h"
#include <trace/events/f2fs.h>
#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
@@ -2089,7 +2088,6 @@ static int f2fs_set_node_page_dirty(struct page *page)
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
f2fs_set_page_private(page, 0);
- f2fs_trace_pid(page);
return 1;
}
return 0;
@@ -2696,7 +2694,7 @@ retry:
src = F2FS_INODE(page);
dst = F2FS_INODE(ipage);
- memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
+ memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
dst->i_size = 0;
dst->i_blocks = cpu_to_le64(1);
dst->i_links = cpu_to_le32(1);
@@ -2749,7 +2747,7 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
sum_entry = &sum->entries[0];
for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
- nrpages = min(last_offset - i, BIO_MAX_PAGES);
+ nrpages = bio_max_segs(last_offset - i);
/* readahead node pages */
f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index deca74cb17df..c2866561263e 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -20,7 +20,6 @@
#include "segment.h"
#include "node.h"
#include "gc.h"
-#include "trace.h"
#include <trace/events/f2fs.h>
#define __reverse_ffz(x) __reverse_ffs(~(x))
@@ -187,8 +186,6 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
{
struct inmem_pages *new;
- f2fs_trace_pid(page);
-
f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
@@ -566,17 +563,7 @@ do_sync:
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
struct block_device *bdev)
{
- struct bio *bio;
- int ret;
-
- bio = f2fs_bio_alloc(sbi, 0, false);
- if (!bio)
- return -ENOMEM;
-
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
- bio_set_dev(bio, bdev);
- ret = submit_bio_wait(bio);
- bio_put(bio);
+ int ret = blkdev_issue_flush(bdev);
trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
test_opt(sbi, FLUSH_MERGE), ret);
@@ -610,8 +597,6 @@ repeat:
if (kthread_should_stop())
return 0;
- sb_start_intwrite(sbi->sb);
-
if (!llist_empty(&fcc->issue_list)) {
struct flush_cmd *cmd, *next;
int ret;
@@ -632,8 +617,6 @@ repeat:
fcc->dispatch_list = NULL;
}
- sb_end_intwrite(sbi->sb);
-
wait_event_interruptible(*q,
kthread_should_stop() || !llist_empty(&fcc->issue_list));
goto repeat;
@@ -4398,7 +4381,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
block_t total_node_blocks = 0;
do {
- readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
+ readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
META_SIT, true);
start = start_blk * sit_i->sents_per_block;
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index e81eb0748e2a..e9a7a637d688 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -101,11 +101,11 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
#define BLKS_PER_SEC(sbi) \
((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define GET_SEC_FROM_SEG(sbi, segno) \
- ((segno) / (sbi)->segs_per_sec)
+ (((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno) \
((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno) \
- ((secno) / (sbi)->secs_per_zone)
+ (((secno) == -1) ? -1: (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno) \
GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
@@ -851,7 +851,7 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
else if (type == NODE)
return 8 * sbi->blocks_per_seg;
else if (type == META)
- return 8 * BIO_MAX_PAGES;
+ return 8 * BIO_MAX_VECS;
else
return 0;
}
@@ -868,7 +868,7 @@ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
return 0;
nr_to_write = wbc->nr_to_write;
- desired = BIO_MAX_PAGES;
+ desired = BIO_MAX_VECS;
if (type == NODE)
desired <<= 1;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index b4a07fe62d1a..82592b19b4e0 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -25,13 +25,14 @@
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
+#include <linux/zstd.h>
+#include <linux/lz4.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
-#include "trace.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
@@ -45,7 +46,6 @@ const char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
[FAULT_PAGE_GET] = "page get",
- [FAULT_ALLOC_BIO] = "alloc bio",
[FAULT_ALLOC_NID] = "alloc nid",
[FAULT_ORPHAN] = "orphan",
[FAULT_BLOCK] = "no more block",
@@ -143,6 +143,8 @@ enum {
Opt_checkpoint_disable_cap,
Opt_checkpoint_disable_cap_perc,
Opt_checkpoint_enable,
+ Opt_checkpoint_merge,
+ Opt_nocheckpoint_merge,
Opt_compress_algorithm,
Opt_compress_log_size,
Opt_compress_extension,
@@ -213,6 +215,8 @@ static match_table_t f2fs_tokens = {
{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
{Opt_checkpoint_enable, "checkpoint=enable"},
+ {Opt_checkpoint_merge, "checkpoint_merge"},
+ {Opt_nocheckpoint_merge, "nocheckpoint_merge"},
{Opt_compress_algorithm, "compress_algorithm=%s"},
{Opt_compress_log_size, "compress_log_size=%u"},
{Opt_compress_extension, "compress_extension=%s"},
@@ -464,6 +468,74 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
return 0;
}
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+#ifdef CONFIG_F2FS_FS_LZ4
+static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+{
+#ifdef CONFIG_F2FS_FS_LZ4HC
+ unsigned int level;
+#endif
+
+ if (strlen(str) == 3) {
+ F2FS_OPTION(sbi).compress_level = 0;
+ return 0;
+ }
+
+#ifdef CONFIG_F2FS_FS_LZ4HC
+ str += 3;
+
+ if (str[0] != ':') {
+ f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+ return -EINVAL;
+ }
+ if (kstrtouint(str + 1, 10, &level))
+ return -EINVAL;
+
+ if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
+ f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
+ return -EINVAL;
+ }
+
+ F2FS_OPTION(sbi).compress_level = level;
+ return 0;
+#else
+ f2fs_info(sbi, "kernel doesn't support lz4hc compression");
+ return -EINVAL;
+#endif
+}
+#endif
+
+#ifdef CONFIG_F2FS_FS_ZSTD
+static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+{
+ unsigned int level;
+ int len = 4;
+
+ if (strlen(str) == len) {
+ F2FS_OPTION(sbi).compress_level = 0;
+ return 0;
+ }
+
+ str += len;
+
+ if (str[0] != ':') {
+ f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+ return -EINVAL;
+ }
+ if (kstrtouint(str + 1, 10, &level))
+ return -EINVAL;
+
+ if (!level || level > ZSTD_maxCLevel()) {
+ f2fs_info(sbi, "invalid zstd compress level: %d", level);
+ return -EINVAL;
+ }
+
+ F2FS_OPTION(sbi).compress_level = level;
+ return 0;
+}
+#endif
+#endif
+
static int parse_options(struct super_block *sb, char *options, bool is_remount)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -681,9 +753,9 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
case Opt_io_size_bits:
if (args->from && match_int(args, &arg))
return -EINVAL;
- if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
+ if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
f2fs_warn(sbi, "Not support %d, larger than %d",
- 1 << arg, BIO_MAX_PAGES);
+ 1 << arg, BIO_MAX_VECS);
return -EINVAL;
}
F2FS_OPTION(sbi).write_io_size_bits = arg;
@@ -872,6 +944,12 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
case Opt_checkpoint_enable:
clear_opt(sbi, DISABLE_CHECKPOINT);
break;
+ case Opt_checkpoint_merge:
+ set_opt(sbi, MERGE_CHECKPOINT);
+ break;
+ case Opt_nocheckpoint_merge:
+ clear_opt(sbi, MERGE_CHECKPOINT);
+ break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
case Opt_compress_algorithm:
if (!f2fs_sb_has_compression(sbi)) {
@@ -882,17 +960,45 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!name)
return -ENOMEM;
if (!strcmp(name, "lzo")) {
+#ifdef CONFIG_F2FS_FS_LZO
+ F2FS_OPTION(sbi).compress_level = 0;
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_LZO;
- } else if (!strcmp(name, "lz4")) {
+#else
+ f2fs_info(sbi, "kernel doesn't support lzo compression");
+#endif
+ } else if (!strncmp(name, "lz4", 3)) {
+#ifdef CONFIG_F2FS_FS_LZ4
+ ret = f2fs_set_lz4hc_level(sbi, name);
+ if (ret) {
+ kfree(name);
+ return -EINVAL;
+ }
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_LZ4;
- } else if (!strcmp(name, "zstd")) {
+#else
+ f2fs_info(sbi, "kernel doesn't support lz4 compression");
+#endif
+ } else if (!strncmp(name, "zstd", 4)) {
+#ifdef CONFIG_F2FS_FS_ZSTD
+ ret = f2fs_set_zstd_level(sbi, name);
+ if (ret) {
+ kfree(name);
+ return -EINVAL;
+ }
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_ZSTD;
+#else
+ f2fs_info(sbi, "kernel doesn't support zstd compression");
+#endif
} else if (!strcmp(name, "lzo-rle")) {
+#ifdef CONFIG_F2FS_FS_LZORLE
+ F2FS_OPTION(sbi).compress_level = 0;
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_LZORLE;
+#else
+ f2fs_info(sbi, "kernel doesn't support lzorle compression");
+#endif
} else {
kfree(name);
return -EINVAL;
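
Taken together, the parsing above accepts an optional per-algorithm level (for example "lz4:6" or "zstd:10") in addition to the plain algorithm names, and the earlier hunk adds the checkpoint_merge/nocheckpoint_merge toggles. A minimal mount(2) sketch using both follows; it is not part of the patch, the device and mount point are hypothetical, and the level syntax only takes effect with CONFIG_F2FS_FS_LZ4HC or CONFIG_F2FS_FS_ZSTD built in.

/* sketch: mount with an LZ4HC level and the merged-checkpoint thread */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/dev/vdb", "/mnt/f2fs", "f2fs", 0,
		  "compress_algorithm=lz4:6,checkpoint_merge"))
		perror("mount");
	return 0;
}
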
@@ -1076,8 +1182,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
- fi->ra_offset = -1;
-
return &fi->vfs_inode;
}
@@ -1196,9 +1300,6 @@ static void f2fs_dirty_inode(struct inode *inode, int flags)
inode->i_ino == F2FS_META_INO(sbi))
return;
- if (flags == I_DIRTY_TIME)
- return;
-
if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
clear_inode_flag(inode, FI_AUTO_RECOVER);
@@ -1246,6 +1347,12 @@ static void f2fs_put_super(struct super_block *sb)
mutex_lock(&sbi->umount_mutex);
/*
+ * Flush all issued checkpoints and stop the checkpoint issue thread.
+ * After that, any further checkpoint is done by each process context.
+ */
+ f2fs_stop_ckpt_thread(sbi);
+
+ /*
* We don't need to do checkpoint when superblock is clean.
* But, the previous checkpoint was not done by umount, it needs to do
* clean checkpoint again.
@@ -1343,16 +1450,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return -EAGAIN;
- if (sync) {
- struct cp_control cpc;
-
- cpc.reason = __get_cp_reason(sbi);
-
- down_write(&sbi->gc_lock);
- err = f2fs_write_checkpoint(sbi, &cpc);
- up_write(&sbi->gc_lock);
- }
- f2fs_trace_ios(NULL, 1);
+ if (sync)
+ err = f2fs_issue_checkpoint(sbi);
return err;
}
@@ -1369,6 +1468,10 @@ static int f2fs_freeze(struct super_block *sb)
/* must be clean, since sync_filesystem() was already called */
if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
return -EINVAL;
+
+ /* ensure no checkpoint required */
+ if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
+ return -EINVAL;
return 0;
}
@@ -1539,6 +1642,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
}
seq_printf(seq, ",compress_algorithm=%s", algtype);
+ if (F2FS_OPTION(sbi).compress_level)
+ seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
+
seq_printf(seq, ",compress_log_size=%u",
F2FS_OPTION(sbi).compress_log_size);
@@ -1674,6 +1780,10 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
if (test_opt(sbi, DISABLE_CHECKPOINT))
seq_printf(seq, ",checkpoint=disable:%u",
F2FS_OPTION(sbi).unusable_cap);
+ if (test_opt(sbi, MERGE_CHECKPOINT))
+ seq_puts(seq, ",checkpoint_merge");
+ else
+ seq_puts(seq, ",nocheckpoint_merge");
if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
seq_printf(seq, ",fsync_mode=%s", "posix");
else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
@@ -1796,6 +1906,9 @@ restore_flag:
static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
+ /* we should flush all the data to keep data consistency */
+ sync_inodes_sb(sbi->sb);
+
down_write(&sbi->gc_lock);
f2fs_dirty_to_prefree(sbi);
@@ -1954,6 +2067,19 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
}
}
+ if (!test_opt(sbi, DISABLE_CHECKPOINT) &&
+ test_opt(sbi, MERGE_CHECKPOINT)) {
+ err = f2fs_start_ckpt_thread(sbi);
+ if (err) {
+ f2fs_err(sbi,
+ "Failed to start F2FS issue_checkpoint_thread (%d)",
+ err);
+ goto restore_gc;
+ }
+ } else {
+ f2fs_stop_ckpt_thread(sbi);
+ }
+
/*
* We stop issue flush thread if FS is mounted as RO
* or if flush_merge is not passed in mount option.
@@ -2638,10 +2764,10 @@ static const struct export_operations f2fs_export_ops = {
.get_parent = f2fs_get_parent,
};
-static loff_t max_file_blocks(void)
+loff_t max_file_blocks(struct inode *inode)
{
loff_t result = 0;
- loff_t leaf_count = DEF_ADDRS_PER_BLOCK;
+ loff_t leaf_count;
/*
* note: previously, result is equal to (DEF_ADDRS_PER_INODE -
@@ -2650,6 +2776,11 @@ static loff_t max_file_blocks(void)
* result as zero.
*/
+ if (inode && f2fs_compressed_file(inode))
+ leaf_count = ADDRS_PER_BLOCK(inode);
+ else
+ leaf_count = DEF_ADDRS_PER_BLOCK;
+
/* two direct node blocks */
result += (leaf_count * 2);
@@ -3533,8 +3664,7 @@ try_onemore:
if (err)
goto free_options;
- sbi->max_file_blocks = max_file_blocks();
- sb->s_maxbytes = sbi->max_file_blocks <<
+ sb->s_maxbytes = max_file_blocks(NULL) <<
le32_to_cpu(raw_super->log_blocksize);
sb->s_max_links = F2FS_LINK_MAX;
@@ -3701,6 +3831,19 @@ try_onemore:
f2fs_init_fsync_node_info(sbi);
+ /* setup checkpoint request control and start checkpoint issue thread */
+ f2fs_init_ckpt_req_control(sbi);
+ if (!test_opt(sbi, DISABLE_CHECKPOINT) &&
+ test_opt(sbi, MERGE_CHECKPOINT)) {
+ err = f2fs_start_ckpt_thread(sbi);
+ if (err) {
+ f2fs_err(sbi,
+ "Failed to start F2FS issue_checkpoint_thread (%d)",
+ err);
+ goto stop_ckpt_thread;
+ }
+ }
+
/* setup f2fs internal modules */
err = f2fs_build_segment_manager(sbi);
if (err) {
@@ -3786,12 +3929,10 @@ try_onemore:
* previous checkpoint was not done by clean system shutdown.
*/
if (f2fs_hw_is_readonly(sbi)) {
- if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
- err = -EROFS;
+ if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))
f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
- goto free_meta;
- }
- f2fs_info(sbi, "write access unavailable, skipping recovery");
+ else
+ f2fs_info(sbi, "write access unavailable, skipping recovery");
goto reset_checkpoint;
}
@@ -3910,6 +4051,8 @@ free_nm:
free_sm:
f2fs_destroy_segment_manager(sbi);
f2fs_destroy_post_read_wq(sbi);
+stop_ckpt_thread:
+ f2fs_stop_ckpt_thread(sbi);
free_devices:
destroy_device_list(sbi);
kvfree(sbi->ckpt);
@@ -4024,8 +4167,6 @@ static int __init init_f2fs_fs(void)
return -EINVAL;
}
- f2fs_build_trace_ios();
-
err = init_inodecache();
if (err)
goto fail;
@@ -4118,7 +4259,6 @@ static void __exit exit_f2fs_fs(void)
f2fs_destroy_segment_manager_caches();
f2fs_destroy_node_manager_caches();
destroy_inodecache();
- f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 30bae57428d1..e38a7f6921dd 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -11,6 +11,7 @@
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>
#include <linux/unicode.h>
+#include <linux/ioprio.h>
#include "f2fs.h"
#include "segment.h"
@@ -34,6 +35,7 @@ enum {
FAULT_INFO_TYPE, /* struct f2fs_fault_info */
#endif
RESERVED_BLOCKS, /* struct f2fs_sb_info */
+ CPRC_INFO, /* struct ckpt_req_control */
};
struct f2fs_attr {
@@ -70,6 +72,8 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
else if (struct_type == STAT_INFO)
return (unsigned char *)F2FS_STAT(sbi);
#endif
+ else if (struct_type == CPRC_INFO)
+ return (unsigned char *)&sbi->cprc_info;
return NULL;
}
@@ -96,6 +100,12 @@ static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
sbi->sectors_written_start) >> 1)));
}
+static ssize_t sb_status_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ return sprintf(buf, "%lx\n", sbi->s_flag);
+}
+
static ssize_t features_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -255,6 +265,23 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
return len;
}
+ if (!strcmp(a->attr.name, "ckpt_thread_ioprio")) {
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+ int len = 0;
+ int class = IOPRIO_PRIO_CLASS(cprc->ckpt_thread_ioprio);
+ int data = IOPRIO_PRIO_DATA(cprc->ckpt_thread_ioprio);
+
+ if (class == IOPRIO_CLASS_RT)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "rt,");
+ else if (class == IOPRIO_CLASS_BE)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "be,");
+ else
+ return -EINVAL;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d\n", data);
+ return len;
+ }
+
ui = (unsigned int *)(ptr + a->offset);
return sprintf(buf, "%u\n", *ui);
@@ -308,6 +335,38 @@ out:
return ret ? ret : count;
}
+ if (!strcmp(a->attr.name, "ckpt_thread_ioprio")) {
+ const char *name = strim((char *)buf);
+ struct ckpt_req_control *cprc = &sbi->cprc_info;
+ int class;
+ long data;
+ int ret;
+
+ if (!strncmp(name, "rt,", 3))
+ class = IOPRIO_CLASS_RT;
+ else if (!strncmp(name, "be,", 3))
+ class = IOPRIO_CLASS_BE;
+ else
+ return -EINVAL;
+
+ name += 3;
+ ret = kstrtol(name, 10, &data);
+ if (ret)
+ return ret;
+ if (data >= IOPRIO_BE_NR || data < 0)
+ return -EINVAL;
+
+ cprc->ckpt_thread_ioprio = IOPRIO_PRIO_VALUE(class, data);
+ if (test_opt(sbi, MERGE_CHECKPOINT)) {
+ ret = set_task_ioprio(cprc->f2fs_issue_ckpt,
+ cprc->ckpt_thread_ioprio);
+ if (ret)
+ return ret;
+ }
+
+ return count;
+ }
+
ui = (unsigned int *)(ptr + a->offset);
ret = kstrtoul(skip_spaces(buf), 0, &t);
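
The store handler above accepts "rt,<data>" or "be,<data>" (with <data> below IOPRIO_BE_NR) and re-applies the priority to the running checkpoint thread when checkpoint_merge is active. A hedged sketch of driving it from user space follows; the device directory name under /sys/fs/f2fs/ is hypothetical.

/* sketch: drop the checkpoint thread to best-effort priority 3 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *val = "be,3";
	int fd = open("/sys/fs/f2fs/vdb/ckpt_thread_ioprio", O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0)
		perror("ckpt_thread_ioprio");
	if (fd >= 0)
		close(fd);
	return 0;
}
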
@@ -567,6 +626,7 @@ F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag);
+F2FS_RW_ATTR(CPRC_INFO, ckpt_req_control, ckpt_thread_ioprio, ckpt_thread_ioprio);
F2FS_GENERAL_RO_ATTR(dirty_segments);
F2FS_GENERAL_RO_ATTR(free_segments);
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
@@ -652,6 +712,7 @@ static struct attribute *f2fs_attrs[] = {
#endif
ATTR_LIST(data_io_flag),
ATTR_LIST(node_io_flag),
+ ATTR_LIST(ckpt_thread_ioprio),
ATTR_LIST(dirty_segments),
ATTR_LIST(free_segments),
ATTR_LIST(unusable),
@@ -702,6 +763,13 @@ static struct attribute *f2fs_feat_attrs[] = {
};
ATTRIBUTE_GROUPS(f2fs_feat);
+F2FS_GENERAL_RO_ATTR(sb_status);
+static struct attribute *f2fs_stat_attrs[] = {
+ ATTR_LIST(sb_status),
+ NULL,
+};
+ATTRIBUTE_GROUPS(f2fs_stat);
+
static const struct sysfs_ops f2fs_attr_ops = {
.show = f2fs_attr_show,
.store = f2fs_attr_store,
@@ -730,6 +798,44 @@ static struct kobject f2fs_feat = {
.kset = &f2fs_kset,
};
+static ssize_t f2fs_stat_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_stat_kobj);
+ struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+ return a->show ? a->show(a, sbi, buf) : 0;
+}
+
+static ssize_t f2fs_stat_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_stat_kobj);
+ struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+ return a->store ? a->store(a, sbi, buf, len) : 0;
+}
+
+static void f2fs_stat_kobj_release(struct kobject *kobj)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_stat_kobj);
+ complete(&sbi->s_stat_kobj_unregister);
+}
+
+static const struct sysfs_ops f2fs_stat_attr_ops = {
+ .show = f2fs_stat_attr_show,
+ .store = f2fs_stat_attr_store,
+};
+
+static struct kobj_type f2fs_stat_ktype = {
+ .default_groups = f2fs_stat_groups,
+ .sysfs_ops = &f2fs_stat_attr_ops,
+ .release = f2fs_stat_kobj_release,
+};
+
static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
void *offset)
{
@@ -936,11 +1042,15 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
init_completion(&sbi->s_kobj_unregister);
err = kobject_init_and_add(&sbi->s_kobj, &f2fs_sb_ktype, NULL,
"%s", sb->s_id);
- if (err) {
- kobject_put(&sbi->s_kobj);
- wait_for_completion(&sbi->s_kobj_unregister);
- return err;
- }
+ if (err)
+ goto put_sb_kobj;
+
+ sbi->s_stat_kobj.kset = &f2fs_kset;
+ init_completion(&sbi->s_stat_kobj_unregister);
+ err = kobject_init_and_add(&sbi->s_stat_kobj, &f2fs_stat_ktype,
+ &sbi->s_kobj, "stat");
+ if (err)
+ goto put_stat_kobj;
if (f2fs_proc_root)
sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
@@ -956,6 +1066,13 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
victim_bits_seq_show, sb);
}
return 0;
+put_stat_kobj:
+ kobject_put(&sbi->s_stat_kobj);
+ wait_for_completion(&sbi->s_stat_kobj_unregister);
+put_sb_kobj:
+ kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
+ return err;
}
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
@@ -967,6 +1084,11 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
remove_proc_entry("victim_bits", sbi->s_proc);
remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
}
+
+ kobject_del(&sbi->s_stat_kobj);
+ kobject_put(&sbi->s_stat_kobj);
+ wait_for_completion(&sbi->s_stat_kobj_unregister);
+
kobject_del(&sbi->s_kobj);
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
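
With the per-superblock "stat" kobject registered and torn down as above, the read-only sb_status attribute exports sbi->s_flag as a hex bitmask. A hedged read sketch, not part of the patch, with a hypothetical device name:

/* sketch: dump the superblock status flags from the new stat group */
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/fs/f2fs/vdb/stat/sb_status", "r");

	if (!f) {
		perror("sb_status");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("s_flag bitmask: %s", line);
	fclose(f);
	return 0;
}
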
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
deleted file mode 100644
index d0ab533a9ce8..000000000000
--- a/fs/f2fs/trace.c
+++ /dev/null
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * f2fs IO tracer
- *
- * Copyright (c) 2014 Motorola Mobility
- * Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
- */
-#include <linux/fs.h>
-#include <linux/f2fs_fs.h>
-#include <linux/sched.h>
-#include <linux/radix-tree.h>
-
-#include "f2fs.h"
-#include "trace.h"
-
-static RADIX_TREE(pids, GFP_ATOMIC);
-static spinlock_t pids_lock;
-static struct last_io_info last_io;
-
-static inline void __print_last_io(void)
-{
- if (!last_io.len)
- return;
-
- trace_printk("%3x:%3x %4x %-16s %2x %5x %5x %12x %4x\n",
- last_io.major, last_io.minor,
- last_io.pid, "----------------",
- last_io.type,
- last_io.fio.op, last_io.fio.op_flags,
- last_io.fio.new_blkaddr,
- last_io.len);
- memset(&last_io, 0, sizeof(last_io));
-}
-
-static int __file_type(struct inode *inode, pid_t pid)
-{
- if (f2fs_is_atomic_file(inode))
- return __ATOMIC_FILE;
- else if (f2fs_is_volatile_file(inode))
- return __VOLATILE_FILE;
- else if (S_ISDIR(inode->i_mode))
- return __DIR_FILE;
- else if (inode->i_ino == F2FS_NODE_INO(F2FS_I_SB(inode)))
- return __NODE_FILE;
- else if (inode->i_ino == F2FS_META_INO(F2FS_I_SB(inode)))
- return __META_FILE;
- else if (pid)
- return __NORMAL_FILE;
- else
- return __MISC_FILE;
-}
-
-void f2fs_trace_pid(struct page *page)
-{
- struct inode *inode = page->mapping->host;
- pid_t pid = task_pid_nr(current);
- void *p;
-
- set_page_private(page, (unsigned long)pid);
-
-retry:
- if (radix_tree_preload(GFP_NOFS))
- return;
-
- spin_lock(&pids_lock);
- p = radix_tree_lookup(&pids, pid);
- if (p == current)
- goto out;
- if (p)
- radix_tree_delete(&pids, pid);
-
- if (radix_tree_insert(&pids, pid, current)) {
- spin_unlock(&pids_lock);
- radix_tree_preload_end();
- cond_resched();
- goto retry;
- }
-
- trace_printk("%3x:%3x %4x %-16s\n",
- MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
- pid, current->comm);
-out:
- spin_unlock(&pids_lock);
- radix_tree_preload_end();
-}
-
-void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
-{
- struct inode *inode;
- pid_t pid;
- int major, minor;
-
- if (flush) {
- __print_last_io();
- return;
- }
-
- inode = fio->page->mapping->host;
- pid = page_private(fio->page);
-
- major = MAJOR(inode->i_sb->s_dev);
- minor = MINOR(inode->i_sb->s_dev);
-
- if (last_io.major == major && last_io.minor == minor &&
- last_io.pid == pid &&
- last_io.type == __file_type(inode, pid) &&
- last_io.fio.op == fio->op &&
- last_io.fio.op_flags == fio->op_flags &&
- last_io.fio.new_blkaddr + last_io.len ==
- fio->new_blkaddr) {
- last_io.len++;
- return;
- }
-
- __print_last_io();
-
- last_io.major = major;
- last_io.minor = minor;
- last_io.pid = pid;
- last_io.type = __file_type(inode, pid);
- last_io.fio = *fio;
- last_io.len = 1;
- return;
-}
-
-void f2fs_build_trace_ios(void)
-{
- spin_lock_init(&pids_lock);
-}
-
-#define PIDVEC_SIZE 128
-static unsigned int gang_lookup_pids(pid_t *results, unsigned long first_index,
- unsigned int max_items)
-{
- struct radix_tree_iter iter;
- void **slot;
- unsigned int ret = 0;
-
- if (unlikely(!max_items))
- return 0;
-
- radix_tree_for_each_slot(slot, &pids, &iter, first_index) {
- results[ret] = iter.index;
- if (++ret == max_items)
- break;
- }
- return ret;
-}
-
-void f2fs_destroy_trace_ios(void)
-{
- pid_t pid[PIDVEC_SIZE];
- pid_t next_pid = 0;
- unsigned int found;
-
- spin_lock(&pids_lock);
- while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
- unsigned idx;
-
- next_pid = pid[found - 1] + 1;
- for (idx = 0; idx < found; idx++)
- radix_tree_delete(&pids, pid[idx]);
- }
- spin_unlock(&pids_lock);
-}
diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h
deleted file mode 100644
index 789f6aa727fc..000000000000
--- a/fs/f2fs/trace.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * f2fs IO tracer
- *
- * Copyright (c) 2014 Motorola Mobility
- * Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
- */
-#ifndef __F2FS_TRACE_H__
-#define __F2FS_TRACE_H__
-
-#ifdef CONFIG_F2FS_IO_TRACE
-#include <trace/events/f2fs.h>
-
-enum file_type {
- __NORMAL_FILE,
- __DIR_FILE,
- __NODE_FILE,
- __META_FILE,
- __ATOMIC_FILE,
- __VOLATILE_FILE,
- __MISC_FILE,
-};
-
-struct last_io_info {
- int major, minor;
- pid_t pid;
- enum file_type type;
- struct f2fs_io_info fio;
- block_t len;
-};
-
-extern void f2fs_trace_pid(struct page *);
-extern void f2fs_trace_ios(struct f2fs_io_info *, int);
-extern void f2fs_build_trace_ios(void);
-extern void f2fs_destroy_trace_ios(void);
-#else
-#define f2fs_trace_pid(p)
-#define f2fs_trace_ios(i, n)
-#define f2fs_build_trace_ios()
-#define f2fs_destroy_trace_ios()
-
-#endif
-#endif /* __F2FS_TRACE_H__ */
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 65afcc3cc68a..490f843ec3bf 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -64,6 +64,7 @@ static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
}
static int f2fs_xattr_generic_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -107,6 +108,7 @@ static int f2fs_xattr_advise_get(const struct xattr_handler *handler,
}
static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -114,7 +116,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
unsigned char old_advise = F2FS_I(inode)->i_advise;
unsigned char new_advise;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EPERM;
if (value == NULL)
return -EINVAL;
@@ -327,7 +329,7 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
void *last_addr = NULL;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
unsigned int inline_size = inline_xattr_size(inode);
- int err = 0;
+ int err;
if (!xnid && !inline_size)
return -ENODATA;
@@ -515,7 +517,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
void *buffer, size_t buffer_size, struct page *ipage)
{
struct f2fs_xattr_entry *entry = NULL;
- int error = 0;
+ int error;
unsigned int size, len;
void *base_addr = NULL;
int base_size;
@@ -562,7 +564,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
struct inode *inode = d_inode(dentry);
struct f2fs_xattr_entry *entry;
void *base_addr, *last_base_addr;
- int error = 0;
+ int error;
size_t rest = buffer_size;
down_read(&F2FS_I(inode)->i_xattr_sem);
@@ -632,7 +634,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
int found, newsize;
size_t len;
__u32 new_hsize;
- int error = 0;
+ int error;
if (name == NULL)
return -EINVAL;
@@ -673,7 +675,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
}
if (value && f2fs_xattr_value_same(here, value, size))
- goto exit;
+ goto same;
} else if ((flags & XATTR_REPLACE)) {
error = -ENODATA;
goto exit;
@@ -738,17 +740,20 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (error)
goto exit;
- if (is_inode_flag_set(inode, FI_ACL_MODE)) {
- inode->i_mode = F2FS_I(inode)->i_acl_mode;
- inode->i_ctime = current_time(inode);
- clear_inode_flag(inode, FI_ACL_MODE);
- }
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
f2fs_mark_inode_dirty_sync(inode, true);
if (!error && S_ISDIR(inode->i_mode))
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
+
+same:
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ inode->i_ctime = current_time(inode);
+ clear_inode_flag(inode, FI_ACL_MODE);
+ }
+
exit:
kfree(base_addr);
return error;
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 922a0c6ba46c..02d4d4234956 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -397,9 +397,11 @@ extern long fat_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern const struct file_operations fat_file_operations;
extern const struct inode_operations fat_file_inode_operations;
-extern int fat_setattr(struct dentry *dentry, struct iattr *attr);
+extern int fat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr);
extern void fat_truncate_blocks(struct inode *inode, loff_t offset);
-extern int fat_getattr(const struct path *path, struct kstat *stat,
+extern int fat_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
extern int fat_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index f9ee27cf4d7c..13855ba49cd9 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -95,7 +95,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
goto out_unlock_inode;
/* This MUST be done before doing anything irreversible... */
- err = fat_setattr(file->f_path.dentry, &ia);
+ err = fat_setattr(file_mnt_user_ns(file), file->f_path.dentry, &ia);
if (err)
goto out_unlock_inode;
@@ -195,7 +195,7 @@ int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
if (err)
return err;
- return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ return blkdev_issue_flush(inode->i_sb->s_bdev);
}
@@ -394,11 +394,11 @@ void fat_truncate_blocks(struct inode *inode, loff_t offset)
fat_flush_inodes(inode->i_sb, inode, NULL);
}
-int fat_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int fat_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
- generic_fillattr(inode, stat);
+ generic_fillattr(mnt_userns, inode, stat);
stat->blksize = MSDOS_SB(inode->i_sb)->cluster_size;
if (MSDOS_SB(inode->i_sb)->options.nfs == FAT_NFS_NOSTALE_RO) {
@@ -447,12 +447,13 @@ static int fat_sanitize_mode(const struct msdos_sb_info *sbi,
return 0;
}
-static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
+static int fat_allow_set_time(struct user_namespace *mnt_userns,
+ struct msdos_sb_info *sbi, struct inode *inode)
{
umode_t allow_utime = sbi->options.allow_utime;
- if (!uid_eq(current_fsuid(), inode->i_uid)) {
- if (in_group_p(inode->i_gid))
+ if (!uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode))) {
+ if (in_group_p(i_gid_into_mnt(mnt_userns, inode)))
allow_utime >>= 3;
if (allow_utime & MAY_WRITE)
return 1;
@@ -466,7 +467,8 @@ static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
/* valid file mode bits */
#define FAT_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXUGO)
-int fat_setattr(struct dentry *dentry, struct iattr *attr)
+int fat_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
struct inode *inode = d_inode(dentry);
@@ -476,11 +478,11 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
/* Check for setting the inode time. */
ia_valid = attr->ia_valid;
if (ia_valid & TIMES_SET_FLAGS) {
- if (fat_allow_set_time(sbi, inode))
+ if (fat_allow_set_time(mnt_userns, sbi, inode))
attr->ia_valid &= ~TIMES_SET_FLAGS;
}
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(mnt_userns, dentry, attr);
attr->ia_valid = ia_valid;
if (error) {
if (sbi->options.quiet)
@@ -550,7 +552,7 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
fat_truncate_time(inode, &attr->ia_mtime, S_MTIME);
attr->ia_valid &= ~(ATTR_ATIME|ATTR_CTIME|ATTR_MTIME);
- setattr_copy(inode, attr);
+ setattr_copy(mnt_userns, inode, attr);
mark_inode_dirty(inode);
out:
return error;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index f1b2a1fc2a6a..18a50a46b57f 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -329,22 +329,23 @@ EXPORT_SYMBOL_GPL(fat_truncate_time);
int fat_update_time(struct inode *inode, struct timespec64 *now, int flags)
{
- int iflags = I_DIRTY_TIME;
- bool dirty = false;
+ int dirty_flags = 0;
if (inode->i_ino == MSDOS_ROOT_INO)
return 0;
- fat_truncate_time(inode, now, flags);
- if (flags & S_VERSION)
- dirty = inode_maybe_inc_iversion(inode, false);
- if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
- !(inode->i_sb->s_flags & SB_LAZYTIME))
- dirty = true;
+ if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
+ fat_truncate_time(inode, now, flags);
+ if (inode->i_sb->s_flags & SB_LAZYTIME)
+ dirty_flags |= I_DIRTY_TIME;
+ else
+ dirty_flags |= I_DIRTY_SYNC;
+ }
+
+ if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
+ dirty_flags |= I_DIRTY_SYNC;
- if (dirty)
- iflags |= I_DIRTY_SYNC;
- __mark_inode_dirty(inode, iflags);
+ __mark_inode_dirty(inode, dirty_flags);
return 0;
}
EXPORT_SYMBOL_GPL(fat_update_time);
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 9d062886fbc1..efba301d68ae 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -261,8 +261,8 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
}
/***** Create a file */
-static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int msdos_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
struct inode *inode = NULL;
@@ -339,7 +339,8 @@ out:
}
/***** Make a directory */
-static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int msdos_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
@@ -593,7 +594,8 @@ error_inode:
}
/***** Rename, a wrapper for rename_same_dir & rename_diff_dir */
-static int msdos_rename(struct inode *old_dir, struct dentry *old_dentry,
+static int msdos_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
@@ -665,7 +667,7 @@ static struct file_system_type msdos_fs_type = {
.name = "msdos",
.mount = msdos_mount,
.kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("msdos");
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 0cdd0fb9f742..5369d82e0bfb 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -756,8 +756,8 @@ error:
return ERR_PTR(err);
}
-static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int vfat_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
@@ -846,7 +846,8 @@ out:
return err;
}
-static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int vfat_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
@@ -892,9 +893,9 @@ out:
return err;
}
-static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int vfat_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct buffer_head *dotdot_bh;
struct msdos_dir_entry *dotdot_de;
@@ -1062,7 +1063,7 @@ static struct file_system_type vfat_fs_type = {
.name = "vfat",
.mount = vfat_mount,
.kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("vfat");
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 05b36b28f2e8..dfc72f15be7f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -25,6 +25,7 @@
#include <linux/user_namespace.h>
#include <linux/memfd.h>
#include <linux/compat.h>
+#include <linux/mount.h>
#include <linux/poll.h>
#include <asm/siginfo.h>
@@ -46,7 +47,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
/* O_NOATIME can only be set by the owner or superuser */
if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(file_mnt_user_ns(filp), inode))
return -EPERM;
/* required for strict SunOS emulation */
@@ -148,11 +149,15 @@ void f_delown(struct file *filp)
pid_t f_getown(struct file *filp)
{
- pid_t pid;
+ pid_t pid = 0;
read_lock(&filp->f_owner.lock);
- pid = pid_vnr(filp->f_owner.pid);
- if (filp->f_owner.pid_type == PIDTYPE_PGID)
- pid = -pid;
+ rcu_read_lock();
+ if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
+ pid = pid_vnr(filp->f_owner.pid);
+ if (filp->f_owner.pid_type == PIDTYPE_PGID)
+ pid = -pid;
+ }
+ rcu_read_unlock();
read_unlock(&filp->f_owner.lock);
return pid;
}
@@ -200,11 +205,14 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
static int f_getown_ex(struct file *filp, unsigned long arg)
{
struct f_owner_ex __user *owner_p = (void __user *)arg;
- struct f_owner_ex owner;
+ struct f_owner_ex owner = {};
int ret = 0;
read_lock(&filp->f_owner.lock);
- owner.pid = pid_vnr(filp->f_owner.pid);
+ rcu_read_lock();
+ if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
+ owner.pid = pid_vnr(filp->f_owner.pid);
+ rcu_read_unlock();
switch (filp->f_owner.pid_type) {
case PIDTYPE_PID:
owner.type = F_OWNER_TID;
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 01263ffbc4c0..ec6feeccc276 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -173,7 +173,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
/*
* With handle we don't look at the execute bit on the
- * the directory. Ideally we would like CAP_DAC_SEARCH.
+ * directory. Ideally we would like CAP_DAC_SEARCH.
* But we don't have that
*/
if (!capable(CAP_DAC_READ_SEARCH)) {
diff --git a/fs/file.c b/fs/file.c
index dab120b71e44..f3a4bac2cbe9 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -22,6 +22,8 @@
#include <linux/close_range.h>
#include <net/sock.h>
+#include "internal.h"
+
unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
@@ -732,36 +734,48 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
}
/*
- * variant of close_fd that gets a ref on the file for later fput.
- * The caller must ensure that filp_close() called on the file, and then
- * an fput().
+ * See close_fd_get_file() below, this variant assumes current->files->file_lock
+ * is held.
*/
-int close_fd_get_file(unsigned int fd, struct file **res)
+int __close_fd_get_file(unsigned int fd, struct file **res)
{
struct files_struct *files = current->files;
struct file *file;
struct fdtable *fdt;
- spin_lock(&files->file_lock);
fdt = files_fdtable(files);
if (fd >= fdt->max_fds)
- goto out_unlock;
+ goto out_err;
file = fdt->fd[fd];
if (!file)
- goto out_unlock;
+ goto out_err;
rcu_assign_pointer(fdt->fd[fd], NULL);
__put_unused_fd(files, fd);
- spin_unlock(&files->file_lock);
get_file(file);
*res = file;
return 0;
-
-out_unlock:
- spin_unlock(&files->file_lock);
+out_err:
*res = NULL;
return -ENOENT;
}
+/*
+ * Variant of close_fd() that gets a ref on the file for later fput.
+ * The caller must ensure that filp_close() is called on the file,
+ * followed by an fput().
+ */
+int close_fd_get_file(unsigned int fd, struct file **res)
+{
+ struct files_struct *files = current->files;
+ int ret;
+
+ spin_lock(&files->file_lock);
+ ret = __close_fd_get_file(fd, res);
+ spin_unlock(&files->file_lock);
+
+ return ret;
+}
+
void do_close_on_exec(struct files_struct *files)
{
unsigned i;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c41cb887eb7d..e91980f49388 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1442,9 +1442,15 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
}
/*
- * Write out an inode and its dirty pages. Do not update the writeback list
- * linkage. That is left to the caller. The caller is also responsible for
- * setting I_SYNC flag and calling inode_sync_complete() to clear it.
+ * Write out an inode and its dirty pages (or some of its dirty pages, depending
+ * on @wbc->nr_to_write), and clear the relevant dirty flags from i_state.
+ *
+ * This doesn't remove the inode from the writeback list it is on, except
+ * potentially to move it from b_dirty_time to b_dirty due to timestamp
+ * expiration. The caller is otherwise responsible for writeback list handling.
+ *
+ * The caller is also responsible for setting the I_SYNC flag beforehand and
+ * calling inode_sync_complete() to clear it afterwards.
*/
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
@@ -1479,7 +1485,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* change I_DIRTY_TIME into I_DIRTY_SYNC.
*/
if ((inode->i_state & I_DIRTY_TIME) &&
- (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
+ (wbc->sync_mode == WB_SYNC_ALL ||
time_after(jiffies, inode->dirtied_time_when +
dirtytime_expire_interval * HZ))) {
trace_writeback_lazytime(inode);
@@ -1487,9 +1493,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
- * Some filesystems may redirty the inode during the writeback
- * due to delalloc, clear dirty metadata flags right before
- * write_inode()
+ * Get and clear the dirty flags from i_state. This needs to be done
+ * after calling writepages because some filesystems may redirty the
+ * inode during writepages due to delalloc. It also needs to be done
+ * after handling timestamp expiration, as that may dirty the inode too.
*/
spin_lock(&inode->i_lock);
dirty = inode->i_state & I_DIRTY;
@@ -1524,12 +1531,13 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
- * Write out an inode's dirty pages. Either the caller has an active reference
- * on the inode or the inode has I_WILL_FREE set.
+ * Write out an inode's dirty data and metadata on-demand, i.e. separately from
+ * the regular batched writeback done by the flusher threads in
+ * writeback_sb_inodes(). @wbc controls various aspects of the write, such as
+ * whether it is a data-integrity sync (%WB_SYNC_ALL) or not (%WB_SYNC_NONE).
*
- * This function is designed to be called for writing back one inode which
- * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
- * and does more profound writeback list handling in writeback_sb_inodes().
+ * To prevent the inode from going away, either the caller must have a reference
+ * to the inode, or the inode must have I_WILL_FREE or I_FREEING set.
*/
static int writeback_single_inode(struct inode *inode,
struct writeback_control *wbc)
@@ -1544,23 +1552,23 @@ static int writeback_single_inode(struct inode *inode,
WARN_ON(inode->i_state & I_WILL_FREE);
if (inode->i_state & I_SYNC) {
- if (wbc->sync_mode != WB_SYNC_ALL)
- goto out;
/*
- * It's a data-integrity sync. We must wait. Since callers hold
- * inode reference or inode has I_WILL_FREE set, it cannot go
- * away under us.
+ * Writeback is already running on the inode. For WB_SYNC_NONE,
+ * that's enough and we can just return. For WB_SYNC_ALL, we
+ * must wait for the existing writeback to complete, then do
+ * writeback again if there's anything left.
*/
+ if (wbc->sync_mode != WB_SYNC_ALL)
+ goto out;
__inode_wait_for_writeback(inode);
}
WARN_ON(inode->i_state & I_SYNC);
/*
- * Skip inode if it is clean and we have no outstanding writeback in
- * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
- * function since flusher thread may be doing for example sync in
- * parallel and if we move the inode, it could get skipped. So here we
- * make sure inode is on some writeback list and leave it there unless
- * we have completely cleaned the inode.
+ * If the inode is already fully clean, then there's nothing to do.
+ *
+ * For data-integrity syncs we also need to check whether any pages are
+ * still under writeback, e.g. due to prior WB_SYNC_NONE writeback. If
+ * there are any such pages, we'll need to wait for them.
*/
if (!(inode->i_state & I_DIRTY_ALL) &&
(wbc->sync_mode != WB_SYNC_ALL ||
@@ -1576,8 +1584,9 @@ static int writeback_single_inode(struct inode *inode,
wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
/*
- * If inode is clean, remove it from writeback lists. Otherwise don't
- * touch it. See comment above for explanation.
+ * If the inode is now fully clean, then it can be safely removed from
+ * its writeback list (if any). Otherwise the flusher threads are
+ * responsible for the writeback lists.
*/
if (!(inode->i_state & I_DIRTY_ALL))
inode_io_list_del_locked(inode, wb);
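For illustration, a minimal hypothetical on-demand caller of this path; the function name and the exact writeback_control fields here are assumptions, but this is the general shape of a data-integrity sync of a single inode through writeback_single_inode():

static int example_sync_one_inode(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,	/* data-integrity sync */
		.nr_to_write	= LONG_MAX,	/* write everything */
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	return writeback_single_inode(inode, &wbc);
}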
@@ -2219,23 +2228,24 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
}
/**
- * __mark_inode_dirty - internal function
+ * __mark_inode_dirty - internal function to mark an inode dirty
*
* @inode: inode to mark
- * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
+ * @flags: what kind of dirty, e.g. I_DIRTY_SYNC. This can be a combination of
+ * multiple I_DIRTY_* flags, except that I_DIRTY_TIME can't be combined
+ * with I_DIRTY_PAGES.
*
- * Mark an inode as dirty. Callers should use mark_inode_dirty or
- * mark_inode_dirty_sync.
+ * Mark an inode as dirty. We notify the filesystem, then update the inode's
+ * dirty flags. Then, if needed, we add the inode to the appropriate dirty list.
*
- * Put the inode on the super block's dirty list.
+ * Most callers should use mark_inode_dirty() or mark_inode_dirty_sync()
+ * instead of calling this directly.
*
- * CAREFUL! We mark it dirty unconditionally, but move it onto the
- * dirty list only if it is hashed or if it refers to a blockdev.
- * If it was not hashed, it will never be added to the dirty list
- * even if it is later hashed, as it will have been marked dirty already.
+ * CAREFUL! We only add the inode to the dirty list if it is hashed or if it
+ * refers to a blockdev. Unhashed inodes will never be added to the dirty list
+ * even if they are later hashed, as they will have been marked dirty already.
*
- * In short, make sure you hash any inodes _before_ you start marking
- * them dirty.
+ * In short, ensure you hash any inodes _before_ you start marking them dirty.
*
* Note that for blockdevs, inode->dirtied_when represents the dirtying time of
* the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
@@ -2247,25 +2257,34 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
void __mark_inode_dirty(struct inode *inode, int flags)
{
struct super_block *sb = inode->i_sb;
- int dirtytime;
+ int dirtytime = 0;
trace_writeback_mark_inode_dirty(inode, flags);
- /*
- * Don't do this for I_DIRTY_PAGES - that doesn't actually
- * dirty the inode itself
- */
- if (flags & (I_DIRTY_INODE | I_DIRTY_TIME)) {
+ if (flags & I_DIRTY_INODE) {
+ /*
+ * Notify the filesystem about the inode being dirtied, so that
+ * (if needed) it can update on-disk fields and journal the
+ * inode. This is only needed when the inode itself is being
+ * dirtied now. I.e. it's only needed for I_DIRTY_INODE, not
+ * for just I_DIRTY_PAGES or I_DIRTY_TIME.
+ */
trace_writeback_dirty_inode_start(inode, flags);
-
if (sb->s_op->dirty_inode)
- sb->s_op->dirty_inode(inode, flags);
-
+ sb->s_op->dirty_inode(inode, flags & I_DIRTY_INODE);
trace_writeback_dirty_inode(inode, flags);
- }
- if (flags & I_DIRTY_INODE)
+
+ /* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
flags &= ~I_DIRTY_TIME;
- dirtytime = flags & I_DIRTY_TIME;
+ } else {
+ /*
+ * Else it's either I_DIRTY_PAGES, I_DIRTY_TIME, or nothing.
+ * (We don't support setting both I_DIRTY_PAGES and I_DIRTY_TIME
+ * in one call to __mark_inode_dirty().)
+ */
+ dirtytime = flags & I_DIRTY_TIME;
+ WARN_ON_ONCE(dirtytime && flags != I_DIRTY_TIME);
+ }
/*
* Paired with smp_mb() in __writeback_single_inode() for the
@@ -2288,6 +2307,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
inode_attach_wb(inode, NULL);
+ /* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
if (flags & I_DIRTY_INODE)
inode->i_state &= ~I_DIRTY_TIME;
inode->i_state |= flags;
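To make the new flag contract concrete, a sketch (hypothetical helper, not part of the patch) of which combinations are and are not supported by the updated __mark_inode_dirty():

static void example_dirty(struct inode *inode)
{
	__mark_inode_dirty(inode, I_DIRTY_SYNC);		  /* inode metadata only */
	__mark_inode_dirty(inode, I_DIRTY_SYNC | I_DIRTY_PAGES); /* metadata + data */
	__mark_inode_dirty(inode, I_DIRTY_TIME);		  /* lazytime timestamps */
	/*
	 * Not supported: I_DIRTY_TIME combined with I_DIRTY_PAGES in a
	 * single call; the new WARN_ON_ONCE() above would trigger.
	 */
}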
diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c
index f529075a2ce8..e9c0f916349d 100644
--- a/fs/fuse/acl.c
+++ b/fs/fuse/acl.c
@@ -50,7 +50,8 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type)
return acl;
}
-int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int fuse_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
struct fuse_conn *fc = get_fuse_conn(inode);
const char *name;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 588f8d1240aa..c0fee830a34e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -844,11 +844,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
if (WARN_ON(PageMlocked(oldpage)))
goto out_fallback_unlock;
- err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
- if (err) {
- unlock_page(newpage);
- goto out_put_old;
- }
+ replace_page_cache_page(oldpage, newpage);
get_page(newpage);
@@ -2233,19 +2229,21 @@ static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- int err = -ENOTTY;
+ int res;
+ int oldfd;
+ struct fuse_dev *fud = NULL;
- if (cmd == FUSE_DEV_IOC_CLONE) {
- int oldfd;
+ if (_IOC_TYPE(cmd) != FUSE_DEV_IOC_MAGIC)
+ return -ENOTTY;
- err = -EFAULT;
- if (!get_user(oldfd, (__u32 __user *) arg)) {
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(FUSE_DEV_IOC_CLONE):
+ res = -EFAULT;
+ if (!get_user(oldfd, (__u32 __user *)arg)) {
struct file *old = fget(oldfd);
- err = -EINVAL;
+ res = -EINVAL;
if (old) {
- struct fuse_dev *fud = NULL;
-
/*
* Check against file->f_op because CUSE
* uses the same ioctl handler.
@@ -2256,14 +2254,18 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
if (fud) {
mutex_lock(&fuse_mutex);
- err = fuse_device_clone(fud->fc, file);
+ res = fuse_device_clone(fud->fc, file);
mutex_unlock(&fuse_mutex);
}
fput(old);
}
}
+ break;
+ default:
+ res = -ENOTTY;
+ break;
}
- return err;
+ return res;
}
const struct file_operations fuse_dev_operations = {
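The switch above follows the usual ioctl dispatch pattern: reject foreign commands by magic number first, then switch on the command number so further FUSE_DEV_IOC_* ioctls can be added later. A generic sketch of that pattern (FOO_IOC_MAGIC and FOO_IOC_BAR are made-up names):

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (_IOC_TYPE(cmd) != FOO_IOC_MAGIC)
		return -ENOTTY;			/* not one of our ioctls */

	switch (_IOC_NR(cmd)) {
	case _IOC_NR(FOO_IOC_BAR):
		return 0;			/* handle the command */
	default:
		return -ENOTTY;			/* unknown number in our range */
	}
}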
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 78f9f209078c..06a18700a845 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -605,7 +605,8 @@ out_err:
return err;
}
-static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);
+static int fuse_mknod(struct user_namespace *, struct inode *, struct dentry *,
+ umode_t, dev_t);
static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
struct file *file, unsigned flags,
umode_t mode)
@@ -645,7 +646,7 @@ out_dput:
return err;
mknod:
- err = fuse_mknod(dir, entry, mode, 0);
+ err = fuse_mknod(&init_user_ns, dir, entry, mode, 0);
if (err)
goto out_dput;
no_open:
@@ -715,8 +716,8 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
return err;
}
-static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
- dev_t rdev)
+static int fuse_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *entry, umode_t mode, dev_t rdev)
{
struct fuse_mknod_in inarg;
struct fuse_mount *fm = get_fuse_mount(dir);
@@ -738,13 +739,14 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
return create_new_entry(fm, &args, dir, entry, mode);
}
-static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
- bool excl)
+static int fuse_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *entry, umode_t mode, bool excl)
{
- return fuse_mknod(dir, entry, mode, 0);
+ return fuse_mknod(&init_user_ns, dir, entry, mode, 0);
}
-static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
+static int fuse_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *entry, umode_t mode)
{
struct fuse_mkdir_in inarg;
struct fuse_mount *fm = get_fuse_mount(dir);
@@ -765,8 +767,8 @@ static int fuse_mkdir(struct inode *dir, struct dentry *entry, umode_t mode)
return create_new_entry(fm, &args, dir, entry, S_IFDIR);
}
-static int fuse_symlink(struct inode *dir, struct dentry *entry,
- const char *link)
+static int fuse_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *entry, const char *link)
{
struct fuse_mount *fm = get_fuse_mount(dir);
unsigned len = strlen(link) + 1;
@@ -908,9 +910,9 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
return err;
}
-static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
- struct inode *newdir, struct dentry *newent,
- unsigned int flags)
+static int fuse_rename2(struct user_namespace *mnt_userns, struct inode *olddir,
+ struct dentry *oldent, struct inode *newdir,
+ struct dentry *newent, unsigned int flags)
{
struct fuse_conn *fc = get_fuse_conn(olddir);
int err;
@@ -1087,7 +1089,7 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
forget_all_cached_acls(inode);
err = fuse_do_getattr(inode, stat, file);
} else if (stat) {
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
stat->mode = fi->orig_i_mode;
stat->ino = fi->orig_ino;
}
@@ -1249,7 +1251,8 @@ static int fuse_perm_getattr(struct inode *inode, int mask)
* access request is sent. Execute permission is still checked
* locally based on file mode.
*/
-static int fuse_permission(struct inode *inode, int mask)
+static int fuse_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
struct fuse_conn *fc = get_fuse_conn(inode);
bool refreshed = false;
@@ -1280,7 +1283,7 @@ static int fuse_permission(struct inode *inode, int mask)
}
if (fc->default_permissions) {
- err = generic_permission(inode, mask);
+ err = generic_permission(&init_user_ns, inode, mask);
/* If permission is denied, try to refresh file
attributes. This is also needed, because the root
@@ -1288,7 +1291,8 @@ static int fuse_permission(struct inode *inode, int mask)
if (err == -EACCES && !refreshed) {
err = fuse_perm_getattr(inode, mask);
if (!err)
- err = generic_permission(inode, mask);
+ err = generic_permission(&init_user_ns,
+ inode, mask);
}
/* Note: the opposite of the above test does not
@@ -1610,7 +1614,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
if (!fc->default_permissions)
attr->ia_valid |= ATTR_FORCE;
- err = setattr_prepare(dentry, attr);
+ err = setattr_prepare(&init_user_ns, dentry, attr);
if (err)
return err;
@@ -1756,7 +1760,8 @@ error:
return err;
}
-static int fuse_setattr(struct dentry *entry, struct iattr *attr)
+static int fuse_setattr(struct user_namespace *mnt_userns, struct dentry *entry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(entry);
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -1818,7 +1823,8 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
return ret;
}
-static int fuse_getattr(const struct path *path, struct kstat *stat,
+static int fuse_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
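The fuse changes above all follow the same shape: each inode operation gains a struct user_namespace argument, and a filesystem that does not support idmapped mounts keeps passing &init_user_ns to the generic VFS helpers. A minimal sketch of that conversion (foo_permission is a made-up name):

static int foo_permission(struct user_namespace *mnt_userns,
			  struct inode *inode, int mask)
{
	/*
	 * Not idmapped-mount aware yet: ignore mnt_userns and keep the
	 * old behaviour by passing the initial user namespace.
	 */
	return generic_permission(&init_user_ns, inode, mask);
}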
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 7c4b8cb93f9f..63d97a15ffde 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -863,6 +863,7 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
static inline void fuse_make_bad(struct inode *inode)
{
+ remove_inode_hash(inode);
set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
}
@@ -1180,8 +1181,8 @@ extern const struct xattr_handler *fuse_no_acl_xattr_handlers[];
struct posix_acl;
struct posix_acl *fuse_get_acl(struct inode *inode, int type);
-int fuse_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-
+int fuse_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
/* readdir.c */
int fuse_readdir(struct file *file, struct dir_context *ctx);
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 8868ac31a3c0..4ee6f734ba83 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -1324,8 +1324,15 @@ static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
/* virtiofs allocates and installs its own fuse devices */
ctx->fudptr = NULL;
- if (ctx->dax)
+ if (ctx->dax) {
+ if (!fs->dax_dev) {
+ err = -EINVAL;
+ pr_err("virtio-fs: dax can't be enabled as filesystem"
+ " device does not support it.\n");
+ goto err_free_fuse_devs;
+ }
ctx->dax_dev = fs->dax_dev;
+ }
err = fuse_fill_super_common(sb, ctx);
if (err < 0)
goto err_free_fuse_devs;
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
index cdea18de94f7..1a7d7ace54e1 100644
--- a/fs/fuse/xattr.c
+++ b/fs/fuse/xattr.c
@@ -188,6 +188,7 @@ static int fuse_xattr_get(const struct xattr_handler *handler,
}
static int fuse_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value, size_t size,
int flags)
@@ -214,6 +215,7 @@ static int no_xattr_get(const struct xattr_handler *handler,
}
static int no_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *nodee,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 2e939f5fe751..9165d70ead07 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -106,7 +106,8 @@ out:
return error;
}
-int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int gfs2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
@@ -130,7 +131,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
mode = inode->i_mode;
if (type == ACL_TYPE_ACCESS && acl) {
- ret = posix_acl_update_mode(inode, &mode, &acl);
+ ret = posix_acl_update_mode(&init_user_ns, inode, &mode, &acl);
if (ret)
goto unlock;
}
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 61353a1501c5..eccc6a43326c 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -13,6 +13,7 @@
extern struct posix_acl *gfs2_get_acl(struct inode *inode, int type);
extern int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-extern int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int gfs2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
#endif /* __ACL_DOT_H__ */
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 62d9081d1e26..7a358ae05185 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1230,6 +1230,9 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
gfs2_inplace_release(ip);
+ if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+ gfs2_quota_unlock(ip);
+
if (length != written && (iomap->flags & IOMAP_F_NEW)) {
/* Deallocate blocks that were just allocated. */
loff_t blockmask = i_blocksize(inode) - 1;
@@ -1242,9 +1245,6 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
}
}
- if (ip->i_qadata && ip->i_qadata->qa_qd_num)
- gfs2_quota_unlock(ip);
-
if (unlikely(!written))
goto out_unlock;
@@ -1538,13 +1538,13 @@ more_rgrps:
goto out;
}
ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
- 0, rd_gh);
+ LM_FLAG_NODE_SCOPE, rd_gh);
if (ret)
goto out;
/* Must be done with the rgrp glock held: */
if (gfs2_rs_active(&ip->i_res) &&
- rgd == ip->i_res.rs_rbm.rgd)
+ rgd == ip->i_res.rs_rgd)
gfs2_rs_deltree(&ip->i_res);
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index b39b339feddc..2d500f90cdac 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -238,7 +238,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask,
goto out;
error = -EACCES;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
goto out;
error = 0;
@@ -256,7 +256,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask,
!capable(CAP_LINUX_IMMUTABLE))
goto out;
if (!IS_IMMUTABLE(inode)) {
- error = gfs2_permission(inode, MAY_WRITE);
+ error = gfs2_permission(&init_user_ns, inode, MAY_WRITE);
if (error)
goto out;
}
@@ -716,10 +716,10 @@ static int gfs2_release(struct inode *inode, struct file *file)
kfree(file->private_data);
file->private_data = NULL;
- if (file->f_mode & FMODE_WRITE) {
+ if (gfs2_rs_active(&ip->i_res))
gfs2_rs_delete(ip, &inode->i_writecount);
+ if (file->f_mode & FMODE_WRITE)
gfs2_qa_put(ip);
- }
return 0;
}
@@ -749,7 +749,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
{
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
- int sync_state = inode->i_state & I_DIRTY_ALL;
+ int sync_state = inode->i_state & I_DIRTY;
struct gfs2_inode *ip = GFS2_I(inode);
int ret = 0, ret1 = 0;
@@ -762,7 +762,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
if (!gfs2_is_jdata(ip))
sync_state &= ~I_DIRTY_PAGES;
if (datasync)
- sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);
+ sync_state &= ~I_DIRTY_SYNC;
if (sync_state) {
ret = sync_inode_metadata(inode, 1);
@@ -797,9 +797,7 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
if (ret)
goto out_uninit;
- ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
- is_sync_kiocb(iocb));
-
+ ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL, 0);
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
@@ -833,8 +831,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
if (offset + len > i_size_read(&ip->i_inode))
goto out;
- ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
- is_sync_kiocb(iocb));
+ ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL, 0);
if (ret == -ENOTBLK)
ret = 0;
out:
@@ -1115,8 +1112,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
goto out_qunlock;
/* check if the selected rgrp limits our max_blks further */
- if (ap.allowed && ap.allowed < max_blks)
- max_blks = ap.allowed;
+ if (ip->i_res.rs_reserved < max_blks)
+ max_blks = ip->i_res.rs_reserved;
/* Almost done. Calculate bytes that can be written using
* max_blks. We also recompute max_bytes, data_blocks and
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index d87a5bc3607b..9567520d79f7 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -313,9 +313,23 @@ void gfs2_glock_put(struct gfs2_glock *gl)
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);
- if ((gh->gh_state == LM_ST_EXCLUSIVE ||
- gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
- return 0;
+
+ if (gh != gh_head) {
+ /**
+ * Here we make a special exception to grant holders who agree
+ * to share the EX lock with other holders who also have the
+ * LM_FLAG_NODE_SCOPE bit set. If the original holder has that
+ * bit set, we grant more holders with the bit set.
+ */
+ if (gh_head->gh_state == LM_ST_EXCLUSIVE &&
+ (gh_head->gh_flags & LM_FLAG_NODE_SCOPE) &&
+ gh->gh_state == LM_ST_EXCLUSIVE &&
+ (gh->gh_flags & LM_FLAG_NODE_SCOPE))
+ return 1;
+ if ((gh->gh_state == LM_ST_EXCLUSIVE ||
+ gh_head->gh_state == LM_ST_EXCLUSIVE))
+ return 0;
+ }
if (gl->gl_state == gh->gh_state)
return 1;
if (gh->gh_flags & GL_EXACT)
@@ -2030,6 +2044,8 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
*p++ = 'A';
if (flags & LM_FLAG_PRIORITY)
*p++ = 'p';
+ if (flags & LM_FLAG_NODE_SCOPE)
+ *p++ = 'n';
if (flags & GL_ASYNC)
*p++ = 'a';
if (flags & GL_EXACT)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 53813364517b..31a8f2f649b5 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -75,6 +75,11 @@ enum {
* request and directly join the other shared lock. A shared lock request
* without the priority flag might be forced to wait until the deferred
* request had acquired and released the lock.
+ *
+ * LM_FLAG_NODE_SCOPE
+ * This holder agrees to share the lock within this node. In other words,
+ * the glock is held in EX mode according to DLM, but local holders on the
+ * same node can share it.
*/
#define LM_FLAG_TRY 0x0001
@@ -82,6 +87,7 @@ enum {
#define LM_FLAG_NOEXP 0x0004
#define LM_FLAG_ANY 0x0008
#define LM_FLAG_PRIORITY 0x0010
+#define LM_FLAG_NODE_SCOPE 0x0020
#define GL_ASYNC 0x0040
#define GL_EXACT 0x0080
#define GL_SKIP 0x0100
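For illustration, this mirrors how the rest of the series uses the new flag: two local holders can both be granted an EX rgrp glock as long as each requests it with LM_FLAG_NODE_SCOPE (sketch only, loosely based on the rgrp lock requests changed elsewhere in this diff):

static int example_lock_rgrp(struct gfs2_rgrpd *rgd)
{
	struct gfs2_holder rd_gh;
	int error;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &rd_gh);
	if (error)
		return error;

	/* ... operate on the resource group ... */

	gfs2_glock_dq_uninit(&rd_gh);
	return 0;
}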
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 3faa421568b0..8e32d569c8bf 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -86,16 +86,12 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_trans tr;
+ unsigned int revokes;
int ret;
- memset(&tr, 0, sizeof(tr));
- INIT_LIST_HEAD(&tr.tr_buf);
- INIT_LIST_HEAD(&tr.tr_databuf);
- INIT_LIST_HEAD(&tr.tr_ail1_list);
- INIT_LIST_HEAD(&tr.tr_ail2_list);
- tr.tr_revokes = atomic_read(&gl->gl_ail_count);
+ revokes = atomic_read(&gl->gl_ail_count);
- if (!tr.tr_revokes) {
+ if (!revokes) {
bool have_revokes;
bool log_in_flight;
@@ -122,20 +118,14 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
return 0;
}
- /* A shortened, inline version of gfs2_trans_begin()
- * tr->alloced is not set since the transaction structure is
- * on the stack */
- tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
- tr.tr_ip = _RET_IP_;
- ret = gfs2_log_reserve(sdp, tr.tr_reserved);
- if (ret < 0)
- return ret;
- WARN_ON_ONCE(current->journal_info);
- current->journal_info = &tr;
-
- __gfs2_ail_flush(gl, 0, tr.tr_revokes);
-
+ memset(&tr, 0, sizeof(tr));
+ set_bit(TR_ONSTACK, &tr.tr_flags);
+ ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
+ if (ret)
+ goto flush;
+ __gfs2_ail_flush(gl, 0, revokes);
gfs2_trans_end(sdp);
+
flush:
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_AIL_EMPTY_GL);
@@ -146,19 +136,15 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
unsigned int revokes = atomic_read(&gl->gl_ail_count);
- unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
int ret;
if (!revokes)
return;
- while (revokes > max_revokes)
- max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
-
- ret = gfs2_trans_begin(sdp, 0, max_revokes);
+ ret = gfs2_trans_begin(sdp, 0, revokes);
if (ret)
return;
- __gfs2_ail_flush(gl, fsync, max_revokes);
+ __gfs2_ail_flush(gl, fsync, revokes);
gfs2_trans_end(sdp);
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_AIL_FLUSH);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 8e1ab8ed4abc..0957119f7744 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -20,6 +20,7 @@
#include <linux/percpu.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>
+#include <linux/mutex.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
@@ -106,7 +107,8 @@ struct gfs2_rgrpd {
u32 rd_data; /* num of data blocks in rgrp */
u32 rd_bitbytes; /* number of bytes in data bitmaps */
u32 rd_free;
- u32 rd_reserved; /* number of blocks reserved */
+ u32 rd_requested; /* number of blocks in rd_rstree */
+ u32 rd_reserved; /* number of reserved blocks */
u32 rd_free_clone;
u32 rd_dinodes;
u64 rd_igeneration;
@@ -122,34 +124,10 @@ struct gfs2_rgrpd {
#define GFS2_RDF_PREFERRED 0x80000000 /* This rgrp is preferred */
#define GFS2_RDF_MASK 0xf0000000 /* mask for internal flags */
spinlock_t rd_rsspin; /* protects reservation related vars */
+ struct mutex rd_mutex;
struct rb_root rd_rstree; /* multi-block reservation tree */
};
-struct gfs2_rbm {
- struct gfs2_rgrpd *rgd;
- u32 offset; /* The offset is bitmap relative */
- int bii; /* Bitmap index */
-};
-
-static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
-{
- return rbm->rgd->rd_bits + rbm->bii;
-}
-
-static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
-{
- BUG_ON(rbm->offset >= rbm->rgd->rd_data);
- return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
- rbm->offset;
-}
-
-static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
- const struct gfs2_rbm *rbm2)
-{
- return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
- (rbm1->offset == rbm2->offset);
-}
-
enum gfs2_state_bits {
BH_Pinned = BH_PrivateStart,
BH_Escaped = BH_PrivateStart + 1,
@@ -313,9 +291,11 @@ struct gfs2_qadata { /* quota allocation data */
*/
struct gfs2_blkreserv {
- struct rb_node rs_node; /* link to other block reservations */
- struct gfs2_rbm rs_rbm; /* Start of reservation */
- u32 rs_free; /* how many blocks are still free */
+ struct rb_node rs_node; /* node within rd_rstree */
+ struct gfs2_rgrpd *rs_rgd;
+ u64 rs_start;
+ u32 rs_requested;
+ u32 rs_reserved; /* number of reserved blocks */
};
/*
@@ -490,7 +470,7 @@ struct gfs2_quota_data {
enum {
TR_TOUCHED = 1,
TR_ATTACHED = 2,
- TR_ALLOCED = 3,
+ TR_ONSTACK = 3,
};
struct gfs2_trans {
@@ -506,7 +486,6 @@ struct gfs2_trans {
unsigned int tr_num_buf_rm;
unsigned int tr_num_databuf_rm;
unsigned int tr_num_revoke;
- unsigned int tr_num_revoke_rm;
struct list_head tr_list;
struct list_head tr_databuf;
@@ -531,6 +510,7 @@ struct gfs2_jdesc {
unsigned int nr_extents;
struct work_struct jd_work;
struct inode *jd_inode;
+ struct bio *jd_log_bio;
unsigned long jd_flags;
#define JDF_RECOVERY 1
unsigned int jd_jid;
@@ -585,6 +565,7 @@ struct gfs2_args {
unsigned int ar_errors:2; /* errors=withdraw | panic */
unsigned int ar_nobarrier:1; /* do not send barriers */
unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */
+ unsigned int ar_got_rgrplvb:1; /* Was the rgrplvb opt given? */
unsigned int ar_loccookie:1; /* use location based readdir
cookies */
s32 ar_commit; /* Commit interval */
@@ -821,7 +802,6 @@ struct gfs2_sbd {
struct gfs2_trans *sd_log_tr;
unsigned int sd_log_blks_reserved;
- int sd_log_committed_revoke;
atomic_t sd_log_pinned;
unsigned int sd_log_num_revoke;
@@ -834,24 +814,22 @@ struct gfs2_sbd {
atomic_t sd_log_thresh2;
atomic_t sd_log_blks_free;
atomic_t sd_log_blks_needed;
+ atomic_t sd_log_revokes_available;
wait_queue_head_t sd_log_waitq;
wait_queue_head_t sd_logd_waitq;
u64 sd_log_sequence;
- unsigned int sd_log_head;
- unsigned int sd_log_tail;
int sd_log_idle;
struct rw_semaphore sd_log_flush_lock;
atomic_t sd_log_in_flight;
- struct bio *sd_log_bio;
wait_queue_head_t sd_log_flush_wait;
int sd_log_error; /* First log error */
wait_queue_head_t sd_withdraw_wait;
- atomic_t sd_reserving_log;
- wait_queue_head_t sd_reserving_log_wait;
-
+ unsigned int sd_log_tail;
+ unsigned int sd_log_flush_tail;
+ unsigned int sd_log_head;
unsigned int sd_log_flush_head;
spinlock_t sd_ail_lock;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index c1b77e8d6b1c..c9775d5c6594 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -325,7 +325,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
}
if (!is_root) {
- error = gfs2_permission(dir, MAY_EXEC);
+ error = gfs2_permission(&init_user_ns, dir, MAY_EXEC);
if (error)
goto out;
}
@@ -355,7 +355,8 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
{
int error;
- error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
+ error = gfs2_permission(&init_user_ns, &dip->i_inode,
+ MAY_WRITE | MAY_EXEC);
if (error)
return error;
@@ -490,8 +491,8 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
di = (struct gfs2_dinode *)dibh->b_data;
gfs2_dinode_out(ip, di);
- di->di_major = cpu_to_be32(MAJOR(ip->i_inode.i_rdev));
- di->di_minor = cpu_to_be32(MINOR(ip->i_inode.i_rdev));
+ di->di_major = cpu_to_be32(imajor(&ip->i_inode));
+ di->di_minor = cpu_to_be32(iminor(&ip->i_inode));
di->__pad1 = 0;
di->__pad2 = 0;
di->__pad3 = 0;
@@ -843,8 +844,8 @@ fail:
* Returns: errno
*/
-static int gfs2_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, bool excl)
+static int gfs2_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl);
}
@@ -951,7 +952,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (inode->i_nlink == 0)
goto out_gunlock;
- error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC);
+ error = gfs2_permission(&init_user_ns, dir, MAY_WRITE | MAY_EXEC);
if (error)
goto out_gunlock;
@@ -1068,7 +1069,8 @@ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
if (IS_APPEND(&dip->i_inode))
return -EPERM;
- error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
+ error = gfs2_permission(&init_user_ns, &dip->i_inode,
+ MAY_WRITE | MAY_EXEC);
if (error)
return error;
@@ -1145,7 +1147,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
if (!rgd)
goto out_inodes;
- gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
+ gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE, ghs + 2);
error = gfs2_glock_nq(ghs); /* parent */
@@ -1204,8 +1206,8 @@ out_inodes:
* Returns: errno
*/
-static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
+static int gfs2_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
unsigned int size;
@@ -1225,7 +1227,8 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
* Returns: errno
*/
-static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int gfs2_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0);
@@ -1240,8 +1243,8 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
*
*/
-static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t dev)
+static int gfs2_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t dev)
{
return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0);
}
@@ -1450,8 +1453,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
error = -ENOENT;
goto out_gunlock;
}
- error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0,
- &rd_gh);
+ error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &rd_gh);
if (error)
goto out_gunlock;
}
@@ -1490,7 +1493,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
}
}
} else {
- error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC);
+ error = gfs2_permission(&init_user_ns, ndir,
+ MAY_WRITE | MAY_EXEC);
if (error)
goto out_gunlock;
@@ -1525,7 +1529,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
/* Check out the dir to be renamed */
if (dir_rename) {
- error = gfs2_permission(d_inode(odentry), MAY_WRITE);
+ error = gfs2_permission(&init_user_ns, d_inode(odentry),
+ MAY_WRITE);
if (error)
goto out_gunlock;
}
@@ -1688,12 +1693,14 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
goto out_gunlock;
if (S_ISDIR(old_mode)) {
- error = gfs2_permission(odentry->d_inode, MAY_WRITE);
+ error = gfs2_permission(&init_user_ns, odentry->d_inode,
+ MAY_WRITE);
if (error)
goto out_gunlock;
}
if (S_ISDIR(new_mode)) {
- error = gfs2_permission(ndentry->d_inode, MAY_WRITE);
+ error = gfs2_permission(&init_user_ns, ndentry->d_inode,
+ MAY_WRITE);
if (error)
goto out_gunlock;
}
@@ -1747,9 +1754,9 @@ out:
return error;
}
-static int gfs2_rename2(struct inode *odir, struct dentry *odentry,
- struct inode *ndir, struct dentry *ndentry,
- unsigned int flags)
+static int gfs2_rename2(struct user_namespace *mnt_userns, struct inode *odir,
+ struct dentry *odentry, struct inode *ndir,
+ struct dentry *ndentry, unsigned int flags)
{
flags &= ~RENAME_NOREPLACE;
@@ -1833,7 +1840,8 @@ out:
* Returns: errno
*/
-int gfs2_permission(struct inode *inode, int mask)
+int gfs2_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask)
{
struct gfs2_inode *ip;
struct gfs2_holder i_gh;
@@ -1852,7 +1860,7 @@ int gfs2_permission(struct inode *inode, int mask)
if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
error = -EPERM;
else
- error = generic_permission(inode, mask);
+ error = generic_permission(&init_user_ns, inode, mask);
if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh);
@@ -1861,7 +1869,7 @@ int gfs2_permission(struct inode *inode, int mask)
static int __gfs2_setattr_simple(struct inode *inode, struct iattr *attr)
{
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
@@ -1963,7 +1971,8 @@ out:
* Returns: errno
*/
-static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
+static int gfs2_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
@@ -1982,7 +1991,7 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto error;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
goto error;
@@ -1993,7 +2002,8 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
else {
error = gfs2_setattr_simple(inode, attr);
if (!error && attr->ia_valid & ATTR_MODE)
- error = posix_acl_chmod(inode, inode->i_mode);
+ error = posix_acl_chmod(&init_user_ns, inode,
+ inode->i_mode);
}
error:
@@ -2007,6 +2017,7 @@ out:
/**
* gfs2_getattr - Read out an inode's attributes
+ * @mnt_userns: user namespace of the mount the inode was found from
* @path: Object to query
* @stat: The inode's stats
* @request_mask: Mask of STATX_xxx flags indicating the caller's interests
@@ -2021,7 +2032,8 @@ out:
* Returns: errno
*/
-static int gfs2_getattr(const struct path *path, struct kstat *stat,
+static int gfs2_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
@@ -2049,7 +2061,7 @@ static int gfs2_getattr(const struct path *path, struct kstat *stat,
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
if (gfs2_holder_initialized(&gh))
gfs2_glock_dq_uninit(&gh);
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 8073b8d2c7fa..c447bd5b3017 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -99,7 +99,8 @@ extern int gfs2_inode_refresh(struct gfs2_inode *ip);
extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
int is_root);
-extern int gfs2_permission(struct inode *inode, int mask);
+extern int gfs2_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask);
extern int gfs2_setattr_simple(struct inode *inode, struct iattr *attr);
extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 9f2b5609f225..153272f82984 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -284,7 +284,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
- int lvb_needs_unlock = 0;
int error;
if (gl->gl_lksb.sb_lkid == 0) {
@@ -297,13 +296,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_update_request_times(gl);
- /* don't want to skip dlm_unlock writing the lvb when lock is ex */
-
- if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
- lvb_needs_unlock = 1;
+ /* don't want to skip dlm_unlock writing the lvb when lock has one */
if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
- !lvb_needs_unlock) {
+ !gl->gl_lksb.sb_lvbptr) {
gfs2_glock_free(gl);
return;
}
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 2e9314091c81..6410281546f9 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -50,10 +50,12 @@ unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
unsigned int blks;
unsigned int first, second;
+ /* The initial struct gfs2_log_descriptor block */
blks = 1;
first = sdp->sd_ldptrs;
if (nstruct > first) {
+ /* Subsequent struct gfs2_meta_header blocks */
second = sdp->sd_inptrs;
blks += DIV_ROUND_UP(nstruct - first, second);
}
@@ -89,7 +91,7 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
struct writeback_control *wbc,
- struct gfs2_trans *tr)
+ struct gfs2_trans *tr, struct blk_plug *plug)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
@@ -131,6 +133,11 @@ __acquires(&sdp->sd_ail_lock)
continue;
spin_unlock(&sdp->sd_ail_lock);
ret = generic_writepages(mapping, wbc);
+ if (need_resched()) {
+ blk_finish_plug(plug);
+ cond_resched();
+ blk_start_plug(plug);
+ }
spin_lock(&sdp->sd_ail_lock);
if (ret == -ENODATA) /* if a jdata write into a new hole */
ret = 0; /* ignore it */
@@ -205,7 +212,7 @@ restart:
list_for_each_entry_reverse(tr, head, tr_list) {
if (wbc->nr_to_write <= 0)
break;
- ret = gfs2_ail1_start_one(sdp, wbc, tr);
+ ret = gfs2_ail1_start_one(sdp, wbc, tr, &plug);
if (ret) {
if (ret == -EBUSY)
goto restart;
@@ -240,6 +247,45 @@ static void gfs2_ail1_start(struct gfs2_sbd *sdp)
return gfs2_ail1_flush(sdp, &wbc);
}
+static void gfs2_log_update_flush_tail(struct gfs2_sbd *sdp)
+{
+ unsigned int new_flush_tail = sdp->sd_log_head;
+ struct gfs2_trans *tr;
+
+ if (!list_empty(&sdp->sd_ail1_list)) {
+ tr = list_last_entry(&sdp->sd_ail1_list,
+ struct gfs2_trans, tr_list);
+ new_flush_tail = tr->tr_first;
+ }
+ sdp->sd_log_flush_tail = new_flush_tail;
+}
+
+static void gfs2_log_update_head(struct gfs2_sbd *sdp)
+{
+ unsigned int new_head = sdp->sd_log_flush_head;
+
+ if (sdp->sd_log_flush_tail == sdp->sd_log_head)
+ sdp->sd_log_flush_tail = new_head;
+ sdp->sd_log_head = new_head;
+}
+
+/**
+ * gfs2_ail_empty_tr - empty one of the ail lists of a transaction
+ */
+
+static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ struct list_head *head)
+{
+ struct gfs2_bufdata *bd;
+
+ while (!list_empty(head)) {
+ bd = list_first_entry(head, struct gfs2_bufdata,
+ bd_ail_st_list);
+ gfs2_assert(sdp, bd->bd_tr == tr);
+ gfs2_remove_from_ail(bd);
+ }
+}
+
/**
* gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
* @sdp: the filesystem
@@ -315,6 +361,7 @@ static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
else
oldest_tr = 0;
}
+ gfs2_log_update_flush_tail(sdp);
ret = list_empty(&sdp->sd_ail1_list);
spin_unlock(&sdp->sd_ail_lock);
@@ -348,47 +395,69 @@ static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
spin_unlock(&sdp->sd_ail_lock);
}
-/**
- * gfs2_ail_empty_tr - empty one of the ail lists for a transaction
- */
-
-static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
- struct list_head *head)
+static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
- struct gfs2_bufdata *bd;
-
- while (!list_empty(head)) {
- bd = list_first_entry(head, struct gfs2_bufdata,
- bd_ail_st_list);
- gfs2_assert(sdp, bd->bd_tr == tr);
- gfs2_remove_from_ail(bd);
- }
+ gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
+ list_del(&tr->tr_list);
+ gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
+ gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
+ gfs2_trans_free(sdp, tr);
}
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
- struct gfs2_trans *tr, *safe;
+ struct list_head *ail2_list = &sdp->sd_ail2_list;
unsigned int old_tail = sdp->sd_log_tail;
- int wrap = (new_tail < old_tail);
- int a, b, rm;
+ struct gfs2_trans *tr, *safe;
spin_lock(&sdp->sd_ail_lock);
+ if (old_tail <= new_tail) {
+ list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
+ if (old_tail <= tr->tr_first && tr->tr_first < new_tail)
+ __ail2_empty(sdp, tr);
+ }
+ } else {
+ list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
+ if (old_tail <= tr->tr_first || tr->tr_first < new_tail)
+ __ail2_empty(sdp, tr);
+ }
+ }
+ spin_unlock(&sdp->sd_ail_lock);
+}
- list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
- a = (old_tail <= tr->tr_first);
- b = (tr->tr_first < new_tail);
- rm = (wrap) ? (a || b) : (a && b);
- if (!rm)
- continue;
+/**
+ * gfs2_log_is_empty - Check if the log is empty
+ * @sdp: The GFS2 superblock
+ */
- gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
- list_del(&tr->tr_list);
- gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
- gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
- gfs2_trans_free(sdp, tr);
+bool gfs2_log_is_empty(struct gfs2_sbd *sdp) {
+ return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks;
+}
+
+static bool __gfs2_log_try_reserve_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
+{
+ unsigned int available;
+
+ available = atomic_read(&sdp->sd_log_revokes_available);
+ while (available >= revokes) {
+ if (atomic_try_cmpxchg(&sdp->sd_log_revokes_available,
+ &available, available - revokes))
+ return true;
}
+ return false;
+}
- spin_unlock(&sdp->sd_ail_lock);
+/**
+ * gfs2_log_release_revokes - Release a given number of revokes
+ * @sdp: The GFS2 superblock
+ * @revokes: The number of revokes to release
+ *
+ * sdp->sd_log_flush_lock must be held.
+ */
+void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
+{
+ if (revokes)
+ atomic_add(revokes, &sdp->sd_log_revokes_available);
}
/**
@@ -400,86 +469,141 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
-
atomic_add(blks, &sdp->sd_log_blks_free);
trace_gfs2_log_blocks(sdp, blks);
gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
sdp->sd_jdesc->jd_blocks);
- up_read(&sdp->sd_log_flush_lock);
+ if (atomic_read(&sdp->sd_log_blks_needed))
+ wake_up(&sdp->sd_log_waitq);
}
/**
- * gfs2_log_reserve - Make a log reservation
+ * __gfs2_log_try_reserve - Try to make a log reservation
* @sdp: The GFS2 superblock
* @blks: The number of blocks to reserve
+ * @taboo_blks: The number of blocks to leave free
*
- * Note that we never give out the last few blocks of the journal. Thats
- * due to the fact that there is a small number of header blocks
- * associated with each log flush. The exact number can't be known until
- * flush time, so we ensure that we have just enough free blocks at all
- * times to avoid running out during a log flush.
+ * Try to do the same as __gfs2_log_reserve(), but fail if no more log
+ * space is immediately available.
+ */
+static bool __gfs2_log_try_reserve(struct gfs2_sbd *sdp, unsigned int blks,
+ unsigned int taboo_blks)
+{
+ unsigned wanted = blks + taboo_blks;
+ unsigned int free_blocks;
+
+ free_blocks = atomic_read(&sdp->sd_log_blks_free);
+ while (free_blocks >= wanted) {
+ if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks,
+ free_blocks - blks)) {
+ trace_gfs2_log_blocks(sdp, -blks);
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * __gfs2_log_reserve - Make a log reservation
+ * @sdp: The GFS2 superblock
+ * @blks: The number of blocks to reserve
+ * @taboo_blks: The number of blocks to leave free
+ *
+ * @taboo_blks is set to 0 for logd, and to GFS2_LOG_FLUSH_MIN_BLOCKS
+ * for all other processes. This ensures that when the log is almost full,
+ * logd will still be able to call gfs2_log_flush one more time without
+ * blocking, which will advance the tail and make some more log space
+ * available.
*
* We no longer flush the log here, instead we wake up logd to do that
* for us. To avoid the thundering herd and to ensure that we deal fairly
* with queued waiters, we use an exclusive wait. This means that when we
* get woken with enough journal space to get our reservation, we need to
* wake the next waiter on the list.
- *
- * Returns: errno
*/
-int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
+static void __gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks,
+ unsigned int taboo_blks)
{
- int ret = 0;
- unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
- unsigned wanted = blks + reserved_blks;
- DEFINE_WAIT(wait);
- int did_wait = 0;
+ unsigned wanted = blks + taboo_blks;
unsigned int free_blocks;
- if (gfs2_assert_warn(sdp, blks) ||
- gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
- return -EINVAL;
atomic_add(blks, &sdp->sd_log_blks_needed);
-retry:
- free_blocks = atomic_read(&sdp->sd_log_blks_free);
- if (unlikely(free_blocks <= wanted)) {
- do {
- prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
- TASK_UNINTERRUPTIBLE);
+ for (;;) {
+ if (current != sdp->sd_logd_process)
wake_up(&sdp->sd_logd_waitq);
- did_wait = 1;
- if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
- io_schedule();
- free_blocks = atomic_read(&sdp->sd_log_blks_free);
- } while(free_blocks <= wanted);
- finish_wait(&sdp->sd_log_waitq, &wait);
- }
- atomic_inc(&sdp->sd_reserving_log);
- if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
- free_blocks - blks) != free_blocks) {
- if (atomic_dec_and_test(&sdp->sd_reserving_log))
- wake_up(&sdp->sd_reserving_log_wait);
- goto retry;
+ io_wait_event(sdp->sd_log_waitq,
+ (free_blocks = atomic_read(&sdp->sd_log_blks_free),
+ free_blocks >= wanted));
+ do {
+ if (atomic_try_cmpxchg(&sdp->sd_log_blks_free,
+ &free_blocks,
+ free_blocks - blks))
+ goto reserved;
+ } while (free_blocks >= wanted);
}
- atomic_sub(blks, &sdp->sd_log_blks_needed);
- trace_gfs2_log_blocks(sdp, -blks);
- /*
- * If we waited, then so might others, wake them up _after_ we get
- * our share of the log.
- */
- if (unlikely(did_wait))
+reserved:
+ trace_gfs2_log_blocks(sdp, -blks);
+ if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))
wake_up(&sdp->sd_log_waitq);
+}
+
+/**
+ * gfs2_log_try_reserve - Try to make a log reservation
+ * @sdp: The GFS2 superblock
+ * @tr: The transaction
+ * @extra_revokes: The number of additional revokes reserved (output)
+ *
+ * This is similar to gfs2_log_reserve, but sdp->sd_log_flush_lock must be
+ * held for correct revoke accounting.
+ */
+
+bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ unsigned int *extra_revokes)
+{
+ unsigned int blks = tr->tr_reserved;
+ unsigned int revokes = tr->tr_revokes;
+ unsigned int revoke_blks = 0;
+
+ *extra_revokes = 0;
+ if (revokes && !__gfs2_log_try_reserve_revokes(sdp, revokes)) {
+ revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
+ *extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
+ blks += revoke_blks;
+ }
+ if (!blks)
+ return true;
+ if (__gfs2_log_try_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS))
+ return true;
+ if (!revoke_blks)
+ gfs2_log_release_revokes(sdp, revokes);
+ return false;
+}
- down_read(&sdp->sd_log_flush_lock);
- if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
- gfs2_log_release(sdp, blks);
- ret = -EROFS;
+/**
+ * gfs2_log_reserve - Make a log reservation
+ * @sdp: The GFS2 superblock
+ * @tr: The transaction
+ * @extra_revokes: The number of additional revokes reserved (output)
+ *
+ * sdp->sd_log_flush_lock must not be held.
+ */
+
+void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ unsigned int *extra_revokes)
+{
+ unsigned int blks = tr->tr_reserved;
+ unsigned int revokes = tr->tr_revokes;
+ unsigned int revoke_blks = 0;
+
+ *extra_revokes = 0;
+ if (revokes) {
+ revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
+ *extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
+ blks += revoke_blks;
}
- if (atomic_dec_and_test(&sdp->sd_reserving_log))
- wake_up(&sdp->sd_reserving_log_wait);
- return ret;
+ __gfs2_log_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS);
}
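Both __gfs2_log_try_reserve() and __gfs2_log_try_reserve_revokes() above rely on the same lock-free pattern: read the available count once, then loop on atomic_try_cmpxchg(), which refreshes the observed value on failure. A generic sketch of that pattern (example_try_reserve is a made-up helper):

static bool example_try_reserve(atomic_t *available, unsigned int want)
{
	unsigned int old = atomic_read(available);

	while (old >= want) {
		/*
		 * On failure, atomic_try_cmpxchg() updates 'old' with the
		 * current value, so the bound is re-checked each iteration.
		 */
		if (atomic_try_cmpxchg(available, &old, old - want))
			return true;
	}
	return false;
}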
/**
@@ -507,24 +631,20 @@ static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer
}
/**
- * calc_reserved - Calculate the number of blocks to reserve when
- * refunding a transaction's unused buffers.
+ * calc_reserved - Calculate the number of blocks to keep reserved
* @sdp: The GFS2 superblock
*
* This is complex. We need to reserve room for all our currently used
- * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
- * all our journaled data buffers for journaled files (e.g. files in the
+ * metadata blocks (e.g. normal file I/O rewriting file time stamps) and
+ * all our journaled data blocks for journaled files (e.g. files in the
* meta_fs like rindex, or files for which chattr +j was done.)
- * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
- * will count it as free space (sd_log_blks_free) and corruption will follow.
+ * If we don't reserve enough space, corruption will follow.
*
- * We can have metadata bufs and jdata bufs in the same journal. So each
- * type gets its own log header, for which we need to reserve a block.
- * In fact, each type has the potential for needing more than one header
- * in cases where we have more buffers than will fit on a journal page.
+ * We can have metadata blocks and jdata blocks in the same journal. Each
+ * type gets its own log descriptor, for which we need to reserve a block.
+ * In fact, each type has the potential for needing more than one log descriptor
+ * in cases where we have more blocks than will fit in a log descriptor.
* Metadata journal entries take up half the space of journaled buffer entries.
- * Thus, metadata entries have buf_limit (502) and journaled buffers have
- * databuf_limit (251) before they cause a wrap around.
*
* Also, we need to reserve blocks for revoke journal entries and one for an
* overall header for the lot.
@@ -533,59 +653,29 @@ static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer
*/
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
- unsigned int reserved = 0;
- unsigned int mbuf;
- unsigned int dbuf;
+ unsigned int reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
+ unsigned int blocks;
struct gfs2_trans *tr = sdp->sd_log_tr;
if (tr) {
- mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
- dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
- reserved = mbuf + dbuf;
- /* Account for header blocks */
- reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
- reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
+ blocks = tr->tr_num_buf_new - tr->tr_num_buf_rm;
+ reserved += blocks + DIV_ROUND_UP(blocks, buf_limit(sdp));
+ blocks = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
+ reserved += blocks + DIV_ROUND_UP(blocks, databuf_limit(sdp));
}
-
- if (sdp->sd_log_committed_revoke > 0)
- reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
- /* One for the overall header */
- if (reserved)
- reserved++;
return reserved;
}
-static unsigned int current_tail(struct gfs2_sbd *sdp)
-{
- struct gfs2_trans *tr;
- unsigned int tail;
-
- spin_lock(&sdp->sd_ail_lock);
-
- if (list_empty(&sdp->sd_ail1_list)) {
- tail = sdp->sd_log_head;
- } else {
- tr = list_last_entry(&sdp->sd_ail1_list, struct gfs2_trans,
- tr_list);
- tail = tr->tr_first;
- }
-
- spin_unlock(&sdp->sd_ail_lock);
-
- return tail;
-}
-
-static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
+static void log_pull_tail(struct gfs2_sbd *sdp)
{
- unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
+ unsigned int new_tail = sdp->sd_log_flush_tail;
+ unsigned int dist;
+ if (new_tail == sdp->sd_log_tail)
+ return;
+ dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
ail2_empty(sdp, new_tail);
-
- atomic_add(dist, &sdp->sd_log_blks_free);
- trace_gfs2_log_blocks(sdp, dist);
- gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
- sdp->sd_jdesc->jd_blocks);
-
+ gfs2_log_release(sdp, dist);
sdp->sd_log_tail = new_tail;
}
@@ -698,7 +788,7 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
}
/**
- * gfs2_write_revokes - Add as many revokes to the system transaction as we can
+ * gfs2_flush_revokes - Add as many revokes to the system transaction as we can
* @sdp: The GFS2 superblock
*
* Our usual strategy is to defer writing revokes as much as we can in the hope
@@ -709,38 +799,14 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
* been written back. This will basically come at no cost now, and will save
* us from having to keep track of those blocks on the AIL2 list later.
*/
-void gfs2_write_revokes(struct gfs2_sbd *sdp)
+void gfs2_flush_revokes(struct gfs2_sbd *sdp)
{
/* number of revokes we still have room for */
- int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
+ unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);
gfs2_log_lock(sdp);
- while (sdp->sd_log_num_revoke > max_revokes)
- max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
- max_revokes -= sdp->sd_log_num_revoke;
- if (!sdp->sd_log_num_revoke) {
- atomic_dec(&sdp->sd_log_blks_free);
- /* If no blocks have been reserved, we need to also
- * reserve a block for the header */
- if (!sdp->sd_log_blks_reserved) {
- atomic_dec(&sdp->sd_log_blks_free);
- trace_gfs2_log_blocks(sdp, -2);
- } else {
- trace_gfs2_log_blocks(sdp, -1);
- }
- }
gfs2_ail1_empty(sdp, max_revokes);
gfs2_log_unlock(sdp);
-
- if (!sdp->sd_log_num_revoke) {
- atomic_inc(&sdp->sd_log_blks_free);
- if (!sdp->sd_log_blks_reserved) {
- atomic_inc(&sdp->sd_log_blks_free);
- trace_gfs2_log_blocks(sdp, 2);
- } else {
- trace_gfs2_log_blocks(sdp, 1);
- }
- }
}
/**
@@ -769,7 +835,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
u64 dblock;
if (gfs2_withdrawn(sdp))
- goto out;
+ return;
page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
lh = page_address(page);
@@ -822,10 +888,8 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
sb->s_blocksize - LH_V1_SIZE - 4);
lh->lh_crc = cpu_to_be32(crc);
- gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
- gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
-out:
- log_flush_wait(sdp);
+ gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock);
+ gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags);
}
/**
@@ -838,25 +902,24 @@ out:
static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
- unsigned int tail;
int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
- tail = current_tail(sdp);
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
gfs2_ordered_wait(sdp);
log_flush_wait(sdp);
op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
}
- sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
- gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
- sdp->sd_log_flush_head, flags, op_flags);
+ sdp->sd_log_idle = (sdp->sd_log_flush_tail == sdp->sd_log_flush_head);
+ gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++,
+ sdp->sd_log_flush_tail, sdp->sd_log_flush_head,
+ flags, op_flags);
gfs2_log_incr_head(sdp);
-
- if (sdp->sd_log_tail != tail)
- log_pull_tail(sdp, tail);
+ log_flush_wait(sdp);
+ log_pull_tail(sdp);
+ gfs2_log_update_head(sdp);
}
/**
@@ -935,12 +998,16 @@ static void trans_drain(struct gfs2_trans *tr)
while (!list_empty(head)) {
bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
list_del_init(&bd->bd_list);
+ if (!list_empty(&bd->bd_ail_st_list))
+ gfs2_remove_from_ail(bd);
kmem_cache_free(gfs2_bufdata_cachep, bd);
}
head = &tr->tr_databuf;
while (!list_empty(head)) {
bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
list_del_init(&bd->bd_list);
+ if (!list_empty(&bd->bd_ail_st_list))
+ gfs2_remove_from_ail(bd);
kmem_cache_free(gfs2_bufdata_cachep, bd);
}
}
@@ -956,42 +1023,66 @@ static void trans_drain(struct gfs2_trans *tr)
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
struct gfs2_trans *tr = NULL;
+ unsigned int reserved_blocks = 0, used_blocks = 0;
enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+ unsigned int first_log_head;
+ unsigned int reserved_revokes = 0;
down_write(&sdp->sd_log_flush_lock);
+ trace_gfs2_log_flush(sdp, 1, flags);
+repeat:
/*
* Do this check while holding the log_flush_lock to prevent new
* buffers from being added to the ail via gfs2_pin()
*/
- if (gfs2_withdrawn(sdp))
+ if (gfs2_withdrawn(sdp) || !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
goto out;
/* Log might have been flushed while we waited for the flush lock */
if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
goto out;
- trace_gfs2_log_flush(sdp, 1, flags);
- if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
- clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+ first_log_head = sdp->sd_log_head;
+ sdp->sd_log_flush_head = first_log_head;
- sdp->sd_log_flush_head = sdp->sd_log_head;
tr = sdp->sd_log_tr;
- if (tr) {
- sdp->sd_log_tr = NULL;
- tr->tr_first = sdp->sd_log_flush_head;
- if (unlikely (state == SFS_FROZEN))
- if (gfs2_assert_withdraw_delayed(sdp,
- !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
- goto out_withdraw;
+ if (tr || sdp->sd_log_num_revoke) {
+ if (reserved_blocks)
+ gfs2_log_release(sdp, reserved_blocks);
+ reserved_blocks = sdp->sd_log_blks_reserved;
+ reserved_revokes = sdp->sd_log_num_revoke;
+ if (tr) {
+ sdp->sd_log_tr = NULL;
+ tr->tr_first = first_log_head;
+ if (unlikely (state == SFS_FROZEN)) {
+ if (gfs2_assert_withdraw_delayed(sdp,
+ !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
+ goto out_withdraw;
+ }
+ }
+ } else if (!reserved_blocks) {
+ unsigned int taboo_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
+
+ reserved_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
+ if (current == sdp->sd_logd_process)
+ taboo_blocks = 0;
+
+ if (!__gfs2_log_try_reserve(sdp, reserved_blocks, taboo_blocks)) {
+ up_write(&sdp->sd_log_flush_lock);
+ __gfs2_log_reserve(sdp, reserved_blocks, taboo_blocks);
+ down_write(&sdp->sd_log_flush_lock);
+ goto repeat;
+ }
+ BUG_ON(sdp->sd_log_num_revoke);
}
+ if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
+ clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+
if (unlikely(state == SFS_FROZEN))
- if (gfs2_assert_withdraw_delayed(sdp, !sdp->sd_log_num_revoke))
+ if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
goto out_withdraw;
- if (gfs2_assert_withdraw_delayed(sdp,
- sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke))
- goto out_withdraw;
gfs2_ordered_write(sdp);
if (gfs2_withdrawn(sdp))
@@ -999,16 +1090,13 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
lops_before_commit(sdp, tr);
if (gfs2_withdrawn(sdp))
goto out_withdraw;
- gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
+ gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
if (gfs2_withdrawn(sdp))
goto out_withdraw;
if (sdp->sd_log_head != sdp->sd_log_flush_head) {
- log_flush_wait(sdp);
log_write_header(sdp, flags);
- } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
- atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
- trace_gfs2_log_blocks(sdp, -1);
+ } else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
log_write_header(sdp, flags);
}
if (gfs2_withdrawn(sdp))
@@ -1016,9 +1104,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
lops_after_commit(sdp, tr);
gfs2_log_lock(sdp);
- sdp->sd_log_head = sdp->sd_log_flush_head;
sdp->sd_log_blks_reserved = 0;
- sdp->sd_log_committed_revoke = 0;
spin_lock(&sdp->sd_ail_lock);
if (tr && !list_empty(&tr->tr_ail1_list)) {
@@ -1033,10 +1119,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
empty_ail1_list(sdp);
if (gfs2_withdrawn(sdp))
goto out_withdraw;
- atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
- trace_gfs2_log_blocks(sdp, -1);
log_write_header(sdp, flags);
- sdp->sd_log_head = sdp->sd_log_flush_head;
}
if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
GFS2_LOG_HEAD_FLUSH_FREEZE))
@@ -1046,12 +1129,22 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
}
out_end:
- trace_gfs2_log_flush(sdp, 0, flags);
+ used_blocks = log_distance(sdp, sdp->sd_log_flush_head, first_log_head);
+ reserved_revokes += atomic_read(&sdp->sd_log_revokes_available);
+ atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
+ gfs2_assert_withdraw(sdp, reserved_revokes % sdp->sd_inptrs == sdp->sd_ldptrs);
+ if (reserved_revokes > sdp->sd_ldptrs)
+ reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
out:
+ if (used_blocks != reserved_blocks) {
+ gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
+ gfs2_log_release(sdp, reserved_blocks - used_blocks);
+ }
up_write(&sdp->sd_log_flush_lock);
gfs2_trans_free(sdp, tr);
if (gfs2_withdrawing(sdp))
gfs2_withdraw(sdp);
+ trace_gfs2_log_flush(sdp, 0, flags);
return;
out_withdraw:
@@ -1087,8 +1180,8 @@ static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
old->tr_num_databuf_new += new->tr_num_databuf_new;
old->tr_num_buf_rm += new->tr_num_buf_rm;
old->tr_num_databuf_rm += new->tr_num_databuf_rm;
+ old->tr_revokes += new->tr_revokes;
old->tr_num_revoke += new->tr_num_revoke;
- old->tr_num_revoke_rm += new->tr_num_revoke_rm;
list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
list_splice_tail_init(&new->tr_buf, &old->tr_buf);
@@ -1110,20 +1203,17 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
if (sdp->sd_log_tr) {
gfs2_merge_trans(sdp, tr);
} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
- gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
+ gfs2_assert_withdraw(sdp, !test_bit(TR_ONSTACK, &tr->tr_flags));
sdp->sd_log_tr = tr;
set_bit(TR_ATTACHED, &tr->tr_flags);
}
- sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
reserved = calc_reserved(sdp);
maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
gfs2_assert_withdraw(sdp, maxres >= reserved);
unused = maxres - reserved;
- atomic_add(unused, &sdp->sd_log_blks_free);
- trace_gfs2_log_blocks(sdp, unused);
- gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
- sdp->sd_jdesc->jd_blocks);
+ if (unused)
+ gfs2_log_release(sdp, unused);
sdp->sd_log_blks_reserved = reserved;
gfs2_log_unlock(sdp);
@@ -1166,15 +1256,11 @@ static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
- sdp->sd_log_flush_head = sdp->sd_log_head;
-
log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
+ log_pull_tail(sdp);
gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
-
- sdp->sd_log_head = sdp->sd_log_flush_head;
- sdp->sd_log_tail = sdp->sd_log_head;
}
static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
@@ -1208,7 +1294,6 @@ int gfs2_logd(void *data)
struct gfs2_sbd *sdp = data;
unsigned long t = 1;
DEFINE_WAIT(wait);
- bool did_flush;
while (!kthread_should_stop()) {
@@ -1227,12 +1312,10 @@ int gfs2_logd(void *data)
continue;
}
- did_flush = false;
if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
gfs2_ail1_empty(sdp, 0);
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
- GFS2_LFC_LOGD_JFLUSH_REQD);
- did_flush = true;
+ GFS2_LFC_LOGD_JFLUSH_REQD);
}
if (gfs2_ail_flush_reqd(sdp)) {
@@ -1240,13 +1323,9 @@ int gfs2_logd(void *data)
gfs2_ail1_wait(sdp);
gfs2_ail1_empty(sdp, 0);
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
- GFS2_LFC_LOGD_AIL_FLUSH_REQD);
- did_flush = true;
+ GFS2_LFC_LOGD_AIL_FLUSH_REQD);
}
- if (!gfs2_ail_flush_reqd(sdp) || did_flush)
- wake_up(&sdp->sd_log_waitq);
-
t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
try_to_freeze();
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 79f97290146e..eea58015710e 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -13,6 +13,13 @@
#include "incore.h"
#include "inode.h"
+/*
+ * The minimum amount of log space required for a log flush is one block for
+ * revokes and one block for the log header. Log flushes other than
+ * GFS2_LOG_HEAD_FLUSH_NORMAL may write one or two more log headers.
+ */
+#define GFS2_LOG_FLUSH_MIN_BLOCKS 4
+
/**
* gfs2_log_lock - acquire the right to mess with the log manager
* @sdp: the filesystem
@@ -43,7 +50,9 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
if (++value == sdp->sd_jdesc->jd_blocks) {
value = 0;
}
- sdp->sd_log_head = sdp->sd_log_tail = value;
+ sdp->sd_log_tail = value;
+ sdp->sd_log_flush_tail = value;
+ sdp->sd_log_head = value;
}
static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
@@ -64,8 +73,13 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
+extern bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
+extern void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
-extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
+extern bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ unsigned int *extra_revokes);
+extern void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ unsigned int *extra_revokes);
extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
u64 seq, u32 tail, u32 lblock, u32 flags,
int op_flags);
@@ -78,6 +92,6 @@ extern void log_flush_wait(struct gfs2_sbd *sdp);
extern int gfs2_logd(void *data);
extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
-extern void gfs2_write_revokes(struct gfs2_sbd *sdp);
+extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
#endif /* __LOG_DOT_H__ */
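As a rough illustration of the revoke-space arithmetic introduced above: sd_log_revokes_available starts out at sd_ldptrs (the number of revokes that fit in the always-reserved descriptor block), and the out_end path of gfs2_log_flush() converts any surplus revokes back into whole continuation blocks using sd_inptrs. The sketch below is a standalone, user-space approximation under those assumptions; revoke_blocks() and the example constants are hypothetical and not part of the patch.

    /* Illustrative only: how many journal blocks a given number of revokes
     * would occupy, assuming ldptrs revokes fit in the descriptor block and
     * inptrs revokes fit in each continuation block. Example values are
     * made up; real values depend on the block size and header sizes. */
    #include <stdio.h>

    static const unsigned int ldptrs = 500;  /* assumed */
    static const unsigned int inptrs = 508;  /* assumed */

    static unsigned int revoke_blocks(unsigned int revokes)
    {
            if (revokes <= ldptrs)
                    return 1;  /* descriptor block only */
            /* one extra continuation block per inptrs additional revokes */
            return 1 + (revokes - ldptrs + inptrs - 1) / inptrs;
    }

    int main(void)
    {
            unsigned int n;

            for (n = 100; n <= 1600; n += 500)
                    printf("%4u revokes -> %u log block(s)\n", n, revoke_blocks(n));
            return 0;
    }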
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 3922b26264f5..a82f4747aa8d 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -76,15 +76,20 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
struct gfs2_bitmap *bi = rgd->rd_bits + index;
+ rgrp_lock_local(rgd);
if (bi->bi_clone == NULL)
- return;
+ goto out;
if (sdp->sd_args.ar_discard)
gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
memcpy(bi->bi_clone + bi->bi_offset,
bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
clear_bit(GBF_FULL, &bi->bi_flags);
rgd->rd_free_clone = rgd->rd_free;
+ BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
rgd->rd_extfail_pt = rgd->rd_free;
+
+out:
+ rgrp_unlock_local(rgd);
}
/**
@@ -262,7 +267,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
bio_end_io_t *end_io)
{
struct super_block *sb = sdp->sd_vfs;
- struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+ struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
bio_set_dev(bio, sb->s_bdev);
@@ -322,17 +327,18 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
* then add the page segment to that.
*/
-void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
- unsigned size, unsigned offset, u64 blkno)
+void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ struct page *page, unsigned size, unsigned offset,
+ u64 blkno)
{
struct bio *bio;
int ret;
- bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
+ bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
gfs2_end_log_write, false);
ret = bio_add_page(bio, page, size, offset);
if (ret == 0) {
- bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
+ bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
REQ_OP_WRITE, gfs2_end_log_write, true);
ret = bio_add_page(bio, page, size, offset);
WARN_ON(ret == 0);
@@ -355,7 +361,8 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
gfs2_log_incr_head(sdp);
- gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
+ gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
+ bh_offset(bh), dblock);
}
/**
@@ -369,14 +376,14 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
* the page may be freed at any time.
*/
-void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
+static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
struct super_block *sb = sdp->sd_vfs;
u64 dblock;
dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
gfs2_log_incr_head(sdp);
- gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
+ gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
}
/**
@@ -845,7 +852,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
struct page *page;
unsigned int length;
- gfs2_write_revokes(sdp);
+ gfs2_flush_revokes(sdp);
if (!sdp->sd_log_num_revoke)
return;
@@ -857,7 +864,6 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
sdp->sd_log_num_revoke--;
if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
-
gfs2_log_write_page(sdp, page);
page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
mh = page_address(page);
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index fbdbb08dcec6..31b6dd0d2e5d 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -10,37 +10,24 @@
#include <linux/list.h>
#include "incore.h"
-#define BUF_OFFSET \
- ((sizeof(struct gfs2_log_descriptor) + sizeof(__be64) - 1) & \
- ~(sizeof(__be64) - 1))
-#define DATABUF_OFFSET \
- ((sizeof(struct gfs2_log_descriptor) + (2 * sizeof(__be64) - 1)) & \
- ~(2 * sizeof(__be64) - 1))
-
extern const struct gfs2_log_operations *gfs2_log_ops[];
extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
extern u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
-extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
- unsigned size, unsigned offset, u64 blkno);
-extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
+extern void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ struct page *page, unsigned size, unsigned offset,
+ u64 blkno);
extern void gfs2_log_submit_bio(struct bio **biop, int opf);
extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head, bool keep_cache);
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
{
- unsigned int limit;
-
- limit = (sdp->sd_sb.sb_bsize - BUF_OFFSET) / sizeof(__be64);
- return limit;
+ return sdp->sd_ldptrs;
}
static inline unsigned int databuf_limit(struct gfs2_sbd *sdp)
{
- unsigned int limit;
-
- limit = (sdp->sd_sb.sb_bsize - DATABUF_OFFSET) / (2 * sizeof(__be64));
- return limit;
+ return sdp->sd_ldptrs / 2;
}
static inline void lops_before_commit(struct gfs2_sbd *sdp,
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index c7393ee9cf68..28d0eb23e18e 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -98,7 +98,7 @@ static int __init init_gfs2_fs(void)
error = -ENOMEM;
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
sizeof(struct gfs2_glock),
- 0, 0,
+ 0, SLAB_RECLAIM_ACCOUNT,
gfs2_init_glock_once);
if (!gfs2_glock_cachep)
goto fail_cachep1;
@@ -134,7 +134,7 @@ static int __init init_gfs2_fs(void)
gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
sizeof(struct gfs2_quota_data),
- 0, 0, NULL);
+ 0, SLAB_RECLAIM_ACCOUNT, NULL);
if (!gfs2_quotad_cachep)
goto fail_cachep6;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 61fce59cb4d3..aa4136055a83 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -136,8 +136,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
init_rwsem(&sdp->sd_log_flush_lock);
atomic_set(&sdp->sd_log_in_flight, 0);
- atomic_set(&sdp->sd_reserving_log, 0);
- init_waitqueue_head(&sdp->sd_reserving_log_wait);
init_waitqueue_head(&sdp->sd_log_flush_wait);
atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
mutex_init(&sdp->sd_freeze_mutex);
@@ -171,7 +169,8 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
return -EINVAL;
}
- if (sb->sb_fs_format != GFS2_FORMAT_FS ||
+ if (sb->sb_fs_format < GFS2_FS_FORMAT_MIN ||
+ sb->sb_fs_format > GFS2_FS_FORMAT_MAX ||
sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
return -EINVAL;
@@ -179,7 +178,7 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
(sb->sb_bsize & (sb->sb_bsize - 1))) {
- pr_warn("Invalid superblock size\n");
+ pr_warn("Invalid block size\n");
return -EINVAL;
}
@@ -317,6 +316,13 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
sizeof(struct gfs2_meta_header))
* GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
+ /*
+ * We always keep at least one block reserved for revokes in
+ * transactions. This greatly simplifies allocating additional
+ * revoke blocks.
+ */
+ atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
+
/* Compute maximum reservation required to add an entry to a directory */
hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
@@ -488,6 +494,19 @@ static int init_sb(struct gfs2_sbd *sdp, int silent)
goto out;
}
+ switch(sdp->sd_sb.sb_fs_format) {
+ case GFS2_FS_FORMAT_MAX:
+ sb->s_xattr = gfs2_xattr_handlers_max;
+ break;
+
+ case GFS2_FS_FORMAT_MIN:
+ sb->s_xattr = gfs2_xattr_handlers_min;
+ break;
+
+ default:
+ BUG();
+ }
+
/* Set up the buffer cache and SB for real */
if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
ret = -EINVAL;
@@ -1032,13 +1051,14 @@ hostdata_error:
}
if (lm->lm_mount == NULL) {
- fs_info(sdp, "Now mounting FS...\n");
+ fs_info(sdp, "Now mounting FS (format %u)...\n", sdp->sd_sb.sb_fs_format);
complete_all(&sdp->sd_locking_init);
return 0;
}
ret = lm->lm_mount(sdp, table);
if (ret == 0)
- fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+ fs_info(sdp, "Joined cluster. Now mounting FS (format %u)...\n",
+ sdp->sd_sb.sb_fs_format);
complete_all(&sdp->sd_locking_init);
return ret;
}
@@ -1084,6 +1104,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
int silent = fc->sb_flags & SB_SILENT;
struct gfs2_sbd *sdp;
struct gfs2_holder mount_gh;
+ struct gfs2_holder freeze_gh;
int error;
sdp = init_sbd(sb);
@@ -1107,7 +1128,6 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_op = &gfs2_super_ops;
sb->s_d_op = &gfs2_dops;
sb->s_export_op = &gfs2_export_ops;
- sb->s_xattr = gfs2_xattr_handlers;
sb->s_qcop = &gfs2_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
@@ -1156,6 +1176,10 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
if (error)
goto fail_locking;
+ /* Turn rgrplvb on by default if fs format is recent enough */
+ if (!sdp->sd_args.ar_got_rgrplvb && sdp->sd_sb.sb_fs_format > 1801)
+ sdp->sd_args.ar_rgrplvb = 1;
+
error = wait_on_journal(sdp);
if (error)
goto fail_sb;
@@ -1195,25 +1219,18 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
goto fail_per_node;
}
- if (sb_rdonly(sb)) {
- struct gfs2_holder freeze_gh;
+ error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+ if (error)
+ goto fail_per_node;
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT,
- &freeze_gh);
- if (error) {
- fs_err(sdp, "can't make FS RO: %d\n", error);
- goto fail_per_node;
- }
- gfs2_glock_dq_uninit(&freeze_gh);
- } else {
+ if (!sb_rdonly(sb))
error = gfs2_make_fs_rw(sdp);
- if (error) {
- fs_err(sdp, "can't make FS RW: %d\n", error);
- goto fail_per_node;
- }
- }
+ gfs2_freeze_unlock(&freeze_gh);
+ if (error) {
+ fs_err(sdp, "can't make FS RW: %d\n", error);
+ goto fail_per_node;
+ }
gfs2_glock_dq_uninit(&mount_gh);
gfs2_online_uevent(sdp);
return 0;
@@ -1456,6 +1473,7 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_rgrplvb:
args->ar_rgrplvb = result.boolean;
+ args->ar_got_rgrplvb = 1;
break;
case Opt_loccookie:
args->ar_loccookie = result.boolean;
@@ -1514,15 +1532,20 @@ static int gfs2_reconfigure(struct fs_context *fc)
fc->sb_flags |= SB_RDONLY;
if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
+ struct gfs2_holder freeze_gh;
+
+ error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+ if (error)
+ return -EINVAL;
+
if (fc->sb_flags & SB_RDONLY) {
- error = gfs2_make_fs_ro(sdp);
- if (error)
- errorfc(fc, "unable to remount read-only");
+ gfs2_make_fs_ro(sdp);
} else {
error = gfs2_make_fs_rw(sdp);
if (error)
errorfc(fc, "unable to remount read-write");
}
+ gfs2_freeze_unlock(&freeze_gh);
}
sdp->sd_args = *newargs;
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index c26c68ebd29d..282173774005 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -470,9 +470,7 @@ void gfs2_recover_func(struct work_struct *work)
/* Acquire a shared hold on the freeze lock */
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
- GL_EXACT, &thaw_gh);
+ error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
if (error)
goto fail_gunlock_ji;
@@ -507,22 +505,24 @@ void gfs2_recover_func(struct work_struct *work)
/* We take the sd_log_flush_lock here primarily to prevent log
* flushes and simultaneous journal replays from stomping on
- * each other wrt sd_log_bio. */
+ * each other wrt jd_log_bio. */
down_read(&sdp->sd_log_flush_lock);
for (pass = 0; pass < 2; pass++) {
lops_before_scan(jd, &head, pass);
error = foreach_descriptor(jd, head.lh_tail,
head.lh_blkno, pass);
lops_after_scan(jd, error, pass);
- if (error)
+ if (error) {
+ up_read(&sdp->sd_log_flush_lock);
goto fail_gunlock_thaw;
+ }
}
recover_local_statfs(jd, &head);
clean_journal(jd, &head);
up_read(&sdp->sd_log_flush_lock);
- gfs2_glock_dq_uninit(&thaw_gh);
+ gfs2_freeze_unlock(&thaw_gh);
t_rep = ktime_get();
fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
"jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
@@ -544,7 +544,7 @@ void gfs2_recover_func(struct work_struct *work)
goto done;
fail_gunlock_thaw:
- gfs2_glock_dq_uninit(&thaw_gh);
+ gfs2_freeze_unlock(&thaw_gh);
fail_gunlock_ji:
if (jlocked) {
gfs2_glock_dq_uninit(&ji_gh);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 5e8eef9990e3..89c37a845e64 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -36,6 +36,24 @@
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)
+struct gfs2_rbm {
+ struct gfs2_rgrpd *rgd;
+ u32 offset; /* The offset is bitmap relative */
+ int bii; /* Bitmap index */
+};
+
+static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
+{
+ return rbm->rgd->rd_bits + rbm->bii;
+}
+
+static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
+{
+ BUG_ON(rbm->offset >= rbm->rgd->rd_data);
+ return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
+ rbm->offset;
+}
+
/*
* These routines are used by the resource group routines (rgrp.c)
* to keep track of block allocation. Each block is represented by two
@@ -61,7 +79,7 @@ static const char valid_change[16] = {
};
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
- const struct gfs2_inode *ip, bool nowrap);
+ struct gfs2_blkreserv *rs, bool nowrap);
/**
@@ -175,7 +193,7 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
/**
* rs_cmp - multi-block reservation range compare
- * @blk: absolute file system block number of the new reservation
+ * @start: start of the new reservation
* @len: number of blocks in the new reservation
* @rs: existing reservation to compare against
*
@@ -183,13 +201,11 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
* -1 if the block range is before the start of the reservation
* 0 if the block range overlaps with the reservation
*/
-static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
+static inline int rs_cmp(u64 start, u32 len, struct gfs2_blkreserv *rs)
{
- u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
-
- if (blk >= startblk + rs->rs_free)
+ if (start >= rs->rs_start + rs->rs_requested)
return 1;
- if (blk + len - 1 < startblk)
+ if (rs->rs_start >= start + len)
return -1;
return 0;
}
@@ -277,29 +293,38 @@ static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
}
/**
- * gfs2_rbm_incr - increment an rbm structure
+ * gfs2_rbm_add - add a number of blocks to an rbm
* @rbm: The rbm with rgd already set correctly
+ * @blocks: The number of blocks to add to rbm
*
- * This function takes an existing rbm structure and increments it to the next
- * viable block offset.
- *
- * Returns: If incrementing the offset would cause the rbm to go past the
- * end of the rgrp, true is returned, otherwise false.
+ * This function takes an existing rbm structure and adds a number of blocks to
+ * it.
*
+ * Returns: True if the new rbm would point past the end of the rgrp.
*/
-static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
+static bool gfs2_rbm_add(struct gfs2_rbm *rbm, u32 blocks)
{
- if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
- rbm->offset++;
+ struct gfs2_rgrpd *rgd = rbm->rgd;
+ struct gfs2_bitmap *bi = rgd->rd_bits + rbm->bii;
+
+ if (rbm->offset + blocks < bi->bi_blocks) {
+ rbm->offset += blocks;
return false;
}
- if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
- return true;
+ blocks -= bi->bi_blocks - rbm->offset;
- rbm->offset = 0;
- rbm->bii++;
- return false;
+ for(;;) {
+ bi++;
+ if (bi == rgd->rd_bits + rgd->rd_length)
+ return true;
+ if (blocks < bi->bi_blocks) {
+ rbm->offset = blocks;
+ rbm->bii = bi - rgd->rd_bits;
+ return false;
+ }
+ blocks -= bi->bi_blocks;
+ }
}
/**
@@ -308,7 +333,8 @@ static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
* @n_unaligned: Number of unaligned blocks to check
* @len: Decremented for each block found (terminate on zero)
*
- * Returns: true if a non-free block is encountered
+ * Returns: true if a non-free block is encountered or the end of the resource
+ * group is reached.
*/
static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
@@ -323,7 +349,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
(*len)--;
if (*len == 0)
return true;
- if (gfs2_rbm_incr(rbm))
+ if (gfs2_rbm_add(rbm, 1))
return true;
}
@@ -595,10 +621,11 @@ static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs,
{
struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);
- gfs2_print_dbg(seq, "%s B: n:%llu s:%llu b:%u f:%u\n", fs_id_buf,
+ gfs2_print_dbg(seq, "%s B: n:%llu s:%llu f:%u\n",
+ fs_id_buf,
(unsigned long long)ip->i_no_addr,
- (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
- rs->rs_rbm.offset, rs->rs_free);
+ (unsigned long long)rs->rs_start,
+ rs->rs_requested);
}
/**
@@ -613,33 +640,22 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
if (!gfs2_rs_active(rs))
return;
- rgd = rs->rs_rbm.rgd;
+ rgd = rs->rs_rgd;
trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
rb_erase(&rs->rs_node, &rgd->rd_rstree);
RB_CLEAR_NODE(&rs->rs_node);
- if (rs->rs_free) {
- u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
- rs->rs_free - 1;
- struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
- struct gfs2_bitmap *start, *last;
+ if (rs->rs_requested) {
+ /* return requested blocks to the rgrp */
+ BUG_ON(rs->rs_rgd->rd_requested < rs->rs_requested);
+ rs->rs_rgd->rd_requested -= rs->rs_requested;
- /* return reserved blocks to the rgrp */
- BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
- rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
/* The rgrp extent failure point is likely not to increase;
it will only do so if the freed blocks are somehow
contiguous with a span of free blocks that follows. Still,
it will force the number to be recalculated later. */
- rgd->rd_extfail_pt += rs->rs_free;
- rs->rs_free = 0;
- if (gfs2_rbm_from_block(&last_rbm, last_block))
- return;
- start = rbm_bi(&rs->rs_rbm);
- last = rbm_bi(&last_rbm);
- do
- clear_bit(GBF_FULL, &start->bi_flags);
- while (start++ != last);
+ rgd->rd_extfail_pt += rs->rs_requested;
+ rs->rs_requested = 0;
}
}
@@ -652,11 +668,11 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
{
struct gfs2_rgrpd *rgd;
- rgd = rs->rs_rbm.rgd;
+ rgd = rs->rs_rgd;
if (rgd) {
spin_lock(&rgd->rd_rsspin);
__rs_deltree(rs);
- BUG_ON(rs->rs_free);
+ BUG_ON(rs->rs_requested);
spin_unlock(&rgd->rd_rsspin);
}
}
@@ -904,6 +920,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
rgd->rd_data = be32_to_cpu(buf.ri_data);
rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
spin_lock_init(&rgd->rd_rsspin);
+ mutex_init(&rgd->rd_mutex);
error = compute_bitstructs(rgd);
if (error)
@@ -1149,6 +1166,23 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
return count;
}
+static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
+{
+ struct gfs2_bitmap *bi;
+ int x;
+
+ if (rgd->rd_free) {
+ for (x = 0; x < rgd->rd_length; x++) {
+ bi = rgd->rd_bits + x;
+ clear_bit(GBF_FULL, &bi->bi_flags);
+ }
+ } else {
+ for (x = 0; x < rgd->rd_length; x++) {
+ bi = rgd->rd_bits + x;
+ set_bit(GBF_FULL, &bi->bi_flags);
+ }
+ }
+}
/**
* gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
@@ -1192,11 +1226,11 @@ static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
}
if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
- for (x = 0; x < length; x++)
- clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
+ rgrp_set_bitmap_flags(rgd);
rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
rgd->rd_free_clone = rgd->rd_free;
+ BUG_ON(rgd->rd_reserved);
/* max out the rgrp allocation failure point */
rgd->rd_extfail_pt = rgd->rd_free;
}
@@ -1244,7 +1278,11 @@ static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
if (rgd->rd_rgl->rl_unlinked == 0)
rgd->rd_flags &= ~GFS2_RDF_CHECK;
rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
+ rgrp_set_bitmap_flags(rgd);
rgd->rd_free_clone = rgd->rd_free;
+ BUG_ON(rgd->rd_reserved);
+ /* max out the rgrp allocation failure point */
+ rgd->rd_extfail_pt = rgd->rd_free;
rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
return 0;
@@ -1404,7 +1442,8 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
while (1) {
- ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &gh);
if (ret)
goto out;
@@ -1412,9 +1451,11 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
/* Trim each bitmap in the rgrp */
for (x = 0; x < rgd->rd_length; x++) {
struct gfs2_bitmap *bi = rgd->rd_bits + x;
+ rgrp_lock_local(rgd);
ret = gfs2_rgrp_send_discards(sdp,
rgd->rd_data0, NULL, bi, minlen,
&amt);
+ rgrp_unlock_local(rgd);
if (ret) {
gfs2_glock_dq_uninit(&gh);
goto out;
@@ -1426,9 +1467,11 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
if (ret == 0) {
bh = rgd->rd_bits[0].bi_bh;
+ rgrp_lock_local(rgd);
rgd->rd_flags |= GFS2_RGF_TRIMMED;
gfs2_trans_add_meta(rgd->rd_gl, bh);
gfs2_rgrp_out(rgd, bh->b_data);
+ rgrp_unlock_local(rgd);
gfs2_trans_end(sdp);
}
}
@@ -1458,8 +1501,7 @@ static void rs_insert(struct gfs2_inode *ip)
struct rb_node **newn, *parent = NULL;
int rc;
struct gfs2_blkreserv *rs = &ip->i_res;
- struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
- u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
+ struct gfs2_rgrpd *rgd = rs->rs_rgd;
BUG_ON(gfs2_rs_active(rs));
@@ -1470,7 +1512,7 @@ static void rs_insert(struct gfs2_inode *ip)
rb_entry(*newn, struct gfs2_blkreserv, rs_node);
parent = *newn;
- rc = rs_cmp(fsblock, rs->rs_free, cur);
+ rc = rs_cmp(rs->rs_start, rs->rs_requested, cur);
if (rc > 0)
newn = &((*newn)->rb_right);
else if (rc < 0)
@@ -1486,7 +1528,7 @@ static void rs_insert(struct gfs2_inode *ip)
rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
/* Do our rgrp accounting for the reservation */
- rgd->rd_reserved += rs->rs_free; /* blocks reserved */
+ rgd->rd_requested += rs->rs_requested; /* blocks requested */
spin_unlock(&rgd->rd_rsspin);
trace_gfs2_rs(rs, TRACE_RS_INSERT);
}
@@ -1507,9 +1549,9 @@ static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
{
u32 tot_reserved, tot_free;
- if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
+ if (WARN_ON_ONCE(rgd->rd_requested < rs->rs_requested))
return 0;
- tot_reserved = rgd->rd_reserved - rs->rs_free;
+ tot_reserved = rgd->rd_requested - rs->rs_requested;
if (rgd->rd_free_clone < tot_reserved)
tot_reserved = 0;
@@ -1534,17 +1576,26 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
u64 goal;
struct gfs2_blkreserv *rs = &ip->i_res;
u32 extlen;
- u32 free_blocks = rgd_free(rgd, rs);
+ u32 free_blocks, blocks_available;
int ret;
struct inode *inode = &ip->i_inode;
+ spin_lock(&rgd->rd_rsspin);
+ free_blocks = rgd_free(rgd, rs);
+ if (rgd->rd_free_clone < rgd->rd_requested)
+ free_blocks = 0;
+ blocks_available = rgd->rd_free_clone - rgd->rd_reserved;
+ if (rgd == rs->rs_rgd)
+ blocks_available += rs->rs_reserved;
+ spin_unlock(&rgd->rd_rsspin);
+
if (S_ISDIR(inode->i_mode))
extlen = 1;
else {
extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
}
- if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
+ if (free_blocks < extlen || blocks_available < extlen)
return;
/* Find bitmap block that contains bits for goal block */
@@ -1556,10 +1607,10 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
return;
- ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
+ ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, &ip->i_res, true);
if (ret == 0) {
- rs->rs_rbm = rbm;
- rs->rs_free = extlen;
+ rs->rs_start = gfs2_rbm_to_block(&rbm);
+ rs->rs_requested = extlen;
rs_insert(ip);
} else {
if (goal == rgd->rd_last_alloc + rgd->rd_data0)
@@ -1572,7 +1623,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
* @rgd: The resource group
* @block: The starting block
* @length: The required length
- * @ip: Ignore any reservations for this inode
+ * @ignore_rs: Reservation to ignore
*
* If the block does not appear in any reservation, then return the
* block number unchanged. If it does appear in the reservation, then
@@ -1582,7 +1633,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
u32 length,
- const struct gfs2_inode *ip)
+ struct gfs2_blkreserv *ignore_rs)
{
struct gfs2_blkreserv *rs;
struct rb_node *n;
@@ -1602,8 +1653,8 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
}
if (n) {
- while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
- block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
+ while (rs_cmp(block, length, rs) == 0 && rs != ignore_rs) {
+ block = rs->rs_start + rs->rs_requested;
n = n->rb_right;
if (n == NULL)
break;
@@ -1618,7 +1669,7 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
/**
* gfs2_reservation_check_and_update - Check for reservations during block alloc
* @rbm: The current position in the resource group
- * @ip: The inode for which we are searching for blocks
+ * @rs: Our own reservation
* @minext: The minimum extent length
* @maxext: A pointer to the maximum extent structure
*
@@ -1632,20 +1683,19 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
*/
static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
- const struct gfs2_inode *ip,
+ struct gfs2_blkreserv *rs,
u32 minext,
struct gfs2_extent *maxext)
{
u64 block = gfs2_rbm_to_block(rbm);
u32 extlen = 1;
u64 nblock;
- int ret;
/*
* If we have a minimum extent length, then skip over any extent
* which is less than the min extent length in size.
*/
- if (minext) {
+ if (minext > 1) {
extlen = gfs2_free_extlen(rbm, minext);
if (extlen <= maxext->len)
goto fail;
@@ -1655,7 +1705,7 @@ static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
* Check the extent which has been found against the reservations
* and skip if parts of it are already reserved
*/
- nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
+ nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, rs);
if (nblock == block) {
if (!minext || extlen >= minext)
return 0;
@@ -1664,12 +1714,15 @@ static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
maxext->len = extlen;
maxext->rbm = *rbm;
}
-fail:
- nblock = block + extlen;
+ } else {
+ u64 len = nblock - block;
+ if (len >= (u64)1 << 32)
+ return -E2BIG;
+ extlen = len;
}
- ret = gfs2_rbm_from_block(rbm, nblock);
- if (ret < 0)
- return ret;
+fail:
+ if (gfs2_rbm_add(rbm, extlen))
+ return -E2BIG;
return 1;
}
@@ -1677,9 +1730,9 @@ fail:
* gfs2_rbm_find - Look for blocks of a particular state
* @rbm: Value/result starting position and final position
* @state: The state which we want to find
- * @minext: Pointer to the requested extent length (NULL for a single block)
+ * @minext: Pointer to the requested extent length
* This is updated to be the actual reservation size.
- * @ip: If set, check for reservations
+ * @rs: Our own reservation (NULL to skip checking for reservations)
* @nowrap: Stop looking at the end of the rgrp, rather than wrapping
* around until we've reached the starting point.
*
@@ -1693,7 +1746,7 @@ fail:
*/
static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
- const struct gfs2_inode *ip, bool nowrap)
+ struct gfs2_blkreserv *rs, bool nowrap)
{
bool scan_from_start = rbm->bii == 0 && rbm->offset == 0;
struct buffer_head *bh;
@@ -1714,8 +1767,7 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
while(1) {
bi = rbm_bi(rbm);
- if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
- test_bit(GBF_FULL, &bi->bi_flags) &&
+ if (test_bit(GBF_FULL, &bi->bi_flags) &&
(state == GFS2_BLKST_FREE))
goto next_bitmap;
@@ -1731,11 +1783,10 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
goto next_bitmap;
}
rbm->offset = offset;
- if (ip == NULL)
+ if (!rs)
return 0;
- ret = gfs2_reservation_check_and_update(rbm, ip,
- minext ? *minext : 0,
+ ret = gfs2_reservation_check_and_update(rbm, rs, *minext,
&maxext);
if (ret == 0)
return 0;
@@ -1767,7 +1818,7 @@ next_iter:
break;
}
- if (minext == NULL || state != GFS2_BLKST_FREE)
+ if (state != GFS2_BLKST_FREE)
return -ENOSPC;
/* If the extent was too small, and it's smaller than the smallest
@@ -1775,7 +1826,7 @@ next_iter:
useless to search this rgrp again for this amount or more. */
if (wrapped && (scan_from_start || rbm->bii > last_bii) &&
*minext < rbm->rgd->rd_extfail_pt)
- rbm->rgd->rd_extfail_pt = *minext;
+ rbm->rgd->rd_extfail_pt = *minext - 1;
/* If the maximum extent we found is big enough to fulfill the
minimum requirements, use it anyway. */
@@ -1938,7 +1989,7 @@ static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
u64 tdiff;
tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
- rs->rs_rbm.rgd->rd_gl->gl_dstamp));
+ rs->rs_rgd->rd_gl->gl_dstamp));
return tdiff > (msecs * 1000 * 1000);
}
@@ -1993,8 +2044,7 @@ static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
* We try our best to find an rgrp that has at least ap->target blocks
* available. After a couple of passes (loops == 2), the prospects of finding
* such an rgrp diminish. At this stage, we return the first rgrp that has
- * at least ap->min_target blocks available. Either way, we set ap->allowed to
- * the number of blocks available in the chosen rgrp.
+ * at least ap->min_target blocks available.
*
* Returns: 0 on success,
* -ENOMEM if a suitable rgrp can't be found
@@ -2006,56 +2056,64 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *begin = NULL;
struct gfs2_blkreserv *rs = &ip->i_res;
- int error = 0, rg_locked, flags = 0;
+ int error = 0, flags = LM_FLAG_NODE_SCOPE;
+ bool rg_locked;
u64 last_unlinked = NO_BLOCK;
+ u32 target = ap->target;
int loops = 0;
- u32 free_blocks, skip = 0;
+ u32 free_blocks, blocks_available, skip = 0;
+
+ BUG_ON(rs->rs_reserved);
if (sdp->sd_args.ar_rgrplvb)
flags |= GL_SKIP;
- if (gfs2_assert_warn(sdp, ap->target))
+ if (gfs2_assert_warn(sdp, target))
return -EINVAL;
if (gfs2_rs_active(rs)) {
- begin = rs->rs_rbm.rgd;
- } else if (rs->rs_rbm.rgd &&
- rgrp_contains_block(rs->rs_rbm.rgd, ip->i_goal)) {
- begin = rs->rs_rbm.rgd;
+ begin = rs->rs_rgd;
+ } else if (rs->rs_rgd &&
+ rgrp_contains_block(rs->rs_rgd, ip->i_goal)) {
+ begin = rs->rs_rgd;
} else {
check_and_update_goal(ip);
- rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
+ rs->rs_rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
}
if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
skip = gfs2_orlov_skip(ip);
- if (rs->rs_rbm.rgd == NULL)
+ if (rs->rs_rgd == NULL)
return -EBADSLT;
while (loops < 3) {
- rg_locked = 1;
+ struct gfs2_rgrpd *rgd;
- if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
- rg_locked = 0;
+ rg_locked = gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl);
+ if (rg_locked) {
+ rgrp_lock_local(rs->rs_rgd);
+ } else {
if (skip && skip--)
goto next_rgrp;
if (!gfs2_rs_active(rs)) {
if (loops == 0 &&
- !fast_to_acquire(rs->rs_rbm.rgd))
+ !fast_to_acquire(rs->rs_rgd))
goto next_rgrp;
if ((loops < 2) &&
gfs2_rgrp_used_recently(rs, 1000) &&
- gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
+ gfs2_rgrp_congested(rs->rs_rgd, loops))
goto next_rgrp;
}
- error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
+ error = gfs2_glock_nq_init(rs->rs_rgd->rd_gl,
LM_ST_EXCLUSIVE, flags,
&ip->i_rgd_gh);
if (unlikely(error))
return error;
+ rgrp_lock_local(rs->rs_rgd);
if (!gfs2_rs_active(rs) && (loops < 2) &&
- gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
+ gfs2_rgrp_congested(rs->rs_rgd, loops))
goto skip_rgrp;
if (sdp->sd_args.ar_rgrplvb) {
- error = update_rgrp_lvb(rs->rs_rbm.rgd);
+ error = update_rgrp_lvb(rs->rs_rgd);
if (unlikely(error)) {
+ rgrp_unlock_local(rs->rs_rgd);
gfs2_glock_dq_uninit(&ip->i_rgd_gh);
return error;
}
@@ -2063,36 +2121,46 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
}
/* Skip unusable resource groups */
- if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
+ if ((rs->rs_rgd->rd_flags & (GFS2_RGF_NOALLOC |
GFS2_RDF_ERROR)) ||
- (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
+ (loops == 0 && target > rs->rs_rgd->rd_extfail_pt))
goto skip_rgrp;
if (sdp->sd_args.ar_rgrplvb)
- gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
+ gfs2_rgrp_bh_get(rs->rs_rgd);
/* Get a reservation if we don't already have one */
if (!gfs2_rs_active(rs))
- rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
+ rg_mblk_search(rs->rs_rgd, ip, ap);
/* Skip rgrps when we can't get a reservation on first pass */
if (!gfs2_rs_active(rs) && (loops < 1))
goto check_rgrp;
/* If rgrp has enough free space, use it */
- free_blocks = rgd_free(rs->rs_rbm.rgd, rs);
- if (free_blocks >= ap->target ||
- (loops == 2 && ap->min_target &&
- free_blocks >= ap->min_target)) {
- ap->allowed = free_blocks;
- return 0;
+ rgd = rs->rs_rgd;
+ spin_lock(&rgd->rd_rsspin);
+ free_blocks = rgd_free(rgd, rs);
+ blocks_available = rgd->rd_free_clone - rgd->rd_reserved;
+ if (free_blocks < target || blocks_available < target) {
+ spin_unlock(&rgd->rd_rsspin);
+ goto check_rgrp;
}
+ rs->rs_reserved = ap->target;
+ if (rs->rs_reserved > blocks_available)
+ rs->rs_reserved = blocks_available;
+ rgd->rd_reserved += rs->rs_reserved;
+ spin_unlock(&rgd->rd_rsspin);
+ rgrp_unlock_local(rs->rs_rgd);
+ return 0;
check_rgrp:
/* Check for unlinked inodes which can be reclaimed */
- if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
- try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
+ if (rs->rs_rgd->rd_flags & GFS2_RDF_CHECK)
+ try_rgrp_unlink(rs->rs_rgd, &last_unlinked,
ip->i_no_addr);
skip_rgrp:
+ rgrp_unlock_local(rs->rs_rgd);
+
/* Drop reservation, if we couldn't use reserved rgrp */
if (gfs2_rs_active(rs))
gfs2_rs_deltree(rs);
@@ -2102,7 +2170,7 @@ skip_rgrp:
gfs2_glock_dq_uninit(&ip->i_rgd_gh);
next_rgrp:
/* Find the next rgrp, and continue looking */
- if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
+ if (gfs2_select_rgrp(&rs->rs_rgd, begin))
continue;
if (skip)
continue;
@@ -2119,9 +2187,12 @@ next_rgrp:
return error;
}
/* Flushing the log may release space */
- if (loops == 2)
+ if (loops == 2) {
+ if (ap->min_target)
+ target = ap->min_target;
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_INPLACE_RESERVE);
+ }
}
return -ENOSPC;
@@ -2136,6 +2207,17 @@ next_rgrp:
void gfs2_inplace_release(struct gfs2_inode *ip)
{
+ struct gfs2_blkreserv *rs = &ip->i_res;
+
+ if (rs->rs_reserved) {
+ struct gfs2_rgrpd *rgd = rs->rs_rgd;
+
+ spin_lock(&rgd->rd_rsspin);
+ BUG_ON(rgd->rd_reserved < rs->rs_reserved);
+ rgd->rd_reserved -= rs->rs_reserved;
+ spin_unlock(&rgd->rd_rsspin);
+ rs->rs_reserved = 0;
+ }
if (gfs2_holder_initialized(&ip->i_rgd_gh))
gfs2_glock_dq_uninit(&ip->i_rgd_gh);
}
@@ -2205,7 +2287,7 @@ static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
bi_prev = bi;
}
gfs2_setbit(&rbm, false, new_state);
- gfs2_rbm_incr(&rbm);
+ gfs2_rbm_add(&rbm, 1);
}
}
@@ -2223,11 +2305,12 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
struct gfs2_blkreserv *trs;
const struct rb_node *n;
- gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
+ spin_lock(&rgd->rd_rsspin);
+ gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u q:%u r:%u e:%u\n",
fs_id_buf,
(unsigned long long)rgd->rd_addr, rgd->rd_flags,
rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
- rgd->rd_reserved, rgd->rd_extfail_pt);
+ rgd->rd_requested, rgd->rd_reserved, rgd->rd_extfail_pt);
if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
@@ -2236,7 +2319,6 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
be32_to_cpu(rgl->rl_free),
be32_to_cpu(rgl->rl_dinodes));
}
- spin_lock(&rgd->rd_rsspin);
for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
dump_rs(seq, trs, fs_id_buf);
@@ -2273,29 +2355,29 @@ static void gfs2_adjust_reservation(struct gfs2_inode *ip,
{
struct gfs2_blkreserv *rs = &ip->i_res;
struct gfs2_rgrpd *rgd = rbm->rgd;
- unsigned rlen;
- u64 block;
- int ret;
- spin_lock(&rgd->rd_rsspin);
+ BUG_ON(rs->rs_reserved < len);
+ rs->rs_reserved -= len;
if (gfs2_rs_active(rs)) {
- if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
- block = gfs2_rbm_to_block(rbm);
- ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
- rlen = min(rs->rs_free, len);
- rs->rs_free -= rlen;
- rgd->rd_reserved -= rlen;
+ u64 start = gfs2_rbm_to_block(rbm);
+
+ if (rs->rs_start == start) {
+ unsigned int rlen;
+
+ rs->rs_start += len;
+ rlen = min(rs->rs_requested, len);
+ rs->rs_requested -= rlen;
+ rgd->rd_requested -= rlen;
trace_gfs2_rs(rs, TRACE_RS_CLAIM);
- if (rs->rs_free && !ret)
- goto out;
+ if (rs->rs_start < rgd->rd_data0 + rgd->rd_data &&
+ rs->rs_requested)
+ return;
/* We used up our block reservation, so we should
reserve more blocks next time. */
atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint);
}
__rs_deltree(rs);
}
-out:
- spin_unlock(&rgd->rd_rsspin);
}
/**
@@ -2315,15 +2397,13 @@ static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
u64 goal;
if (gfs2_rs_active(&ip->i_res)) {
- *rbm = ip->i_res.rs_rbm;
- return;
+ goal = ip->i_res.rs_start;
+ } else {
+ if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
+ goal = ip->i_goal;
+ else
+ goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
}
-
- if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
- goal = ip->i_goal;
- else
- goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
-
if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) {
rbm->bii = 0;
rbm->offset = 0;
@@ -2346,17 +2426,21 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
- struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, };
- unsigned int ndata;
+ struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rgd, };
u64 block; /* block, within the file system scope */
- int error;
+ u32 minext = 1;
+ int error = -ENOSPC;
- gfs2_set_alloc_start(&rbm, ip, dinode);
- error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);
+ BUG_ON(ip->i_res.rs_reserved < *nblocks);
+ rgrp_lock_local(rbm.rgd);
+ if (gfs2_rs_active(&ip->i_res)) {
+ gfs2_set_alloc_start(&rbm, ip, dinode);
+ error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, &ip->i_res, false);
+ }
if (error == -ENOSPC) {
gfs2_set_alloc_start(&rbm, ip, dinode);
- error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
+ error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, NULL, false);
}
/* Since all blocks are reserved in advance, this shouldn't happen */
@@ -2371,14 +2455,8 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
gfs2_alloc_extent(&rbm, dinode, nblocks);
block = gfs2_rbm_to_block(&rbm);
rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
- if (gfs2_rs_active(&ip->i_res))
- gfs2_adjust_reservation(ip, &rbm, *nblocks);
- ndata = *nblocks;
- if (dinode)
- ndata--;
-
if (!dinode) {
- ip->i_goal = block + ndata - 1;
+ ip->i_goal = block + *nblocks - 1;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error == 0) {
struct gfs2_dinode *di =
@@ -2389,12 +2467,20 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
brelse(dibh);
}
}
- if (rbm.rgd->rd_free < *nblocks) {
+ spin_lock(&rbm.rgd->rd_rsspin);
+ gfs2_adjust_reservation(ip, &rbm, *nblocks);
+ if (rbm.rgd->rd_free < *nblocks || rbm.rgd->rd_reserved < *nblocks) {
fs_warn(sdp, "nblocks=%u\n", *nblocks);
+ spin_unlock(&rbm.rgd->rd_rsspin);
goto rgrp_error;
}
-
+ BUG_ON(rbm.rgd->rd_reserved < *nblocks);
+ BUG_ON(rbm.rgd->rd_free_clone < *nblocks);
+ BUG_ON(rbm.rgd->rd_free < *nblocks);
+ rbm.rgd->rd_reserved -= *nblocks;
+ rbm.rgd->rd_free_clone -= *nblocks;
rbm.rgd->rd_free -= *nblocks;
+ spin_unlock(&rbm.rgd->rd_rsspin);
if (dinode) {
rbm.rgd->rd_dinodes++;
*generation = rbm.rgd->rd_igeneration++;
@@ -2404,6 +2490,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
+ rgrp_unlock_local(rbm.rgd);
gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
if (dinode)
@@ -2411,13 +2498,13 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
- rbm.rgd->rd_free_clone -= *nblocks;
trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
*bn = block;
return 0;
rgrp_error:
+ rgrp_unlock_local(rbm.rgd);
gfs2_rgrp_error(rbm.rgd);
return -EIO;
}
@@ -2437,12 +2524,14 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ rgrp_lock_local(rgd);
rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE);
trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
rgd->rd_free += blen;
rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+ rgrp_unlock_local(rgd);
/* Directories keep their data in the metadata address space */
if (meta || ip->i_depth || gfs2_is_jdata(ip))
@@ -2478,17 +2567,20 @@ void gfs2_unlink_di(struct inode *inode)
rgd = gfs2_blk2rgrpd(sdp, blkno, true);
if (!rgd)
return;
+ rgrp_lock_local(rgd);
rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
+ rgrp_unlock_local(rgd);
}
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = rgd->rd_sbd;
+ rgrp_lock_local(rgd);
rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
if (!rgd->rd_dinodes)
gfs2_consist_rgrpd(rgd);
@@ -2497,6 +2589,7 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+ rgrp_unlock_local(rgd);
be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);
gfs2_statfs_change(sdp, 0, +1, -1);
@@ -2511,6 +2604,10 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
* @no_addr: The block number to check
* @type: The block type we are looking for
*
+ * The inode glock of @no_addr must be held. The @type to check for is either
+ * GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED; checking for type GFS2_BLKST_FREE
+ * or GFS2_BLKST_USED would make no sense.
+ *
* Returns: 0 if the block type matches the expected type
* -ESTALE if it doesn't match
* or -ve errno if something went wrong while checking
@@ -2534,6 +2631,13 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
rbm.rgd = rgd;
error = gfs2_rbm_from_block(&rbm, no_addr);
if (!WARN_ON_ONCE(error)) {
+ /*
+ * No need to take the local resource group lock here; the
+ * inode glock of @no_addr provides the necessary
+ * synchronization in case the block is an inode. (In case
+ * the block is not an inode, the block type will not match
+ * the @type we are looking for.)
+ */
if (gfs2_testbit(&rbm, false) != type)
error = -ESTALE;
}
@@ -2578,7 +2682,7 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
return;
rgd = gfs2_blk2rgrpd(sdp, block, 1);
} else {
- rgd = ip->i_res.rs_rbm.rgd;
+ rgd = ip->i_res.rs_rgd;
if (!rgd || !rgrp_contains_block(rgd, block))
rgd = gfs2_blk2rgrpd(sdp, block, 1);
}
@@ -2633,9 +2737,8 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
sizeof(struct gfs2_holder),
GFP_NOFS | __GFP_NOFAIL);
for (x = 0; x < rlist->rl_rgrps; x++)
- gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
- LM_ST_EXCLUSIVE, 0,
- &rlist->rl_ghs[x]);
+ gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &rlist->rl_ghs[x]);
}
/**
@@ -2658,3 +2761,14 @@ void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
}
}
+void rgrp_lock_local(struct gfs2_rgrpd *rgd)
+{
+ BUG_ON(!gfs2_glock_is_held_excl(rgd->rd_gl) &&
+ !test_bit(SDF_NORECOVERY, &rgd->rd_sbd->sd_flags));
+ mutex_lock(&rgd->rd_mutex);
+}
+
+void rgrp_unlock_local(struct gfs2_rgrpd *rgd)
+{
+ mutex_unlock(&rgd->rd_mutex);
+}
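To make the new two-level accounting above easier to follow: rd_requested tracks blocks covered by reservations sitting in the rb-tree (a soft hint), while rd_reserved and the per-inode rs_reserved track blocks actually pinned by allocations in flight, and gfs2_inplace_reserve() now requires both budgets to leave room for the target. The following standalone sketch mirrors that check under simplified assumptions (made-up types and values, no locking, and it ignores the caller's own reservation that rgd_free() subtracts out); rgrp_can_satisfy() is a hypothetical helper, not kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for struct gfs2_rgrpd */
    struct rgrpd {
            unsigned int rd_free_clone;  /* free blocks in the rgrp */
            unsigned int rd_requested;   /* blocks covered by tree reservations */
            unsigned int rd_reserved;    /* blocks pinned by in-flight allocations */
    };

    /* An rgrp is usable when both the "requested" and "reserved" budgets
     * leave at least target blocks available. */
    static bool rgrp_can_satisfy(const struct rgrpd *rgd, unsigned int target)
    {
            unsigned int free_blocks = rgd->rd_free_clone >= rgd->rd_requested ?
                                       rgd->rd_free_clone - rgd->rd_requested : 0;
            unsigned int blocks_available = rgd->rd_free_clone - rgd->rd_reserved;

            return free_blocks >= target && blocks_available >= target;
    }

    int main(void)
    {
            struct rgrpd rgd = {
                    .rd_free_clone = 1000,
                    .rd_requested = 600,
                    .rd_reserved = 100,
            };

            printf("target 300: %s\n", rgrp_can_satisfy(&rgd, 300) ? "usable" : "skip");
            printf("target 500: %s\n", rgrp_can_satisfy(&rgd, 500) ? "usable" : "skip");
            return 0;
    }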
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 9a587ada51ed..a6855fd796e0 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -77,7 +77,7 @@ extern int gfs2_fitrim(struct file *filp, void __user *argp);
/* This is how to tell if a reservation is in the rgrp tree: */
static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs)
{
- return rs && !RB_EMPTY_NODE(&rs->rs_node);
+ return !RB_EMPTY_NODE(&rs->rs_node);
}
static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
@@ -88,4 +88,8 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
}
extern void check_and_update_goal(struct gfs2_inode *ip);
+
+extern void rgrp_lock_local(struct gfs2_rgrpd *rgd);
+extern void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
+
#endif /* __RGRP_DOT_H__ */
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 2f56acc41c04..97076d3f562f 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -81,19 +81,12 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp)
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
struct gfs2_jdesc *jd;
- int found = 0;
list_for_each_entry(jd, head, jd_list) {
- if (jd->jd_jid == jid) {
- found = 1;
- break;
- }
+ if (jd->jd_jid == jid)
+ return jd;
}
-
- if (!found)
- jd = NULL;
-
- return jd;
+ return NULL;
}
struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
@@ -165,7 +158,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
struct gfs2_glock *j_gl = ip->i_gl;
- struct gfs2_holder freeze_gh;
struct gfs2_log_header_host head;
int error;
@@ -173,12 +165,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
if (error)
return error;
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT,
- &freeze_gh);
- if (error)
- goto fail_threads;
-
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
if (gfs2_withdrawn(sdp)) {
error = -EIO;
@@ -205,13 +191,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- gfs2_glock_dq_uninit(&freeze_gh);
-
return 0;
fail:
- gfs2_glock_dq_uninit(&freeze_gh);
-fail_threads:
if (sdp->sd_quotad_process)
kthread_stop(sdp->sd_quotad_process);
sdp->sd_quotad_process = NULL;
@@ -452,7 +434,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
}
if (error)
- gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
+ gfs2_freeze_unlock(&sdp->sd_freeze_gh);
out:
while (!list_empty(&list)) {
@@ -562,8 +544,6 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
int need_endtrans = 0;
int ret;
- if (!(flags & I_DIRTY_INODE))
- return;
if (unlikely(gfs2_withdrawn(sdp)))
return;
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
@@ -607,32 +587,10 @@ out:
* Returns: errno
*/
-int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
- struct gfs2_holder freeze_gh;
- int error = 0;
int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- gfs2_holder_mark_uninitialized(&freeze_gh);
- if (sdp->sd_freeze_gl &&
- !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
- if (!log_write_allowed) {
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
- LM_ST_SHARED, LM_FLAG_TRY |
- LM_FLAG_NOEXP | GL_EXACT,
- &freeze_gh);
- if (error == GLR_TRYFAILED)
- error = 0;
- } else {
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
- LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT,
- &freeze_gh);
- if (error && !gfs2_withdrawn(sdp))
- return error;
- }
- }
-
gfs2_flush_delete_work(sdp);
if (!log_write_allowed && current == sdp->sd_quotad_process)
fs_warn(sdp, "The quotad daemon is withdrawing.\n");
@@ -652,24 +610,19 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
GFS2_LFC_MAKE_FS_RO);
- wait_event(sdp->sd_reserving_log_wait,
- atomic_read(&sdp->sd_reserving_log) == 0);
- gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
- sdp->sd_jdesc->jd_blocks);
+ wait_event_timeout(sdp->sd_log_waitq,
+ gfs2_log_is_empty(sdp),
+ HZ * 5);
+ gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
} else {
- wait_event_timeout(sdp->sd_reserving_log_wait,
- atomic_read(&sdp->sd_reserving_log) == 0,
+ wait_event_timeout(sdp->sd_log_waitq,
+ gfs2_log_is_empty(sdp),
HZ * 5);
}
- if (gfs2_holder_initialized(&freeze_gh))
- gfs2_glock_dq_uninit(&freeze_gh);
-
gfs2_quota_cleanup(sdp);
if (!log_write_allowed)
sdp->sd_vfs->s_flags |= SB_RDONLY;
-
- return error;
}
/**
@@ -681,7 +634,6 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
static void gfs2_put_super(struct super_block *sb)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
- int error;
struct gfs2_jdesc *jd;
/* No more recovery requests */
@@ -702,9 +654,7 @@ restart:
spin_unlock(&sdp->sd_jindex_spin);
if (!sb_rdonly(sb)) {
- error = gfs2_make_fs_ro(sdp);
- if (error)
- gfs2_io_error(sdp);
+ gfs2_make_fs_ro(sdp);
}
WARN_ON(gfs2_withdrawing(sdp));
@@ -772,10 +722,8 @@ void gfs2_freeze_func(struct work_struct *work)
struct super_block *sb = sdp->sd_vfs;
atomic_inc(&sb->s_active);
- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
- LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
+ error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
if (error) {
- fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
gfs2_assert_withdraw(sdp, 0);
} else {
atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
@@ -785,7 +733,7 @@ void gfs2_freeze_func(struct work_struct *work)
error);
gfs2_assert_withdraw(sdp, 0);
}
- gfs2_glock_dq_uninit(&freeze_gh);
+ gfs2_freeze_unlock(&freeze_gh);
}
deactivate_super(sb);
clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
@@ -853,7 +801,7 @@ static int gfs2_unfreeze(struct super_block *sb)
return 0;
}
- gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
+ gfs2_freeze_unlock(&sdp->sd_freeze_gh);
mutex_unlock(&sdp->sd_freeze_mutex);
return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
}
@@ -1229,7 +1177,8 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
goto out_qs;
}
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &gh);
if (error)
goto out_qs;
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index c9fb2a654181..ec4affb33ed5 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -11,6 +11,10 @@
#include <linux/dcache.h>
#include "incore.h"
+/* Supported fs format version range */
+#define GFS2_FS_FORMAT_MIN (1801)
+#define GFS2_FS_FORMAT_MAX (1802)
+
extern void gfs2_lm_unmount(struct gfs2_sbd *sdp);
static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
@@ -30,7 +34,7 @@ extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
struct gfs2_inode **ipp);
extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
-extern int gfs2_make_fs_ro(struct gfs2_sbd *sdp);
+extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
@@ -54,7 +58,9 @@ extern struct file_system_type gfs2meta_fs_type;
extern const struct export_operations gfs2_export_ops;
extern const struct super_operations gfs2_super_ops;
extern const struct dentry_operations gfs2_dops;
-extern const struct xattr_handler *gfs2_xattr_handlers[];
+
+extern const struct xattr_handler *gfs2_xattr_handlers_max[];
+extern const struct xattr_handler **gfs2_xattr_handlers_min;
#endif /* __SUPER_DOT_H__ */
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 0b2f858d9a8c..bd6c8e9e49db 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -560,6 +560,7 @@ TRACE_EVENT(gfs2_block_alloc,
__field( u8, block_state )
__field( u64, rd_addr )
__field( u32, rd_free_clone )
+ __field( u32, rd_requested )
__field( u32, rd_reserved )
),
@@ -571,17 +572,20 @@ TRACE_EVENT(gfs2_block_alloc,
__entry->block_state = block_state;
__entry->rd_addr = rgd->rd_addr;
__entry->rd_free_clone = rgd->rd_free_clone;
+ __entry->rd_requested = rgd->rd_requested;
__entry->rd_reserved = rgd->rd_reserved;
),
- TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rr:%lu",
+ TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rq:%u rr:%u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->inum,
(unsigned long long)__entry->start,
(unsigned long)__entry->len,
block_state_name(__entry->block_state),
(unsigned long long)__entry->rd_addr,
- __entry->rd_free_clone, (unsigned long)__entry->rd_reserved)
+ __entry->rd_free_clone,
+ __entry->rd_requested,
+ __entry->rd_reserved)
);
/* Keep track of multi-block reservations as they are allocated/freed */
@@ -595,33 +599,40 @@ TRACE_EVENT(gfs2_rs,
__field( dev_t, dev )
__field( u64, rd_addr )
__field( u32, rd_free_clone )
+ __field( u32, rd_requested )
__field( u32, rd_reserved )
__field( u64, inum )
__field( u64, start )
- __field( u32, free )
+ __field( u32, requested )
+ __field( u32, reserved )
__field( u8, func )
),
TP_fast_assign(
- __entry->dev = rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev;
- __entry->rd_addr = rs->rs_rbm.rgd->rd_addr;
- __entry->rd_free_clone = rs->rs_rbm.rgd->rd_free_clone;
- __entry->rd_reserved = rs->rs_rbm.rgd->rd_reserved;
+ __entry->dev = rs->rs_rgd->rd_sbd->sd_vfs->s_dev;
+ __entry->rd_addr = rs->rs_rgd->rd_addr;
+ __entry->rd_free_clone = rs->rs_rgd->rd_free_clone;
+ __entry->rd_requested = rs->rs_rgd->rd_requested;
+ __entry->rd_reserved = rs->rs_rgd->rd_reserved;
__entry->inum = container_of(rs, struct gfs2_inode,
i_res)->i_no_addr;
- __entry->start = gfs2_rbm_to_block(&rs->rs_rbm);
- __entry->free = rs->rs_free;
+ __entry->start = rs->rs_start;
+ __entry->requested = rs->rs_requested;
+ __entry->reserved = rs->rs_reserved;
__entry->func = func;
),
- TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s f:%lu",
+ TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%u rq:%u rr:%u %s q:%u r:%u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->inum,
(unsigned long long)__entry->start,
(unsigned long long)__entry->rd_addr,
- (unsigned long)__entry->rd_free_clone,
- (unsigned long)__entry->rd_reserved,
- rs_func_name(__entry->func), (unsigned long)__entry->free)
+ __entry->rd_free_clone,
+ __entry->rd_requested,
+ __entry->rd_reserved,
+ rs_func_name(__entry->func),
+ __entry->requested,
+ __entry->reserved)
);
#endif /* _TRACE_GFS2_H */
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 6d4bf7ea7b3b..63fec11ef2ce 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -31,17 +31,17 @@ static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
test_bit(TR_TOUCHED, &tr->tr_flags));
- fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
+ fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n",
tr->tr_num_buf_new, tr->tr_num_buf_rm,
tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
- tr->tr_num_revoke, tr->tr_num_revoke_rm);
+ tr->tr_num_revoke);
}
-int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
- unsigned int revokes)
+int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
+ unsigned int blocks, unsigned int revokes,
+ unsigned long ip)
{
- struct gfs2_trans *tr;
- int error;
+ unsigned int extra_revokes;
if (current->journal_info) {
gfs2_print_trans(sdp, current->journal_info);
@@ -52,39 +52,72 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
return -EROFS;
- tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
- if (!tr)
- return -ENOMEM;
-
- tr->tr_ip = _RET_IP_;
+ tr->tr_ip = ip;
tr->tr_blocks = blocks;
tr->tr_revokes = revokes;
- tr->tr_reserved = 1;
- set_bit(TR_ALLOCED, &tr->tr_flags);
- if (blocks)
- tr->tr_reserved += 6 + blocks;
- if (revokes)
- tr->tr_reserved += gfs2_struct2blk(sdp, revokes);
+ tr->tr_reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
+ if (blocks) {
+ /*
+ * The reserved blocks are either used for data or metadata.
+ * We can have mixed data and metadata, each with its own log
+ * descriptor block; see calc_reserved().
+ */
+ tr->tr_reserved += blocks + 1 + DIV_ROUND_UP(blocks - 1, databuf_limit(sdp));
+ }
INIT_LIST_HEAD(&tr->tr_databuf);
INIT_LIST_HEAD(&tr->tr_buf);
INIT_LIST_HEAD(&tr->tr_list);
INIT_LIST_HEAD(&tr->tr_ail1_list);
INIT_LIST_HEAD(&tr->tr_ail2_list);
+ if (gfs2_assert_warn(sdp, tr->tr_reserved <= sdp->sd_jdesc->jd_blocks))
+ return -EINVAL;
+
sb_start_intwrite(sdp->sd_vfs);
- error = gfs2_log_reserve(sdp, tr->tr_reserved);
- if (error)
- goto fail;
+ /*
+ * Try the reservations under sd_log_flush_lock to prevent log flushes
+ * from creating inconsistencies between the number of allocated and
+ * reserved revokes. If that fails, do a full-block allocation outside
+ * of the lock to avoid stalling log flushes. Then, allot the
+ * appropriate number of blocks to revokes, use as many revokes locally
+ * as needed, and "release" the surplus into the revokes pool.
+ */
+
+ down_read(&sdp->sd_log_flush_lock);
+ if (gfs2_log_try_reserve(sdp, tr, &extra_revokes))
+ goto reserved;
+ up_read(&sdp->sd_log_flush_lock);
+ gfs2_log_reserve(sdp, tr, &extra_revokes);
+ down_read(&sdp->sd_log_flush_lock);
+
+reserved:
+ gfs2_log_release_revokes(sdp, extra_revokes);
+ if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
+ gfs2_log_release_revokes(sdp, tr->tr_revokes);
+ up_read(&sdp->sd_log_flush_lock);
+ gfs2_log_release(sdp, tr->tr_reserved);
+ sb_end_intwrite(sdp->sd_vfs);
+ return -EROFS;
+ }
current->journal_info = tr;
return 0;
+}
-fail:
- sb_end_intwrite(sdp->sd_vfs);
- kmem_cache_free(gfs2_trans_cachep, tr);
+int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
+ unsigned int revokes)
+{
+ struct gfs2_trans *tr;
+ int error;
+ tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
+ if (!tr)
+ return -ENOMEM;
+ error = __gfs2_trans_begin(tr, sdp, blocks, revokes, _RET_IP_);
+ if (error)
+ kmem_cache_free(gfs2_trans_cachep, tr);
return error;
}
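__gfs2_trans_begin() now charges GFS2_LOG_FLUSH_MIN_BLOCKS plus the per-block descriptor overhead up front and acquires the log reservation with a try-then-block pattern, so the common case stays under sd_log_flush_lock while a sleeping reservation never holds up log flushes. The shape of that pattern, restated as a hedged sketch (try_reserve()/reserve_blocking() are stand-ins for gfs2_log_try_reserve()/gfs2_log_reserve() above):

	down_read(&sdp->sd_log_flush_lock);
	if (!try_reserve(sdp, tr, &extra_revokes)) {
		/* Slow path: drop the lock so log flushes can proceed while we wait. */
		up_read(&sdp->sd_log_flush_lock);
		reserve_blocking(sdp, tr, &extra_revokes);
		down_read(&sdp->sd_log_flush_lock);
	}
	/* Any surplus revokes picked up along the way go back into the pool. */
	gfs2_log_release_revokes(sdp, extra_revokes);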
@@ -92,37 +125,39 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
{
struct gfs2_trans *tr = current->journal_info;
s64 nbuf;
- int alloced = test_bit(TR_ALLOCED, &tr->tr_flags);
current->journal_info = NULL;
if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
+ gfs2_log_release_revokes(sdp, tr->tr_revokes);
+ up_read(&sdp->sd_log_flush_lock);
gfs2_log_release(sdp, tr->tr_reserved);
- if (alloced) {
+ if (!test_bit(TR_ONSTACK, &tr->tr_flags))
gfs2_trans_free(sdp, tr);
- sb_end_intwrite(sdp->sd_vfs);
- }
+ sb_end_intwrite(sdp->sd_vfs);
return;
}
+ gfs2_log_release_revokes(sdp, tr->tr_revokes - tr->tr_num_revoke);
+
nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new;
nbuf -= tr->tr_num_buf_rm;
nbuf -= tr->tr_num_databuf_rm;
- if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) &&
- (tr->tr_num_revoke <= tr->tr_revokes)))
+ if (gfs2_assert_withdraw(sdp, nbuf <= tr->tr_blocks) ||
+ gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes))
gfs2_print_trans(sdp, tr);
gfs2_log_commit(sdp, tr);
- if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags))
+ if (!test_bit(TR_ONSTACK, &tr->tr_flags) &&
+ !test_bit(TR_ATTACHED, &tr->tr_flags))
gfs2_trans_free(sdp, tr);
up_read(&sdp->sd_log_flush_lock);
if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_TRANS_END);
- if (alloced)
- sb_end_intwrite(sdp->sd_vfs);
+ sb_end_intwrite(sdp->sd_vfs);
}
static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
@@ -134,6 +169,8 @@ static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
bd->bd_bh = bh;
bd->bd_gl = gl;
INIT_LIST_HEAD(&bd->bd_list);
+ INIT_LIST_HEAD(&bd->bd_ail_st_list);
+ INIT_LIST_HEAD(&bd->bd_ail_gl_list);
bh->b_private = bd;
return bd;
}
@@ -262,7 +299,6 @@ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
{
struct gfs2_bufdata *bd, *tmp;
- struct gfs2_trans *tr = current->journal_info;
unsigned int n = len;
gfs2_log_lock(sdp);
@@ -274,7 +310,7 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
if (bd->bd_gl)
gfs2_glock_remove_revoke(bd->bd_gl);
kmem_cache_free(gfs2_bufdata_cachep, bd);
- tr->tr_num_revoke_rm++;
+ gfs2_log_release_revokes(sdp, 1);
if (--n == 0)
break;
}
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index 83199ce5a5c5..c76ad9a4c75a 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -27,13 +27,16 @@ struct gfs2_glock;
* block, or all of the blocks in the rg, whichever is smaller */
static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested)
{
- struct gfs2_rgrpd *rgd = ip->i_res.rs_rbm.rgd;
+ struct gfs2_rgrpd *rgd = ip->i_res.rs_rgd;
if (requested < rgd->rd_length)
return requested + 1;
return rgd->rd_length;
}
+extern int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
+ unsigned int blocks, unsigned int revokes,
+ unsigned long ip);
extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
unsigned int revokes);
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index a374397f4273..4f034b87b427 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -91,18 +91,50 @@ out_unlock:
return error;
}
+/**
+ * gfs2_freeze_lock - hold the freeze glock
+ * @sdp: the superblock
+ * @freeze_gh: pointer to the requested holder
+ * @caller_flags: any additional flags needed by the caller
+ */
+int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
+ int caller_flags)
+{
+ int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags;
+ int error;
+
+ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
+ freeze_gh);
+ if (error && error != GLR_TRYFAILED)
+ fs_err(sdp, "can't lock the freeze lock: %d\n", error);
+ return error;
+}
+
+void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh)
+{
+ if (gfs2_holder_initialized(freeze_gh))
+ gfs2_glock_dq_uninit(freeze_gh);
+}
+
static void signal_our_withdraw(struct gfs2_sbd *sdp)
{
- struct gfs2_glock *gl = sdp->sd_live_gh.gh_gl;
- struct inode *inode = sdp->sd_jdesc->jd_inode;
- struct gfs2_inode *ip = GFS2_I(inode);
- u64 no_formal_ino = ip->i_no_formal_ino;
+ struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
+ struct inode *inode;
+ struct gfs2_inode *ip;
+ struct gfs2_glock *i_gl;
+ u64 no_formal_ino;
+ int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
int ret = 0;
int tries;
- if (test_bit(SDF_NORECOVERY, &sdp->sd_flags))
+ if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
return;
+ inode = sdp->sd_jdesc->jd_inode;
+ ip = GFS2_I(inode);
+ i_gl = ip->i_gl;
+ no_formal_ino = ip->i_no_formal_ino;
+
/* Prevent any glock dq until withdraw recovery is complete */
set_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
/*
@@ -117,8 +149,21 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
* therefore we need to clear SDF_JOURNAL_LIVE manually.
*/
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- if (!sb_rdonly(sdp->sd_vfs))
- ret = gfs2_make_fs_ro(sdp);
+ if (!sb_rdonly(sdp->sd_vfs)) {
+ struct gfs2_holder freeze_gh;
+
+ gfs2_holder_mark_uninitialized(&freeze_gh);
+ if (sdp->sd_freeze_gl &&
+ !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
+ ret = gfs2_freeze_lock(sdp, &freeze_gh,
+ log_write_allowed ? 0 : LM_FLAG_TRY);
+ if (ret == GLR_TRYFAILED)
+ ret = 0;
+ }
+ if (!ret)
+ gfs2_make_fs_ro(sdp);
+ gfs2_freeze_unlock(&freeze_gh);
+ }
if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
if (!ret)
@@ -141,7 +186,8 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
thaw_super(sdp->sd_vfs);
} else {
- wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
+ wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE,
+ TASK_UNINTERRUPTIBLE);
}
/*
@@ -161,15 +207,15 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
* on other nodes to be successful, otherwise we remain the owner of
* the glock as far as dlm is concerned.
*/
- if (gl->gl_ops->go_free) {
- set_bit(GLF_FREEING, &gl->gl_flags);
- wait_on_bit(&gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
+ if (i_gl->gl_ops->go_free) {
+ set_bit(GLF_FREEING, &i_gl->gl_flags);
+ wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
}
/*
* Dequeue the "live" glock, but keep a reference so it's never freed.
*/
- gfs2_glock_hold(gl);
+ gfs2_glock_hold(live_gl);
gfs2_glock_dq_wait(&sdp->sd_live_gh);
/*
* We enqueue the "live" glock in EX so that all other nodes
@@ -208,7 +254,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
gfs2_glock_nq(&sdp->sd_live_gh);
}
- gfs2_glock_queue_put(gl); /* drop the extra reference we acquired */
+ gfs2_glock_queue_put(live_gl); /* drop extra reference we acquired */
clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
/*
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index a4443dd8a94b..69e1a0ae5a4d 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -149,6 +149,9 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
bool verbose);
+extern int gfs2_freeze_lock(struct gfs2_sbd *sdp,
+ struct gfs2_holder *freeze_gh, int caller_flags);
+extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
#define gfs2_io_error(sdp) \
gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
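gfs2_freeze_lock() and gfs2_freeze_unlock() centralize the shared LM_FLAG_NOEXP | GL_EXACT enqueue on the freeze glock that gfs2_make_fs_rw/ro, the freeze worker and the withdraw path previously open-coded; gfs2_freeze_unlock() is safe on a holder that was never acquired. The blocking calling convention, mirroring gfs2_freeze_func() above (non-blocking callers pass LM_FLAG_TRY and treat GLR_TRYFAILED as "carry on without it"):

	struct gfs2_holder freeze_gh;
	int error;

	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
	if (error)
		return error;
	/* ... work that must not race with a filesystem freeze ... */
	gfs2_freeze_unlock(&freeze_gh);		/* drop the freeze glock */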
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 9d7667bc4292..124b3d5a7266 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -70,6 +70,20 @@ static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
return 0;
}
+static bool gfs2_eatype_valid(struct gfs2_sbd *sdp, u8 type)
+{
+ switch(sdp->sd_sb.sb_fs_format) {
+ case GFS2_FS_FORMAT_MAX:
+ return true;
+
+ case GFS2_FS_FORMAT_MIN:
+ return type <= GFS2_EATYPE_SECURITY;
+
+ default:
+ return false;
+ }
+}
+
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea,
struct gfs2_ea_header *prev, void *private);
@@ -77,6 +91,7 @@ typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
ea_call_t ea_call, void *data)
{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_ea_header *ea, *prev = NULL;
int error = 0;
@@ -89,9 +104,8 @@ static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
bh->b_data + bh->b_size))
goto fail;
- if (!GFS2_EATYPE_VALID(ea->ea_type))
+ if (!gfs2_eatype_valid(sdp, ea->ea_type))
goto fail;
-
error = ea_call(ip, bh, ea, prev, data);
if (error)
return error;
@@ -259,7 +273,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
return -EIO;
}
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &rg_gh);
if (error)
return error;
@@ -344,6 +359,7 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
void *private)
{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct ea_list *ei = private;
struct gfs2_ea_request *er = ei->ei_er;
unsigned int ea_size;
@@ -353,6 +369,8 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
if (ea->ea_type == GFS2_EATYPE_UNUSED)
return 0;
+ BUG_ON(ea->ea_type > GFS2_EATYPE_SECURITY &&
+ sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MIN);
switch (ea->ea_type) {
case GFS2_EATYPE_USR:
prefix = "user.";
@@ -366,8 +384,12 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
prefix = "security.";
l = 9;
break;
+ case GFS2_EATYPE_TRUSTED:
+ prefix = "trusted.";
+ l = 8;
+ break;
default:
- BUG();
+ return 0;
}
ea_size = l + ea->ea_name_len + 1;
@@ -1214,6 +1236,7 @@ int __gfs2_xattr_set(struct inode *inode, const char *name,
}
static int gfs2_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -1385,7 +1408,8 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
return -EIO;
}
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ LM_FLAG_NODE_SCOPE, &gh);
if (error)
return error;
@@ -1463,7 +1487,25 @@ static const struct xattr_handler gfs2_xattr_security_handler = {
.set = gfs2_xattr_set,
};
-const struct xattr_handler *gfs2_xattr_handlers[] = {
+static bool
+gfs2_xattr_trusted_list(struct dentry *dentry)
+{
+ return capable(CAP_SYS_ADMIN);
+}
+
+static const struct xattr_handler gfs2_xattr_trusted_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .flags = GFS2_EATYPE_TRUSTED,
+ .list = gfs2_xattr_trusted_list,
+ .get = gfs2_xattr_get,
+ .set = gfs2_xattr_set,
+};
+
+const struct xattr_handler *gfs2_xattr_handlers_max[] = {
+ /* GFS2_FS_FORMAT_MAX */
+ &gfs2_xattr_trusted_handler,
+
+ /* GFS2_FS_FORMAT_MIN */
&gfs2_xattr_user_handler,
&gfs2_xattr_security_handler,
&posix_acl_access_xattr_handler,
@@ -1471,3 +1513,4 @@ const struct xattr_handler *gfs2_xattr_handlers[] = {
NULL,
};
+const struct xattr_handler **gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;
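Because gfs2_xattr_handlers_min is defined as gfs2_xattr_handlers_max + 1, the two format-dependent tables share one array: a filesystem at format 1801 (GFS2_FS_FORMAT_MIN) sees everything after the trusted handler, while format 1802 (GFS2_FS_FORMAT_MAX) sees the full list including "trusted." xattrs. The selection site is not part of this hunk; a sketch of what it would look like at mount time, purely for illustration:

	/* Illustrative: pick the handler table from the on-disk format. */
	sb->s_xattr = (sdp->sd_sb.sb_fs_format >= GFS2_FS_FORMAT_MAX) ?
			gfs2_xattr_handlers_max : gfs2_xattr_handlers_min;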
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index 74fa62643136..2bd54efaf416 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -121,6 +121,7 @@ static int hfs_xattr_get(const struct xattr_handler *handler,
}
static int hfs_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value, size_t size,
int flags)
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 3bf2ae0e467c..527f6e46cbe8 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -189,8 +189,8 @@ static int hfs_dir_release(struct inode *inode, struct file *file)
* a directory and return a corresponding inode, given the inode for
* the directory and the name (and its length) of the new file.
*/
-static int hfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int hfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode;
int res;
@@ -219,7 +219,8 @@ static int hfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
* in a directory, given the inode for the parent directory and the
* name (and its length) of the new directory.
*/
-static int hfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int hfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
int res;
@@ -279,9 +280,9 @@ static int hfs_remove(struct inode *dir, struct dentry *dentry)
* new file/directory.
* XXX: how do you handle must_be dir?
*/
-static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int hfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
int res;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index f71c384064c8..b8eb0322a3e5 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -204,7 +204,8 @@ extern const struct address_space_operations hfs_btree_aops;
extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
extern int hfs_write_inode(struct inode *, struct writeback_control *);
-extern int hfs_inode_setattr(struct dentry *, struct iattr *);
+extern int hfs_inode_setattr(struct user_namespace *, struct dentry *,
+ struct iattr *);
extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
__be32 log_size, __be32 phys_size, u32 clump_size);
extern struct inode *hfs_iget(struct super_block *, struct hfs_cat_key *, hfs_cat_rec *);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index f35a37c65e5f..3fc5cb346586 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -602,13 +602,15 @@ static int hfs_file_release(struct inode *inode, struct file *file)
* correspond to the same HFS file.
*/
-int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
+int hfs_inode_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
int error;
- error = setattr_prepare(dentry, attr); /* basic permission checks */
+ error = setattr_prepare(&init_user_ns, dentry,
+ attr); /* basic permission checks */
if (error)
return error;
@@ -647,7 +649,7 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
current_time(inode);
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 29a9dcfbe81f..03e6c046faf4 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -434,8 +434,8 @@ out:
return res;
}
-static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
+static int hfsplus_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode;
@@ -476,8 +476,8 @@ out:
return res;
}
-static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
+static int hfsplus_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
struct inode *inode;
@@ -517,18 +517,20 @@ out:
return res;
}
-static int hfsplus_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int hfsplus_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
- return hfsplus_mknod(dir, dentry, mode, 0);
+ return hfsplus_mknod(&init_user_ns, dir, dentry, mode, 0);
}
-static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int hfsplus_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return hfsplus_mknod(dir, dentry, mode | S_IFDIR, 0);
+ return hfsplus_mknod(&init_user_ns, dir, dentry, mode | S_IFDIR, 0);
}
-static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
+static int hfsplus_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index a92de5199ec3..12b20479ed2b 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -488,8 +488,9 @@ void hfsplus_inode_write_fork(struct inode *inode,
struct hfsplus_fork_raw *fork);
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd);
int hfsplus_cat_write_inode(struct inode *inode);
-int hfsplus_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags);
+int hfsplus_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags);
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index e3da9e96b835..078c5c8a5156 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -241,12 +241,13 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
return 0;
}
-static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
+static int hfsplus_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
@@ -264,14 +265,15 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
inode->i_mtime = inode->i_ctime = current_time(inode);
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
-int hfsplus_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
+int hfsplus_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
@@ -286,7 +288,7 @@ int hfsplus_getattr(const struct path *path, struct kstat *stat,
stat->attributes_mask |= STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP;
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
return 0;
}
@@ -340,7 +342,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
}
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ blkdev_issue_flush(inode->i_sb->s_bdev);
inode_unlock(inode);
@@ -376,7 +378,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
return NULL;
inode->i_ino = sbi->next_cnid++;
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
set_nlink(inode, 1);
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index ce15b9496b77..3edb1926d127 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -91,7 +91,7 @@ static int hfsplus_ioctl_setflags(struct file *file, int __user *user_flags)
if (err)
goto out;
- if (!inode_owner_or_capable(inode)) {
+ if (!inode_owner_or_capable(&init_user_ns, inode)) {
err = -EACCES;
goto out_drop_write;
}
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 807119ae5adf..b9e3db3f855f 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -239,7 +239,7 @@ out:
mutex_unlock(&sbi->vh_mutex);
if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
- blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
+ blkdev_issue_flush(sb->s_bdev);
return error;
}
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index bb0b27d88e50..4d169c5a2673 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -858,6 +858,7 @@ static int hfsplus_osx_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_osx_setxattr(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index cfbe6a3bfb1e..c1c7a16cbf21 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -23,6 +23,7 @@ static int hfsplus_security_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_security_setxattr(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
index fbad91e1dada..e150372ec564 100644
--- a/fs/hfsplus/xattr_trusted.c
+++ b/fs/hfsplus/xattr_trusted.c
@@ -22,6 +22,7 @@ static int hfsplus_trusted_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_trusted_setxattr(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
index 74d19faf255e..a6b60b153916 100644
--- a/fs/hfsplus/xattr_user.c
+++ b/fs/hfsplus/xattr_user.c
@@ -22,6 +22,7 @@ static int hfsplus_user_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_user_setxattr(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index aea35459d390..29e407762626 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -34,6 +34,8 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
#define FILE_HOSTFS_I(file) HOSTFS_I(file_inode(file))
+static struct kmem_cache *hostfs_inode_cache;
+
/* Changed in hostfs_args before the kernel starts running */
static char *root_ino = "";
static int append = 0;
@@ -221,7 +223,7 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb)
{
struct hostfs_inode_info *hi;
- hi = kmalloc(sizeof(*hi), GFP_KERNEL_ACCOUNT);
+ hi = kmem_cache_alloc(hostfs_inode_cache, GFP_KERNEL_ACCOUNT);
if (hi == NULL)
return NULL;
hi->fd = -1;
@@ -243,7 +245,7 @@ static void hostfs_evict_inode(struct inode *inode)
static void hostfs_free_inode(struct inode *inode)
{
- kfree(HOSTFS_I(inode));
+ kmem_cache_free(hostfs_inode_cache, HOSTFS_I(inode));
}
static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
@@ -555,8 +557,8 @@ static int read_name(struct inode *ino, char *name)
return 0;
}
-static int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int hostfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode;
char *name;
@@ -654,8 +656,8 @@ static int hostfs_unlink(struct inode *ino, struct dentry *dentry)
return err;
}
-static int hostfs_symlink(struct inode *ino, struct dentry *dentry,
- const char *to)
+static int hostfs_symlink(struct user_namespace *mnt_userns, struct inode *ino,
+ struct dentry *dentry, const char *to)
{
char *file;
int err;
@@ -667,7 +669,8 @@ static int hostfs_symlink(struct inode *ino, struct dentry *dentry,
return err;
}
-static int hostfs_mkdir(struct inode *ino, struct dentry *dentry, umode_t mode)
+static int hostfs_mkdir(struct user_namespace *mnt_userns, struct inode *ino,
+ struct dentry *dentry, umode_t mode)
{
char *file;
int err;
@@ -691,7 +694,8 @@ static int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
return err;
}
-static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+static int hostfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
char *name;
@@ -729,7 +733,8 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
return err;
}
-static int hostfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
+static int hostfs_rename2(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
@@ -757,7 +762,8 @@ static int hostfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
return err;
}
-static int hostfs_permission(struct inode *ino, int desired)
+static int hostfs_permission(struct user_namespace *mnt_userns,
+ struct inode *ino, int desired)
{
char *name;
int r = 0, w = 0, x = 0, err;
@@ -779,11 +785,12 @@ static int hostfs_permission(struct inode *ino, int desired)
err = access_file(name, r, w, x);
__putname(name);
if (!err)
- err = generic_permission(ino, desired);
+ err = generic_permission(&init_user_ns, ino, desired);
return err;
}
-static int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
+static int hostfs_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct hostfs_iattr attrs;
@@ -792,7 +799,7 @@ static int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
int fd = HOSTFS_I(inode)->fd;
- err = setattr_prepare(dentry, attr);
+ err = setattr_prepare(&init_user_ns, dentry, attr);
if (err)
return err;
@@ -849,7 +856,7 @@ static int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_size != i_size_read(inode))
truncate_setsize(inode, attr->ia_size);
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
@@ -986,12 +993,16 @@ MODULE_ALIAS_FS("hostfs");
static int __init init_hostfs(void)
{
+ hostfs_inode_cache = KMEM_CACHE(hostfs_inode_info, 0);
+ if (!hostfs_inode_cache)
+ return -ENOMEM;
return register_filesystem(&hostfs_type);
}
static void __exit exit_hostfs(void)
{
unregister_filesystem(&hostfs_type);
+ kmem_cache_destroy(hostfs_inode_cache);
}
module_init(init_hostfs)
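hostfs now backs its per-inode allocations with a dedicated slab cache, created before the filesystem is registered and destroyed only after it is unregistered, so no inode can still be allocated from (or freed to) a dead cache. The same init/exit ordering in a generic, self-contained form; the struct and file_system_type below are stand-ins, not hostfs symbols:

	struct example_inode_info { int fd; };			/* stand-in payload */
	static struct kmem_cache *example_inode_cache;
	static struct file_system_type example_fs_type;		/* assumed defined elsewhere */

	static int __init example_init(void)
	{
		example_inode_cache = KMEM_CACHE(example_inode_info, 0);
		if (!example_inode_cache)
			return -ENOMEM;
		return register_filesystem(&example_fs_type);
	}

	static void __exit example_exit(void)
	{
		/* Unregister first; only then is it safe to destroy the cache. */
		unregister_filesystem(&example_fs_type);
		kmem_cache_destroy(example_inode_cache);
	}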
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 1cca83218fb5..167ec6884642 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -280,7 +280,7 @@ void hpfs_init_inode(struct inode *);
void hpfs_read_inode(struct inode *);
void hpfs_write_inode(struct inode *);
void hpfs_write_inode_nolock(struct inode *);
-int hpfs_setattr(struct dentry *, struct iattr *);
+int hpfs_setattr(struct user_namespace *, struct dentry *, struct iattr *);
void hpfs_write_if_changed(struct inode *);
void hpfs_evict_inode(struct inode *);
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index eb8b4baf0f2e..82208cc28ebd 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -257,7 +257,8 @@ void hpfs_write_inode_nolock(struct inode *i)
brelse(bh);
}
-int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
+int hpfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error = -EINVAL;
@@ -274,7 +275,7 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
goto out_unlock;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
goto out_unlock;
@@ -288,7 +289,7 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
hpfs_truncate(inode);
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
hpfs_write_inode(inode);
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 1aee39160ac5..d73f8a67168e 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -20,7 +20,8 @@ static void hpfs_update_directory_times(struct inode *dir)
hpfs_write_inode_nolock(dir);
}
-static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int hpfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
@@ -128,7 +129,8 @@ bail:
return err;
}
-static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
+static int hpfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
@@ -215,7 +217,8 @@ bail:
return err;
}
-static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
+static int hpfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
@@ -289,7 +292,8 @@ bail:
return err;
}
-static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *symlink)
+static int hpfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symlink)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
@@ -506,10 +510,10 @@ fail:
const struct address_space_operations hpfs_symlink_aops = {
.readpage = hpfs_symlink_readpage
};
-
-static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+
+static int hpfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
const unsigned char *old_name = old_dentry->d_name.name;
unsigned old_len = old_dentry->d_name.len;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 21c20fd5f9ee..701c82c36138 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -171,7 +171,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
ret = -ENOMEM;
- if (hugetlb_reserve_pages(inode,
+ if (!hugetlb_reserve_pages(inode,
vma->vm_pgoff >> huge_page_order(h),
len >> huge_page_shift(h), vma,
vma->vm_flags))
@@ -310,7 +310,7 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
/*
* Support for read() - Find the page attached to f_mapping and copy out the
- * data. Its *very* similar to do_generic_mapping_read(), we can't use that
+ * data. Its *very* similar to generic_file_buffered_read(), we can't use that
* since it has PAGE_SIZE assumptions.
*/
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -442,15 +442,15 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
*
* truncation is indicated by end of range being LLONG_MAX
* In this case, we first scan the range and release found pages.
- * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
+ * After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
* maps and global counts. Page faults can not race with truncation
* in this routine. hugetlb_no_page() holds i_mmap_rwsem and prevents
* page faults in the truncated range by checking i_size. i_size is
* modified while holding i_mmap_rwsem.
* hole punch is indicated if end is not LLONG_MAX
* In the hole punch case we scan the range and release found pages.
- * Only when releasing a page is the associated region/reserv map
- * deleted. The region/reserv map for ranges without associated
+ * Only when releasing a page is the associated region/reserve map
+ * deleted. The region/reserve map for ranges without associated
* pages are not modified. Page faults can race with hole punch.
* This is indicated if we find a mapped page.
* Note: If the passed end of range value is beyond the end of file, but
@@ -567,7 +567,7 @@ static void hugetlbfs_evict_inode(struct inode *inode)
clear_inode(inode);
}
-static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
+static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
pgoff_t pgoff;
struct address_space *mapping = inode->i_mapping;
@@ -582,7 +582,6 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
i_mmap_unlock_write(mapping);
remove_inode_hugepages(inode, offset, LLONG_MAX);
- return 0;
}
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
@@ -604,7 +603,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
inode_lock(inode);
- /* protected by i_mutex */
+ /* protected by i_rwsem */
if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
inode_unlock(inode);
return -EPERM;
@@ -680,7 +679,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
*/
struct page *page;
unsigned long addr;
- int avoid_reserve = 0;
cond_resched();
@@ -716,8 +714,15 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
continue;
}
- /* Allocate page and add to page cache */
- page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
+ /*
+ * Allocate page without setting the avoid_reserve argument.
+ * There certainly are no reserves associated with the
+ * pseudo_vma. However, there could be shared mappings with
+ * reserves for the file at the inode level. If we fallocate
+ * pages in these areas, we need to consume the reserves
+ * to keep reservation accounting consistent.
+ */
+ page = alloc_huge_page(&pseudo_vma, addr, 0);
hugetlb_drop_vma_policy(&pseudo_vma);
if (IS_ERR(page)) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
@@ -735,7 +740,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- set_page_huge_active(page);
+ SetHPageMigratable(page);
/*
* unlock_page because locked by add_to_page_cache()
* put_page() due to reference from alloc_huge_page()
@@ -752,7 +757,8 @@ out:
return error;
}
-static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
+static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct hstate *h = hstate_inode(inode);
@@ -760,9 +766,7 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
unsigned int ia_valid = attr->ia_valid;
struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
- BUG_ON(!inode);
-
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
@@ -772,16 +776,14 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
if (newsize & ~huge_page_mask(h))
return -EINVAL;
- /* protected by i_mutex */
+ /* protected by i_rwsem */
if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
(newsize > oldsize && (info->seals & F_SEAL_GROW)))
return -EPERM;
- error = hugetlb_vmtruncate(inode, newsize);
- if (error)
- return error;
+ hugetlb_vmtruncate(inode, newsize);
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
@@ -837,7 +839,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
inode->i_ino = get_next_ino();
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
&hugetlbfs_i_mmap_rwsem_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
@@ -899,33 +901,39 @@ static int do_hugetlbfs_mknod(struct inode *dir,
return error;
}
-static int hugetlbfs_mknod(struct inode *dir,
- struct dentry *dentry, umode_t mode, dev_t dev)
+static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t dev)
{
return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}
-static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
+ int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
+ mode | S_IFDIR, 0);
if (!retval)
inc_nlink(dir);
return retval;
}
-static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
+static int hugetlbfs_create(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool excl)
{
- return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
+ return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}
-static int hugetlbfs_tmpfile(struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode)
{
return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}
-static int hugetlbfs_symlink(struct inode *dir,
- struct dentry *dentry, const char *symname)
+static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
+ const char *symname)
{
struct inode *inode;
int error = -ENOSPC;
@@ -945,17 +953,6 @@ static int hugetlbfs_symlink(struct inode *dir,
return error;
}
-/*
- * mark the head page dirty
- */
-static int hugetlbfs_set_page_dirty(struct page *page)
-{
- struct page *head = compound_head(page);
-
- SetPageDirty(head);
- return 0;
-}
-
static int hugetlbfs_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page,
enum migrate_mode mode)
@@ -966,15 +963,9 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
- /*
- * page_private is subpool pointer in hugetlb pages. Transfer to
- * new page. PagePrivate is not associated with page_private for
- * hugetlb pages and can not be set here as only page_huge_active
- * pages can be migrated.
- */
- if (page_private(page)) {
- set_page_private(newpage, page_private(page));
- set_page_private(page, 0);
+ if (hugetlb_page_subpool(page)) {
+ hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
+ hugetlb_set_page_subpool(page, NULL);
}
if (mode != MIGRATE_SYNC_NO_COPY)
@@ -1149,7 +1140,7 @@ static void hugetlbfs_destroy_inode(struct inode *inode)
static const struct address_space_operations hugetlbfs_aops = {
.write_begin = hugetlbfs_write_begin,
.write_end = hugetlbfs_write_end,
- .set_page_dirty = hugetlbfs_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_no_writeback,
.migratepage = hugetlbfs_migrate_page,
.error_remove_page = hugetlbfs_error_remove_page,
};
@@ -1349,7 +1340,7 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
/*
* Allocate and initialize subpool if maximum or minimum size is
- * specified. Any needed reservations (for minimim size) are taken
+ * specified.  Any needed reservations (for minimum size) are
* taken when the subpool is created.
*/
if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
@@ -1492,7 +1483,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
inode->i_size = size;
clear_nlink(inode);
- if (hugetlb_reserve_pages(inode, 0,
+ if (!hugetlb_reserve_pages(inode, 0,
size >> huge_page_shift(hstate_inode(inode)), NULL,
acctflag))
file = ERR_PTR(-ENOMEM);
@@ -1526,8 +1517,8 @@ static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
put_fs_context(fc);
}
if (IS_ERR(mnt))
- pr_err("Cannot mount internal hugetlbfs for page size %uK",
- 1U << (h->order + PAGE_SHIFT - 10));
+ pr_err("Cannot mount internal hugetlbfs for page size %luK",
+ huge_page_size(h) >> 10);
return mnt;
}
@@ -1555,7 +1546,7 @@ static int __init init_hugetlbfs_fs(void)
goto out_free;
/* default hstate mount is required */
- mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
+ mnt = mount_one_hugetlbfs(&default_hstate);
if (IS_ERR(mnt)) {
error = PTR_ERR(mnt);
goto out_unreg;
diff --git a/fs/init.c b/fs/init.c
index e9c320a48cf1..5c36adaa9b44 100644
--- a/fs/init.c
+++ b/fs/init.c
@@ -49,7 +49,7 @@ int __init init_chdir(const char *filename)
error = kern_path(filename, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
if (error)
return error;
- error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
+ error = path_permission(&path, MAY_EXEC | MAY_CHDIR);
if (!error)
set_fs_pwd(current->fs, &path);
path_put(&path);
@@ -64,7 +64,7 @@ int __init init_chroot(const char *filename)
error = kern_path(filename, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
if (error)
return error;
- error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
+ error = path_permission(&path, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
error = -EPERM;
@@ -118,7 +118,7 @@ int __init init_eaccess(const char *filename)
error = kern_path(filename, LOOKUP_FOLLOW, &path);
if (error)
return error;
- error = inode_permission(d_inode(path.dentry), MAY_ACCESS);
+ error = path_permission(&path, MAY_ACCESS);
path_put(&path);
return error;
}
@@ -157,8 +157,8 @@ int __init init_mknod(const char *filename, umode_t mode, unsigned int dev)
mode &= ~current_umask();
error = security_path_mknod(&path, dentry, mode, dev);
if (!error)
- error = vfs_mknod(path.dentry->d_inode, dentry, mode,
- new_decode_dev(dev));
+ error = vfs_mknod(mnt_user_ns(path.mnt), path.dentry->d_inode,
+ dentry, mode, new_decode_dev(dev));
done_path_create(&path, dentry);
return error;
}
@@ -167,6 +167,7 @@ int __init init_link(const char *oldname, const char *newname)
{
struct dentry *new_dentry;
struct path old_path, new_path;
+ struct user_namespace *mnt_userns;
int error;
error = kern_path(oldname, 0, &old_path);
@@ -181,14 +182,15 @@ int __init init_link(const char *oldname, const char *newname)
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto out_dput;
- error = may_linkat(&old_path);
+ mnt_userns = mnt_user_ns(new_path.mnt);
+ error = may_linkat(mnt_userns, &old_path);
if (unlikely(error))
goto out_dput;
error = security_path_link(old_path.dentry, &new_path, new_dentry);
if (error)
goto out_dput;
- error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry,
- NULL);
+ error = vfs_link(old_path.dentry, mnt_userns, new_path.dentry->d_inode,
+ new_dentry, NULL);
out_dput:
done_path_create(&new_path, new_dentry);
out:
@@ -207,7 +209,8 @@ int __init init_symlink(const char *oldname, const char *newname)
return PTR_ERR(dentry);
error = security_path_symlink(&path, dentry, oldname);
if (!error)
- error = vfs_symlink(path.dentry->d_inode, dentry, oldname);
+ error = vfs_symlink(mnt_user_ns(path.mnt), path.dentry->d_inode,
+ dentry, oldname);
done_path_create(&path, dentry);
return error;
}
@@ -230,7 +233,8 @@ int __init init_mkdir(const char *pathname, umode_t mode)
mode &= ~current_umask();
error = security_path_mkdir(&path, dentry, mode);
if (!error)
- error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ error = vfs_mkdir(mnt_user_ns(path.mnt), path.dentry->d_inode,
+ dentry, mode);
done_path_create(&path, dentry);
return error;
}
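The fs/init.c helpers now hand the mount's user namespace to the vfs_* creation routines instead of letting them assume init_user_ns; the namespace comes from the vfsmount of the path being operated on. A minimal sketch of the calling convention (error handling trimmed; the signatures match the calls above):

	struct user_namespace *mnt_userns = mnt_user_ns(path.mnt);

	err = vfs_mkdir(mnt_userns, d_inode(path.dentry), dentry, mode);
	/* vfs_mknod() and vfs_symlink() take mnt_userns first as well;
	 * vfs_link() takes it after the source dentry, as shown above. */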
diff --git a/fs/inode.c b/fs/inode.c
index 6442d97d9a4a..a047ab306f9a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -142,6 +142,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
inode->i_fop = &no_open_fops;
+ inode->i_ino = 0;
inode->__i_nlink = 1;
inode->i_opflags = 0;
if (sb->s_xattr)
@@ -1493,7 +1494,7 @@ struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
EXPORT_SYMBOL(find_inode_rcu);
/**
- * find_inode_by_rcu - Find an inode in the inode cache
+ * find_inode_by_ino_rcu - Find an inode in the inode cache
* @sb: Super block of file system to search
* @ino: The inode number to match
*
@@ -1743,24 +1744,26 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
{
- int iflags = I_DIRTY_TIME;
- bool dirty = false;
-
- if (flags & S_ATIME)
- inode->i_atime = *time;
- if (flags & S_VERSION)
- dirty = inode_maybe_inc_iversion(inode, false);
- if (flags & S_CTIME)
- inode->i_ctime = *time;
- if (flags & S_MTIME)
- inode->i_mtime = *time;
- if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
- !(inode->i_sb->s_flags & SB_LAZYTIME))
- dirty = true;
-
- if (dirty)
- iflags |= I_DIRTY_SYNC;
- __mark_inode_dirty(inode, iflags);
+ int dirty_flags = 0;
+
+ if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
+ if (flags & S_ATIME)
+ inode->i_atime = *time;
+ if (flags & S_CTIME)
+ inode->i_ctime = *time;
+ if (flags & S_MTIME)
+ inode->i_mtime = *time;
+
+ if (inode->i_sb->s_flags & SB_LAZYTIME)
+ dirty_flags |= I_DIRTY_TIME;
+ else
+ dirty_flags |= I_DIRTY_SYNC;
+ }
+
+ if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
+ dirty_flags |= I_DIRTY_SYNC;
+
+ __mark_inode_dirty(inode, dirty_flags);
return 0;
}
EXPORT_SYMBOL(generic_update_time);
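With the rework above, generic_update_time() maps the requested timestamp flags onto dirty state in one place: pure timestamp updates on an SB_LAZYTIME superblock only raise I_DIRTY_TIME, while any other timestamp change or an i_version bump raises I_DIRTY_SYNC. A small illustrative call (assuming an inode on a non-lazytime filesystem):

	/* An mtime/ctime change on a normal mount ends up as
	 * __mark_inode_dirty(inode, I_DIRTY_SYNC); the same call on a
	 * lazytime mount would pass I_DIRTY_TIME instead. */
	struct timespec64 now = current_time(inode);

	generic_update_time(inode, &now, S_MTIME | S_CTIME);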
@@ -1777,7 +1780,7 @@ static int update_time(struct inode *inode, struct timespec64 *time, int flags)
}
/**
- * touch_atime - update the access time
+ * atime_needs_update - update the access time
* @path: the &struct path to update
* @inode: inode to update
*
@@ -1796,7 +1799,7 @@ bool atime_needs_update(const struct path *path, struct inode *inode)
/* Atime updates will likely cause i_uid and i_gid to be written
* back improperly if their true value is unknown to the vfs.
*/
- if (HAS_UNMAPPED_ID(inode))
+ if (HAS_UNMAPPED_ID(mnt_user_ns(mnt), inode))
return false;
if (IS_NOATIME(inode))
@@ -1903,7 +1906,8 @@ int dentry_needs_remove_privs(struct dentry *dentry)
return mask;
}
-static int __remove_privs(struct dentry *dentry, int kill)
+static int __remove_privs(struct user_namespace *mnt_userns,
+ struct dentry *dentry, int kill)
{
struct iattr newattrs;
@@ -1912,7 +1916,7 @@ static int __remove_privs(struct dentry *dentry, int kill)
* Note we call this on write, so notify_change will not
* encounter any conflicting delegations:
*/
- return notify_change(dentry, &newattrs, NULL);
+ return notify_change(mnt_userns, dentry, &newattrs, NULL);
}
/*
@@ -1939,7 +1943,7 @@ int file_remove_privs(struct file *file)
if (kill < 0)
return kill;
if (kill)
- error = __remove_privs(dentry, kill);
+ error = __remove_privs(file_mnt_user_ns(file), dentry, kill);
if (!error)
inode_has_no_xattr(inode);
@@ -2130,14 +2134,21 @@ EXPORT_SYMBOL(init_special_inode);
/**
* inode_init_owner - Init uid,gid,mode for new inode according to posix standards
+ * @mnt_userns: User namespace of the mount the inode was created from
* @inode: New inode
* @dir: Directory inode
* @mode: mode of the new inode
+ *
+ * If the inode has been created through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions
+ * and initializing i_uid and i_gid. On non-idmapped mounts or if permission
+ * checking is to be performed on the raw inode, simply pass init_user_ns.
*/
-void inode_init_owner(struct inode *inode, const struct inode *dir,
- umode_t mode)
+void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
+ const struct inode *dir, umode_t mode)
{
- inode->i_uid = current_fsuid();
+ inode->i_uid = fsuid_into_mnt(mnt_userns);
if (dir && dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
@@ -2145,31 +2156,41 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
if (S_ISDIR(mode))
mode |= S_ISGID;
else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
- !in_group_p(inode->i_gid) &&
- !capable_wrt_inode_uidgid(dir, CAP_FSETID))
+ !in_group_p(i_gid_into_mnt(mnt_userns, dir)) &&
+ !capable_wrt_inode_uidgid(mnt_userns, dir, CAP_FSETID))
mode &= ~S_ISGID;
} else
- inode->i_gid = current_fsgid();
+ inode->i_gid = fsgid_into_mnt(mnt_userns);
inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
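As the kernel-doc above describes, a filesystem creating an inode passes whatever user namespace it was handed by the VFS (init_user_ns on non-idmapped mounts) so that i_uid and i_gid are mapped correctly. A hypothetical sketch of a filesystem-side allocation helper (function and filesystem names are illustrative, not from this patch):

	static struct inode *examplefs_new_inode(struct user_namespace *mnt_userns,
						 struct inode *dir, umode_t mode)
	{
		struct inode *inode = new_inode(dir->i_sb);

		if (!inode)
			return ERR_PTR(-ENOMEM);

		/* Maps the fs{u,g}id through @mnt_userns and handles setgid dirs. */
		inode_init_owner(mnt_userns, inode, dir, mode);
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		return inode;
	}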
/**
* inode_owner_or_capable - check current task permissions to inode
+ * @mnt_userns: user namespace of the mount the inode was found from
* @inode: inode being checked
*
* Return true if current either has CAP_FOWNER in a namespace with the
* inode owner uid mapped, or owns the file.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode, simply pass init_user_ns.
*/
-bool inode_owner_or_capable(const struct inode *inode)
+bool inode_owner_or_capable(struct user_namespace *mnt_userns,
+ const struct inode *inode)
{
+ kuid_t i_uid;
struct user_namespace *ns;
- if (uid_eq(current_fsuid(), inode->i_uid))
+ i_uid = i_uid_into_mnt(mnt_userns, inode);
+ if (uid_eq(current_fsuid(), i_uid))
return true;
ns = current_user_ns();
- if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
+ if (kuid_has_mapping(ns, i_uid) && ns_capable(ns, CAP_FOWNER))
return true;
return false;
}
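Call sites follow the same rule: the namespace passed to inode_owner_or_capable() is the one belonging to the mount the inode was found through. For an inode reached via an open file this series provides file_mnt_user_ns() (see __remove_privs() above), so a typical check looks roughly like this (illustrative only):

	if (!inode_owner_or_capable(file_mnt_user_ns(file), file_inode(file)))
		return -EPERM;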
diff --git a/fs/internal.h b/fs/internal.h
index 77c50befbfbe..6aeae7ef3380 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -15,6 +15,7 @@ struct mount;
struct shrink_control;
struct fs_context;
struct user_namespace;
+struct pipe_inode_info;
/*
* block_dev.c
@@ -73,7 +74,7 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
const char *, unsigned int, struct path *);
long do_rmdir(int dfd, struct filename *name);
long do_unlinkat(int dfd, struct filename *name);
-int may_linkat(struct path *link);
+int may_linkat(struct user_namespace *mnt_userns, struct path *link);
int do_renameat2(int olddfd, struct filename *oldname, int newdfd,
struct filename *newname, unsigned int flags);
@@ -132,6 +133,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
const char *, const struct open_flags *);
extern struct open_how build_open_how(int flags, umode_t mode);
extern int build_open_flags(const struct open_how *how, struct open_flags *op);
+extern int __close_fd_get_file(unsigned int fd, struct file **res);
long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
int chmod_common(const struct path *path, umode_t mode);
@@ -193,3 +195,11 @@ int sb_init_dio_done_wq(struct super_block *sb);
*/
int do_statx(int dfd, const char __user *filename, unsigned flags,
unsigned int mask, struct statx __user *buffer);
+
+/*
+ * fs/splice.c:
+ */
+long splice_file_to_pipe(struct file *in,
+ struct pipe_inode_info *opipe,
+ loff_t *offset,
+ size_t len, unsigned int flags);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index a564f36e260c..3dc10bfd8c3b 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -13,13 +13,10 @@
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
-#include <linux/fs_struct.h>
-#include <linux/task_work.h>
-#include <linux/blk-cgroup.h>
-#include <linux/audit.h>
#include <linux/cpu.h>
+#include <linux/tracehook.h>
+#include <linux/freezer.h>
#include "../kernel/sched/sched.h"
#include "io-wq.h"
@@ -36,7 +33,6 @@ enum {
enum {
IO_WQ_BIT_EXIT = 0, /* wq exiting */
- IO_WQ_BIT_ERROR = 1, /* error on setup */
};
enum {
@@ -57,16 +53,9 @@ struct io_worker {
struct io_wq_work *cur_work;
spinlock_t lock;
+ struct completion ref_done;
+
struct rcu_head rcu;
- struct mm_struct *mm;
-#ifdef CONFIG_BLK_CGROUP
- struct cgroup_subsys_state *blkcg_css;
-#endif
- const struct cred *cur_creds;
- const struct cred *saved_creds;
- struct files_struct *restore_files;
- struct nsproxy *restore_nsproxy;
- struct fs_struct *restore_fs;
};
#if BITS_PER_LONG == 64
@@ -95,7 +84,6 @@ struct io_wqe {
struct {
raw_spinlock_t lock;
struct io_wq_work_list work_list;
- unsigned long hash_map;
unsigned flags;
} ____cacheline_aligned_in_smp;
@@ -105,6 +93,8 @@ struct io_wqe {
struct hlist_nulls_head free_list;
struct list_head all_list;
+ struct wait_queue_entry wait;
+
struct io_wq *wq;
struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};
@@ -120,17 +110,33 @@ struct io_wq {
io_wq_work_fn *do_work;
struct task_struct *manager;
- struct user_struct *user;
+
+ struct io_wq_hash *hash;
+
refcount_t refs;
- struct completion done;
+ struct completion exited;
+
+ atomic_t worker_refs;
+ struct completion worker_done;
struct hlist_node cpuhp_node;
- refcount_t use_refs;
+ pid_t task_pid;
};
static enum cpuhp_state io_wq_online;
+struct io_cb_cancel_data {
+ work_cancel_fn *fn;
+ void *data;
+ int nr_running;
+ int nr_pending;
+ bool cancel_all;
+};
+
+static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
+ struct io_cb_cancel_data *match);
+
static bool io_worker_get(struct io_worker *worker)
{
return refcount_inc_not_zero(&worker->ref);
@@ -139,62 +145,7 @@ static bool io_worker_get(struct io_worker *worker)
static void io_worker_release(struct io_worker *worker)
{
if (refcount_dec_and_test(&worker->ref))
- wake_up_process(worker->task);
-}
-
-/*
- * Note: drops the wqe->lock if returning true! The caller must re-acquire
- * the lock in that case. Some callers need to restart handling if this
- * happens, so we can't just re-acquire the lock on behalf of the caller.
- */
-static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
-{
- bool dropped_lock = false;
-
- if (worker->saved_creds) {
- revert_creds(worker->saved_creds);
- worker->cur_creds = worker->saved_creds = NULL;
- }
-
- if (current->files != worker->restore_files) {
- __acquire(&wqe->lock);
- raw_spin_unlock_irq(&wqe->lock);
- dropped_lock = true;
-
- task_lock(current);
- current->files = worker->restore_files;
- current->nsproxy = worker->restore_nsproxy;
- task_unlock(current);
- }
-
- if (current->fs != worker->restore_fs)
- current->fs = worker->restore_fs;
-
- /*
- * If we have an active mm, we need to drop the wq lock before unusing
- * it. If we do, return true and let the caller retry the idle loop.
- */
- if (worker->mm) {
- if (!dropped_lock) {
- __acquire(&wqe->lock);
- raw_spin_unlock_irq(&wqe->lock);
- dropped_lock = true;
- }
- __set_current_state(TASK_RUNNING);
- kthread_unuse_mm(worker->mm);
- mmput(worker->mm);
- worker->mm = NULL;
- }
-
-#ifdef CONFIG_BLK_CGROUP
- if (worker->blkcg_css) {
- kthread_associate_blkcg(NULL);
- worker->blkcg_css = NULL;
- }
-#endif
- if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
- return dropped_lock;
+ complete(&worker->ref_done);
}
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
@@ -206,9 +157,10 @@ static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
return &wqe->acct[IO_WQ_ACCT_BOUND];
}
-static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
- struct io_worker *worker)
+static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
+ struct io_wqe *wqe = worker->wqe;
+
if (worker->flags & IO_WORKER_F_BOUND)
return &wqe->acct[IO_WQ_ACCT_BOUND];
@@ -218,39 +170,33 @@ static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
static void io_worker_exit(struct io_worker *worker)
{
struct io_wqe *wqe = worker->wqe;
- struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+ struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+ unsigned flags;
- /*
- * If we're not at zero, someone else is holding a brief reference
- * to the worker. Wait for that to go away.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- if (!refcount_dec_and_test(&worker->ref))
- schedule();
- __set_current_state(TASK_RUNNING);
+ if (refcount_dec_and_test(&worker->ref))
+ complete(&worker->ref_done);
+ wait_for_completion(&worker->ref_done);
preempt_disable();
current->flags &= ~PF_IO_WORKER;
- if (worker->flags & IO_WORKER_F_RUNNING)
+ flags = worker->flags;
+ worker->flags = 0;
+ if (flags & IO_WORKER_F_RUNNING)
atomic_dec(&acct->nr_running);
- if (!(worker->flags & IO_WORKER_F_BOUND))
- atomic_dec(&wqe->wq->user->processes);
worker->flags = 0;
preempt_enable();
raw_spin_lock_irq(&wqe->lock);
- hlist_nulls_del_rcu(&worker->nulls_node);
+ if (flags & IO_WORKER_F_FREE)
+ hlist_nulls_del_rcu(&worker->nulls_node);
list_del_rcu(&worker->all_list);
- if (__io_worker_unuse(wqe, worker)) {
- __release(&wqe->lock);
- raw_spin_lock_irq(&wqe->lock);
- }
acct->nr_workers--;
raw_spin_unlock_irq(&wqe->lock);
kfree_rcu(worker, rcu);
- if (refcount_dec_and_test(&wqe->wq->refs))
- complete(&wqe->wq->done);
+ if (atomic_dec_and_test(&wqe->wq->worker_refs))
+ complete(&wqe->wq->worker_done);
+ do_exit(0);
}
static inline bool io_wqe_run_queue(struct io_wqe *wqe)
@@ -308,35 +254,23 @@ static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
wake_up_process(wqe->wq->manager);
}
-static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
+static void io_wqe_inc_running(struct io_worker *worker)
{
- struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+ struct io_wqe_acct *acct = io_wqe_get_acct(worker);
atomic_inc(&acct->nr_running);
}
-static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
+static void io_wqe_dec_running(struct io_worker *worker)
__must_hold(wqe->lock)
{
- struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
+ struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+ struct io_wqe *wqe = worker->wqe;
if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
io_wqe_wake_worker(wqe, acct);
}
-static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
-{
- allow_kernel_signal(SIGINT);
-
- current->flags |= PF_IO_WORKER;
-
- worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
- worker->restore_files = current->files;
- worker->restore_nsproxy = current->nsproxy;
- worker->restore_fs = current->fs;
- io_wqe_inc_running(wqe, worker);
-}
-
/*
* Worker will start processing some work. Move it to the busy list, if
* it's currently on the freelist
@@ -359,19 +293,17 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
if (worker_bound != work_bound) {
- io_wqe_dec_running(wqe, worker);
+ io_wqe_dec_running(worker);
if (work_bound) {
worker->flags |= IO_WORKER_F_BOUND;
wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
- atomic_dec(&wqe->wq->user->processes);
} else {
worker->flags &= ~IO_WORKER_F_BOUND;
wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
- atomic_inc(&wqe->wq->user->processes);
}
- io_wqe_inc_running(wqe, worker);
+ io_wqe_inc_running(worker);
}
}
@@ -382,15 +314,13 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
* retry the loop in that case (we changed task state), we don't regrab
* the lock if we return success.
*/
-static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
+static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
__must_hold(wqe->lock)
{
if (!(worker->flags & IO_WORKER_F_FREE)) {
worker->flags |= IO_WORKER_F_FREE;
hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
}
-
- return __io_worker_unuse(wqe, worker);
}
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
@@ -398,14 +328,31 @@ static inline unsigned int io_get_work_hash(struct io_wq_work *work)
return work->flags >> IO_WQ_HASH_SHIFT;
}
+static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
+{
+ struct io_wq *wq = wqe->wq;
+
+ spin_lock(&wq->hash->wait.lock);
+ if (list_empty(&wqe->wait.entry)) {
+ __add_wait_queue(&wq->hash->wait, &wqe->wait);
+ if (!test_bit(hash, &wq->hash->map)) {
+ __set_current_state(TASK_RUNNING);
+ list_del_init(&wqe->wait.entry);
+ }
+ }
+ spin_unlock(&wq->hash->wait.lock);
+}
+
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
__must_hold(wqe->lock)
{
struct io_wq_work_node *node, *prev;
struct io_wq_work *work, *tail;
- unsigned int hash;
+ unsigned int stall_hash = -1U;
wq_list_for_each(node, prev, &wqe->work_list) {
+ unsigned int hash;
+
work = container_of(node, struct io_wq_work, list);
/* not hashed, can run anytime */
@@ -414,111 +361,51 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
return work;
}
- /* hashed, can run if not already running */
hash = io_get_work_hash(work);
- if (!(wqe->hash_map & BIT(hash))) {
- wqe->hash_map |= BIT(hash);
- /* all items with this hash lie in [work, tail] */
- tail = wqe->hash_tail[hash];
+ /* all items with this hash lie in [work, tail] */
+ tail = wqe->hash_tail[hash];
+
+ /* hashed, can run if not already running */
+ if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
wqe->hash_tail[hash] = NULL;
wq_list_cut(&wqe->work_list, &tail->list, prev);
return work;
}
+ if (stall_hash == -1U)
+ stall_hash = hash;
+ /* fast forward to a next hash, for-each will fix up @prev */
+ node = &tail->list;
}
- return NULL;
-}
-
-static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
-{
- if (worker->mm) {
- kthread_unuse_mm(worker->mm);
- mmput(worker->mm);
- worker->mm = NULL;
- }
-
- if (mmget_not_zero(work->identity->mm)) {
- kthread_use_mm(work->identity->mm);
- worker->mm = work->identity->mm;
- return;
+ if (stall_hash != -1U) {
+ raw_spin_unlock(&wqe->lock);
+ io_wait_on_hash(wqe, stall_hash);
+ raw_spin_lock(&wqe->lock);
}
- /* failed grabbing mm, ensure work gets cancelled */
- work->flags |= IO_WQ_WORK_CANCEL;
-}
-
-static inline void io_wq_switch_blkcg(struct io_worker *worker,
- struct io_wq_work *work)
-{
-#ifdef CONFIG_BLK_CGROUP
- if (!(work->flags & IO_WQ_WORK_BLKCG))
- return;
- if (work->identity->blkcg_css != worker->blkcg_css) {
- kthread_associate_blkcg(work->identity->blkcg_css);
- worker->blkcg_css = work->identity->blkcg_css;
- }
-#endif
-}
-
-static void io_wq_switch_creds(struct io_worker *worker,
- struct io_wq_work *work)
-{
- const struct cred *old_creds = override_creds(work->identity->creds);
-
- worker->cur_creds = work->identity->creds;
- if (worker->saved_creds)
- put_cred(old_creds); /* creds set by previous switch */
- else
- worker->saved_creds = old_creds;
+ return NULL;
}
-static void io_impersonate_work(struct io_worker *worker,
- struct io_wq_work *work)
+static bool io_flush_signals(void)
{
- if ((work->flags & IO_WQ_WORK_FILES) &&
- current->files != work->identity->files) {
- task_lock(current);
- current->files = work->identity->files;
- current->nsproxy = work->identity->nsproxy;
- task_unlock(current);
- if (!work->identity->files) {
- /* failed grabbing files, ensure work gets cancelled */
- work->flags |= IO_WQ_WORK_CANCEL;
- }
+ if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
+ __set_current_state(TASK_RUNNING);
+ if (current->task_works)
+ task_work_run();
+ clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
+ return true;
}
- if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs)
- current->fs = work->identity->fs;
- if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm)
- io_wq_switch_mm(worker, work);
- if ((work->flags & IO_WQ_WORK_CREDS) &&
- worker->cur_creds != work->identity->creds)
- io_wq_switch_creds(worker, work);
- if (work->flags & IO_WQ_WORK_FSIZE)
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize;
- else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
- io_wq_switch_blkcg(worker, work);
-#ifdef CONFIG_AUDIT
- current->loginuid = work->identity->loginuid;
- current->sessionid = work->identity->sessionid;
-#endif
+ return false;
}
static void io_assign_current_work(struct io_worker *worker,
struct io_wq_work *work)
{
if (work) {
- /* flush pending signals before assigning new work */
- if (signal_pending(current))
- flush_signals(current);
+ io_flush_signals();
cond_resched();
}
-#ifdef CONFIG_AUDIT
- current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET);
- current->sessionid = AUDIT_SID_UNSET;
-#endif
-
spin_lock_irq(&worker->lock);
worker->cur_work = work;
spin_unlock_irq(&worker->lock);
@@ -552,32 +439,32 @@ get_next:
if (!work)
break;
io_assign_current_work(worker, work);
+ __set_current_state(TASK_RUNNING);
/* handle a whole dependent link */
do {
- struct io_wq_work *old_work, *next_hashed, *linked;
+ struct io_wq_work *next_hashed, *linked;
unsigned int hash = io_get_work_hash(work);
next_hashed = wq_next_work(work);
- io_impersonate_work(worker, work);
-
- old_work = work;
- linked = wq->do_work(work);
+ wq->do_work(work);
+ io_assign_current_work(worker, NULL);
+ linked = wq->free_work(work);
work = next_hashed;
if (!work && linked && !io_wq_is_hashed(linked)) {
work = linked;
linked = NULL;
}
io_assign_current_work(worker, work);
- wq->free_work(old_work);
-
if (linked)
io_wqe_enqueue(wqe, linked);
if (hash != -1U && !next_hashed) {
+ clear_bit(hash, &wq->hash->map);
+ if (wq_has_sleeper(&wq->hash->wait))
+ wake_up(&wq->hash->wait);
raw_spin_lock_irq(&wqe->lock);
- wqe->hash_map &= ~BIT_ULL(hash);
wqe->flags &= ~IO_WQE_FLAG_STALLED;
/* skip unnecessary unlock-lock wqe->lock */
if (!work)
@@ -595,28 +482,33 @@ static int io_wqe_worker(void *data)
struct io_worker *worker = data;
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
+ char buf[TASK_COMM_LEN];
- io_worker_start(wqe, worker);
+ worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+ io_wqe_inc_running(worker);
+
+ sprintf(buf, "iou-wrk-%d", wq->task_pid);
+ set_task_comm(current, buf);
while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+ long ret;
+
set_current_state(TASK_INTERRUPTIBLE);
loop:
raw_spin_lock_irq(&wqe->lock);
if (io_wqe_run_queue(wqe)) {
- __set_current_state(TASK_RUNNING);
io_worker_handle_work(worker);
goto loop;
}
- /* drops the lock on success, retry */
- if (__io_worker_idle(wqe, worker)) {
- __release(&wqe->lock);
- goto loop;
- }
+ __io_worker_idle(wqe, worker);
raw_spin_unlock_irq(&wqe->lock);
- if (signal_pending(current))
- flush_signals(current);
- if (schedule_timeout(WORKER_IDLE_TIMEOUT))
+ if (io_flush_signals())
continue;
+ ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
+ if (try_to_freeze() || ret)
+ continue;
+ if (fatal_signal_pending(current))
+ break;
/* timed out, exit unless we're the fixed worker */
if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
!(worker->flags & IO_WORKER_F_FIXED))
@@ -640,15 +532,16 @@ loop:
*/
void io_wq_worker_running(struct task_struct *tsk)
{
- struct io_worker *worker = kthread_data(tsk);
- struct io_wqe *wqe = worker->wqe;
+ struct io_worker *worker = tsk->pf_io_worker;
+ if (!worker)
+ return;
if (!(worker->flags & IO_WORKER_F_UP))
return;
if (worker->flags & IO_WORKER_F_RUNNING)
return;
worker->flags |= IO_WORKER_F_RUNNING;
- io_wqe_inc_running(wqe, worker);
+ io_wqe_inc_running(worker);
}
/*
@@ -658,9 +551,10 @@ void io_wq_worker_running(struct task_struct *tsk)
*/
void io_wq_worker_sleeping(struct task_struct *tsk)
{
- struct io_worker *worker = kthread_data(tsk);
- struct io_wqe *wqe = worker->wqe;
+ struct io_worker *worker = tsk->pf_io_worker;
+ if (!worker)
+ return;
if (!(worker->flags & IO_WORKER_F_UP))
return;
if (!(worker->flags & IO_WORKER_F_RUNNING))
@@ -668,15 +562,18 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
worker->flags &= ~IO_WORKER_F_RUNNING;
- raw_spin_lock_irq(&wqe->lock);
- io_wqe_dec_running(wqe, worker);
- raw_spin_unlock_irq(&wqe->lock);
+ raw_spin_lock_irq(&worker->wqe->lock);
+ io_wqe_dec_running(worker);
+ raw_spin_unlock_irq(&worker->wqe->lock);
}
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
struct io_wqe_acct *acct = &wqe->acct[index];
struct io_worker *worker;
+ struct task_struct *tsk;
+
+ __set_current_state(TASK_RUNNING);
worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
if (!worker)
@@ -686,14 +583,22 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
worker->nulls_node.pprev = NULL;
worker->wqe = wqe;
spin_lock_init(&worker->lock);
+ init_completion(&worker->ref_done);
+
+ atomic_inc(&wq->worker_refs);
- worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
- "io_wqe_worker-%d/%d", index, wqe->node);
- if (IS_ERR(worker->task)) {
+ tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
+ if (IS_ERR(tsk)) {
+ if (atomic_dec_and_test(&wq->worker_refs))
+ complete(&wq->worker_done);
kfree(worker);
return false;
}
- kthread_bind_mask(worker->task, cpumask_of_node(wqe->node));
+
+ tsk->pf_io_worker = worker;
+ worker->task = tsk;
+ set_cpus_allowed_ptr(tsk, cpumask_of_node(wqe->node));
+ tsk->flags |= PF_NO_SETAFFINITY;
raw_spin_lock_irq(&wqe->lock);
hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
@@ -705,12 +610,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
worker->flags |= IO_WORKER_F_FIXED;
acct->nr_workers++;
raw_spin_unlock_irq(&wqe->lock);
-
- if (index == IO_WQ_ACCT_UNBOUND)
- atomic_inc(&wq->user->processes);
-
- refcount_inc(&wq->refs);
- wake_up_process(worker->task);
+ wake_up_new_task(tsk);
return true;
}
@@ -719,6 +619,8 @@ static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
{
struct io_wqe_acct *acct = &wqe->acct[index];
+ if (acct->nr_workers && test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state))
+ return false;
/* if we have available workers or no work, no need */
if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
return false;
@@ -752,97 +654,92 @@ static bool io_wq_for_each_worker(struct io_wqe *wqe,
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
+ set_notify_signal(worker->task);
wake_up_process(worker->task);
return false;
}
-/*
- * Manager thread. Tasked with creating new workers, if we need them.
- */
-static int io_wq_manager(void *data)
+static void io_wq_check_workers(struct io_wq *wq)
{
- struct io_wq *wq = data;
int node;
- /* create fixed workers */
- refcount_set(&wq->refs, 1);
for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
+ bool fork_worker[2] = { false, false };
+
if (!node_online(node))
continue;
- if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
- continue;
- set_bit(IO_WQ_BIT_ERROR, &wq->state);
- set_bit(IO_WQ_BIT_EXIT, &wq->state);
- goto out;
+
+ raw_spin_lock_irq(&wqe->lock);
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
+ fork_worker[IO_WQ_ACCT_BOUND] = true;
+ if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
+ fork_worker[IO_WQ_ACCT_UNBOUND] = true;
+ raw_spin_unlock_irq(&wqe->lock);
+ if (fork_worker[IO_WQ_ACCT_BOUND])
+ create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
+ if (fork_worker[IO_WQ_ACCT_UNBOUND])
+ create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
}
+}
- complete(&wq->done);
+static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
+{
+ return true;
+}
- while (!kthread_should_stop()) {
- if (current->task_works)
- task_work_run();
+static void io_wq_cancel_pending(struct io_wq *wq)
+{
+ struct io_cb_cancel_data match = {
+ .fn = io_wq_work_match_all,
+ .cancel_all = true,
+ };
+ int node;
+
+ for_each_node(node)
+ io_wqe_cancel_pending_work(wq->wqes[node], &match);
+}
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
- bool fork_worker[2] = { false, false };
+/*
+ * Manager thread. Tasked with creating new workers, if we need them.
+ */
+static int io_wq_manager(void *data)
+{
+ struct io_wq *wq = data;
+ char buf[TASK_COMM_LEN];
+ int node;
- if (!node_online(node))
- continue;
+ sprintf(buf, "iou-mgr-%d", wq->task_pid);
+ set_task_comm(current, buf);
- raw_spin_lock_irq(&wqe->lock);
- if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
- fork_worker[IO_WQ_ACCT_BOUND] = true;
- if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
- fork_worker[IO_WQ_ACCT_UNBOUND] = true;
- raw_spin_unlock_irq(&wqe->lock);
- if (fork_worker[IO_WQ_ACCT_BOUND])
- create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
- if (fork_worker[IO_WQ_ACCT_UNBOUND])
- create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
- }
+ do {
set_current_state(TASK_INTERRUPTIBLE);
+ io_wq_check_workers(wq);
schedule_timeout(HZ);
- }
-
- if (current->task_works)
- task_work_run();
-
-out:
- if (refcount_dec_and_test(&wq->refs)) {
- complete(&wq->done);
- return 0;
- }
- /* if ERROR is set and we get here, we have workers to wake */
- if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
- rcu_read_lock();
- for_each_node(node)
- io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
- rcu_read_unlock();
- }
- return 0;
-}
-
-static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
- struct io_wq_work *work)
-{
- bool free_worker;
+ try_to_freeze();
+ if (fatal_signal_pending(current))
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ } while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));
- if (!(work->flags & IO_WQ_WORK_UNBOUND))
- return true;
- if (atomic_read(&acct->nr_running))
- return true;
+ io_wq_check_workers(wq);
rcu_read_lock();
- free_worker = !hlist_nulls_empty(&wqe->free_list);
+ for_each_node(node)
+ io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
rcu_read_unlock();
- if (free_worker)
- return true;
- if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
- !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
- return false;
+ if (atomic_dec_and_test(&wq->worker_refs))
+ complete(&wq->worker_done);
+ wait_for_completion(&wq->worker_done);
- return true;
+ spin_lock_irq(&wq->hash->wait.lock);
+ for_each_node(node)
+ list_del_init(&wq->wqes[node]->wait.entry);
+ spin_unlock_irq(&wq->hash->wait.lock);
+
+ io_wq_cancel_pending(wq);
+ complete(&wq->exited);
+ do_exit(0);
}
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
@@ -850,11 +747,9 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
struct io_wq *wq = wqe->wq;
do {
- struct io_wq_work *old_work = work;
-
work->flags |= IO_WQ_WORK_CANCEL;
- work = wq->do_work(work);
- wq->free_work(old_work);
+ wq->do_work(work);
+ work = wq->free_work(work);
} while (work);
}
@@ -878,19 +773,39 @@ append:
wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}
+static int io_wq_fork_manager(struct io_wq *wq)
+{
+ struct task_struct *tsk;
+
+ if (wq->manager)
+ return 0;
+
+ WARN_ON_ONCE(test_bit(IO_WQ_BIT_EXIT, &wq->state));
+
+ init_completion(&wq->worker_done);
+ atomic_set(&wq->worker_refs, 1);
+ tsk = create_io_thread(io_wq_manager, wq, NUMA_NO_NODE);
+ if (!IS_ERR(tsk)) {
+ wq->manager = get_task_struct(tsk);
+ wake_up_new_task(tsk);
+ return 0;
+ }
+
+ if (atomic_dec_and_test(&wq->worker_refs))
+ complete(&wq->worker_done);
+
+ return PTR_ERR(tsk);
+}
+
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
int work_flags;
unsigned long flags;
- /*
- * Do early check to see if we need a new unbound worker, and if we do,
- * if we're allowed to do so. This isn't 100% accurate as there's a
- * gap between this check and incrementing the value, but that's OK.
- * It's close enough to not be an issue, fork() has the same delay.
- */
- if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
+ /* Can only happen if manager creation fails after exec */
+ if (io_wq_fork_manager(wqe->wq) ||
+ test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
io_run_cancel(work, wqe);
return;
}
@@ -925,14 +840,6 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
-struct io_cb_cancel_data {
- work_cancel_fn *fn;
- void *data;
- int nr_running;
- int nr_pending;
- bool cancel_all;
-};
-
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
struct io_cb_cancel_data *match = data;
@@ -944,9 +851,8 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
*/
spin_lock_irqsave(&worker->lock, flags);
if (worker->cur_work &&
- !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
match->fn(worker->cur_work, match->data)) {
- send_sig(SIGINT, worker->task, 1);
+ set_notify_signal(worker->task);
match->nr_running++;
}
spin_unlock_irqrestore(&worker->lock, flags);
@@ -1050,6 +956,24 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
return IO_WQ_CANCEL_NOTFOUND;
}
+static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
+ int sync, void *key)
+{
+ struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
+ int ret;
+
+ list_del_init(&wait->entry);
+
+ rcu_read_lock();
+ ret = io_wqe_activate_free_worker(wqe);
+ rcu_read_unlock();
+
+ if (!ret)
+ wake_up_process(wqe->wq->manager);
+
+ return 1;
+}
+
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
int ret = -ENOMEM, node;
@@ -1070,12 +994,11 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
if (ret)
goto err_wqes;
+ refcount_inc(&data->hash->refs);
+ wq->hash = data->hash;
wq->free_work = data->free_work;
wq->do_work = data->do_work;
- /* caller must already hold a reference to this */
- wq->user = data->user;
-
ret = -ENOMEM;
for_each_node(node) {
struct io_wqe *wqe;
@@ -1090,11 +1013,11 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
wqe->node = alloc_node;
wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
- if (wq->user) {
- wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
+ wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
task_rlimit(current, RLIMIT_NPROC);
- }
atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
+ wqe->wait.func = io_wqe_hash_wake;
+ INIT_LIST_HEAD(&wqe->wait.entry);
wqe->wq = wq;
raw_spin_lock_init(&wqe->lock);
INIT_WQ_LIST(&wqe->work_list);
@@ -1102,24 +1025,15 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
INIT_LIST_HEAD(&wqe->all_list);
}
- init_completion(&wq->done);
+ wq->task_pid = current->pid;
+ init_completion(&wq->exited);
+ refcount_set(&wq->refs, 1);
- wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
- if (!IS_ERR(wq->manager)) {
- wake_up_process(wq->manager);
- wait_for_completion(&wq->done);
- if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
- ret = -ENOMEM;
- goto err;
- }
- refcount_set(&wq->use_refs, 1);
- reinit_completion(&wq->done);
+ ret = io_wq_fork_manager(wq);
+ if (!ret)
return wq;
- }
-
- ret = PTR_ERR(wq->manager);
- complete(&wq->done);
err:
+ io_wq_put_hash(data->hash);
cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
for_each_node(node)
kfree(wq->wqes[node]);
@@ -1130,46 +1044,46 @@ err_wq:
return ERR_PTR(ret);
}
-bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
+static void io_wq_destroy_manager(struct io_wq *wq)
{
- if (data->free_work != wq->free_work || data->do_work != wq->do_work)
- return false;
-
- return refcount_inc_not_zero(&wq->use_refs);
+ if (wq->manager) {
+ wake_up_process(wq->manager);
+ wait_for_completion(&wq->exited);
+ put_task_struct(wq->manager);
+ wq->manager = NULL;
+ }
}
-static void __io_wq_destroy(struct io_wq *wq)
+static void io_wq_destroy(struct io_wq *wq)
{
int node;
cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
set_bit(IO_WQ_BIT_EXIT, &wq->state);
- if (wq->manager)
- kthread_stop(wq->manager);
-
- rcu_read_lock();
- for_each_node(node)
- io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
- rcu_read_unlock();
+ io_wq_destroy_manager(wq);
- wait_for_completion(&wq->done);
-
- for_each_node(node)
- kfree(wq->wqes[node]);
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
+ WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
+ kfree(wqe);
+ }
+ io_wq_put_hash(wq->hash);
kfree(wq->wqes);
kfree(wq);
}
-void io_wq_destroy(struct io_wq *wq)
+void io_wq_put(struct io_wq *wq)
{
- if (refcount_dec_and_test(&wq->use_refs))
- __io_wq_destroy(wq);
+ if (refcount_dec_and_test(&wq->refs))
+ io_wq_destroy(wq);
}
-struct task_struct *io_wq_get_task(struct io_wq *wq)
+void io_wq_put_and_exit(struct io_wq *wq)
{
- return wq->manager;
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ io_wq_destroy_manager(wq);
+ io_wq_put(wq);
}
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
diff --git a/fs/io-wq.h b/fs/io-wq.h
index b158f8addcf3..80d590564ff9 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -1,7 +1,7 @@
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H
-#include <linux/io_uring.h>
+#include <linux/refcount.h>
struct io_wq;
@@ -9,16 +9,8 @@ enum {
IO_WQ_WORK_CANCEL = 1,
IO_WQ_WORK_HASHED = 2,
IO_WQ_WORK_UNBOUND = 4,
- IO_WQ_WORK_NO_CANCEL = 8,
IO_WQ_WORK_CONCURRENT = 16,
- IO_WQ_WORK_FILES = 32,
- IO_WQ_WORK_FS = 64,
- IO_WQ_WORK_MM = 128,
- IO_WQ_WORK_CREDS = 256,
- IO_WQ_WORK_BLKCG = 512,
- IO_WQ_WORK_FSIZE = 1024,
-
IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */
};
@@ -95,7 +87,7 @@ static inline void wq_list_del(struct io_wq_work_list *list,
struct io_wq_work {
struct io_wq_work_node list;
- struct io_identity *identity;
+ const struct cred *creds;
unsigned flags;
};
@@ -107,19 +99,30 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
return container_of(work->list.next, struct io_wq_work, list);
}
-typedef void (free_work_fn)(struct io_wq_work *);
-typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);
+typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
+typedef void (io_wq_work_fn)(struct io_wq_work *);
-struct io_wq_data {
- struct user_struct *user;
+struct io_wq_hash {
+ refcount_t refs;
+ unsigned long map;
+ struct wait_queue_head wait;
+};
+
+static inline void io_wq_put_hash(struct io_wq_hash *hash)
+{
+ if (refcount_dec_and_test(&hash->refs))
+ kfree(hash);
+}
+struct io_wq_data {
+ struct io_wq_hash *hash;
io_wq_work_fn *do_work;
free_work_fn *free_work;
};
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
-bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
-void io_wq_destroy(struct io_wq *wq);
+void io_wq_put(struct io_wq *wq);
+void io_wq_put_and_exit(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);
@@ -134,8 +137,6 @@ typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
void *data, bool cancel_all);
-struct task_struct *io_wq_get_task(struct io_wq *wq);
-
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
@@ -150,6 +151,7 @@ static inline void io_wq_worker_running(struct task_struct *tsk)
static inline bool io_wq_current_is_worker(void)
{
- return in_task() && (current->flags & PF_IO_WORKER);
+ return in_task() && (current->flags & PF_IO_WORKER) &&
+ current->pf_io_worker;
}
#endif
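Taken together, the io-wq API now expects the submitter to provide a refcounted io_wq_hash and the two work callbacks through struct io_wq_data, and to drop its reference with io_wq_put() (or io_wq_put_and_exit() on task exit). A rough sketch of the setup a caller such as io_uring would be expected to do (illustrative only; error handling trimmed):

	static struct io_wq *example_create_wq(io_wq_work_fn *do_work,
					       free_work_fn *free_work,
					       unsigned int bounded)
	{
		struct io_wq_data data;
		struct io_wq_hash *hash;

		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash)
			return ERR_PTR(-ENOMEM);
		refcount_set(&hash->refs, 1);	/* caller's ref, dropped via io_wq_put_hash() */
		init_waitqueue_head(&hash->wait);

		data.hash = hash;
		data.do_work = do_work;
		data.free_work = free_work;

		/* io_wq_create() takes its own reference on the hash. */
		return io_wq_create(bounded, &data);
	}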
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 931671082e61..543551d70327 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -57,7 +57,6 @@
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
@@ -75,13 +74,11 @@
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
-#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
-#include <linux/blk-cgroup.h>
-#include <linux/audit.h>
+#include <linux/freezer.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
@@ -104,6 +101,10 @@
#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
IORING_REGISTER_LAST + IORING_OP_LAST)
+#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
+ IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
+ IOSQE_BUFFER_SELECT)
+
struct io_uring {
u32 head ____cacheline_aligned_in_smp;
u32 tail ____cacheline_aligned_in_smp;
@@ -187,6 +188,11 @@ struct io_rings {
struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
+enum io_uring_cmd_flags {
+ IO_URING_F_NONBLOCK = 1,
+ IO_URING_F_COMPLETE_DEFER = 2,
+};
+
struct io_mapped_ubuf {
u64 ubuf;
size_t len;
@@ -195,28 +201,39 @@ struct io_mapped_ubuf {
unsigned long acct_pages;
};
-struct fixed_file_table {
+struct io_ring_ctx;
+
+struct io_rsrc_put {
+ struct list_head list;
+ union {
+ void *rsrc;
+ struct file *file;
+ };
+};
+
+struct fixed_rsrc_table {
struct file **files;
};
-struct fixed_file_ref_node {
+struct fixed_rsrc_ref_node {
struct percpu_ref refs;
struct list_head node;
- struct list_head file_list;
- struct fixed_file_data *file_data;
+ struct list_head rsrc_list;
+ struct fixed_rsrc_data *rsrc_data;
+ void (*rsrc_put)(struct io_ring_ctx *ctx,
+ struct io_rsrc_put *prsrc);
struct llist_node llist;
bool done;
};
-struct fixed_file_data {
- struct fixed_file_table *table;
+struct fixed_rsrc_data {
+ struct fixed_rsrc_table *table;
struct io_ring_ctx *ctx;
- struct fixed_file_ref_node *node;
+ struct fixed_rsrc_ref_node *node;
struct percpu_ref refs;
struct completion done;
- struct list_head ref_list;
- spinlock_t lock;
+ bool quiesce;
};
struct io_buffer {
@@ -234,19 +251,76 @@ struct io_restriction {
bool registered;
};
+enum {
+ IO_SQ_THREAD_SHOULD_STOP = 0,
+ IO_SQ_THREAD_SHOULD_PARK,
+};
+
struct io_sq_data {
refcount_t refs;
+ atomic_t park_pending;
struct mutex lock;
/* ctx's that are using this sqd */
struct list_head ctx_list;
- struct list_head ctx_new_list;
- struct mutex ctx_lock;
struct task_struct *thread;
struct wait_queue_head wait;
unsigned sq_thread_idle;
+ int sq_cpu;
+ pid_t task_pid;
+ pid_t task_tgid;
+
+ unsigned long state;
+ struct completion exited;
+ struct callback_head *park_task_work;
+};
+
+#define IO_IOPOLL_BATCH 8
+#define IO_COMPL_BATCH 32
+#define IO_REQ_CACHE_SIZE 32
+#define IO_REQ_ALLOC_BATCH 8
+
+struct io_comp_state {
+ struct io_kiocb *reqs[IO_COMPL_BATCH];
+ unsigned int nr;
+ unsigned int locked_free_nr;
+ /* inline/task_work completion list, under ->uring_lock */
+ struct list_head free_list;
+ /* IRQ completion list, under ->completion_lock */
+ struct list_head locked_free_list;
+};
+
+struct io_submit_link {
+ struct io_kiocb *head;
+ struct io_kiocb *last;
+};
+
+struct io_submit_state {
+ struct blk_plug plug;
+ struct io_submit_link link;
+
+ /*
+ * io_kiocb alloc cache
+ */
+ void *reqs[IO_REQ_CACHE_SIZE];
+ unsigned int free_reqs;
+
+ bool plug_started;
+
+ /*
+ * Batch completion logic
+ */
+ struct io_comp_state comp;
+
+ /*
+ * File reference cache
+ */
+ struct file *file;
+ unsigned int fd;
+ unsigned int file_refs;
+ unsigned int ios_left;
};
struct io_ring_ctx {
@@ -257,12 +331,10 @@ struct io_ring_ctx {
struct {
unsigned int flags;
unsigned int compat: 1;
- unsigned int limit_mem: 1;
unsigned int cq_overflow_flushed: 1;
unsigned int drain_next: 1;
unsigned int eventfd_async: 1;
unsigned int restricted: 1;
- unsigned int sqo_dead: 1;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -284,6 +356,9 @@ struct io_ring_ctx {
unsigned cached_cq_overflow;
unsigned long sq_check_overflow;
+ /* hashed buffered write serialization */
+ struct io_wq_hash *hash_map;
+
struct list_head defer_list;
struct list_head timeout_list;
struct list_head cq_overflow_list;
@@ -291,24 +366,19 @@ struct io_ring_ctx {
struct io_uring_sqe *sq_sqes;
} ____cacheline_aligned_in_smp;
- struct io_rings *rings;
+ struct {
+ struct mutex uring_lock;
+ wait_queue_head_t wait;
+ } ____cacheline_aligned_in_smp;
- /* IO offload */
- struct io_wq *io_wq;
+ struct io_submit_state submit_state;
- /*
- * For SQPOLL usage - we hold a reference to the parent task, so we
- * have access to the ->files
- */
- struct task_struct *sqo_task;
+ struct io_rings *rings;
/* Only used for accounting purposes */
struct mm_struct *mm_account;
-#ifdef CONFIG_BLK_CGROUP
- struct cgroup_subsys_state *sqo_blkcg_css;
-#endif
-
+ const struct cred *sq_creds; /* cred used for __io_sq_thread() */
struct io_sq_data *sq_data; /* if using sq thread polling */
struct wait_queue_head sqo_sq_wait;
@@ -319,7 +389,7 @@ struct io_ring_ctx {
* readers must ensure that ->refs is alive as long as the file* is
* used. Only updated through io_uring_register(2).
*/
- struct fixed_file_data *file_data;
+ struct fixed_rsrc_data *file_data;
unsigned nr_user_files;
/* if used, fixed mapped user buffers */
@@ -328,26 +398,16 @@ struct io_ring_ctx {
struct user_struct *user;
- const struct cred *creds;
-
-#ifdef CONFIG_AUDIT
- kuid_t loginuid;
- unsigned int sessionid;
-#endif
-
struct completion ref_comp;
- struct completion sq_thread_comp;
-
- /* if all else fails... */
- struct io_kiocb *fallback_req;
#if defined(CONFIG_UNIX)
struct socket *ring_sock;
#endif
- struct idr io_buffer_idr;
+ struct xarray io_buffers;
- struct idr personality_idr;
+ struct xarray personalities;
+ u32 pers_next;
struct {
unsigned cached_cq_tail;
@@ -362,11 +422,6 @@ struct io_ring_ctx {
} ____cacheline_aligned_in_smp;
struct {
- struct mutex uring_lock;
- wait_queue_head_t wait;
- } ____cacheline_aligned_in_smp;
-
- struct {
spinlock_t completion_lock;
/*
@@ -384,11 +439,37 @@ struct io_ring_ctx {
struct list_head inflight_list;
} ____cacheline_aligned_in_smp;
- struct delayed_work file_put_work;
- struct llist_head file_put_llist;
+ struct delayed_work rsrc_put_work;
+ struct llist_head rsrc_put_llist;
+ struct list_head rsrc_ref_list;
+ spinlock_t rsrc_ref_lock;
- struct work_struct exit_work;
struct io_restriction restrictions;
+
+ /* exit task_work */
+ struct callback_head *exit_task_work;
+
+ struct wait_queue_head hash_wait;
+
+ /* Keep this last, we don't need it for the fast path */
+ struct work_struct exit_work;
+ struct list_head tctx_list;
+};
+
+struct io_uring_task {
+ /* submission side */
+ struct xarray xa;
+ struct wait_queue_head wait;
+ const struct io_ring_ctx *last;
+ struct io_wq *io_wq;
+ struct percpu_counter inflight;
+ atomic_t in_idle;
+ bool sqpoll;
+
+ spinlock_t task_lock;
+ struct io_wq_work_list task_list;
+ unsigned long task_state;
+ struct callback_head task_work;
};
/*
@@ -411,7 +492,6 @@ struct io_poll_remove {
struct io_close {
struct file *file;
- struct file *put_file;
int fd;
};
@@ -489,13 +569,12 @@ struct io_sr_msg {
struct io_open {
struct file *file;
int dfd;
- bool ignore_nonblock;
struct filename *filename;
struct open_how how;
unsigned long nofile;
};
-struct io_files_update {
+struct io_rsrc_update {
struct file *file;
u64 arg;
u32 nr_args;
@@ -584,7 +663,8 @@ struct io_async_connect {
struct io_async_msghdr {
struct iovec fast_iov[UIO_FASTIOV];
- struct iovec *iov;
+ /* points to an allocated iov, if NULL we use fast_iov instead */
+ struct iovec *free_iov;
struct sockaddr __user *uaddr;
struct msghdr msg;
struct sockaddr_storage addr;
@@ -616,8 +696,8 @@ enum {
REQ_F_POLLED_BIT,
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_NO_FILE_TABLE_BIT,
- REQ_F_WORK_INITIALIZED_BIT,
REQ_F_LTIMEOUT_ACTIVE_BIT,
+ REQ_F_COMPLETE_INLINE_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -639,7 +719,7 @@ enum {
/* fail rest of links */
REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
- /* on inflight list */
+ /* on inflight list, should be cancelled and waited on exit reliably */
REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
/* read/write uses file position */
REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
@@ -657,10 +737,10 @@ enum {
REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
/* doesn't need file table for this request */
REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
- /* io_wq_work is initialized */
- REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
/* linked timeout is active, i.e. prepared by link's head */
REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
+ /* completion is deferred through io_comp_state */
+ REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT),
};
struct async_poll {
@@ -668,6 +748,11 @@ struct async_poll {
struct io_poll_iocb *double_poll;
};
+struct io_task_work {
+ struct io_wq_work_node node;
+ task_work_func_t func;
+};
+
/*
* NOTE! Each of the iocb union members has the file pointer
* as the first entry in their struct definition. So you can
@@ -689,7 +774,7 @@ struct io_kiocb {
struct io_sr_msg sr_msg;
struct io_open open;
struct io_close close;
- struct io_files_update files_update;
+ struct io_rsrc_update rsrc_update;
struct io_fadvise fadvise;
struct io_madvise madvise;
struct io_epoll epoll;
@@ -719,64 +804,38 @@ struct io_kiocb {
u64 user_data;
struct io_kiocb *link;
- struct percpu_ref *fixed_file_refs;
+ struct percpu_ref *fixed_rsrc_refs;
/*
* 1. used with ctx->iopoll_list with reads/writes
* 2. to track reqs with ->files (see io_op_def::file_table)
*/
struct list_head inflight_entry;
- struct callback_head task_work;
+ union {
+ struct io_task_work io_task_work;
+ struct callback_head task_work;
+ };
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
struct hlist_node hash_node;
struct async_poll *apoll;
struct io_wq_work work;
};
+struct io_tctx_node {
+ struct list_head ctx_node;
+ struct task_struct *task;
+ struct io_ring_ctx *ctx;
+};
+
struct io_defer_entry {
struct list_head list;
struct io_kiocb *req;
u32 seq;
};
-#define IO_IOPOLL_BATCH 8
-
-struct io_comp_state {
- unsigned int nr;
- struct list_head list;
- struct io_ring_ctx *ctx;
-};
-
-struct io_submit_state {
- struct blk_plug plug;
-
- /*
- * io_kiocb alloc cache
- */
- void *reqs[IO_IOPOLL_BATCH];
- unsigned int free_reqs;
-
- bool plug_started;
-
- /*
- * Batch completion logic
- */
- struct io_comp_state comp;
-
- /*
- * File reference cache
- */
- struct file *file;
- unsigned int fd;
- unsigned int file_refs;
- unsigned int ios_left;
-};
-
struct io_op_def {
/* needs req->file assigned */
unsigned needs_file : 1;
- /* don't fail if file grab fails */
- unsigned needs_file_no_error : 1;
/* hash wq insertion if file is a regular file */
unsigned hash_reg_file : 1;
/* unbound wq insertion if file is a non-regular file */
@@ -794,7 +853,6 @@ struct io_op_def {
unsigned plug : 1;
/* size of async data needed, if any */
unsigned short async_size;
- unsigned work_flags;
};
static const struct io_op_def io_op_defs[] = {
@@ -807,7 +865,6 @@ static const struct io_op_def io_op_defs[] = {
.needs_async_data = 1,
.plug = 1,
.async_size = sizeof(struct io_async_rw),
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
},
[IORING_OP_WRITEV] = {
.needs_file = 1,
@@ -817,12 +874,9 @@ static const struct io_op_def io_op_defs[] = {
.needs_async_data = 1,
.plug = 1,
.async_size = sizeof(struct io_async_rw),
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
- IO_WQ_WORK_FSIZE,
},
[IORING_OP_FSYNC] = {
.needs_file = 1,
- .work_flags = IO_WQ_WORK_BLKCG,
},
[IORING_OP_READ_FIXED] = {
.needs_file = 1,
@@ -830,7 +884,6 @@ static const struct io_op_def io_op_defs[] = {
.pollin = 1,
.plug = 1,
.async_size = sizeof(struct io_async_rw),
- .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
},
[IORING_OP_WRITE_FIXED] = {
.needs_file = 1,
@@ -839,8 +892,6 @@ static const struct io_op_def io_op_defs[] = {
.pollout = 1,
.plug = 1,
.async_size = sizeof(struct io_async_rw),
- .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
- IO_WQ_WORK_MM,
},
[IORING_OP_POLL_ADD] = {
.needs_file = 1,
@@ -849,7 +900,6 @@ static const struct io_op_def io_op_defs[] = {
[IORING_OP_POLL_REMOVE] = {},
[IORING_OP_SYNC_FILE_RANGE] = {
.needs_file = 1,
- .work_flags = IO_WQ_WORK_BLKCG,
},
[IORING_OP_SENDMSG] = {
.needs_file = 1,
@@ -857,8 +907,6 @@ static const struct io_op_def io_op_defs[] = {
.pollout = 1,
.needs_async_data = 1,
.async_size = sizeof(struct io_async_msghdr),
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
- IO_WQ_WORK_FS,
},
[IORING_OP_RECVMSG] = {
.needs_file = 1,
@@ -867,29 +915,23 @@ static const struct io_op_def io_op_defs[] = {
.buffer_select = 1,
.needs_async_data = 1,
.async_size = sizeof(struct io_async_msghdr),
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
- IO_WQ_WORK_FS,
},
[IORING_OP_TIMEOUT] = {
.needs_async_data = 1,
.async_size = sizeof(struct io_timeout_data),
- .work_flags = IO_WQ_WORK_MM,
},
[IORING_OP_TIMEOUT_REMOVE] = {
/* used by timeout updates' prep() */
- .work_flags = IO_WQ_WORK_MM,
},
[IORING_OP_ACCEPT] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollin = 1,
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
},
[IORING_OP_ASYNC_CANCEL] = {},
[IORING_OP_LINK_TIMEOUT] = {
.needs_async_data = 1,
.async_size = sizeof(struct io_timeout_data),
- .work_flags = IO_WQ_WORK_MM,
},
[IORING_OP_CONNECT] = {
.needs_file = 1,
@@ -897,28 +939,14 @@ static const struct io_op_def io_op_defs[] = {
.pollout = 1,
.needs_async_data = 1,
.async_size = sizeof(struct io_async_connect),
- .work_flags = IO_WQ_WORK_MM,
},
[IORING_OP_FALLOCATE] = {
.needs_file = 1,
- .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
- },
- [IORING_OP_OPENAT] = {
- .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
- IO_WQ_WORK_FS | IO_WQ_WORK_MM,
- },
- [IORING_OP_CLOSE] = {
- .needs_file = 1,
- .needs_file_no_error = 1,
- .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
- },
- [IORING_OP_FILES_UPDATE] = {
- .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
- },
- [IORING_OP_STATX] = {
- .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
- IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
},
+ [IORING_OP_OPENAT] = {},
+ [IORING_OP_CLOSE] = {},
+ [IORING_OP_FILES_UPDATE] = {},
+ [IORING_OP_STATX] = {},
[IORING_OP_READ] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
@@ -926,7 +954,6 @@ static const struct io_op_def io_op_defs[] = {
.buffer_select = 1,
.plug = 1,
.async_size = sizeof(struct io_async_rw),
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
},
[IORING_OP_WRITE] = {
.needs_file = 1,
@@ -934,42 +961,31 @@ static const struct io_op_def io_op_defs[] = {
.pollout = 1,
.plug = 1,
.async_size = sizeof(struct io_async_rw),
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
- IO_WQ_WORK_FSIZE,
},
[IORING_OP_FADVISE] = {
.needs_file = 1,
- .work_flags = IO_WQ_WORK_BLKCG,
- },
- [IORING_OP_MADVISE] = {
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
},
+ [IORING_OP_MADVISE] = {},
[IORING_OP_SEND] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
},
[IORING_OP_RECV] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollin = 1,
.buffer_select = 1,
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
},
[IORING_OP_OPENAT2] = {
- .work_flags = IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
- IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
},
[IORING_OP_EPOLL_CTL] = {
.unbound_nonreg_file = 1,
- .work_flags = IO_WQ_WORK_FILES,
},
[IORING_OP_SPLICE] = {
.needs_file = 1,
.hash_reg_file = 1,
.unbound_nonreg_file = 1,
- .work_flags = IO_WQ_WORK_BLKCG,
},
[IORING_OP_PROVIDE_BUFFERS] = {},
[IORING_OP_REMOVE_BUFFERS] = {},
@@ -981,54 +997,49 @@ static const struct io_op_def io_op_defs[] = {
[IORING_OP_SHUTDOWN] = {
.needs_file = 1,
},
- [IORING_OP_RENAMEAT] = {
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
- IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
- },
- [IORING_OP_UNLINKAT] = {
- .work_flags = IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
- IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
- },
-};
-
-enum io_mem_account {
- ACCT_LOCKED,
- ACCT_PINNED,
+ [IORING_OP_RENAMEAT] = {},
+ [IORING_OP_UNLINKAT] = {},
};
-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
- struct task_struct *task);
-
-static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
-static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+static bool io_disarm_next(struct io_kiocb *req);
+static void io_uring_del_task_file(unsigned long index);
+static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ struct task_struct *task,
+ struct files_struct *files);
+static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
+static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
+static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
struct io_ring_ctx *ctx);
+static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
-static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
- struct io_comp_state *cs);
+static bool io_rw_reissue(struct io_kiocb *req);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_double_put_req(struct io_kiocb *req);
+static void io_dismantle_req(struct io_kiocb *req);
+static void io_put_task(struct task_struct *task, int nr);
+static void io_queue_next(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
- struct io_uring_files_update *ip,
+ struct io_uring_rsrc_update *ip,
unsigned nr_args);
static void __io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
struct io_kiocb *req, int fd, bool fixed);
-static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
-static void io_file_put_work(struct work_struct *work);
+static void __io_queue_sqe(struct io_kiocb *req);
+static void io_rsrc_put_work(struct work_struct *work);
-static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
- struct iovec **iovec, struct iov_iter *iter,
- bool needs_lock);
+static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
+ struct iov_iter *iter, bool needs_lock);
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
const struct iovec *fast_iov,
struct iov_iter *iter, bool force);
-static void io_req_drop_files(struct io_kiocb *req);
static void io_req_task_queue(struct io_kiocb *req);
+static void io_submit_flush_completions(struct io_comp_state *cs,
+ struct io_ring_ctx *ctx);
static struct kmem_cache *req_cachep;
@@ -1060,9 +1071,9 @@ static inline void io_set_resource_node(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- if (!req->fixed_file_refs) {
- req->fixed_file_refs = &ctx->file_data->node->refs;
- percpu_ref_get(req->fixed_file_refs);
+ if (!req->fixed_rsrc_refs) {
+ req->fixed_rsrc_refs = &ctx->file_data->node->refs;
+ percpu_ref_get(req->fixed_rsrc_refs);
}
}
@@ -1082,196 +1093,20 @@ static bool io_match_task(struct io_kiocb *head,
return true;
io_for_each_link(req, head) {
- if (!(req->flags & REQ_F_WORK_INITIALIZED))
- continue;
- if (req->file && req->file->f_op == &io_uring_fops)
+ if (req->flags & REQ_F_INFLIGHT)
return true;
- if ((req->work.flags & IO_WQ_WORK_FILES) &&
- req->work.identity->files == files)
+ if (req->task->files == files)
return true;
}
return false;
}
-static void io_sq_thread_drop_mm_files(void)
-{
- struct files_struct *files = current->files;
- struct mm_struct *mm = current->mm;
-
- if (mm) {
- kthread_unuse_mm(mm);
- mmput(mm);
- current->mm = NULL;
- }
- if (files) {
- struct nsproxy *nsproxy = current->nsproxy;
-
- task_lock(current);
- current->files = NULL;
- current->nsproxy = NULL;
- task_unlock(current);
- put_files_struct(files);
- put_nsproxy(nsproxy);
- }
-}
-
-static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
-{
- if (current->flags & PF_EXITING)
- return -EFAULT;
-
- if (!current->files) {
- struct files_struct *files;
- struct nsproxy *nsproxy;
-
- task_lock(ctx->sqo_task);
- files = ctx->sqo_task->files;
- if (!files) {
- task_unlock(ctx->sqo_task);
- return -EOWNERDEAD;
- }
- atomic_inc(&files->count);
- get_nsproxy(ctx->sqo_task->nsproxy);
- nsproxy = ctx->sqo_task->nsproxy;
- task_unlock(ctx->sqo_task);
-
- task_lock(current);
- current->files = files;
- current->nsproxy = nsproxy;
- task_unlock(current);
- }
- return 0;
-}
-
-static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
-{
- struct mm_struct *mm;
-
- if (current->flags & PF_EXITING)
- return -EFAULT;
- if (current->mm)
- return 0;
-
- /* Should never happen */
- if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
- return -EFAULT;
-
- task_lock(ctx->sqo_task);
- mm = ctx->sqo_task->mm;
- if (unlikely(!mm || !mmget_not_zero(mm)))
- mm = NULL;
- task_unlock(ctx->sqo_task);
-
- if (mm) {
- kthread_use_mm(mm);
- return 0;
- }
-
- return -EFAULT;
-}
-
-static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
-{
- const struct io_op_def *def = &io_op_defs[req->opcode];
- int ret;
-
- if (def->work_flags & IO_WQ_WORK_MM) {
- ret = __io_sq_thread_acquire_mm(ctx);
- if (unlikely(ret))
- return ret;
- }
-
- if (def->needs_file || (def->work_flags & IO_WQ_WORK_FILES)) {
- ret = __io_sq_thread_acquire_files(ctx);
- if (unlikely(ret))
- return ret;
- }
-
- return 0;
-}
-
-static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
- struct cgroup_subsys_state **cur_css)
-
-{
-#ifdef CONFIG_BLK_CGROUP
- /* puts the old one when swapping */
- if (*cur_css != ctx->sqo_blkcg_css) {
- kthread_associate_blkcg(ctx->sqo_blkcg_css);
- *cur_css = ctx->sqo_blkcg_css;
- }
-#endif
-}
-
-static void io_sq_thread_unassociate_blkcg(void)
-{
-#ifdef CONFIG_BLK_CGROUP
- kthread_associate_blkcg(NULL);
-#endif
-}
-
static inline void req_set_fail_links(struct io_kiocb *req)
{
if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
}
-/*
- * None of these are dereferenced, they are simply used to check if any of
- * them have changed. If we're under current and check they are still the
- * same, we're fine to grab references to them for actual out-of-line use.
- */
-static void io_init_identity(struct io_identity *id)
-{
- id->files = current->files;
- id->mm = current->mm;
-#ifdef CONFIG_BLK_CGROUP
- rcu_read_lock();
- id->blkcg_css = blkcg_css();
- rcu_read_unlock();
-#endif
- id->creds = current_cred();
- id->nsproxy = current->nsproxy;
- id->fs = current->fs;
- id->fsize = rlimit(RLIMIT_FSIZE);
-#ifdef CONFIG_AUDIT
- id->loginuid = current->loginuid;
- id->sessionid = current->sessionid;
-#endif
- refcount_set(&id->count, 1);
-}
-
-static inline void __io_req_init_async(struct io_kiocb *req)
-{
- memset(&req->work, 0, sizeof(req->work));
- req->flags |= REQ_F_WORK_INITIALIZED;
-}
-
-/*
- * Note: must call io_req_init_async() for the first time you
- * touch any members of io_wq_work.
- */
-static inline void io_req_init_async(struct io_kiocb *req)
-{
- struct io_uring_task *tctx = current->io_uring;
-
- if (req->flags & REQ_F_WORK_INITIALIZED)
- return;
-
- __io_req_init_async(req);
-
- /* Grab a ref if this isn't our static identity */
- req->work.identity = tctx->identity;
- if (tctx->identity != &tctx->__identity)
- refcount_inc(&req->work.identity->count);
-}
-
-static inline bool io_async_submit(struct io_ring_ctx *ctx)
-{
- return ctx->flags & IORING_SETUP_SQPOLL;
-}
-
static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
@@ -1293,10 +1128,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
if (!ctx)
return NULL;
- ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
- if (!ctx->fallback_req)
- goto err;
-
/*
* Use 5 bits less than the max cq entries, that should give us around
* 32 entries per hash list if totally full and uniformly spread.
@@ -1322,9 +1153,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
init_waitqueue_head(&ctx->cq_wait);
INIT_LIST_HEAD(&ctx->cq_overflow_list);
init_completion(&ctx->ref_comp);
- init_completion(&ctx->sq_thread_comp);
- idr_init(&ctx->io_buffer_idr);
- idr_init(&ctx->personality_idr);
+ xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
+ xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait);
spin_lock_init(&ctx->completion_lock);
@@ -1333,12 +1163,15 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->timeout_list);
spin_lock_init(&ctx->inflight_lock);
INIT_LIST_HEAD(&ctx->inflight_list);
- INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
- init_llist_head(&ctx->file_put_llist);
+ spin_lock_init(&ctx->rsrc_ref_lock);
+ INIT_LIST_HEAD(&ctx->rsrc_ref_list);
+ INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
+ init_llist_head(&ctx->rsrc_put_llist);
+ INIT_LIST_HEAD(&ctx->tctx_list);
+ INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
+ INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
return ctx;
err:
- if (ctx->fallback_req)
- kmem_cache_free(req_cachep, ctx->fallback_req);
kfree(ctx->cancel_hash);
kfree(ctx);
return NULL;
@@ -1356,184 +1189,17 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
return false;
}
-static void __io_commit_cqring(struct io_ring_ctx *ctx)
-{
- struct io_rings *rings = ctx->rings;
-
- /* order cqe stores with ring update */
- smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
-}
-
-static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
-{
- if (req->work.identity == &tctx->__identity)
- return;
- if (refcount_dec_and_test(&req->work.identity->count))
- kfree(req->work.identity);
-}
-
-static void io_req_clean_work(struct io_kiocb *req)
+static void io_req_track_inflight(struct io_kiocb *req)
{
- if (!(req->flags & REQ_F_WORK_INITIALIZED))
- return;
-
- req->flags &= ~REQ_F_WORK_INITIALIZED;
-
- if (req->work.flags & IO_WQ_WORK_MM) {
- mmdrop(req->work.identity->mm);
- req->work.flags &= ~IO_WQ_WORK_MM;
- }
-#ifdef CONFIG_BLK_CGROUP
- if (req->work.flags & IO_WQ_WORK_BLKCG) {
- css_put(req->work.identity->blkcg_css);
- req->work.flags &= ~IO_WQ_WORK_BLKCG;
- }
-#endif
- if (req->work.flags & IO_WQ_WORK_CREDS) {
- put_cred(req->work.identity->creds);
- req->work.flags &= ~IO_WQ_WORK_CREDS;
- }
- if (req->work.flags & IO_WQ_WORK_FS) {
- struct fs_struct *fs = req->work.identity->fs;
-
- spin_lock(&req->work.identity->fs->lock);
- if (--fs->users)
- fs = NULL;
- spin_unlock(&req->work.identity->fs->lock);
- if (fs)
- free_fs_struct(fs);
- req->work.flags &= ~IO_WQ_WORK_FS;
- }
- if (req->flags & REQ_F_INFLIGHT)
- io_req_drop_files(req);
-
- io_put_identity(req->task->io_uring, req);
-}
-
-/*
- * Create a private copy of io_identity, since some fields don't match
- * the current context.
- */
-static bool io_identity_cow(struct io_kiocb *req)
-{
- struct io_uring_task *tctx = current->io_uring;
- const struct cred *creds = NULL;
- struct io_identity *id;
-
- if (req->work.flags & IO_WQ_WORK_CREDS)
- creds = req->work.identity->creds;
-
- id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
- if (unlikely(!id)) {
- req->work.flags |= IO_WQ_WORK_CANCEL;
- return false;
- }
-
- /*
- * We can safely just re-init the creds we copied. Either the field
- * matches the current one, or we haven't grabbed it yet. The only
- * exception is ->creds, through registered personalities, so handle
- * that one separately.
- */
- io_init_identity(id);
- if (creds)
- id->creds = creds;
-
- /* add one for this request */
- refcount_inc(&id->count);
-
- /* drop tctx and req identity references, if needed */
- if (tctx->identity != &tctx->__identity &&
- refcount_dec_and_test(&tctx->identity->count))
- kfree(tctx->identity);
- if (req->work.identity != &tctx->__identity &&
- refcount_dec_and_test(&req->work.identity->count))
- kfree(req->work.identity);
-
- req->work.identity = id;
- tctx->identity = id;
- return true;
-}
-
-static bool io_grab_identity(struct io_kiocb *req)
-{
- const struct io_op_def *def = &io_op_defs[req->opcode];
- struct io_identity *id = req->work.identity;
struct io_ring_ctx *ctx = req->ctx;
- if (def->work_flags & IO_WQ_WORK_FSIZE) {
- if (id->fsize != rlimit(RLIMIT_FSIZE))
- return false;
- req->work.flags |= IO_WQ_WORK_FSIZE;
- }
-#ifdef CONFIG_BLK_CGROUP
- if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
- (def->work_flags & IO_WQ_WORK_BLKCG)) {
- rcu_read_lock();
- if (id->blkcg_css != blkcg_css()) {
- rcu_read_unlock();
- return false;
- }
- /*
- * This should be rare, either the cgroup is dying or the task
- * is moving cgroups. Just punt to root for the handful of ios.
- */
- if (css_tryget_online(id->blkcg_css))
- req->work.flags |= IO_WQ_WORK_BLKCG;
- rcu_read_unlock();
- }
-#endif
- if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
- if (id->creds != current_cred())
- return false;
- get_cred(id->creds);
- req->work.flags |= IO_WQ_WORK_CREDS;
- }
-#ifdef CONFIG_AUDIT
- if (!uid_eq(current->loginuid, id->loginuid) ||
- current->sessionid != id->sessionid)
- return false;
-#endif
- if (!(req->work.flags & IO_WQ_WORK_FS) &&
- (def->work_flags & IO_WQ_WORK_FS)) {
- if (current->fs != id->fs)
- return false;
- spin_lock(&id->fs->lock);
- if (!id->fs->in_exec) {
- id->fs->users++;
- req->work.flags |= IO_WQ_WORK_FS;
- } else {
- req->work.flags |= IO_WQ_WORK_CANCEL;
- }
- spin_unlock(&current->fs->lock);
- }
- if (!(req->work.flags & IO_WQ_WORK_FILES) &&
- (def->work_flags & IO_WQ_WORK_FILES) &&
- !(req->flags & REQ_F_NO_FILE_TABLE)) {
- if (id->files != current->files ||
- id->nsproxy != current->nsproxy)
- return false;
- atomic_inc(&id->files->count);
- get_nsproxy(id->nsproxy);
-
- if (!(req->flags & REQ_F_INFLIGHT)) {
- req->flags |= REQ_F_INFLIGHT;
-
- spin_lock_irq(&ctx->inflight_lock);
- list_add(&req->inflight_entry, &ctx->inflight_list);
- spin_unlock_irq(&ctx->inflight_lock);
- }
- req->work.flags |= IO_WQ_WORK_FILES;
- }
- if (!(req->work.flags & IO_WQ_WORK_MM) &&
- (def->work_flags & IO_WQ_WORK_MM)) {
- if (id->mm != current->mm)
- return false;
- mmgrab(id->mm);
- req->work.flags |= IO_WQ_WORK_MM;
- }
+ if (!(req->flags & REQ_F_INFLIGHT)) {
+ req->flags |= REQ_F_INFLIGHT;
- return true;
+ spin_lock_irq(&ctx->inflight_lock);
+ list_add(&req->inflight_entry, &ctx->inflight_list);
+ spin_unlock_irq(&ctx->inflight_lock);
+ }
}
static void io_prep_async_work(struct io_kiocb *req)
@@ -1541,7 +1207,8 @@ static void io_prep_async_work(struct io_kiocb *req)
const struct io_op_def *def = &io_op_defs[req->opcode];
struct io_ring_ctx *ctx = req->ctx;
- io_req_init_async(req);
+ if (!req->work.creds)
+ req->work.creds = get_current_cred();
if (req->flags & REQ_F_FORCE_ASYNC)
req->work.flags |= IO_WQ_WORK_CONCURRENT;
@@ -1553,17 +1220,6 @@ static void io_prep_async_work(struct io_kiocb *req)
if (def->unbound_nonreg_file)
req->work.flags |= IO_WQ_WORK_UNBOUND;
}
-
- /* if we fail grabbing identity, we must COW, regrab, and retry */
- if (io_grab_identity(req))
- return;
-
- if (!io_identity_cow(req))
- return;
-
- /* can't fail at this point */
- if (!io_grab_identity(req))
- WARN_ON(1);
}
static void io_prep_async_link(struct io_kiocb *req)
@@ -1574,25 +1230,20 @@ static void io_prep_async_link(struct io_kiocb *req)
io_prep_async_work(cur);
}
-static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
+static void io_queue_async_work(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *link = io_prep_linked_timeout(req);
+ struct io_uring_task *tctx = req->task->io_uring;
+
+ BUG_ON(!tctx);
+ BUG_ON(!tctx->io_wq);
trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
&req->work, req->flags);
- io_wq_enqueue(ctx->io_wq, &req->work);
- return link;
-}
-
-static void io_queue_async_work(struct io_kiocb *req)
-{
- struct io_kiocb *link;
-
/* init ->work of the whole link before punting */
io_prep_async_link(req);
- link = __io_queue_async_work(req);
-
+ io_wq_enqueue(tctx->io_wq, &req->work);
if (link)
io_queue_linked_timeout(link);
}
@@ -1685,7 +1336,9 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
static void io_commit_cqring(struct io_ring_ctx *ctx)
{
io_flush_timeouts(ctx);
- __io_commit_cqring(ctx);
+
+ /* order cqe stores with ring update */
+ smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
if (unlikely(!list_empty(&ctx->defer_list)))
__io_queue_deferred(ctx);
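The inlined tail update above depends on ordering: the CQE contents written by __io_cqring_fill_event() must be visible before the new cq.tail value, which is what smp_store_release() provides. A minimal user-space sketch of the same release/acquire pairing, using C11 atomics (ring size, names and payload type are illustrative, not io_uring's):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 8			/* power of two, mask = entries - 1 */

static uint64_t cqes[RING_ENTRIES];	/* completion payloads */
static _Atomic uint32_t tail;		/* producer-owned, published with release */
static uint32_t head;			/* consumer-owned in this sketch */

static void post_cqe(uint64_t res)
{
	uint32_t t = atomic_load_explicit(&tail, memory_order_relaxed);

	cqes[t & (RING_ENTRIES - 1)] = res;	/* 1: fill the entry */
	/* 2: publish; release orders the store above before the tail update */
	atomic_store_explicit(&tail, t + 1, memory_order_release);
}

static int reap_cqe(uint64_t *res)
{
	/* acquire pairs with the release above, so the entry is valid */
	if (head == atomic_load_explicit(&tail, memory_order_acquire))
		return 0;
	*res = cqes[head++ & (RING_ENTRIES - 1)];
	return 1;
}

int main(void)
{
	uint64_t res;

	post_cqe(42);
	if (reap_cqe(&res))
		printf("cqe res=%llu\n", (unsigned long long)res);
	return 0;
}

User-space consumers such as liburing pair this with an acquire load of the tail before reading entries.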
@@ -1698,21 +1351,25 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
}
+static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
+{
+ return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
+}
+
static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
unsigned tail;
- tail = ctx->cached_cq_tail;
/*
* writes to the cq entry need to come after reading head; the
* control dependency is enough as we're using WRITE_ONCE to
* fill the cq entry
*/
- if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
+ if (__io_cqring_events(ctx) == rings->cq_ring_entries)
return NULL;
- ctx->cached_cq_tail++;
+ tail = ctx->cached_cq_tail++;
return &rings->cqes[tail & ctx->cq_mask];
}
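__io_cqring_events() and io_get_cqring() both rely on free-running 32-bit head/tail indices: the pending count is plain unsigned subtraction, and the slot is selected with tail & cq_mask, so no explicit wraparound handling is needed. A small self-contained illustration (the ring size and the index values are made up):

#include <stdint.h>
#include <stdio.h>

/* Free-running 32-bit ring indices: the pending count is simply tail - head,
 * and unsigned subtraction stays correct even after either index wraps. */
static uint32_t pending(uint32_t tail, uint32_t head)
{
	return tail - head;
}

int main(void)
{
	/* head is just below the 2^32 wrap, tail has already wrapped */
	uint32_t head = 0xfffffffeu, tail = 0x00000003u;

	printf("pending=%u\n", pending(tail, head));	/* prints 5 */
	printf("slot=%u\n", tail & (8 - 1));		/* index into an 8-entry ring */
	return 0;
}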
@@ -1727,11 +1384,6 @@ static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
return io_wq_current_is_worker();
}
-static inline unsigned __io_cqring_events(struct io_ring_ctx *ctx)
-{
- return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
-}
-
static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
/* see waitqueue_active() comment */
@@ -1826,18 +1478,22 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
return all_flushed;
}
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
struct task_struct *tsk,
struct files_struct *files)
{
+ bool ret = true;
+
if (test_bit(0, &ctx->cq_check_overflow)) {
/* iopoll syncs against uring_lock, not completion_lock */
if (ctx->flags & IORING_SETUP_IOPOLL)
mutex_lock(&ctx->uring_lock);
- __io_cqring_overflow_flush(ctx, force, tsk, files);
+ ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
if (ctx->flags & IORING_SETUP_IOPOLL)
mutex_unlock(&ctx->uring_lock);
}
+
+ return ret;
}
static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
@@ -1885,100 +1541,114 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
__io_cqring_fill_event(req, res, 0);
}
-static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
+static void io_req_complete_post(struct io_kiocb *req, long res,
+ unsigned int cflags)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
__io_cqring_fill_event(req, res, cflags);
- io_commit_cqring(ctx);
- spin_unlock_irqrestore(&ctx->completion_lock, flags);
-
- io_cqring_ev_posted(ctx);
-}
-
-static void io_submit_flush_completions(struct io_comp_state *cs)
-{
- struct io_ring_ctx *ctx = cs->ctx;
-
- spin_lock_irq(&ctx->completion_lock);
- while (!list_empty(&cs->list)) {
- struct io_kiocb *req;
-
- req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
- list_del(&req->compl.list);
- __io_cqring_fill_event(req, req->result, req->compl.cflags);
+ /*
+ * If we're the last reference to this request, add to our locked
+ * free_list cache.
+ */
+ if (refcount_dec_and_test(&req->refs)) {
+ struct io_comp_state *cs = &ctx->submit_state.comp;
- /*
- * io_free_req() doesn't care about completion_lock unless one
- * of these flags is set. REQ_F_WORK_INITIALIZED is in the list
- * because of a potential deadlock with req->work.fs->lock
- */
- if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT
- |REQ_F_WORK_INITIALIZED)) {
- spin_unlock_irq(&ctx->completion_lock);
- io_put_req(req);
- spin_lock_irq(&ctx->completion_lock);
- } else {
- io_put_req(req);
+ if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
+ if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
+ io_disarm_next(req);
+ if (req->link) {
+ io_req_task_queue(req->link);
+ req->link = NULL;
+ }
}
+ io_dismantle_req(req);
+ io_put_task(req->task, 1);
+ list_add(&req->compl.list, &cs->locked_free_list);
+ cs->locked_free_nr++;
+ } else {
+ if (!percpu_ref_tryget(&ctx->refs))
+ req = NULL;
}
io_commit_cqring(ctx);
- spin_unlock_irq(&ctx->completion_lock);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
- io_cqring_ev_posted(ctx);
- cs->nr = 0;
+ if (req) {
+ io_cqring_ev_posted(ctx);
+ percpu_ref_put(&ctx->refs);
+ }
}
-static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
- struct io_comp_state *cs)
+static void io_req_complete_state(struct io_kiocb *req, long res,
+ unsigned int cflags)
{
- if (!cs) {
- io_cqring_add_event(req, res, cflags);
- io_put_req(req);
- } else {
- io_clean_op(req);
- req->result = res;
- req->compl.cflags = cflags;
- list_add_tail(&req->compl.list, &cs->list);
- if (++cs->nr >= 32)
- io_submit_flush_completions(cs);
- }
+ io_clean_op(req);
+ req->result = res;
+ req->compl.cflags = cflags;
+ req->flags |= REQ_F_COMPLETE_INLINE;
}
-static void io_req_complete(struct io_kiocb *req, long res)
+static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
+ long res, unsigned cflags)
{
- __io_req_complete(req, res, 0, NULL);
+ if (issue_flags & IO_URING_F_COMPLETE_DEFER)
+ io_req_complete_state(req, res, cflags);
+ else
+ io_req_complete_post(req, res, cflags);
}
-static inline bool io_is_fallback_req(struct io_kiocb *req)
+static inline void io_req_complete(struct io_kiocb *req, long res)
{
- return req == (struct io_kiocb *)
- ((unsigned long) req->ctx->fallback_req & ~1UL);
+ __io_req_complete(req, 0, res, 0);
}
-static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
+static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
{
- struct io_kiocb *req;
+ struct io_submit_state *state = &ctx->submit_state;
+ struct io_comp_state *cs = &state->comp;
+ struct io_kiocb *req = NULL;
- req = ctx->fallback_req;
- if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
- return req;
+ /*
+ * If we have more than a batch's worth of requests in our IRQ side
+ * locked cache, grab the lock and move them over to our submission
+ * side cache.
+ */
+ if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
+ spin_lock_irq(&ctx->completion_lock);
+ list_splice_init(&cs->locked_free_list, &cs->free_list);
+ cs->locked_free_nr = 0;
+ spin_unlock_irq(&ctx->completion_lock);
+ }
- return NULL;
+ while (!list_empty(&cs->free_list)) {
+ req = list_first_entry(&cs->free_list, struct io_kiocb,
+ compl.list);
+ list_del(&req->compl.list);
+ state->reqs[state->free_reqs++] = req;
+ if (state->free_reqs == ARRAY_SIZE(state->reqs))
+ break;
+ }
+
+ return req != NULL;
}
-static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
- struct io_submit_state *state)
+static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
+ struct io_submit_state *state = &ctx->submit_state;
+
+ BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
+
if (!state->free_reqs) {
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
- size_t sz;
int ret;
- sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
- ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
+ if (io_flush_cached_reqs(ctx))
+ goto got_req;
+
+ ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
+ state->reqs);
/*
* Bulk alloc is all-or-nothing. If we fail to get a batch,
@@ -1987,16 +1657,14 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
if (unlikely(ret <= 0)) {
state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
if (!state->reqs[0])
- goto fallback;
+ return NULL;
ret = 1;
}
state->free_reqs = ret;
}
-
+got_req:
state->free_reqs--;
return state->reqs[state->free_reqs];
-fallback:
- return io_get_fallback_req(ctx);
}
static inline void io_put_file(struct io_kiocb *req, struct file *file,
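The new allocation path above layers three sources: the submission-side reqs[] array, a free_list refilled by splicing the completion-side locked_free_list over in batches, and kmem_cache_alloc_bulk() as the fallback. A rough user-space analogue of that two-level recycling scheme, assuming a pthread mutex in place of completion_lock and calloc() in place of the slab cache (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req { struct req *next; };

#define SPLICE_BATCH 4			/* the kernel uses IO_COMPL_BATCH */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *locked_free_list;	/* completion side, lock protected */
static int locked_free_nr;
static struct req *free_list;		/* submission side, owner-only, lockless */

/* completion path: recycling is one short critical section */
static void recycle_req(struct req *r)
{
	pthread_mutex_lock(&lock);
	r->next = locked_free_list;
	locked_free_list = r;
	locked_free_nr++;
	pthread_mutex_unlock(&lock);
}

/* submission path: move the whole batch over with one lock round trip
 * (the kernel splices the list; this sketch walks it for clarity) */
static void flush_cached_reqs(void)
{
	if (locked_free_nr < SPLICE_BATCH)	/* racy read, like READ_ONCE() */
		return;
	pthread_mutex_lock(&lock);
	while (locked_free_list) {
		struct req *r = locked_free_list;

		locked_free_list = r->next;
		r->next = free_list;
		free_list = r;
	}
	locked_free_nr = 0;
	pthread_mutex_unlock(&lock);
}

static struct req *alloc_req(void)
{
	struct req *r;

	if (!free_list)
		flush_cached_reqs();
	r = free_list;
	if (!r)
		return calloc(1, sizeof(*r));	/* allocator fallback */
	free_list = r->next;
	return r;
}

int main(void)
{
	for (int i = 0; i < SPLICE_BATCH; i++)
		recycle_req(calloc(1, sizeof(struct req)));
	free(alloc_req());	/* splices the batch, then pops from the cache */
	return 0;		/* remaining cached entries leak in this sketch */
}

The batch threshold keeps the completion side, which may run from IRQ context, down to a short list push, while the submission side amortises the lock acquisition over many requests.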
@@ -2014,27 +1682,43 @@ static void io_dismantle_req(struct io_kiocb *req)
kfree(req->async_data);
if (req->file)
io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
- if (req->fixed_file_refs)
- percpu_ref_put(req->fixed_file_refs);
- io_req_clean_work(req);
+ if (req->fixed_rsrc_refs)
+ percpu_ref_put(req->fixed_rsrc_refs);
+ if (req->work.creds) {
+ put_cred(req->work.creds);
+ req->work.creds = NULL;
+ }
+
+ if (req->flags & REQ_F_INFLIGHT) {
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->inflight_lock, flags);
+ list_del(&req->inflight_entry);
+ spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+ req->flags &= ~REQ_F_INFLIGHT;
+ }
+}
+
+/* must be called shortly after putting a request */
+static inline void io_put_task(struct task_struct *task, int nr)
+{
+ struct io_uring_task *tctx = task->io_uring;
+
+ percpu_counter_sub(&tctx->inflight, nr);
+ if (unlikely(atomic_read(&tctx->in_idle)))
+ wake_up(&tctx->wait);
+ put_task_struct_many(task, nr);
}
static void __io_free_req(struct io_kiocb *req)
{
- struct io_uring_task *tctx = req->task->io_uring;
struct io_ring_ctx *ctx = req->ctx;
io_dismantle_req(req);
+ io_put_task(req->task, 1);
- percpu_counter_dec(&tctx->inflight);
- if (atomic_read(&tctx->in_idle))
- wake_up(&tctx->wait);
- put_task_struct(req->task);
-
- if (likely(!io_is_fallback_req(req)))
- kmem_cache_free(req_cachep, req);
- else
- clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
+ kmem_cache_free(req_cachep, req);
percpu_ref_put(&ctx->refs);
}
@@ -2046,15 +1730,11 @@ static inline void io_remove_next_linked(struct io_kiocb *req)
nxt->link = NULL;
}
-static void io_kill_linked_timeout(struct io_kiocb *req)
+static bool io_kill_linked_timeout(struct io_kiocb *req)
+ __must_hold(&req->ctx->completion_lock)
{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *link;
+ struct io_kiocb *link = req->link;
bool cancelled = false;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->completion_lock, flags);
- link = req->link;
/*
* Can happen if a linked timeout fired and link had been like
@@ -2069,58 +1749,48 @@ static void io_kill_linked_timeout(struct io_kiocb *req)
ret = hrtimer_try_to_cancel(&io->timer);
if (ret != -1) {
io_cqring_fill_event(link, -ECANCELED);
- io_commit_cqring(ctx);
+ io_put_req_deferred(link, 1);
cancelled = true;
}
}
req->flags &= ~REQ_F_LINK_TIMEOUT;
- spin_unlock_irqrestore(&ctx->completion_lock, flags);
-
- if (cancelled) {
- io_cqring_ev_posted(ctx);
- io_put_req(link);
- }
+ return cancelled;
}
-
static void io_fail_links(struct io_kiocb *req)
+ __must_hold(&req->ctx->completion_lock)
{
- struct io_kiocb *link, *nxt;
- struct io_ring_ctx *ctx = req->ctx;
- unsigned long flags;
+ struct io_kiocb *nxt, *link = req->link;
- spin_lock_irqsave(&ctx->completion_lock, flags);
- link = req->link;
req->link = NULL;
-
while (link) {
nxt = link->link;
link->link = NULL;
trace_io_uring_fail_link(req, link);
io_cqring_fill_event(link, -ECANCELED);
-
- /*
- * It's ok to free under spinlock as they're not linked anymore,
- * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
- * work.fs->lock.
- */
- if (link->flags & REQ_F_WORK_INITIALIZED)
- io_put_req_deferred(link, 2);
- else
- io_double_put_req(link);
+ io_put_req_deferred(link, 2);
link = nxt;
}
- io_commit_cqring(ctx);
- spin_unlock_irqrestore(&ctx->completion_lock, flags);
+}
- io_cqring_ev_posted(ctx);
+static bool io_disarm_next(struct io_kiocb *req)
+ __must_hold(&req->ctx->completion_lock)
+{
+ bool posted = false;
+
+ if (likely(req->flags & REQ_F_LINK_TIMEOUT))
+ posted = io_kill_linked_timeout(req);
+ if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
+ posted |= (req->link != NULL);
+ io_fail_links(req);
+ }
+ return posted;
}
static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
{
- if (req->flags & REQ_F_LINK_TIMEOUT)
- io_kill_linked_timeout(req);
+ struct io_kiocb *nxt;
/*
* If LINK is set, we have dependent requests in this chain. If we
@@ -2128,23 +1798,127 @@ static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
* dependencies to the next request. In case of failure, fail the rest
* of the chain.
*/
- if (likely(!(req->flags & REQ_F_FAIL_LINK))) {
- struct io_kiocb *nxt = req->link;
+ if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK)) {
+ struct io_ring_ctx *ctx = req->ctx;
+ unsigned long flags;
+ bool posted;
- req->link = NULL;
- return nxt;
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ posted = io_disarm_next(req);
+ if (posted)
+ io_commit_cqring(req->ctx);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ if (posted)
+ io_cqring_ev_posted(ctx);
}
- io_fail_links(req);
- return NULL;
+ nxt = req->link;
+ req->link = NULL;
+ return nxt;
}
static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
- if (likely(!(req->link) && !(req->flags & REQ_F_LINK_TIMEOUT)))
+ if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
return NULL;
return __io_req_find_next(req);
}
+static void ctx_flush_and_put(struct io_ring_ctx *ctx)
+{
+ if (!ctx)
+ return;
+ if (ctx->submit_state.comp.nr) {
+ mutex_lock(&ctx->uring_lock);
+ io_submit_flush_completions(&ctx->submit_state.comp, ctx);
+ mutex_unlock(&ctx->uring_lock);
+ }
+ percpu_ref_put(&ctx->refs);
+}
+
+static bool __tctx_task_work(struct io_uring_task *tctx)
+{
+ struct io_ring_ctx *ctx = NULL;
+ struct io_wq_work_list list;
+ struct io_wq_work_node *node;
+
+ if (wq_list_empty(&tctx->task_list))
+ return false;
+
+ spin_lock_irq(&tctx->task_lock);
+ list = tctx->task_list;
+ INIT_WQ_LIST(&tctx->task_list);
+ spin_unlock_irq(&tctx->task_lock);
+
+ node = list.first;
+ while (node) {
+ struct io_wq_work_node *next = node->next;
+ struct io_kiocb *req;
+
+ req = container_of(node, struct io_kiocb, io_task_work.node);
+ if (req->ctx != ctx) {
+ ctx_flush_and_put(ctx);
+ ctx = req->ctx;
+ percpu_ref_get(&ctx->refs);
+ }
+
+ req->task_work.func(&req->task_work);
+ node = next;
+ }
+
+ ctx_flush_and_put(ctx);
+ return list.first != NULL;
+}
+
+static void tctx_task_work(struct callback_head *cb)
+{
+ struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
+
+ clear_bit(0, &tctx->task_state);
+
+ while (__tctx_task_work(tctx))
+ cond_resched();
+}
+
+static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
+ enum task_work_notify_mode notify)
+{
+ struct io_uring_task *tctx = tsk->io_uring;
+ struct io_wq_work_node *node, *prev;
+ unsigned long flags;
+ int ret;
+
+ WARN_ON_ONCE(!tctx);
+
+ spin_lock_irqsave(&tctx->task_lock, flags);
+ wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
+ spin_unlock_irqrestore(&tctx->task_lock, flags);
+
+ /* task_work already pending, we're done */
+ if (test_bit(0, &tctx->task_state) ||
+ test_and_set_bit(0, &tctx->task_state))
+ return 0;
+
+ if (!task_work_add(tsk, &tctx->task_work, notify))
+ return 0;
+
+ /*
+ * Slow path - we failed, find and delete work. If the work is not
+ * in the list, it got run and we're fine.
+ */
+ ret = 0;
+ spin_lock_irqsave(&tctx->task_lock, flags);
+ wq_list_for_each(node, prev, &tctx->task_list) {
+ if (&req->io_task_work.node == node) {
+ wq_list_del(&tctx->task_list, node, prev);
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&tctx->task_lock, flags);
+ clear_bit(0, &tctx->task_state);
+ return ret;
+}
+
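io_task_work_add() above batches requests on a spinlock-protected per-task list and uses bit 0 of task_state so that only the first queued request pays for an actual task_work_add() notification; tctx_task_work() later drains whatever accumulated. A user-space sketch of the same "queue many, notify once" pattern, with a mutex-protected LIFO list standing in for the wq_list and an atomic flag for the task_state bit (names are made up, and the kernel list is FIFO rather than LIFO):

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>
#include <stdio.h>

struct work {
	struct work *next;
	void (*func)(struct work *);
};

static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work *task_list;		/* per-task pending work */
static atomic_bool task_scheduled;	/* mirrors bit 0 of tctx->task_state */

/* stand-in for task_work_add(): here we just note that a wakeup happened */
static void notify_task(void)
{
	puts("task notified once");
}

static void queue_work(struct work *w)
{
	bool expected = false;

	pthread_mutex_lock(&task_lock);
	w->next = task_list;
	task_list = w;
	pthread_mutex_unlock(&task_lock);

	/* only the 0 -> 1 transition pays for a notification */
	if (atomic_compare_exchange_strong(&task_scheduled, &expected, true))
		notify_task();
}

/* runs in task context: clear the flag first, then drain everything queued */
static void run_task_work(void)
{
	atomic_store(&task_scheduled, false);

	for (;;) {
		struct work *list;

		pthread_mutex_lock(&task_lock);
		list = task_list;
		task_list = NULL;
		pthread_mutex_unlock(&task_lock);

		if (!list)
			break;
		while (list) {
			struct work *next = list->next;

			list->func(list);
			list = next;
		}
	}
}

static void say_hi(struct work *w) { (void)w; puts("work ran"); }

int main(void)
{
	struct work a = { .func = say_hi }, b = { .func = say_hi };

	queue_work(&a);
	queue_work(&b);		/* no second notification */
	run_task_work();	/* drains both */
	return 0;
}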
static int io_req_task_work_add(struct io_kiocb *req)
{
struct task_struct *tsk = req->task;
@@ -2165,13 +1939,53 @@ static int io_req_task_work_add(struct io_kiocb *req)
if (!(ctx->flags & IORING_SETUP_SQPOLL))
notify = TWA_SIGNAL;
- ret = task_work_add(tsk, &req->task_work, notify);
+ ret = io_task_work_add(tsk, req, notify);
if (!ret)
wake_up_process(tsk);
return ret;
}
+static bool io_run_task_work_head(struct callback_head **work_head)
+{
+ struct callback_head *work, *next;
+ bool executed = false;
+
+ do {
+ work = xchg(work_head, NULL);
+ if (!work)
+ break;
+
+ do {
+ next = work->next;
+ work->func(work);
+ work = next;
+ cond_resched();
+ } while (work);
+ executed = true;
+ } while (1);
+
+ return executed;
+}
+
+static void io_task_work_add_head(struct callback_head **work_head,
+ struct callback_head *task_work)
+{
+ struct callback_head *head;
+
+ do {
+ head = READ_ONCE(*work_head);
+ task_work->next = head;
+ } while (cmpxchg(work_head, head, task_work) != head);
+}
+
+static void io_req_task_work_add_fallback(struct io_kiocb *req,
+ task_work_func_t cb)
+{
+ init_task_work(&req->task_work, cb);
+ io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
+}
+
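The fallback path added above (io_task_work_add_head()/io_run_task_work_head()) is a different structure: a lock-free LIFO of callback_head entries pushed with cmpxchg() and drained by xchg()-ing the whole list out. A compact C11 sketch of that push/drain pattern, single pass only and with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

struct callback {
	struct callback *next;
	void (*func)(struct callback *);
};

static _Atomic(struct callback *) work_head;	/* mirrors ctx->exit_task_work */

/* push with a cmpxchg loop: safe against concurrent pushers */
static void add_head(struct callback *cb)
{
	struct callback *head = atomic_load(&work_head);

	do {
		cb->next = head;
	} while (!atomic_compare_exchange_weak(&work_head, &head, cb));
}

/* drain by swapping the whole list out at once, then walking it */
static int run_head(void)
{
	struct callback *work = atomic_exchange(&work_head, (struct callback *)NULL);
	int ran = 0;

	while (work) {
		struct callback *next = work->next;

		work->func(work);
		work = next;
		ran++;
	}
	return ran;
}

static void cancel_cb(struct callback *cb) { (void)cb; puts("cancelled"); }

int main(void)
{
	struct callback a = { .func = cancel_cb }, b = { .func = cancel_cb };

	add_head(&a);
	add_head(&b);
	printf("ran %d callbacks\n", run_head());
	return 0;
}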
static void __io_req_task_cancel(struct io_kiocb *req, int error)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -2191,7 +2005,9 @@ static void io_req_task_cancel(struct callback_head *cb)
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
struct io_ring_ctx *ctx = req->ctx;
- __io_req_task_cancel(req, -ECANCELED);
+ mutex_lock(&ctx->uring_lock);
+ __io_req_task_cancel(req, req->result);
+ mutex_unlock(&ctx->uring_lock);
percpu_ref_put(&ctx->refs);
}
@@ -2199,46 +2015,45 @@ static void __io_req_task_submit(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
+ /* ctx stays valid until unlock, even if we drop all our ctx->refs */
mutex_lock(&ctx->uring_lock);
- if (!ctx->sqo_dead &&
- !__io_sq_thread_acquire_mm(ctx) &&
- !__io_sq_thread_acquire_files(ctx))
- __io_queue_sqe(req, NULL);
+ if (!(current->flags & PF_EXITING) && !current->in_execve)
+ __io_queue_sqe(req);
else
__io_req_task_cancel(req, -EFAULT);
mutex_unlock(&ctx->uring_lock);
-
- if (ctx->flags & IORING_SETUP_SQPOLL)
- io_sq_thread_drop_mm_files();
}
static void io_req_task_submit(struct callback_head *cb)
{
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
- struct io_ring_ctx *ctx = req->ctx;
__io_req_task_submit(req);
- percpu_ref_put(&ctx->refs);
}
static void io_req_task_queue(struct io_kiocb *req)
{
int ret;
- init_task_work(&req->task_work, io_req_task_submit);
- percpu_ref_get(&req->ctx->refs);
-
+ req->task_work.func = io_req_task_submit;
ret = io_req_task_work_add(req);
if (unlikely(ret)) {
- struct task_struct *tsk;
-
- init_task_work(&req->task_work, io_req_task_cancel);
- tsk = io_wq_get_task(req->ctx->io_wq);
- task_work_add(tsk, &req->task_work, TWA_NONE);
- wake_up_process(tsk);
+ req->result = -ECANCELED;
+ percpu_ref_get(&req->ctx->refs);
+ io_req_task_work_add_fallback(req, io_req_task_cancel);
}
}
+static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
+{
+ percpu_ref_get(&req->ctx->refs);
+ req->result = ret;
+ req->task_work.func = io_req_task_cancel;
+
+ if (unlikely(io_req_task_work_add(req)))
+ io_req_task_work_add_fallback(req, io_req_task_cancel);
+}
+
static inline void io_queue_next(struct io_kiocb *req)
{
struct io_kiocb *nxt = io_req_find_next(req);
@@ -2254,70 +2069,75 @@ static void io_free_req(struct io_kiocb *req)
}
struct req_batch {
- void *reqs[IO_IOPOLL_BATCH];
- int to_free;
-
struct task_struct *task;
int task_refs;
+ int ctx_refs;
};
static inline void io_init_req_batch(struct req_batch *rb)
{
- rb->to_free = 0;
rb->task_refs = 0;
+ rb->ctx_refs = 0;
rb->task = NULL;
}
-static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
- struct req_batch *rb)
-{
- kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
- percpu_ref_put_many(&ctx->refs, rb->to_free);
- rb->to_free = 0;
-}
-
static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
struct req_batch *rb)
{
- if (rb->to_free)
- __io_req_free_batch_flush(ctx, rb);
- if (rb->task) {
- struct io_uring_task *tctx = rb->task->io_uring;
-
- percpu_counter_sub(&tctx->inflight, rb->task_refs);
- if (atomic_read(&tctx->in_idle))
- wake_up(&tctx->wait);
- put_task_struct_many(rb->task, rb->task_refs);
- rb->task = NULL;
- }
+ if (rb->task)
+ io_put_task(rb->task, rb->task_refs);
+ if (rb->ctx_refs)
+ percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
}
-static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
+static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
+ struct io_submit_state *state)
{
- if (unlikely(io_is_fallback_req(req))) {
- io_free_req(req);
- return;
- }
io_queue_next(req);
if (req->task != rb->task) {
- if (rb->task) {
- struct io_uring_task *tctx = rb->task->io_uring;
-
- percpu_counter_sub(&tctx->inflight, rb->task_refs);
- if (atomic_read(&tctx->in_idle))
- wake_up(&tctx->wait);
- put_task_struct_many(rb->task, rb->task_refs);
- }
+ if (rb->task)
+ io_put_task(rb->task, rb->task_refs);
rb->task = req->task;
rb->task_refs = 0;
}
rb->task_refs++;
+ rb->ctx_refs++;
io_dismantle_req(req);
- rb->reqs[rb->to_free++] = req;
- if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
- __io_req_free_batch_flush(req->ctx, rb);
+ if (state->free_reqs != ARRAY_SIZE(state->reqs))
+ state->reqs[state->free_reqs++] = req;
+ else
+ list_add(&req->compl.list, &state->comp.free_list);
+}
+
+static void io_submit_flush_completions(struct io_comp_state *cs,
+ struct io_ring_ctx *ctx)
+{
+ int i, nr = cs->nr;
+ struct io_kiocb *req;
+ struct req_batch rb;
+
+ io_init_req_batch(&rb);
+ spin_lock_irq(&ctx->completion_lock);
+ for (i = 0; i < nr; i++) {
+ req = cs->reqs[i];
+ __io_cqring_fill_event(req, req->result, req->compl.cflags);
+ }
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
+
+ io_cqring_ev_posted(ctx);
+ for (i = 0; i < nr; i++) {
+ req = cs->reqs[i];
+
+ /* submission and completion refs */
+ if (refcount_sub_and_test(2, &req->refs))
+ io_req_free_batch(&rb, req, &ctx->submit_state);
+ }
+
+ io_req_free_batch_finish(ctx, &rb);
+ cs->nr = 0;
}
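io_submit_flush_completions() and the reworked req_batch amortise the expensive operations: all CQEs are filled under one completion_lock acquisition, the ring is committed and the eventfd poked once, the submission and completion references are dropped together with refcount_sub_and_test(2, ...), and the task and ctx reference puts are accumulated and released in bulk. A tiny sketch of that batched-put idea, assuming a single atomic counter in place of tctx->inflight (all names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int task_inflight;	/* stands in for tctx->inflight */

struct req_batch {
	int task_refs;			/* puts accumulated for one task */
};

static void batch_finish(struct req_batch *rb)
{
	if (rb->task_refs) {
		/* one atomic subtraction instead of one per request */
		atomic_fetch_sub(&task_inflight, rb->task_refs);
		rb->task_refs = 0;
	}
}

int main(void)
{
	struct req_batch rb = { 0 };

	atomic_store(&task_inflight, 100);
	for (int i = 0; i < 10; i++)
		rb.task_refs++;		/* per request: local counter only */
	batch_finish(&rb);
	printf("inflight now %d\n", atomic_load(&task_inflight));	/* 90 */
	return 0;
}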
/*
@@ -2352,15 +2172,10 @@ static void io_free_req_deferred(struct io_kiocb *req)
{
int ret;
- init_task_work(&req->task_work, io_put_req_deferred_cb);
+ req->task_work.func = io_put_req_deferred_cb;
ret = io_req_task_work_add(req);
- if (unlikely(ret)) {
- struct task_struct *tsk;
-
- tsk = io_wq_get_task(req->ctx->io_wq);
- task_work_add(tsk, &req->task_work, TWA_NONE);
- wake_up_process(tsk);
- }
+ if (unlikely(ret))
+ io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
}
static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
@@ -2369,22 +2184,6 @@ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
io_free_req_deferred(req);
}
-static struct io_wq_work *io_steal_work(struct io_kiocb *req)
-{
- struct io_kiocb *nxt;
-
- /*
- * A ref is owned by io-wq in which context we're. So, if that's the
- * last one, it's safe to steal next work. False negatives are Ok,
- * it just will be re-punted async in io_put_work()
- */
- if (refcount_read(&req->refs) != 1)
- return NULL;
-
- nxt = io_req_find_next(req);
- return nxt ? &nxt->work : NULL;
-}
-
static void io_double_put_req(struct io_kiocb *req)
{
/* drop both submit and complete references */
@@ -2443,17 +2242,6 @@ static inline bool io_run_task_work(void)
return false;
}
-static void io_iopoll_queue(struct list_head *again)
-{
- struct io_kiocb *req;
-
- do {
- req = list_first_entry(again, struct io_kiocb, inflight_entry);
- list_del(&req->inflight_entry);
- __io_complete_rw(req, -EAGAIN, 0, NULL);
- } while (!list_empty(again));
-}
-
/*
* Find and free completed poll iocbs
*/
@@ -2462,7 +2250,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
{
struct req_batch rb;
struct io_kiocb *req;
- LIST_HEAD(again);
/* order with ->result store in io_complete_rw_iopoll() */
smp_rmb();
@@ -2472,13 +2259,13 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
int cflags = 0;
req = list_first_entry(done, struct io_kiocb, inflight_entry);
+ list_del(&req->inflight_entry);
+
if (READ_ONCE(req->result) == -EAGAIN) {
- req->result = 0;
req->iopoll_completed = 0;
- list_move_tail(&req->inflight_entry, &again);
- continue;
+ if (io_rw_reissue(req))
+ continue;
}
- list_del(&req->inflight_entry);
if (req->flags & REQ_F_BUFFER_SELECTED)
cflags = io_put_rw_kbuf(req);
@@ -2487,15 +2274,12 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
(*nr_events)++;
if (refcount_dec_and_test(&req->refs))
- io_req_free_batch(&rb, req);
+ io_req_free_batch(&rb, req, &ctx->submit_state);
}
io_commit_cqring(ctx);
io_cqring_ev_posted_iopoll(ctx);
io_req_free_batch_finish(ctx, &rb);
-
- if (!list_empty(&again))
- io_iopoll_queue(&again);
}
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
@@ -2662,34 +2446,16 @@ static void kiocb_end_write(struct io_kiocb *req)
file_end_write(req->file);
}
-static void io_complete_rw_common(struct kiocb *kiocb, long res,
- struct io_comp_state *cs)
-{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
- int cflags = 0;
-
- if (kiocb->ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
-
- if (res != req->result)
- req_set_fail_links(req);
- if (req->flags & REQ_F_BUFFER_SELECTED)
- cflags = io_put_rw_kbuf(req);
- __io_req_complete(req, res, cflags, cs);
-}
-
#ifdef CONFIG_BLOCK
-static bool io_resubmit_prep(struct io_kiocb *req, int error)
+static bool io_resubmit_prep(struct io_kiocb *req)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
- ssize_t ret = -ECANCELED;
+ int rw, ret;
struct iov_iter iter;
- int rw;
- if (error) {
- ret = error;
- goto end_req;
- }
+ /* already prepared */
+ if (req->async_data)
+ return true;
switch (req->opcode) {
case IORING_OP_READV:
@@ -2705,69 +2471,95 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error)
default:
printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
req->opcode);
- goto end_req;
+ return false;
}
- if (!req->async_data) {
- ret = io_import_iovec(rw, req, &iovec, &iter, false);
- if (ret < 0)
- goto end_req;
- ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
- if (!ret)
- return true;
- kfree(iovec);
- } else {
- return true;
- }
-end_req:
- req_set_fail_links(req);
- return false;
+ ret = io_import_iovec(rw, req, &iovec, &iter, false);
+ if (ret < 0)
+ return false;
+ return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
}
-#endif
-static bool io_rw_reissue(struct io_kiocb *req, long res)
+static bool io_rw_should_reissue(struct io_kiocb *req)
{
-#ifdef CONFIG_BLOCK
umode_t mode = file_inode(req->file)->i_mode;
- int ret;
+ struct io_ring_ctx *ctx = req->ctx;
if (!S_ISBLK(mode) && !S_ISREG(mode))
return false;
- if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
+ if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
+ !(ctx->flags & IORING_SETUP_IOPOLL)))
+ return false;
+ /*
+ * If ref is dying, we might be running poll reap from the exit work.
+ * Don't attempt to reissue from that path, just let it fail with
+ * -EAGAIN.
+ */
+ if (percpu_ref_is_dying(&ctx->refs))
return false;
+ return true;
+}
+#endif
- lockdep_assert_held(&req->ctx->uring_lock);
+static bool io_rw_reissue(struct io_kiocb *req)
+{
+#ifdef CONFIG_BLOCK
+ if (!io_rw_should_reissue(req))
+ return false;
- ret = io_sq_thread_acquire_mm_files(req->ctx, req);
+ lockdep_assert_held(&req->ctx->uring_lock);
- if (io_resubmit_prep(req, ret)) {
+ if (io_resubmit_prep(req)) {
refcount_inc(&req->refs);
io_queue_async_work(req);
return true;
}
-
+ req_set_fail_links(req);
#endif
return false;
}
static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
- struct io_comp_state *cs)
+ unsigned int issue_flags)
{
- if (!io_rw_reissue(req, res))
- io_complete_rw_common(&req->rw.kiocb, res, cs);
+ int cflags = 0;
+
+ if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
+ return;
+ if (res != req->result)
+ req_set_fail_links(req);
+
+ if (req->rw.kiocb.ki_flags & IOCB_WRITE)
+ kiocb_end_write(req);
+ if (req->flags & REQ_F_BUFFER_SELECTED)
+ cflags = io_put_rw_kbuf(req);
+ __io_req_complete(req, issue_flags, res, cflags);
}
static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
- __io_complete_rw(req, res, res2, NULL);
+ __io_complete_rw(req, res, res2, 0);
}
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
+#ifdef CONFIG_BLOCK
+ /* Rewind iter, if we have one. iopoll path resubmits as usual */
+ if (res == -EAGAIN && io_rw_should_reissue(req)) {
+ struct io_async_rw *rw = req->async_data;
+
+ if (rw)
+ iov_iter_revert(&rw->iter,
+ req->result - iov_iter_count(&rw->iter));
+ else if (!io_resubmit_prep(req))
+ res = -EIO;
+ }
+#endif
+
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
@@ -2825,16 +2617,12 @@ static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
wake_up(&ctx->sq_data->wait);
}
-static inline void __io_state_file_put(struct io_submit_state *state)
-{
- fput_many(state->file, state->file_refs);
- state->file_refs = 0;
-}
-
static inline void io_state_file_put(struct io_submit_state *state)
{
- if (state->file_refs)
- __io_state_file_put(state);
+ if (state->file_refs) {
+ fput_many(state->file, state->file_refs);
+ state->file_refs = 0;
+ }
}
/*
@@ -2852,7 +2640,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
state->file_refs--;
return state->file;
}
- __io_state_file_put(state);
+ io_state_file_put(state);
}
state->file = fget_many(fd, state->ios_left);
if (unlikely(!state->file))
@@ -2910,16 +2698,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw.kiocb;
+ struct file *file = req->file;
unsigned ioprio;
int ret;
- if (S_ISREG(file_inode(req->file)->i_mode))
+ if (S_ISREG(file_inode(file)->i_mode))
req->flags |= REQ_F_ISREG;
kiocb->ki_pos = READ_ONCE(sqe->off);
- if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
+ if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
req->flags |= REQ_F_CUR_POS;
- kiocb->ki_pos = req->file->f_pos;
+ kiocb->ki_pos = file->f_pos;
}
kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
@@ -2927,6 +2716,10 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(ret))
return ret;
+ /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
+ if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
+ req->flags |= REQ_F_NOWAIT;
+
ioprio = READ_ONCE(sqe->ioprio);
if (ioprio) {
ret = ioprio_check_cap(ioprio);
@@ -2937,10 +2730,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
} else
kiocb->ki_ioprio = get_current_ioprio();
- /* don't allow async punt if RWF_NOWAIT was requested */
- if (kiocb->ki_flags & IOCB_NOWAIT)
- req->flags |= REQ_F_NOWAIT;
-
if (ctx->flags & IORING_SETUP_IOPOLL) {
if (!(kiocb->ki_flags & IOCB_DIRECT) ||
!kiocb->ki_filp->f_op->iopoll)
@@ -2983,7 +2772,7 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
}
static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
- struct io_comp_state *cs)
+ unsigned int issue_flags)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
struct io_async_rw *io = req->async_data;
@@ -2999,13 +2788,12 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = kiocb->ki_pos;
if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
- __io_complete_rw(req, ret, 0, cs);
+ __io_complete_rw(req, ret, 0, issue_flags);
else
io_rw_done(kiocb, ret);
}
-static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
- struct iov_iter *iter)
+static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
{
struct io_ring_ctx *ctx = req->ctx;
size_t len = req->rw.len;
@@ -3069,7 +2857,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
}
}
- return len;
+ return 0;
}
static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
@@ -3103,7 +2891,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
lockdep_assert_held(&req->ctx->uring_lock);
- head = idr_find(&req->ctx->io_buffer_idr, bgid);
+ head = xa_load(&req->ctx->io_buffers, bgid);
if (head) {
if (!list_empty(&head->list)) {
kbuf = list_last_entry(&head->list, struct io_buffer,
@@ -3111,7 +2899,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
list_del(&kbuf->list);
} else {
kbuf = head;
- idr_remove(&req->ctx->io_buffer_idr, bgid);
+ xa_erase(&req->ctx->io_buffers, bgid);
}
if (*len > kbuf->len)
*len = kbuf->len;
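The lookup above reflects the IDR-to-xarray conversion (see the xa_init_flags() calls in io_ring_ctx_alloc() earlier in this patch). The following fragment only sketches the xarray calls involved; it is not the literal io_uring code, and ctx, bgid and head are assumed from the surrounding functions:

/* ring setup (io_ring_ctx_alloc): IDs allocated starting at 1 */
xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);

/* provide-buffers path: publish a group head under its bgid;
 * xa_store() returns the old entry or an xa_err()-encoded error */
xa_err(xa_store(&ctx->io_buffers, bgid, head, GFP_KERNEL));

/* buffer-select path (the hunk above): plain pointer lookup */
head = xa_load(&ctx->io_buffers, bgid);

/* last buffer in the group consumed: drop the group */
xa_erase(&ctx->io_buffers, bgid);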
@@ -3210,16 +2998,14 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
return __io_iov_buffer_select(req, iov, needs_lock);
}
-static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
- struct iovec **iovec, struct iov_iter *iter,
- bool needs_lock)
+static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
+ struct iov_iter *iter, bool needs_lock)
{
void __user *buf = u64_to_user_ptr(req->rw.addr);
size_t sqe_len = req->rw.len;
+ u8 opcode = req->opcode;
ssize_t ret;
- u8 opcode;
- opcode = req->opcode;
if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
*iovec = NULL;
return io_import_fixed(req, rw, iter);
@@ -3244,10 +3030,8 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
if (req->flags & REQ_F_BUFFER_SELECT) {
ret = io_iov_buffer_select(req, *iovec, needs_lock);
- if (!ret) {
- ret = (*iovec)->iov_len;
- iov_iter_init(iter, rw, *iovec, 1, ret);
- }
+ if (!ret)
+ iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
*iovec = NULL;
return ret;
}
@@ -3365,8 +3149,10 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
if (!force && !io_op_defs[req->opcode].needs_async_data)
return 0;
if (!req->async_data) {
- if (__io_alloc_async_data(req))
+ if (__io_alloc_async_data(req)) {
+ kfree(iovec);
return -ENOMEM;
+ }
io_req_map_rw(req, iovec, fast_iov, iter);
}
@@ -3377,7 +3163,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
struct io_async_rw *iorw = req->async_data;
struct iovec *iov = iorw->fast_iov;
- ssize_t ret;
+ int ret;
ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
if (unlikely(ret < 0))
@@ -3392,19 +3178,9 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- ssize_t ret;
-
- ret = io_prep_rw(req, sqe);
- if (ret)
- return ret;
-
if (unlikely(!(req->file->f_mode & FMODE_READ)))
return -EBADF;
-
- /* either don't need iovec imported or already have it */
- if (!req->async_data)
- return 0;
- return io_rw_prep_async(req, READ);
+ return io_prep_rw(req, sqe);
}
/*
@@ -3423,7 +3199,6 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
struct wait_page_queue *wpq;
struct io_kiocb *req = wait->private;
struct wait_page_key *key = arg;
- int ret;
wpq = container_of(wait, struct wait_page_queue, wait);
@@ -3433,21 +3208,9 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
list_del_init(&wait->entry);
- init_task_work(&req->task_work, io_req_task_submit);
- percpu_ref_get(&req->ctx->refs);
-
/* submit ref gets dropped, acquire a new one */
refcount_inc(&req->refs);
- ret = io_req_task_work_add(req);
- if (unlikely(ret)) {
- struct task_struct *tsk;
-
- /* queue just for cancelation */
- init_task_work(&req->task_work, io_req_task_cancel);
- tsk = io_wq_get_task(req->ctx->io_wq);
- task_work_add(tsk, &req->task_work, TWA_NONE);
- wake_up_process(tsk);
- }
+ io_req_task_queue(req);
return 1;
}
@@ -3504,15 +3267,14 @@ static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
return -EINVAL;
}
-static int io_read(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
ssize_t io_size, ret, ret2;
- bool no_async;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
if (rw) {
iter = &rw->iter;
@@ -3524,7 +3286,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
}
io_size = iov_iter_count(iter);
req->result = io_size;
- ret = 0;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
@@ -3532,115 +3293,94 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
else
kiocb->ki_flags |= IOCB_NOWAIT;
-
/* If the file doesn't support async, just async punt */
- no_async = force_nonblock && !io_file_supports_async(req->file, READ);
- if (no_async)
- goto copy_iov;
+ if (force_nonblock && !io_file_supports_async(req->file, READ)) {
+ ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
+ return ret ?: -EAGAIN;
+ }
ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
- if (unlikely(ret))
- goto out_free;
+ if (unlikely(ret)) {
+ kfree(iovec);
+ return ret;
+ }
ret = io_iter_do_read(req, iter);
- if (!ret) {
- goto done;
- } else if (ret == -EIOCBQUEUED) {
- ret = 0;
+ if (ret == -EIOCBQUEUED) {
+ if (req->async_data)
+ iov_iter_revert(iter, io_size - iov_iter_count(iter));
goto out_free;
} else if (ret == -EAGAIN) {
/* IOPOLL retry should happen for io-wq threads */
if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
goto done;
- /* no retry on NONBLOCK marked file */
- if (req->file->f_flags & O_NONBLOCK)
+ /* no retry on NONBLOCK nor RWF_NOWAIT */
+ if (req->flags & REQ_F_NOWAIT)
goto done;
/* some cases will consume bytes even on error returns */
iov_iter_revert(iter, io_size - iov_iter_count(iter));
ret = 0;
- goto copy_iov;
- } else if (ret < 0) {
- /* make sure -ERESTARTSYS -> -EINTR is done */
+ } else if (ret <= 0 || ret == io_size || !force_nonblock ||
+ (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
+ /* read all, failed, already did sync or don't want to retry */
goto done;
}
- /* read it all, or we did blocking attempt. no retry. */
- if (!iov_iter_count(iter) || !force_nonblock ||
- (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
- goto done;
-
- io_size -= ret;
-copy_iov:
ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
- if (ret2) {
- ret = ret2;
- goto out_free;
- }
- if (no_async)
- return -EAGAIN;
- rw = req->async_data;
- /* it's copied and will be cleaned with ->io */
+ if (ret2)
+ return ret2;
+
iovec = NULL;
+ rw = req->async_data;
/* now use our persistent iterator, if we aren't already */
iter = &rw->iter;
-retry:
- rw->bytes_done += ret;
- /* if we can retry, do so with the callbacks armed */
- if (!io_rw_should_retry(req)) {
- kiocb->ki_flags &= ~IOCB_WAITQ;
- return -EAGAIN;
- }
- /*
- * Now retry read with the IOCB_WAITQ parts set in the iocb. If we
- * get -EIOCBQUEUED, then we'll get a notification when the desired
- * page gets unlocked. We can also get a partial read here, and if we
- * do, then just retry at the new offset.
- */
- ret = io_iter_do_read(req, iter);
- if (ret == -EIOCBQUEUED) {
- ret = 0;
- goto out_free;
- } else if (ret > 0 && ret < io_size) {
+ do {
+ io_size -= ret;
+ rw->bytes_done += ret;
+ /* if we can retry, do so with the callbacks armed */
+ if (!io_rw_should_retry(req)) {
+ kiocb->ki_flags &= ~IOCB_WAITQ;
+ return -EAGAIN;
+ }
+
+ /*
+ * Now retry read with the IOCB_WAITQ parts set in the iocb. If
+ * we get -EIOCBQUEUED, then we'll get a notification when the
+ * desired page gets unlocked. We can also get a partial read
+ * here, and if we do, then just retry at the new offset.
+ */
+ ret = io_iter_do_read(req, iter);
+ if (ret == -EIOCBQUEUED)
+ return 0;
/* we got some bytes, but not all. retry. */
- goto retry;
- }
+ kiocb->ki_flags &= ~IOCB_WAITQ;
+ } while (ret > 0 && ret < io_size);
done:
- kiocb_done(kiocb, ret, cs);
- ret = 0;
+ kiocb_done(kiocb, ret, issue_flags);
out_free:
- /* it's reportedly faster than delegating the null check to kfree() */
+ /* it's faster to check here than to delegate to kfree */
if (iovec)
kfree(iovec);
- return ret;
+ return 0;
}
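Much of the rework in io_read() is bookkeeping for retries: after -EIOCBQUEUED or a short read, the iterator is rewound by io_size - iov_iter_count(iter), i.e. by however many bytes were already consumed, so the async or IOCB_WAITQ retry resumes at the right offset. The same bookkeeping in plain user-space terms, advancing an iovec array past bytes already transferred by a short readv()/writev() (the helper name and sizes are illustrative):

#include <sys/uio.h>
#include <stddef.h>
#include <stdio.h>

/*
 * After a short readv()/writev(), advance the iovec array past the bytes
 * already transferred so the next call resumes at the right offset. This is
 * the user-space equivalent of the iov_iter advance/revert bookkeeping in
 * the hunk above.
 */
static void iov_advance(struct iovec **iov, int *iovcnt, size_t done)
{
	while (done && *iovcnt) {
		size_t len = (*iov)->iov_len;

		if (done < len) {
			(*iov)->iov_base = (char *)(*iov)->iov_base + done;
			(*iov)->iov_len = len - done;
			return;
		}
		done -= len;
		(*iov)++;
		(*iovcnt)--;
	}
}

int main(void)
{
	char a[4], b[4];
	struct iovec vecs[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct iovec *iov = vecs;
	int iovcnt = 2;

	iov_advance(&iov, &iovcnt, 6);	/* pretend a short read returned 6 */
	printf("resume with %d vec(s), first len %zu\n", iovcnt, iov->iov_len);
	return 0;
}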
static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- ssize_t ret;
-
- ret = io_prep_rw(req, sqe);
- if (ret)
- return ret;
-
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
return -EBADF;
-
- /* either don't need iovec imported or already have it */
- if (!req->async_data)
- return 0;
- return io_rw_prep_async(req, WRITE);
+ return io_prep_rw(req, sqe);
}
-static int io_write(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter __iter, *iter = &__iter;
struct io_async_rw *rw = req->async_data;
ssize_t ret, ret2, io_size;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
if (rw) {
iter = &rw->iter;
@@ -3699,22 +3439,23 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
*/
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
ret2 = -EAGAIN;
- /* no retry on NONBLOCK marked file */
- if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
+ /* no retry on NONBLOCK nor RWF_NOWAIT */
+ if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
goto done;
+ if (ret2 == -EIOCBQUEUED && req->async_data)
+ iov_iter_revert(iter, io_size - iov_iter_count(iter));
if (!force_nonblock || ret2 != -EAGAIN) {
/* IOPOLL retry should happen for io-wq threads */
if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
goto copy_iov;
done:
- kiocb_done(kiocb, ret2, cs);
+ kiocb_done(kiocb, ret2, issue_flags);
} else {
copy_iov:
/* some cases will consume bytes even on error returns */
iov_iter_revert(iter, io_size - iov_iter_count(iter));
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
- if (!ret)
- return -EAGAIN;
+ return ret ?: -EAGAIN;
}
out_free:
/* it's reportedly faster than delegating the null check to kfree() */
@@ -3752,12 +3493,12 @@ static int io_renameat_prep(struct io_kiocb *req,
return 0;
}
-static int io_renameat(struct io_kiocb *req, bool force_nonblock)
+static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_rename *ren = &req->rename;
int ret;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
@@ -3794,12 +3535,12 @@ static int io_unlinkat_prep(struct io_kiocb *req,
return 0;
}
-static int io_unlinkat(struct io_kiocb *req, bool force_nonblock)
+static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_unlink *un = &req->unlink;
int ret;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
if (un->flags & AT_REMOVEDIR)
@@ -3831,13 +3572,13 @@ static int io_shutdown_prep(struct io_kiocb *req,
#endif
}
-static int io_shutdown(struct io_kiocb *req, bool force_nonblock)
+static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_NET)
struct socket *sock;
int ret;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
sock = sock_from_file(req->file);
@@ -3881,7 +3622,6 @@ static int __io_splice_prep(struct io_kiocb *req,
* Splice operation will be punted async, and here we need to
* modify io_wq_work.flags, so initialize io_wq_work first.
*/
- io_req_init_async(req);
req->work.flags |= IO_WQ_WORK_UNBOUND;
}
@@ -3896,7 +3636,7 @@ static int io_tee_prep(struct io_kiocb *req,
return __io_splice_prep(req, sqe);
}
-static int io_tee(struct io_kiocb *req, bool force_nonblock)
+static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_splice *sp = &req->splice;
struct file *in = sp->file_in;
@@ -3904,7 +3644,7 @@ static int io_tee(struct io_kiocb *req, bool force_nonblock)
unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
long ret = 0;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
if (sp->len)
ret = do_tee(in, out, sp->len, flags);
@@ -3927,7 +3667,7 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return __io_splice_prep(req, sqe);
}
-static int io_splice(struct io_kiocb *req, bool force_nonblock)
+static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_splice *sp = &req->splice;
struct file *in = sp->file_in;
@@ -3936,7 +3676,7 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
loff_t *poff_in, *poff_out;
long ret = 0;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
@@ -3957,18 +3697,18 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
/*
* IORING_OP_NOP just posts a completion event, nothing else.
*/
-static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
+static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- __io_req_complete(req, 0, 0, cs);
+ __io_req_complete(req, issue_flags, 0, 0);
return 0;
}
-static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -3989,13 +3729,13 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_fsync(struct io_kiocb *req, bool force_nonblock)
+static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
loff_t end = req->sync.off + req->sync.len;
int ret;
/* fsync always requires a blocking context */
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ret = vfs_fsync_range(req->file, req->sync.off,
@@ -4021,12 +3761,12 @@ static int io_fallocate_prep(struct io_kiocb *req,
return 0;
}
-static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
+static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
int ret;
/* fallocate always requiring blocking context */
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
req->sync.len);
@@ -4059,7 +3799,6 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
return ret;
}
req->open.nofile = rlimit(RLIMIT_NOFILE);
- req->open.ignore_nonblock = false;
req->flags |= REQ_F_NEED_CLEANUP;
return 0;
}
@@ -4097,43 +3836,53 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return __io_openat_prep(req, sqe);
}
-static int io_openat2(struct io_kiocb *req, bool force_nonblock)
+static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
struct open_flags op;
struct file *file;
+ bool nonblock_set;
+ bool resolve_nonblock;
int ret;
- if (force_nonblock && !req->open.ignore_nonblock)
- return -EAGAIN;
-
ret = build_open_flags(&req->open.how, &op);
if (ret)
goto err;
+ nonblock_set = op.open_flag & O_NONBLOCK;
+ resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
+ if (issue_flags & IO_URING_F_NONBLOCK) {
+ /*
+ * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
+ * it'll always return -EAGAIN
+ */
+ if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
+ return -EAGAIN;
+ op.lookup_flags |= LOOKUP_CACHED;
+ op.open_flag |= O_NONBLOCK;
+ }
ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
if (ret < 0)
goto err;
file = do_filp_open(req->open.dfd, req->open.filename, &op);
+ /* only retry if RESOLVE_CACHED wasn't already set by application */
+ if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
+ file == ERR_PTR(-EAGAIN)) {
+ /*
+ * We could hang on to this 'fd', but seems like marginal
+ * gain for something that is now known to be a slower path.
+ * So just put it, and we'll get a new one when we retry.
+ */
+ put_unused_fd(ret);
+ return -EAGAIN;
+ }
+
if (IS_ERR(file)) {
put_unused_fd(ret);
ret = PTR_ERR(file);
- /*
- * A work-around to ensure that /proc/self works that way
- * that it should - if we get -EOPNOTSUPP back, then assume
- * that proc_self_get_link() failed us because we're in async
- * context. We should be safe to retry this from the task
- * itself with force_nonblock == false set, as it should not
- * block on lookup. Would be nice to know this upfront and
- * avoid the async dance, but doesn't seem feasible.
- */
- if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) {
- req->open.ignore_nonblock = true;
- refcount_inc(&req->refs);
- io_req_task_queue(req);
- return 0;
- }
} else {
+ if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
+ file->f_flags &= ~O_NONBLOCK;
fsnotify_open(file);
fd_install(ret, file);
}
@@ -4146,9 +3895,9 @@ err:
return 0;
}
-static int io_openat(struct io_kiocb *req, bool force_nonblock)
+static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
- return io_openat2(req, force_nonblock);
+ return io_openat2(req, issue_flags);
}
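
The RESOLVE_CACHED/LOOKUP_CACHED dance above has a userspace analogue; a hedged sketch follows (it assumes RESOLVE_CACHED from Linux 5.12+ headers and a libc that defines SYS_openat2, and uses the raw syscall since glibc has no openat2 wrapper):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>

static int open_cached_first(const char *path)
{
	struct open_how how = { .flags = O_RDONLY, .resolve = RESOLVE_CACHED };
	long fd;

	/* fast path: only succeeds if every component is already in the dcache */
	fd = syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));
	if (fd >= 0 || errno != EAGAIN)
		return fd;

	/* slow path: ordinary (possibly blocking) lookup */
	how.resolve &= ~RESOLVE_CACHED;
	return syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));
}
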
static int io_remove_buffers_prep(struct io_kiocb *req,
@@ -4191,25 +3940,25 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
}
i++;
kfree(buf);
- idr_remove(&ctx->io_buffer_idr, bgid);
+ xa_erase(&ctx->io_buffers, bgid);
return i;
}
-static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_provide_buf *p = &req->pbuf;
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer *head;
int ret = 0;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
io_ring_submit_lock(ctx, !force_nonblock);
lockdep_assert_held(&ctx->uring_lock);
ret = -ENOENT;
- head = idr_find(&ctx->io_buffer_idr, p->bgid);
+ head = xa_load(&ctx->io_buffers, p->bgid);
if (head)
ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
if (ret < 0)
@@ -4217,11 +3966,11 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
/* need to hold the lock to complete IOPOLL requests */
if (ctx->flags & IORING_SETUP_IOPOLL) {
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
io_ring_submit_unlock(ctx, !force_nonblock);
} else {
io_ring_submit_unlock(ctx, !force_nonblock);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
}
return 0;
}
@@ -4280,43 +4029,36 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
return i ? i : -ENOMEM;
}
-static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_provide_buf *p = &req->pbuf;
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer *head, *list;
int ret = 0;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
io_ring_submit_lock(ctx, !force_nonblock);
lockdep_assert_held(&ctx->uring_lock);
- list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
+ list = head = xa_load(&ctx->io_buffers, p->bgid);
ret = io_add_buffers(p, &head);
- if (ret < 0)
- goto out;
-
- if (!list) {
- ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
- GFP_KERNEL);
- if (ret < 0) {
+ if (ret >= 0 && !list) {
+ ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
+ if (ret < 0)
__io_remove_buffers(ctx, head, p->bgid, -1U);
- goto out;
- }
}
-out:
if (ret < 0)
req_set_fail_links(req);
/* need to hold the lock to complete IOPOLL requests */
if (ctx->flags & IORING_SETUP_IOPOLL) {
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
io_ring_submit_unlock(ctx, !force_nonblock);
} else {
io_ring_submit_unlock(ctx, !force_nonblock);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
}
return 0;
}
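
The buffer-group registry switches from an IDR to an XArray here; below is a hedged kernel-style sketch of the same three-call pattern (the names buf_groups/add_group/find_group/remove_group are illustrative, not io_uring's):

#include <linux/xarray.h>
#include <linux/slab.h>

static DEFINE_XARRAY(buf_groups);		/* keyed by bgid */

static int add_group(u32 bgid, void *group)
{
	/* fails with -EBUSY if the bgid is already present */
	return xa_insert(&buf_groups, bgid, group, GFP_KERNEL);
}

static void *find_group(u32 bgid)
{
	return xa_load(&buf_groups, bgid);	/* NULL if absent */
}

static void remove_group(u32 bgid)
{
	kfree(xa_erase(&buf_groups, bgid));	/* xa_erase() returns the old entry */
}
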
@@ -4348,12 +4090,12 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
#endif
}
-static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_EPOLL)
struct io_epoll *ie = &req->epoll;
int ret;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
if (force_nonblock && ret == -EAGAIN)
@@ -4361,7 +4103,7 @@ static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
if (ret < 0)
req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
#else
return -EOPNOTSUPP;
@@ -4385,13 +4127,13 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#endif
}
-static int io_madvise(struct io_kiocb *req, bool force_nonblock)
+static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
struct io_madvise *ma = &req->madvise;
int ret;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
@@ -4417,12 +4159,12 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
+static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_fadvise *fa = &req->fadvise;
int ret;
- if (force_nonblock) {
+ if (issue_flags & IO_URING_F_NONBLOCK) {
switch (fa->advice) {
case POSIX_FADV_NORMAL:
case POSIX_FADV_RANDOM:
@@ -4458,12 +4200,12 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_statx(struct io_kiocb *req, bool force_nonblock)
+static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_statx *ctx = &req->statx;
int ret;
- if (force_nonblock) {
+ if (issue_flags & IO_URING_F_NONBLOCK) {
/* only need file table for an actual valid fd */
if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
req->flags |= REQ_F_NO_FILE_TABLE;
@@ -4481,13 +4223,6 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock)
static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- /*
- * If we queue this for async, it must not be cancellable. That would
- * leave the 'file' in an undeterminate state, and here need to modify
- * io_wq_work.flags, so initialize io_wq_work firstly.
- */
- io_req_init_async(req);
-
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
@@ -4497,54 +4232,66 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EBADF;
req->close.fd = READ_ONCE(sqe->fd);
- if ((req->file && req->file->f_op == &io_uring_fops))
- return -EBADF;
-
- req->close.put_file = NULL;
return 0;
}
-static int io_close(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
+ struct files_struct *files = current->files;
struct io_close *close = &req->close;
+ struct fdtable *fdt;
+ struct file *file;
int ret;
- /* might be already done during nonblock submission */
- if (!close->put_file) {
- ret = close_fd_get_file(close->fd, &close->put_file);
- if (ret < 0)
- return (ret == -ENOENT) ? -EBADF : ret;
+ file = NULL;
+ ret = -EBADF;
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ if (close->fd >= fdt->max_fds) {
+ spin_unlock(&files->file_lock);
+ goto err;
+ }
+ file = fdt->fd[close->fd];
+ if (!file) {
+ spin_unlock(&files->file_lock);
+ goto err;
+ }
+
+ if (file->f_op == &io_uring_fops) {
+ spin_unlock(&files->file_lock);
+ file = NULL;
+ goto err;
}
/* if the file has a flush method, be safe and punt to async */
- if (close->put_file->f_op->flush && force_nonblock) {
- /* not safe to cancel at this point */
- req->work.flags |= IO_WQ_WORK_NO_CANCEL;
- /* was never set, but play safe */
- req->flags &= ~REQ_F_NOWAIT;
- /* avoid grabbing files - we don't need the files */
- req->flags |= REQ_F_NO_FILE_TABLE;
+ if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
+ spin_unlock(&files->file_lock);
return -EAGAIN;
}
+ ret = __close_fd_get_file(close->fd, &file);
+ spin_unlock(&files->file_lock);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ ret = -EBADF;
+ goto err;
+ }
+
/* No ->flush() or already async, safely close from here */
- ret = filp_close(close->put_file, req->work.identity->files);
+ ret = filp_close(file, current->files);
+err:
if (ret < 0)
req_set_fail_links(req);
- fput(close->put_file);
- close->put_file = NULL;
- __io_req_complete(req, ret, 0, cs);
+ if (file)
+ fput(file);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
-static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
- if (!req->file)
- return -EBADF;
-
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
@@ -4556,12 +4303,12 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
+static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
int ret;
/* sync_file_range always requires a blocking context */
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ret = sync_file_range(req->file, req->sync.off, req->sync.len,
@@ -4581,30 +4328,44 @@ static int io_setup_async_msg(struct io_kiocb *req,
if (async_msg)
return -EAGAIN;
if (io_alloc_async_data(req)) {
- if (kmsg->iov != kmsg->fast_iov)
- kfree(kmsg->iov);
+ kfree(kmsg->free_iov);
return -ENOMEM;
}
async_msg = req->async_data;
req->flags |= REQ_F_NEED_CLEANUP;
memcpy(async_msg, kmsg, sizeof(*kmsg));
+ async_msg->msg.msg_name = &async_msg->addr;
+ /* if we're using fast_iov, set it to the new one */
+ if (!async_msg->free_iov)
+ async_msg->msg.msg_iter.iov = async_msg->fast_iov;
+
return -EAGAIN;
}
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
- iomsg->iov = iomsg->fast_iov;
iomsg->msg.msg_name = &iomsg->addr;
+ iomsg->free_iov = iomsg->fast_iov;
return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
- req->sr_msg.msg_flags, &iomsg->iov);
+ req->sr_msg.msg_flags, &iomsg->free_iov);
+}
+
+static int io_sendmsg_prep_async(struct io_kiocb *req)
+{
+ int ret;
+
+ if (!io_op_defs[req->opcode].needs_async_data)
+ return 0;
+ ret = io_sendmsg_copy_hdr(req, req->async_data);
+ if (!ret)
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return ret;
}
static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_async_msghdr *async_msg = req->async_data;
struct io_sr_msg *sr = &req->sr_msg;
- int ret;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -4617,70 +4378,62 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (req->ctx->compat)
sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
-
- if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
- return 0;
- ret = io_sendmsg_copy_hdr(req, async_msg);
- if (!ret)
- req->flags |= REQ_F_NEED_CLEANUP;
- return ret;
+ return 0;
}
-static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
unsigned flags;
+ int min_ret = 0;
int ret;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
- if (req->async_data) {
- kmsg = req->async_data;
- kmsg->msg.msg_name = &kmsg->addr;
- /* if iov is set, it's allocated already */
- if (!kmsg->iov)
- kmsg->iov = kmsg->fast_iov;
- kmsg->msg.msg_iter.iov = kmsg->iov;
- } else {
+ kmsg = req->async_data;
+ if (!kmsg) {
ret = io_sendmsg_copy_hdr(req, &iomsg);
if (ret)
return ret;
kmsg = &iomsg;
}
- flags = req->sr_msg.msg_flags;
+ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
if (flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
+ else if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
+ if (flags & MSG_WAITALL)
+ min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
- if (force_nonblock && ret == -EAGAIN)
+ if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
return io_setup_async_msg(req, kmsg);
if (ret == -ERESTARTSYS)
ret = -EINTR;
- if (kmsg->iov != kmsg->fast_iov)
- kfree(kmsg->iov);
+ /* fast path, check for non-NULL to avoid function call */
+ if (kmsg->free_iov)
+ kfree(kmsg->free_iov);
req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < 0)
+ if (ret < min_ret)
req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
-static int io_send(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = &req->sr_msg;
struct msghdr msg;
struct iovec iov;
struct socket *sock;
unsigned flags;
+ int min_ret = 0;
int ret;
sock = sock_from_file(req->file);
@@ -4696,22 +4449,25 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
msg.msg_controllen = 0;
msg.msg_namelen = 0;
- flags = req->sr_msg.msg_flags;
+ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
if (flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
+ else if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
+ if (flags & MSG_WAITALL)
+ min_ret = iov_iter_count(&msg.msg_iter);
+
msg.msg_flags = flags;
ret = sock_sendmsg(sock, &msg);
- if (force_nonblock && ret == -EAGAIN)
+ if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
return -EAGAIN;
if (ret == -ERESTARTSYS)
ret = -EINTR;
- if (ret < 0)
+ if (ret < min_ret)
req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
@@ -4731,15 +4487,14 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
if (req->flags & REQ_F_BUFFER_SELECT) {
if (iov_len > 1)
return -EINVAL;
- if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
+ if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
return -EFAULT;
- sr->len = iomsg->iov[0].iov_len;
- iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
- sr->len);
- iomsg->iov = NULL;
+ sr->len = iomsg->fast_iov[0].iov_len;
+ iomsg->free_iov = NULL;
} else {
+ iomsg->free_iov = iomsg->fast_iov;
ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
- &iomsg->iov, &iomsg->msg.msg_iter,
+ &iomsg->free_iov, &iomsg->msg.msg_iter,
false);
if (ret > 0)
ret = 0;
@@ -4778,11 +4533,11 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
if (clen < 0)
return -EINVAL;
sr->len = clen;
- iomsg->iov[0].iov_len = clen;
- iomsg->iov = NULL;
+ iomsg->free_iov = NULL;
} else {
+ iomsg->free_iov = iomsg->fast_iov;
ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
- UIO_FASTIOV, &iomsg->iov,
+ UIO_FASTIOV, &iomsg->free_iov,
&iomsg->msg.msg_iter, true);
if (ret < 0)
return ret;
@@ -4796,7 +4551,6 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
iomsg->msg.msg_name = &iomsg->addr;
- iomsg->iov = iomsg->fast_iov;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
@@ -4826,13 +4580,22 @@ static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
return io_put_kbuf(req, req->sr_msg.kbuf);
}
-static int io_recvmsg_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static int io_recvmsg_prep_async(struct io_kiocb *req)
{
- struct io_async_msghdr *async_msg = req->async_data;
- struct io_sr_msg *sr = &req->sr_msg;
int ret;
+ if (!io_op_defs[req->opcode].needs_async_data)
+ return 0;
+ ret = io_recvmsg_copy_hdr(req, req->async_data);
+ if (!ret)
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return ret;
+}
+
+static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_sr_msg *sr = &req->sr_msg;
+
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -4845,36 +4608,25 @@ static int io_recvmsg_prep(struct io_kiocb *req,
if (req->ctx->compat)
sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
-
- if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
- return 0;
- ret = io_recvmsg_copy_hdr(req, async_msg);
- if (!ret)
- req->flags |= REQ_F_NEED_CLEANUP;
- return ret;
+ return 0;
}
-static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
struct io_buffer *kbuf;
unsigned flags;
+ int min_ret = 0;
int ret, cflags = 0;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
- if (req->async_data) {
- kmsg = req->async_data;
- kmsg->msg.msg_name = &kmsg->addr;
- /* if iov is set, it's allocated already */
- if (!kmsg->iov)
- kmsg->iov = kmsg->fast_iov;
- kmsg->msg.msg_iter.iov = kmsg->iov;
- } else {
+ kmsg = req->async_data;
+ if (!kmsg) {
ret = io_recvmsg_copy_hdr(req, &iomsg);
if (ret)
return ret;
@@ -4886,16 +4638,20 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
- iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
+ kmsg->fast_iov[0].iov_len = req->sr_msg.len;
+ iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
1, req->sr_msg.len);
}
- flags = req->sr_msg.msg_flags;
+ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
if (flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
else if (force_nonblock)
flags |= MSG_DONTWAIT;
+ if (flags & MSG_WAITALL)
+ min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
kmsg->uaddr, flags);
if (force_nonblock && ret == -EAGAIN)
@@ -4905,17 +4661,17 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
if (req->flags & REQ_F_BUFFER_SELECTED)
cflags = io_put_recv_kbuf(req);
- if (kmsg->iov != kmsg->fast_iov)
- kfree(kmsg->iov);
+ /* fast path, check for non-NULL to avoid function call */
+ if (kmsg->free_iov)
+ kfree(kmsg->free_iov);
req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < 0)
+ if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
req_set_fail_links(req);
- __io_req_complete(req, ret, cflags, cs);
+ __io_req_complete(req, issue_flags, ret, cflags);
return 0;
}
-static int io_recv(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_buffer *kbuf;
struct io_sr_msg *sr = &req->sr_msg;
@@ -4924,7 +4680,9 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
struct socket *sock;
struct iovec iov;
unsigned flags;
+ int min_ret = 0;
int ret, cflags = 0;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
sock = sock_from_file(req->file);
if (unlikely(!sock))
@@ -4948,12 +4706,15 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
msg.msg_iocb = NULL;
msg.msg_flags = 0;
- flags = req->sr_msg.msg_flags;
+ flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
if (flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
else if (force_nonblock)
flags |= MSG_DONTWAIT;
+ if (flags & MSG_WAITALL)
+ min_ret = iov_iter_count(&msg.msg_iter);
+
ret = sock_recvmsg(sock, &msg, flags);
if (force_nonblock && ret == -EAGAIN)
return -EAGAIN;
@@ -4962,9 +4723,9 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
out_free:
if (req->flags & REQ_F_BUFFER_SELECTED)
cflags = io_put_recv_kbuf(req);
- if (ret < 0)
+ if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
req_set_fail_links(req);
- __io_req_complete(req, ret, cflags, cs);
+ __io_req_complete(req, issue_flags, ret, cflags);
return 0;
}
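
The new MSG_WAITALL handling in io_send/io_sendmsg/io_recv/io_recvmsg boils down to the accounting below; a hedged userspace-style sketch, where 'requested' stands in for iov_iter_count() at submission time:

#include <stdbool.h>
#include <sys/socket.h>
#include <sys/types.h>

static bool waitall_result_failed(int flags, ssize_t ret, size_t requested,
				  int out_msg_flags)
{
	ssize_t min_ret = 0;

	if (flags & MSG_WAITALL)
		min_ret = requested;	/* a short transfer now counts as failure */

	if (ret < min_ret)
		return true;		/* also covers plain error returns (ret < 0) */

	/* truncated data or control data fails a MSG_WAITALL receive, too */
	return (flags & MSG_WAITALL) &&
	       (out_msg_flags & (MSG_TRUNC | MSG_CTRUNC));
}
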
@@ -4984,10 +4745,10 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int io_accept(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_accept *accept = &req->accept;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
int ret;
@@ -5004,14 +4765,21 @@ static int io_accept(struct io_kiocb *req, bool force_nonblock,
ret = -EINTR;
req_set_fail_links(req);
}
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
+static int io_connect_prep_async(struct io_kiocb *req)
+{
+ struct io_async_connect *io = req->async_data;
+ struct io_connect *conn = &req->connect;
+
+ return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
+}
+
static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_connect *conn = &req->connect;
- struct io_async_connect *io = req->async_data;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -5020,20 +4788,15 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
conn->addr_len = READ_ONCE(sqe->addr2);
-
- if (!io)
- return 0;
-
- return move_addr_to_kernel(conn->addr, conn->addr_len,
- &io->address);
+ return 0;
}
-static int io_connect(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_connect __io, *io;
unsigned file_flags;
int ret;
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
if (req->async_data) {
io = req->async_data;
@@ -5066,66 +4829,36 @@ static int io_connect(struct io_kiocb *req, bool force_nonblock,
out:
if (ret < 0)
req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
#else /* !CONFIG_NET */
-static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- return -EOPNOTSUPP;
-}
-
-static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
-{
- return -EOPNOTSUPP;
-}
-
-static int io_send(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
-{
- return -EOPNOTSUPP;
-}
-
-static int io_recvmsg_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
-{
- return -EOPNOTSUPP;
-}
-
-static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
-{
- return -EOPNOTSUPP;
-}
-
-static int io_recv(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
-{
- return -EOPNOTSUPP;
-}
-
-static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- return -EOPNOTSUPP;
-}
-
-static int io_accept(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
-{
- return -EOPNOTSUPP;
-}
-
-static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- return -EOPNOTSUPP;
-}
-
-static int io_connect(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
-{
- return -EOPNOTSUPP;
-}
+#define IO_NETOP_FN(op) \
+static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
+{ \
+ return -EOPNOTSUPP; \
+}
+
+#define IO_NETOP_PREP(op) \
+IO_NETOP_FN(op) \
+static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
+{ \
+ return -EOPNOTSUPP; \
+} \
+
+#define IO_NETOP_PREP_ASYNC(op) \
+IO_NETOP_PREP(op) \
+static int io_##op##_prep_async(struct io_kiocb *req) \
+{ \
+ return -EOPNOTSUPP; \
+}
+
+IO_NETOP_PREP_ASYNC(sendmsg);
+IO_NETOP_PREP_ASYNC(recvmsg);
+IO_NETOP_PREP_ASYNC(connect);
+IO_NETOP_PREP(accept);
+IO_NETOP_FN(send);
+IO_NETOP_FN(recv);
#endif /* CONFIG_NET */
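
For reference, expanding IO_NETOP_PREP_ASYNC(sendmsg) with the macros above yields three -EOPNOTSUPP stubs, so a !CONFIG_NET build keeps the same symbol names as the real implementations:

static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}

static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

static int io_sendmsg_prep_async(struct io_kiocb *req)
{
	return -EOPNOTSUPP;
}
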
struct io_poll_table {
@@ -5148,7 +4881,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
list_del_init(&poll->wait.entry);
req->result = mask;
- init_task_work(&req->task_work, func);
+ req->task_work.func = func;
percpu_ref_get(&req->ctx->refs);
/*
@@ -5159,12 +4892,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
*/
ret = io_req_task_work_add(req);
if (unlikely(ret)) {
- struct task_struct *tsk;
-
WRITE_ONCE(poll->canceled, true);
- tsk = io_wq_get_task(req->ctx->io_wq);
- task_work_add(tsk, &req->task_work, TWA_NONE);
- wake_up_process(tsk);
+ io_req_task_work_add_fallback(req, func);
}
return 1;
}
@@ -5316,6 +5045,9 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
pt->error = -EINVAL;
return;
}
+ /* double add on the same waitqueue head, ignore */
+ if (poll->head == head)
+ return;
poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
if (!poll) {
pt->error = -ENOMEM;
@@ -5607,7 +5339,7 @@ static int io_poll_remove_prep(struct io_kiocb *req,
* Find a running poll command that matches one specified in sqe->addr,
* and remove it if found.
*/
-static int io_poll_remove(struct io_kiocb *req)
+static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
@@ -5658,7 +5390,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
return 0;
}
-static int io_poll_add(struct io_kiocb *req)
+static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx;
@@ -5789,24 +5521,27 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
return 0;
}
+static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
+{
+ return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
+ : HRTIMER_MODE_REL;
+}
+
/*
* Remove or update an existing timeout command
*/
-static int io_timeout_remove(struct io_kiocb *req)
+static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_timeout_rem *tr = &req->timeout_rem;
struct io_ring_ctx *ctx = req->ctx;
int ret;
spin_lock_irq(&ctx->completion_lock);
- if (req->timeout_rem.flags & IORING_TIMEOUT_UPDATE) {
- enum hrtimer_mode mode = (tr->flags & IORING_TIMEOUT_ABS)
- ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
-
- ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
- } else {
+ if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
ret = io_timeout_cancel(ctx, tr->addr);
- }
+ else
+ ret = io_timeout_update(ctx, tr->addr, &tr->ts,
+ io_translate_timeout_mode(tr->flags));
io_cqring_fill_event(req, ret);
io_commit_cqring(ctx);
@@ -5846,16 +5581,13 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
- if (flags & IORING_TIMEOUT_ABS)
- data->mode = HRTIMER_MODE_ABS;
- else
- data->mode = HRTIMER_MODE_REL;
-
+ data->mode = io_translate_timeout_mode(flags);
hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
+ io_req_track_inflight(req);
return 0;
}
-static int io_timeout(struct io_kiocb *req)
+static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_timeout_data *data = req->async_data;
@@ -5905,19 +5637,30 @@ add:
return 0;
}
+struct io_cancel_data {
+ struct io_ring_ctx *ctx;
+ u64 user_data;
+};
+
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_cancel_data *cd = data;
- return req->user_data == (unsigned long) data;
+ return req->ctx == cd->ctx && req->user_data == cd->user_data;
}
-static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
+static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
+ struct io_ring_ctx *ctx)
{
+ struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
enum io_wq_cancel cancel_ret;
int ret = 0;
- cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
+ if (!tctx || !tctx->io_wq)
+ return -ENOENT;
+
+ cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
switch (cancel_ret) {
case IO_WQ_CANCEL_OK:
ret = 0;
@@ -5940,7 +5683,7 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
unsigned long flags;
int ret;
- ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
+ ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
if (ret != -ENOENT) {
spin_lock_irqsave(&ctx->completion_lock, flags);
goto done;
@@ -5978,15 +5721,54 @@ static int io_async_cancel_prep(struct io_kiocb *req,
return 0;
}
-static int io_async_cancel(struct io_kiocb *req)
+static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
+ u64 sqe_addr = req->cancel.addr;
+ struct io_tctx_node *node;
+ int ret;
+
+ /* tasks should wait for their io-wq threads, so safe w/o sync */
+ ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
+ spin_lock_irq(&ctx->completion_lock);
+ if (ret != -ENOENT)
+ goto done;
+ ret = io_timeout_cancel(ctx, sqe_addr);
+ if (ret != -ENOENT)
+ goto done;
+ ret = io_poll_cancel(ctx, sqe_addr);
+ if (ret != -ENOENT)
+ goto done;
+ spin_unlock_irq(&ctx->completion_lock);
+
+ /* slow path, try all io-wq's */
+ io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+ ret = -ENOENT;
+ list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+ struct io_uring_task *tctx = node->task->io_uring;
+
+ if (!tctx || !tctx->io_wq)
+ continue;
+ ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
+ if (ret != -ENOENT)
+ break;
+ }
+ io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
- io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
+ spin_lock_irq(&ctx->completion_lock);
+done:
+ io_cqring_fill_event(req, ret);
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_put_req(req);
return 0;
}
-static int io_files_update_prep(struct io_kiocb *req,
+static int io_rsrc_update_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
@@ -5996,34 +5778,33 @@ static int io_files_update_prep(struct io_kiocb *req,
if (sqe->ioprio || sqe->rw_flags)
return -EINVAL;
- req->files_update.offset = READ_ONCE(sqe->off);
- req->files_update.nr_args = READ_ONCE(sqe->len);
- if (!req->files_update.nr_args)
+ req->rsrc_update.offset = READ_ONCE(sqe->off);
+ req->rsrc_update.nr_args = READ_ONCE(sqe->len);
+ if (!req->rsrc_update.nr_args)
return -EINVAL;
- req->files_update.arg = READ_ONCE(sqe->addr);
+ req->rsrc_update.arg = READ_ONCE(sqe->addr);
return 0;
}
-static int io_files_update(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_uring_files_update up;
+ struct io_uring_rsrc_update up;
int ret;
- if (force_nonblock)
+ if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
- up.offset = req->files_update.offset;
- up.fds = req->files_update.arg;
+ up.offset = req->rsrc_update.offset;
+ up.data = req->rsrc_update.arg;
mutex_lock(&ctx->uring_lock);
- ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
+ ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
mutex_unlock(&ctx->uring_lock);
if (ret < 0)
req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
+ __io_req_complete(req, issue_flags, ret, 0);
return 0;
}
@@ -6045,9 +5826,9 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
case IORING_OP_POLL_REMOVE:
return io_poll_remove_prep(req, sqe);
case IORING_OP_FSYNC:
- return io_prep_fsync(req, sqe);
+ return io_fsync_prep(req, sqe);
case IORING_OP_SYNC_FILE_RANGE:
- return io_prep_sfr(req, sqe);
+ return io_sfr_prep(req, sqe);
case IORING_OP_SENDMSG:
case IORING_OP_SEND:
return io_sendmsg_prep(req, sqe);
@@ -6073,7 +5854,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
case IORING_OP_CLOSE:
return io_close_prep(req, sqe);
case IORING_OP_FILES_UPDATE:
- return io_files_update_prep(req, sqe);
+ return io_rsrc_update_prep(req, sqe);
case IORING_OP_STATX:
return io_statx_prep(req, sqe);
case IORING_OP_FADVISE:
@@ -6105,14 +5886,39 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
}
-static int io_req_defer_prep(struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static int io_req_prep_async(struct io_kiocb *req)
{
- if (!sqe)
+ switch (req->opcode) {
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_READ:
+ return io_rw_prep_async(req, READ);
+ case IORING_OP_WRITEV:
+ case IORING_OP_WRITE_FIXED:
+ case IORING_OP_WRITE:
+ return io_rw_prep_async(req, WRITE);
+ case IORING_OP_SENDMSG:
+ case IORING_OP_SEND:
+ return io_sendmsg_prep_async(req);
+ case IORING_OP_RECVMSG:
+ case IORING_OP_RECV:
+ return io_recvmsg_prep_async(req);
+ case IORING_OP_CONNECT:
+ return io_connect_prep_async(req);
+ }
+ return 0;
+}
+
+static int io_req_defer_prep(struct io_kiocb *req)
+{
+ if (!io_op_defs[req->opcode].needs_async_data)
+ return 0;
+ /* some opcodes init it during the initial prep */
+ if (req->async_data)
return 0;
- if (io_alloc_async_data(req))
+ if (__io_alloc_async_data(req))
return -EAGAIN;
- return io_req_prep(req, sqe);
+ return io_req_prep_async(req);
}
static u32 io_get_sequence(struct io_kiocb *req)
@@ -6128,7 +5934,7 @@ static u32 io_get_sequence(struct io_kiocb *req)
return total_submitted - nr_reqs;
}
-static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_req_defer(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_defer_entry *de;
@@ -6145,11 +5951,9 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
return 0;
- if (!req->async_data) {
- ret = io_req_defer_prep(req, sqe);
- if (ret)
- return ret;
- }
+ ret = io_req_defer_prep(req);
+ if (ret)
+ return ret;
io_prep_async_link(req);
de = kmalloc(sizeof(*de), GFP_KERNEL);
if (!de)
@@ -6171,25 +5975,6 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EIOCBQUEUED;
}
-static void io_req_drop_files(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_uring_task *tctx = req->task->io_uring;
- unsigned long flags;
-
- if (req->work.flags & IO_WQ_WORK_FILES) {
- put_files_struct(req->work.identity->files);
- put_nsproxy(req->work.identity->nsproxy);
- }
- spin_lock_irqsave(&ctx->inflight_lock, flags);
- list_del(&req->inflight_entry);
- spin_unlock_irqrestore(&ctx->inflight_lock, flags);
- req->flags &= ~REQ_F_INFLIGHT;
- req->work.flags &= ~IO_WQ_WORK_FILES;
- if (atomic_read(&tctx->in_idle))
- wake_up(&tctx->wait);
-}
-
static void __io_clean_op(struct io_kiocb *req)
{
if (req->flags & REQ_F_BUFFER_SELECTED) {
@@ -6223,8 +6008,8 @@ static void __io_clean_op(struct io_kiocb *req)
case IORING_OP_RECVMSG:
case IORING_OP_SENDMSG: {
struct io_async_msghdr *io = req->async_data;
- if (io->iov != io->fast_iov)
- kfree(io->iov);
+
+ kfree(io->free_iov);
break;
}
case IORING_OP_SPLICE:
@@ -6249,118 +6034,124 @@ static void __io_clean_op(struct io_kiocb *req)
}
}
-static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
+static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
+ const struct cred *creds = NULL;
int ret;
+ if (req->work.creds && req->work.creds != current_cred())
+ creds = override_creds(req->work.creds);
+
switch (req->opcode) {
case IORING_OP_NOP:
- ret = io_nop(req, cs);
+ ret = io_nop(req, issue_flags);
break;
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
case IORING_OP_READ:
- ret = io_read(req, force_nonblock, cs);
+ ret = io_read(req, issue_flags);
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
case IORING_OP_WRITE:
- ret = io_write(req, force_nonblock, cs);
+ ret = io_write(req, issue_flags);
break;
case IORING_OP_FSYNC:
- ret = io_fsync(req, force_nonblock);
+ ret = io_fsync(req, issue_flags);
break;
case IORING_OP_POLL_ADD:
- ret = io_poll_add(req);
+ ret = io_poll_add(req, issue_flags);
break;
case IORING_OP_POLL_REMOVE:
- ret = io_poll_remove(req);
+ ret = io_poll_remove(req, issue_flags);
break;
case IORING_OP_SYNC_FILE_RANGE:
- ret = io_sync_file_range(req, force_nonblock);
+ ret = io_sync_file_range(req, issue_flags);
break;
case IORING_OP_SENDMSG:
- ret = io_sendmsg(req, force_nonblock, cs);
+ ret = io_sendmsg(req, issue_flags);
break;
case IORING_OP_SEND:
- ret = io_send(req, force_nonblock, cs);
+ ret = io_send(req, issue_flags);
break;
case IORING_OP_RECVMSG:
- ret = io_recvmsg(req, force_nonblock, cs);
+ ret = io_recvmsg(req, issue_flags);
break;
case IORING_OP_RECV:
- ret = io_recv(req, force_nonblock, cs);
+ ret = io_recv(req, issue_flags);
break;
case IORING_OP_TIMEOUT:
- ret = io_timeout(req);
+ ret = io_timeout(req, issue_flags);
break;
case IORING_OP_TIMEOUT_REMOVE:
- ret = io_timeout_remove(req);
+ ret = io_timeout_remove(req, issue_flags);
break;
case IORING_OP_ACCEPT:
- ret = io_accept(req, force_nonblock, cs);
+ ret = io_accept(req, issue_flags);
break;
case IORING_OP_CONNECT:
- ret = io_connect(req, force_nonblock, cs);
+ ret = io_connect(req, issue_flags);
break;
case IORING_OP_ASYNC_CANCEL:
- ret = io_async_cancel(req);
+ ret = io_async_cancel(req, issue_flags);
break;
case IORING_OP_FALLOCATE:
- ret = io_fallocate(req, force_nonblock);
+ ret = io_fallocate(req, issue_flags);
break;
case IORING_OP_OPENAT:
- ret = io_openat(req, force_nonblock);
+ ret = io_openat(req, issue_flags);
break;
case IORING_OP_CLOSE:
- ret = io_close(req, force_nonblock, cs);
+ ret = io_close(req, issue_flags);
break;
case IORING_OP_FILES_UPDATE:
- ret = io_files_update(req, force_nonblock, cs);
+ ret = io_files_update(req, issue_flags);
break;
case IORING_OP_STATX:
- ret = io_statx(req, force_nonblock);
+ ret = io_statx(req, issue_flags);
break;
case IORING_OP_FADVISE:
- ret = io_fadvise(req, force_nonblock);
+ ret = io_fadvise(req, issue_flags);
break;
case IORING_OP_MADVISE:
- ret = io_madvise(req, force_nonblock);
+ ret = io_madvise(req, issue_flags);
break;
case IORING_OP_OPENAT2:
- ret = io_openat2(req, force_nonblock);
+ ret = io_openat2(req, issue_flags);
break;
case IORING_OP_EPOLL_CTL:
- ret = io_epoll_ctl(req, force_nonblock, cs);
+ ret = io_epoll_ctl(req, issue_flags);
break;
case IORING_OP_SPLICE:
- ret = io_splice(req, force_nonblock);
+ ret = io_splice(req, issue_flags);
break;
case IORING_OP_PROVIDE_BUFFERS:
- ret = io_provide_buffers(req, force_nonblock, cs);
+ ret = io_provide_buffers(req, issue_flags);
break;
case IORING_OP_REMOVE_BUFFERS:
- ret = io_remove_buffers(req, force_nonblock, cs);
+ ret = io_remove_buffers(req, issue_flags);
break;
case IORING_OP_TEE:
- ret = io_tee(req, force_nonblock);
+ ret = io_tee(req, issue_flags);
break;
case IORING_OP_SHUTDOWN:
- ret = io_shutdown(req, force_nonblock);
+ ret = io_shutdown(req, issue_flags);
break;
case IORING_OP_RENAMEAT:
- ret = io_renameat(req, force_nonblock);
+ ret = io_renameat(req, issue_flags);
break;
case IORING_OP_UNLINKAT:
- ret = io_unlinkat(req, force_nonblock);
+ ret = io_unlinkat(req, issue_flags);
break;
default:
ret = -EINVAL;
break;
}
+ if (creds)
+ revert_creds(creds);
+
if (ret)
return ret;
@@ -6381,7 +6172,7 @@ static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
return 0;
}
-static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
+static void io_wq_submit_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_kiocb *timeout;
@@ -6391,15 +6182,12 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
if (timeout)
io_queue_linked_timeout(timeout);
- /* if NO_CANCEL is set, we must still run the work */
- if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
- IO_WQ_WORK_CANCEL) {
+ if (work->flags & IO_WQ_WORK_CANCEL)
ret = -ECANCELED;
- }
if (!ret) {
do {
- ret = io_issue_sqe(req, false, NULL);
+ ret = io_issue_sqe(req, 0);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
@@ -6411,38 +6199,18 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
} while (1);
}
+ /* avoid locking problems by failing it from a clean context */
if (ret) {
- struct io_ring_ctx *lock_ctx = NULL;
-
- if (req->ctx->flags & IORING_SETUP_IOPOLL)
- lock_ctx = req->ctx;
-
- /*
- * io_iopoll_complete() does not hold completion_lock to
- * complete polled io, so here for polled io, we can not call
- * io_req_complete() directly, otherwise there maybe concurrent
- * access to cqring, defer_list, etc, which is not safe. Given
- * that io_iopoll_complete() is always called under uring_lock,
- * so here for polled io, we also get uring_lock to complete
- * it.
- */
- if (lock_ctx)
- mutex_lock(&lock_ctx->uring_lock);
-
- req_set_fail_links(req);
- io_req_complete(req, ret);
-
- if (lock_ctx)
- mutex_unlock(&lock_ctx->uring_lock);
+ /* io-wq is going to take one down */
+ refcount_inc(&req->refs);
+ io_req_task_queue_fail(req, ret);
}
-
- return io_steal_work(req);
}
static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
int index)
{
- struct fixed_file_table *table;
+ struct fixed_rsrc_table *table;
table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
return table->files[index & IORING_FILE_TABLE_MASK];
@@ -6465,16 +6233,8 @@ static struct file *io_file_get(struct io_submit_state *state,
file = __io_file_get(state, fd);
}
- if (file && file->f_op == &io_uring_fops &&
- !(req->flags & REQ_F_INFLIGHT)) {
- io_req_init_async(req);
- req->flags |= REQ_F_INFLIGHT;
-
- spin_lock_irq(&ctx->inflight_lock);
- list_add(&req->inflight_entry, &ctx->inflight_list);
- spin_unlock_irq(&ctx->inflight_lock);
- }
-
+ if (file && unlikely(file->f_op == &io_uring_fops))
+ io_req_track_inflight(req);
return file;
}
@@ -6501,11 +6261,11 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
spin_unlock_irqrestore(&ctx->completion_lock, flags);
if (prev) {
- req_set_fail_links(prev);
io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
- io_put_req(prev);
+ io_put_req_deferred(prev, 1);
} else {
- io_req_complete(req, -ETIME);
+ io_req_complete_post(req, -ETIME, 0);
+ io_put_req_deferred(req, 1);
}
return HRTIMER_NORESTART;
}
@@ -6551,27 +6311,12 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
return nxt;
}
-static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
+static void __io_queue_sqe(struct io_kiocb *req)
{
- struct io_kiocb *linked_timeout;
- const struct cred *old_creds = NULL;
+ struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
int ret;
-again:
- linked_timeout = io_prep_linked_timeout(req);
-
- if ((req->flags & REQ_F_WORK_INITIALIZED) &&
- (req->work.flags & IO_WQ_WORK_CREDS) &&
- req->work.identity->creds != current_cred()) {
- if (old_creds)
- revert_creds(old_creds);
- if (old_creds == req->work.identity->creds)
- old_creds = NULL; /* restored original creds */
- else
- old_creds = override_creds(req->work.identity->creds);
- }
-
- ret = io_issue_sqe(req, true, cs);
+ ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -6585,38 +6330,32 @@ again:
*/
io_queue_async_work(req);
}
-
- if (linked_timeout)
- io_queue_linked_timeout(linked_timeout);
} else if (likely(!ret)) {
/* drop submission reference */
- req = io_put_req_find_next(req);
- if (linked_timeout)
- io_queue_linked_timeout(linked_timeout);
+ if (req->flags & REQ_F_COMPLETE_INLINE) {
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_comp_state *cs = &ctx->submit_state.comp;
- if (req) {
- if (!(req->flags & REQ_F_FORCE_ASYNC))
- goto again;
- io_queue_async_work(req);
+ cs->reqs[cs->nr++] = req;
+ if (cs->nr == ARRAY_SIZE(cs->reqs))
+ io_submit_flush_completions(cs, ctx);
+ } else {
+ io_put_req(req);
}
} else {
- /* un-prep timeout, so it'll be killed as any other linked */
- req->flags &= ~REQ_F_LINK_TIMEOUT;
req_set_fail_links(req);
io_put_req(req);
io_req_complete(req, ret);
}
-
- if (old_creds)
- revert_creds(old_creds);
+ if (linked_timeout)
+ io_queue_linked_timeout(linked_timeout);
}
-static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_comp_state *cs)
+static void io_queue_sqe(struct io_kiocb *req)
{
int ret;
- ret = io_req_defer(req, sqe);
+ ret = io_req_defer(req);
if (ret) {
if (ret != -EIOCBQUEUED) {
fail_req:
@@ -6625,43 +6364,140 @@ fail_req:
io_req_complete(req, ret);
}
} else if (req->flags & REQ_F_FORCE_ASYNC) {
- if (!req->async_data) {
- ret = io_req_defer_prep(req, sqe);
- if (unlikely(ret))
- goto fail_req;
- }
+ ret = io_req_defer_prep(req);
+ if (unlikely(ret))
+ goto fail_req;
io_queue_async_work(req);
} else {
- if (sqe) {
- ret = io_req_prep(req, sqe);
- if (unlikely(ret))
- goto fail_req;
- }
- __io_queue_sqe(req, cs);
+ __io_queue_sqe(req);
}
}
-static inline void io_queue_link_head(struct io_kiocb *req,
- struct io_comp_state *cs)
+/*
+ * Check SQE restrictions (opcode and flags).
+ *
+ * Returns 'true' if SQE is allowed, 'false' otherwise.
+ */
+static inline bool io_check_restriction(struct io_ring_ctx *ctx,
+ struct io_kiocb *req,
+ unsigned int sqe_flags)
{
- if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
- io_put_req(req);
- io_req_complete(req, -ECANCELED);
- } else
- io_queue_sqe(req, NULL, cs);
+ if (!ctx->restricted)
+ return true;
+
+ if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
+ return false;
+
+ if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
+ ctx->restrictions.sqe_flags_required)
+ return false;
+
+ if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
+ ctx->restrictions.sqe_flags_required))
+ return false;
+
+ return true;
}
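
io_check_restriction() consults a table that userspace fills in before enabling the ring. A hedged sketch of that setup using raw syscalls (constants from <linux/io_uring.h>, available since Linux 5.10; error handling trimmed, and the chosen opcode/flag are just examples):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int setup_restricted_ring(void)
{
	struct io_uring_restriction res[2];
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_R_DISABLED;	/* ring starts disabled */
	fd = syscall(__NR_io_uring_setup, 8, &p);
	if (fd < 0)
		return fd;

	memset(res, 0, sizeof(res));
	res[0].opcode = IORING_RESTRICTION_SQE_OP;		/* allow one opcode */
	res[0].sqe_op = IORING_OP_NOP;
	res[1].opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED;	/* and one sqe flag */
	res[1].sqe_flags = IOSQE_ASYNC;

	if (syscall(__NR_io_uring_register, fd, IORING_REGISTER_RESTRICTIONS,
		    res, 2) < 0 ||
	    syscall(__NR_io_uring_register, fd, IORING_REGISTER_ENABLE_RINGS,
		    NULL, 0) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* anything but NOP / IOSQE_ASYNC is now rejected */
}
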
-struct io_submit_link {
- struct io_kiocb *head;
- struct io_kiocb *last;
-};
+static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ struct io_submit_state *state;
+ unsigned int sqe_flags;
+ int personality, ret = 0;
-static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_submit_link *link, struct io_comp_state *cs)
+ req->opcode = READ_ONCE(sqe->opcode);
+ /* same numerical values with corresponding REQ_F_*, safe to copy */
+ req->flags = sqe_flags = READ_ONCE(sqe->flags);
+ req->user_data = READ_ONCE(sqe->user_data);
+ req->async_data = NULL;
+ req->file = NULL;
+ req->ctx = ctx;
+ req->link = NULL;
+ req->fixed_rsrc_refs = NULL;
+ /* one is dropped after submission, the other at completion */
+ refcount_set(&req->refs, 2);
+ req->task = current;
+ req->result = 0;
+ req->work.list.next = NULL;
+ req->work.creds = NULL;
+ req->work.flags = 0;
+
+ /* enforce forwards compatibility on users */
+ if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
+ req->flags = 0;
+ return -EINVAL;
+ }
+
+ if (unlikely(req->opcode >= IORING_OP_LAST))
+ return -EINVAL;
+
+ if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
+ return -EACCES;
+
+ if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
+ !io_op_defs[req->opcode].buffer_select)
+ return -EOPNOTSUPP;
+
+ personality = READ_ONCE(sqe->personality);
+ if (personality) {
+ req->work.creds = xa_load(&ctx->personalities, personality);
+ if (!req->work.creds)
+ return -EINVAL;
+ get_cred(req->work.creds);
+ }
+ state = &ctx->submit_state;
+
+ /*
+ * Plug now if we have more than 1 IO left after this, and the target
+ * is potentially a read/write to block based storage.
+ */
+ if (!state->plug_started && state->ios_left > 1 &&
+ io_op_defs[req->opcode].plug) {
+ blk_start_plug(&state->plug);
+ state->plug_started = true;
+ }
+
+ if (io_op_defs[req->opcode].needs_file) {
+ bool fixed = req->flags & REQ_F_FIXED_FILE;
+
+ req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
+ if (unlikely(!req->file))
+ ret = -EBADF;
+ }
+
+ state->ios_left--;
+ return ret;
+}
+
+static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
{
- struct io_ring_ctx *ctx = req->ctx;
+ struct io_submit_link *link = &ctx->submit_state.link;
int ret;
+ ret = io_init_req(ctx, req, sqe);
+ if (unlikely(ret)) {
+fail_req:
+ io_put_req(req);
+ io_req_complete(req, ret);
+ if (link->head) {
+ /* fail even hard links since we don't submit */
+ link->head->flags |= REQ_F_FAIL_LINK;
+ io_put_req(link->head);
+ io_req_complete(link->head, -ECANCELED);
+ link->head = NULL;
+ }
+ return ret;
+ }
+ ret = io_req_prep(req, sqe);
+ if (unlikely(ret))
+ goto fail_req;
+
+ /* don't need @sqe from now on */
+ trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
+ true, ctx->flags & IORING_SETUP_SQPOLL);
+
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but
@@ -6683,19 +6519,16 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
head->flags |= REQ_F_IO_DRAIN;
ctx->drain_next = 1;
}
- ret = io_req_defer_prep(req, sqe);
- if (unlikely(ret)) {
- /* fail even hard links since we don't submit */
- head->flags |= REQ_F_FAIL_LINK;
- return ret;
- }
+ ret = io_req_defer_prep(req);
+ if (unlikely(ret))
+ goto fail_req;
trace_io_uring_link(ctx, req, head);
link->last->link = req;
link->last = req;
/* last request of a link, enqueue the link */
if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
- io_queue_link_head(head, cs);
+ io_queue_sqe(head);
link->head = NULL;
}
} else {
@@ -6704,13 +6537,10 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ctx->drain_next = 0;
}
if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
- ret = io_req_defer_prep(req, sqe);
- if (unlikely(ret))
- req->flags |= REQ_F_FAIL_LINK;
link->head = req;
link->last = req;
} else {
- io_queue_sqe(req, sqe, cs);
+ io_queue_sqe(req);
}
}
@@ -6720,30 +6550,28 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
/*
* Batched submission is done, ensure local IO is flushed out.
*/
-static void io_submit_state_end(struct io_submit_state *state)
+static void io_submit_state_end(struct io_submit_state *state,
+ struct io_ring_ctx *ctx)
{
- if (!list_empty(&state->comp.list))
- io_submit_flush_completions(&state->comp);
+ if (state->link.head)
+ io_queue_sqe(state->link.head);
+ if (state->comp.nr)
+ io_submit_flush_completions(&state->comp, ctx);
if (state->plug_started)
blk_finish_plug(&state->plug);
io_state_file_put(state);
- if (state->free_reqs)
- kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
}
/*
* Start submission side cache.
*/
static void io_submit_state_start(struct io_submit_state *state,
- struct io_ring_ctx *ctx, unsigned int max_ios)
+ unsigned int max_ios)
{
state->plug_started = false;
- state->comp.nr = 0;
- INIT_LIST_HEAD(&state->comp.list);
- state->comp.ctx = ctx;
- state->free_reqs = 0;
- state->file_refs = 0;
state->ios_left = max_ios;
+ /* set only head, no need to init link_last in advance */
+ state->link.head = NULL;
}
static void io_commit_sqring(struct io_ring_ctx *ctx)
@@ -6779,7 +6607,7 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
* 2) allows the kernel side to track the head on its own, even
* though the application is the one updating it.
*/
- head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
+ head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
if (likely(head < ctx->sq_entries))
return &ctx->sq_sqes[head];
@@ -6789,126 +6617,9 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
return NULL;
}
-static inline void io_consume_sqe(struct io_ring_ctx *ctx)
-{
- ctx->cached_sq_head++;
-}
-
-/*
- * Check SQE restrictions (opcode and flags).
- *
- * Returns 'true' if SQE is allowed, 'false' otherwise.
- */
-static inline bool io_check_restriction(struct io_ring_ctx *ctx,
- struct io_kiocb *req,
- unsigned int sqe_flags)
-{
- if (!ctx->restricted)
- return true;
-
- if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
- return false;
-
- if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
- ctx->restrictions.sqe_flags_required)
- return false;
-
- if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
- ctx->restrictions.sqe_flags_required))
- return false;
-
- return true;
-}
-
-#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
- IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
- IOSQE_BUFFER_SELECT)
-
-static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe,
- struct io_submit_state *state)
-{
- unsigned int sqe_flags;
- int id, ret;
-
- req->opcode = READ_ONCE(sqe->opcode);
- req->user_data = READ_ONCE(sqe->user_data);
- req->async_data = NULL;
- req->file = NULL;
- req->ctx = ctx;
- req->flags = 0;
- req->link = NULL;
- req->fixed_file_refs = NULL;
- /* one is dropped after submission, the other at completion */
- refcount_set(&req->refs, 2);
- req->task = current;
- req->result = 0;
-
- if (unlikely(req->opcode >= IORING_OP_LAST))
- return -EINVAL;
-
- if (unlikely(io_sq_thread_acquire_mm_files(ctx, req)))
- return -EFAULT;
-
- sqe_flags = READ_ONCE(sqe->flags);
- /* enforce forwards compatibility on users */
- if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
- return -EINVAL;
-
- if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
- return -EACCES;
-
- if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
- !io_op_defs[req->opcode].buffer_select)
- return -EOPNOTSUPP;
-
- id = READ_ONCE(sqe->personality);
- if (id) {
- struct io_identity *iod;
-
- iod = idr_find(&ctx->personality_idr, id);
- if (unlikely(!iod))
- return -EINVAL;
- refcount_inc(&iod->count);
-
- __io_req_init_async(req);
- get_cred(iod->creds);
- req->work.identity = iod;
- req->work.flags |= IO_WQ_WORK_CREDS;
- }
-
- /* same numerical values with corresponding REQ_F_*, safe to copy */
- req->flags |= sqe_flags;
-
- /*
- * Plug now if we have more than 1 IO left after this, and the target
- * is potentially a read/write to block based storage.
- */
- if (!state->plug_started && state->ios_left > 1 &&
- io_op_defs[req->opcode].plug) {
- blk_start_plug(&state->plug);
- state->plug_started = true;
- }
-
- ret = 0;
- if (io_op_defs[req->opcode].needs_file) {
- bool fixed = req->flags & REQ_F_FIXED_FILE;
-
- req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
- if (unlikely(!req->file &&
- !io_op_defs[req->opcode].needs_file_no_error))
- ret = -EBADF;
- }
-
- state->ios_left--;
- return ret;
-}
-
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
{
- struct io_submit_state state;
- struct io_submit_link link;
- int i, submitted = 0;
+ int submitted = 0;
/* if we have a backlog and couldn't flush it all, return BUSY */
if (test_bit(0, &ctx->sq_check_overflow)) {
@@ -6924,43 +6635,27 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
percpu_counter_add(&current->io_uring->inflight, nr);
refcount_add(nr, &current->usage);
+ io_submit_state_start(&ctx->submit_state, nr);
- io_submit_state_start(&state, ctx, nr);
- link.head = NULL;
-
- for (i = 0; i < nr; i++) {
+ while (submitted < nr) {
const struct io_uring_sqe *sqe;
struct io_kiocb *req;
- int err;
- sqe = io_get_sqe(ctx);
- if (unlikely(!sqe)) {
- io_consume_sqe(ctx);
- break;
- }
- req = io_alloc_req(ctx, &state);
+ req = io_alloc_req(ctx);
if (unlikely(!req)) {
if (!submitted)
submitted = -EAGAIN;
break;
}
- io_consume_sqe(ctx);
+ sqe = io_get_sqe(ctx);
+ if (unlikely(!sqe)) {
+ kmem_cache_free(req_cachep, req);
+ break;
+ }
/* will complete beyond this point, count as submitted */
submitted++;
-
- err = io_init_req(ctx, req, sqe, &state);
- if (unlikely(err)) {
-fail_req:
- io_put_req(req);
- io_req_complete(req, err);
+ if (io_submit_sqe(ctx, req, sqe))
break;
- }
-
- trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
- true, io_async_submit(ctx));
- err = io_submit_sqe(req, sqe, &link, &state.comp);
- if (err)
- goto fail_req;
}
if (unlikely(submitted != nr)) {
@@ -6972,10 +6667,8 @@ fail_req:
percpu_counter_sub(&tctx->inflight, unused);
put_task_struct_many(current, unused);
}
- if (link.head)
- io_queue_link_head(link.head, &state.comp);
- io_submit_state_end(&state);
+ io_submit_state_end(&ctx->submit_state, ctx);
/* Commit SQ ring head once we've consumed and submitted all SQEs */
io_commit_sqring(ctx);
@@ -7014,8 +6707,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
if (!list_empty(&ctx->iopoll_list))
io_do_iopoll(ctx, &nr_events, 0);
- if (to_submit && !ctx->sqo_dead &&
- likely(!percpu_ref_is_dying(&ctx->refs)))
+ if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
+ !(ctx->flags & IORING_SETUP_R_DISABLED))
ret = io_submit_sqes(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
}
@@ -7039,93 +6732,62 @@ static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
sqd->sq_thread_idle = sq_thread_idle;
}
-static void io_sqd_init_new(struct io_sq_data *sqd)
-{
- struct io_ring_ctx *ctx;
-
- while (!list_empty(&sqd->ctx_new_list)) {
- ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
- list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
- complete(&ctx->sq_thread_comp);
- }
-
- io_sqd_update_thread_idle(sqd);
-}
-
static int io_sq_thread(void *data)
{
- struct cgroup_subsys_state *cur_css = NULL;
- struct files_struct *old_files = current->files;
- struct nsproxy *old_nsproxy = current->nsproxy;
- const struct cred *old_cred = NULL;
struct io_sq_data *sqd = data;
struct io_ring_ctx *ctx;
unsigned long timeout = 0;
+ char buf[TASK_COMM_LEN];
DEFINE_WAIT(wait);
- task_lock(current);
- current->files = NULL;
- current->nsproxy = NULL;
- task_unlock(current);
+ sprintf(buf, "iou-sqp-%d", sqd->task_pid);
+ set_task_comm(current, buf);
+ current->pf_io_worker = NULL;
+
+ if (sqd->sq_cpu != -1)
+ set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
+ else
+ set_cpus_allowed_ptr(current, cpu_online_mask);
+ current->flags |= PF_NO_SETAFFINITY;
- while (!kthread_should_stop()) {
+ mutex_lock(&sqd->lock);
+ while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
int ret;
bool cap_entries, sqt_spin, needs_sched;
- /*
- * Any changes to the sqd lists are synchronized through the
- * kthread parking. This synchronizes the thread vs users,
- * the users are synchronized on the sqd->ctx_lock.
- */
- if (kthread_should_park()) {
- kthread_parkme();
- /*
- * When sq thread is unparked, in case the previous park operation
- * comes from io_put_sq_data(), which means that sq thread is going
- * to be stopped, so here needs to have a check.
- */
- if (kthread_should_stop())
- break;
- }
-
- if (unlikely(!list_empty(&sqd->ctx_new_list))) {
- io_sqd_init_new(sqd);
+ if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
+ mutex_unlock(&sqd->lock);
+ cond_resched();
+ mutex_lock(&sqd->lock);
+ io_run_task_work();
+ io_run_task_work_head(&sqd->park_task_work);
timeout = jiffies + sqd->sq_thread_idle;
+ continue;
}
-
+ if (fatal_signal_pending(current))
+ break;
sqt_spin = false;
cap_entries = !list_is_singular(&sqd->ctx_list);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
- if (current->cred != ctx->creds) {
- if (old_cred)
- revert_creds(old_cred);
- old_cred = override_creds(ctx->creds);
- }
- io_sq_thread_associate_blkcg(ctx, &cur_css);
-#ifdef CONFIG_AUDIT
- current->loginuid = ctx->loginuid;
- current->sessionid = ctx->sessionid;
-#endif
+ const struct cred *creds = NULL;
+ if (ctx->sq_creds != current_cred())
+ creds = override_creds(ctx->sq_creds);
ret = __io_sq_thread(ctx, cap_entries);
+ if (creds)
+ revert_creds(creds);
if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
sqt_spin = true;
-
- io_sq_thread_drop_mm_files();
}
if (sqt_spin || !time_after(jiffies, timeout)) {
io_run_task_work();
- io_sq_thread_drop_mm_files();
cond_resched();
if (sqt_spin)
timeout = jiffies + sqd->sq_thread_idle;
continue;
}
- if (kthread_should_park())
- continue;
-
needs_sched = true;
prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -7140,35 +6802,34 @@ static int io_sq_thread(void *data)
}
}
- if (needs_sched) {
+ if (needs_sched && !test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
io_ring_set_wakeup_flag(ctx);
+ mutex_unlock(&sqd->lock);
schedule();
+ try_to_freeze();
+ mutex_lock(&sqd->lock);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
io_ring_clear_wakeup_flag(ctx);
}
finish_wait(&sqd->wait, &wait);
+ io_run_task_work_head(&sqd->park_task_work);
timeout = jiffies + sqd->sq_thread_idle;
}
- io_run_task_work();
- io_sq_thread_drop_mm_files();
-
- if (cur_css)
- io_sq_thread_unassociate_blkcg();
- if (old_cred)
- revert_creds(old_cred);
-
- task_lock(current);
- current->files = old_files;
- current->nsproxy = old_nsproxy;
- task_unlock(current);
-
- kthread_parkme();
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ io_uring_cancel_sqpoll(ctx);
+ sqd->thread = NULL;
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ io_ring_set_wakeup_flag(ctx);
+ mutex_unlock(&sqd->lock);
- return 0;
+ io_run_task_work();
+ io_run_task_work_head(&sqd->park_task_work);
+ complete(&sqd->exited);
+ do_exit(0);
}
struct io_wait_queue {
@@ -7217,6 +6878,25 @@ static int io_run_task_work_sig(void)
return -EINTR;
}
+/* when this returns >0, the caller should retry */
+static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq,
+ signed long *timeout)
+{
+ int ret;
+
+ /* make sure we run task_work before checking for signals */
+ ret = io_run_task_work_sig();
+ if (ret || io_should_wake(iowq))
+ return ret;
+ /* let the caller flush overflows, retry */
+ if (test_bit(0, &ctx->cq_check_overflow))
+ return 1;
+
+ *timeout = schedule_timeout(*timeout);
+ return !*timeout ? -ETIME : 1;
+}
+
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
@@ -7235,9 +6915,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
.to_wait = min_events,
};
struct io_rings *rings = ctx->rings;
- struct timespec64 ts;
- signed long timeout = 0;
- int ret = 0;
+ signed long timeout = MAX_SCHEDULE_TIMEOUT;
+ int ret;
do {
io_cqring_overflow_flush(ctx, false, NULL, NULL);
@@ -7261,6 +6940,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
}
if (uts) {
+ struct timespec64 ts;
+
if (get_timespec64(&ts, uts))
return -EFAULT;
timeout = timespec64_to_jiffies(&ts);
@@ -7269,34 +6950,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
trace_io_uring_cqring_wait(ctx, min_events);
do {
- io_cqring_overflow_flush(ctx, false, NULL, NULL);
- prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
- TASK_INTERRUPTIBLE);
- /* make sure we run task_work before checking for signals */
- ret = io_run_task_work_sig();
- if (ret > 0) {
- finish_wait(&ctx->wait, &iowq.wq);
- continue;
- }
- else if (ret < 0)
+ /* if we can't even flush overflow, don't wait for more */
+ if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
+ ret = -EBUSY;
break;
- if (io_should_wake(&iowq))
- break;
- if (test_bit(0, &ctx->cq_check_overflow)) {
- finish_wait(&ctx->wait, &iowq.wq);
- continue;
}
- if (uts) {
- timeout = schedule_timeout(timeout);
- if (timeout == 0) {
- ret = -ETIME;
- break;
- }
- } else {
- schedule();
- }
- } while (1);
- finish_wait(&ctx->wait, &iowq.wq);
+ prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
+ TASK_INTERRUPTIBLE);
+ ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+ finish_wait(&ctx->wait, &iowq.wq);
+ cond_resched();
+ } while (ret > 0);
restore_saved_sigmask_unless(ret == -EINTR);
@@ -7326,90 +6990,208 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
#endif
}
-static void io_file_ref_kill(struct percpu_ref *ref)
+static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
{
- struct fixed_file_data *data;
+ struct fixed_rsrc_data *data;
- data = container_of(ref, struct fixed_file_data, refs);
+ data = container_of(ref, struct fixed_rsrc_data, refs);
complete(&data->done);
}
-static void io_sqe_files_set_node(struct fixed_file_data *file_data,
- struct fixed_file_ref_node *ref_node)
+static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
{
- spin_lock_bh(&file_data->lock);
- file_data->node = ref_node;
- list_add_tail(&ref_node->node, &file_data->ref_list);
- spin_unlock_bh(&file_data->lock);
- percpu_ref_get(&file_data->refs);
+ spin_lock_bh(&ctx->rsrc_ref_lock);
}
-static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
+static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
{
- struct fixed_file_data *data = ctx->file_data;
- struct fixed_file_ref_node *backup_node, *ref_node = NULL;
- unsigned nr_tables, i;
- int ret;
+ spin_unlock_bh(&ctx->rsrc_ref_lock);
+}
- if (!data)
- return -ENXIO;
- backup_node = alloc_fixed_file_ref_node(ctx);
- if (!backup_node)
- return -ENOMEM;
+static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
+ struct fixed_rsrc_data *rsrc_data,
+ struct fixed_rsrc_ref_node *ref_node)
+{
+ io_rsrc_ref_lock(ctx);
+ rsrc_data->node = ref_node;
+ list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
+ io_rsrc_ref_unlock(ctx);
+ percpu_ref_get(&rsrc_data->refs);
+}
- spin_lock_bh(&data->lock);
+static void io_sqe_rsrc_kill_node(struct io_ring_ctx *ctx, struct fixed_rsrc_data *data)
+{
+ struct fixed_rsrc_ref_node *ref_node = NULL;
+
+ io_rsrc_ref_lock(ctx);
ref_node = data->node;
- spin_unlock_bh(&data->lock);
+ data->node = NULL;
+ io_rsrc_ref_unlock(ctx);
if (ref_node)
percpu_ref_kill(&ref_node->refs);
+}
+
+static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
+ struct io_ring_ctx *ctx,
+ void (*rsrc_put)(struct io_ring_ctx *ctx,
+ struct io_rsrc_put *prsrc))
+{
+ struct fixed_rsrc_ref_node *backup_node;
+ int ret;
- percpu_ref_kill(&data->refs);
+ if (data->quiesce)
+ return -ENXIO;
- /* wait for all refs nodes to complete */
- flush_delayed_work(&ctx->file_put_work);
+ data->quiesce = true;
do {
+ ret = -ENOMEM;
+ backup_node = alloc_fixed_rsrc_ref_node(ctx);
+ if (!backup_node)
+ break;
+ backup_node->rsrc_data = data;
+ backup_node->rsrc_put = rsrc_put;
+
+ io_sqe_rsrc_kill_node(ctx, data);
+ percpu_ref_kill(&data->refs);
+ flush_delayed_work(&ctx->rsrc_put_work);
+
ret = wait_for_completion_interruptible(&data->done);
if (!ret)
break;
+
+ percpu_ref_resurrect(&data->refs);
+ io_sqe_rsrc_set_node(ctx, data, backup_node);
+ backup_node = NULL;
+ reinit_completion(&data->done);
+ mutex_unlock(&ctx->uring_lock);
ret = io_run_task_work_sig();
- if (ret < 0) {
- percpu_ref_resurrect(&data->refs);
- reinit_completion(&data->done);
- io_sqe_files_set_node(data, backup_node);
- return ret;
- }
- } while (1);
+ mutex_lock(&ctx->uring_lock);
+ } while (ret >= 0);
+ data->quiesce = false;
+
+ if (backup_node)
+ destroy_fixed_rsrc_ref_node(backup_node);
+ return ret;
+}
+
+static struct fixed_rsrc_data *alloc_fixed_rsrc_data(struct io_ring_ctx *ctx)
+{
+ struct fixed_rsrc_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
+ kfree(data);
+ return NULL;
+ }
+ data->ctx = ctx;
+ init_completion(&data->done);
+ return data;
+}
+
+static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
+{
+ percpu_ref_exit(&data->refs);
+ kfree(data->table);
+ kfree(data);
+}
+
+static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
+{
+ struct fixed_rsrc_data *data = ctx->file_data;
+ unsigned nr_tables, i;
+ int ret;
+
+ /*
+	 * percpu_ref_is_dying() is used to stop a parallel files unregister,
+	 * since we may drop the uring lock later in this function to
+	 * run task work.
+ */
+ if (!data || percpu_ref_is_dying(&data->refs))
+ return -ENXIO;
+ ret = io_rsrc_ref_quiesce(data, ctx, io_ring_file_put);
+ if (ret)
+ return ret;
__io_sqe_files_unregister(ctx);
nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
for (i = 0; i < nr_tables; i++)
kfree(data->table[i].files);
- kfree(data->table);
- percpu_ref_exit(&data->refs);
- kfree(data);
+ free_fixed_rsrc_data(data);
ctx->file_data = NULL;
ctx->nr_user_files = 0;
- destroy_fixed_file_ref_node(backup_node);
return 0;
}
+static void io_sq_thread_unpark(struct io_sq_data *sqd)
+ __releases(&sqd->lock)
+{
+ WARN_ON_ONCE(sqd->thread == current);
+
+ /*
+ * Do the dance but not conditional clear_bit() because it'd race with
+ * other threads incrementing park_pending and setting the bit.
+ */
+ clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
+ if (atomic_dec_return(&sqd->park_pending))
+ set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
+ mutex_unlock(&sqd->lock);
+}
+
+static void io_sq_thread_park(struct io_sq_data *sqd)
+ __acquires(&sqd->lock)
+{
+ WARN_ON_ONCE(sqd->thread == current);
+
+ atomic_inc(&sqd->park_pending);
+ set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
+ mutex_lock(&sqd->lock);
+ if (sqd->thread)
+ wake_up_process(sqd->thread);
+}
+
+static void io_sq_thread_stop(struct io_sq_data *sqd)
+{
+ WARN_ON_ONCE(sqd->thread == current);
+
+ mutex_lock(&sqd->lock);
+ set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
+ if (sqd->thread)
+ wake_up_process(sqd->thread);
+ mutex_unlock(&sqd->lock);
+ wait_for_completion(&sqd->exited);
+}
+
static void io_put_sq_data(struct io_sq_data *sqd)
{
if (refcount_dec_and_test(&sqd->refs)) {
- /*
- * The park is a bit of a work-around, without it we get
- * warning spews on shutdown with SQPOLL set and affinity
- * set to a single CPU.
- */
- if (sqd->thread) {
- kthread_park(sqd->thread);
- kthread_stop(sqd->thread);
- }
+ WARN_ON_ONCE(atomic_read(&sqd->park_pending));
+ io_sq_thread_stop(sqd);
kfree(sqd);
}
}
+static void io_sq_thread_finish(struct io_ring_ctx *ctx)
+{
+ struct io_sq_data *sqd = ctx->sq_data;
+
+ if (sqd) {
+ io_sq_thread_park(sqd);
+ list_del_init(&ctx->sqd_list);
+ io_sqd_update_thread_idle(sqd);
+ io_sq_thread_unpark(sqd);
+
+ io_put_sq_data(sqd);
+ ctx->sq_data = NULL;
+ if (ctx->sq_creds)
+ put_cred(ctx->sq_creds);
+ }
+}
+
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
struct io_ring_ctx *ctx_attach;
@@ -7430,91 +7212,46 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
fdput(f);
return ERR_PTR(-EINVAL);
}
+ if (sqd->task_tgid != current->tgid) {
+ fdput(f);
+ return ERR_PTR(-EPERM);
+ }
refcount_inc(&sqd->refs);
fdput(f);
return sqd;
}
-static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
+static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
+ bool *attached)
{
struct io_sq_data *sqd;
- if (p->flags & IORING_SETUP_ATTACH_WQ)
- return io_attach_sq_data(p);
+ *attached = false;
+ if (p->flags & IORING_SETUP_ATTACH_WQ) {
+ sqd = io_attach_sq_data(p);
+ if (!IS_ERR(sqd)) {
+ *attached = true;
+ return sqd;
+ }
+ /* fall through for EPERM case, setup new sqd/task */
+ if (PTR_ERR(sqd) != -EPERM)
+ return sqd;
+ }
sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
if (!sqd)
return ERR_PTR(-ENOMEM);
+ atomic_set(&sqd->park_pending, 0);
refcount_set(&sqd->refs, 1);
INIT_LIST_HEAD(&sqd->ctx_list);
- INIT_LIST_HEAD(&sqd->ctx_new_list);
- mutex_init(&sqd->ctx_lock);
mutex_init(&sqd->lock);
init_waitqueue_head(&sqd->wait);
+ init_completion(&sqd->exited);
return sqd;
}
-static void io_sq_thread_unpark(struct io_sq_data *sqd)
- __releases(&sqd->lock)
-{
- if (!sqd->thread)
- return;
- kthread_unpark(sqd->thread);
- mutex_unlock(&sqd->lock);
-}
-
-static void io_sq_thread_park(struct io_sq_data *sqd)
- __acquires(&sqd->lock)
-{
- if (!sqd->thread)
- return;
- mutex_lock(&sqd->lock);
- kthread_park(sqd->thread);
-}
-
-static void io_sq_thread_stop(struct io_ring_ctx *ctx)
-{
- struct io_sq_data *sqd = ctx->sq_data;
-
- if (sqd) {
- if (sqd->thread) {
- /*
- * We may arrive here from the error branch in
- * io_sq_offload_create() where the kthread is created
- * without being waked up, thus wake it up now to make
- * sure the wait will complete.
- */
- wake_up_process(sqd->thread);
- wait_for_completion(&ctx->sq_thread_comp);
-
- io_sq_thread_park(sqd);
- }
-
- mutex_lock(&sqd->ctx_lock);
- list_del(&ctx->sqd_list);
- io_sqd_update_thread_idle(sqd);
- mutex_unlock(&sqd->ctx_lock);
-
- if (sqd->thread)
- io_sq_thread_unpark(sqd);
-
- io_put_sq_data(sqd);
- ctx->sq_data = NULL;
- }
-}
-
-static void io_finish_async(struct io_ring_ctx *ctx)
-{
- io_sq_thread_stop(ctx);
-
- if (ctx->io_wq) {
- io_wq_destroy(ctx->io_wq);
- ctx->io_wq = NULL;
- }
-}
-
#if defined(CONFIG_UNIX)
/*
* Ensure the UNIX gc is aware of our file set, so we are certain that
@@ -7541,7 +7278,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
skb->sk = sk;
nr_files = 0;
- fpl->user = get_uid(ctx->user);
+ fpl->user = get_uid(current_user());
for (i = 0; i < nr; i++) {
struct file *file = io_file_from_index(ctx, i + offset);
@@ -7612,13 +7349,13 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
}
#endif
-static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
+static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
unsigned nr_tables, unsigned nr_files)
{
int i;
for (i = 0; i < nr_tables; i++) {
- struct fixed_file_table *table = &file_data->table[i];
+ struct fixed_rsrc_table *table = &file_data->table[i];
unsigned this_files;
this_files = min(nr_files, IORING_MAX_FILES_TABLE);
@@ -7633,14 +7370,15 @@ static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
return 0;
for (i = 0; i < nr_tables; i++) {
- struct fixed_file_table *table = &file_data->table[i];
+ struct fixed_rsrc_table *table = &file_data->table[i];
kfree(table->files);
}
return 1;
}
-static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
+static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
+ struct file *file = prsrc->file;
#if defined(CONFIG_UNIX)
struct sock *sock = ctx->ring_sock->sk;
struct sk_buff_head list, *head = &sock->sk_receive_queue;
@@ -7701,108 +7439,119 @@ static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
#endif
}
-struct io_file_put {
- struct list_head list;
- struct file *file;
-};
-
-static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
+static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
{
- struct fixed_file_data *file_data = ref_node->file_data;
- struct io_ring_ctx *ctx = file_data->ctx;
- struct io_file_put *pfile, *tmp;
+ struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
+ struct io_ring_ctx *ctx = rsrc_data->ctx;
+ struct io_rsrc_put *prsrc, *tmp;
- list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
- list_del(&pfile->list);
- io_ring_file_put(ctx, pfile->file);
- kfree(pfile);
+ list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
+ list_del(&prsrc->list);
+ ref_node->rsrc_put(ctx, prsrc);
+ kfree(prsrc);
}
percpu_ref_exit(&ref_node->refs);
kfree(ref_node);
- percpu_ref_put(&file_data->refs);
+ percpu_ref_put(&rsrc_data->refs);
}
-static void io_file_put_work(struct work_struct *work)
+static void io_rsrc_put_work(struct work_struct *work)
{
struct io_ring_ctx *ctx;
struct llist_node *node;
- ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
- node = llist_del_all(&ctx->file_put_llist);
+ ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
+ node = llist_del_all(&ctx->rsrc_put_llist);
while (node) {
- struct fixed_file_ref_node *ref_node;
+ struct fixed_rsrc_ref_node *ref_node;
struct llist_node *next = node->next;
- ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
- __io_file_put_work(ref_node);
+ ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
+ __io_rsrc_put_work(ref_node);
node = next;
}
}
-static void io_file_data_ref_zero(struct percpu_ref *ref)
+static struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
+ unsigned i)
+{
+ struct fixed_rsrc_table *table;
+
+ table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
+ return &table->files[i & IORING_FILE_TABLE_MASK];
+}
+
+static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
{
- struct fixed_file_ref_node *ref_node;
- struct fixed_file_data *data;
+ struct fixed_rsrc_ref_node *ref_node;
+ struct fixed_rsrc_data *data;
struct io_ring_ctx *ctx;
bool first_add = false;
int delay = HZ;
- ref_node = container_of(ref, struct fixed_file_ref_node, refs);
- data = ref_node->file_data;
+ ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
+ data = ref_node->rsrc_data;
ctx = data->ctx;
- spin_lock_bh(&data->lock);
+ io_rsrc_ref_lock(ctx);
ref_node->done = true;
- while (!list_empty(&data->ref_list)) {
- ref_node = list_first_entry(&data->ref_list,
- struct fixed_file_ref_node, node);
+ while (!list_empty(&ctx->rsrc_ref_list)) {
+ ref_node = list_first_entry(&ctx->rsrc_ref_list,
+ struct fixed_rsrc_ref_node, node);
/* recycle ref nodes in order */
if (!ref_node->done)
break;
list_del(&ref_node->node);
- first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
+ first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
}
- spin_unlock_bh(&data->lock);
+ io_rsrc_ref_unlock(ctx);
if (percpu_ref_is_dying(&data->refs))
delay = 0;
if (!delay)
- mod_delayed_work(system_wq, &ctx->file_put_work, 0);
+ mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
else if (first_add)
- queue_delayed_work(system_wq, &ctx->file_put_work, delay);
+ queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
}
-static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
struct io_ring_ctx *ctx)
{
- struct fixed_file_ref_node *ref_node;
+ struct fixed_rsrc_ref_node *ref_node;
ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
if (!ref_node)
return NULL;
- if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
+ if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
0, GFP_KERNEL)) {
kfree(ref_node);
return NULL;
}
INIT_LIST_HEAD(&ref_node->node);
- INIT_LIST_HEAD(&ref_node->file_list);
- ref_node->file_data = ctx->file_data;
+ INIT_LIST_HEAD(&ref_node->rsrc_list);
ref_node->done = false;
return ref_node;
}
-static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
+static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
+ struct fixed_rsrc_ref_node *ref_node)
+{
+ ref_node->rsrc_data = ctx->file_data;
+ ref_node->rsrc_put = io_ring_file_put;
+}
+
+static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
{
percpu_ref_exit(&ref_node->refs);
kfree(ref_node);
}
+
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args)
{
@@ -7810,8 +7559,8 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_tables, i;
struct file *file;
int fd, ret = -ENOMEM;
- struct fixed_file_ref_node *ref_node;
- struct fixed_file_data *file_data;
+ struct fixed_rsrc_ref_node *ref_node;
+ struct fixed_rsrc_data *file_data;
if (ctx->file_data)
return -EBUSY;
@@ -7820,13 +7569,10 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
if (nr_args > IORING_MAX_FIXED_FILES)
return -EMFILE;
- file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
+ file_data = alloc_fixed_rsrc_data(ctx);
if (!file_data)
return -ENOMEM;
- file_data->ctx = ctx;
- init_completion(&file_data->done);
- INIT_LIST_HEAD(&file_data->ref_list);
- spin_lock_init(&file_data->lock);
+ ctx->file_data = file_data;
nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
@@ -7834,18 +7580,10 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
if (!file_data->table)
goto out_free;
- if (percpu_ref_init(&file_data->refs, io_file_ref_kill,
- PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
- goto out_free;
-
if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
- goto out_ref;
- ctx->file_data = file_data;
+ goto out_free;
for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
- struct fixed_file_table *table;
- unsigned index;
-
if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
ret = -EFAULT;
goto out_fput;
@@ -7870,9 +7608,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
fput(file);
goto out_fput;
}
- table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
- index = i & IORING_FILE_TABLE_MASK;
- table->files[index] = file;
+ *io_fixed_file_slot(file_data, i) = file;
}
ret = io_sqe_files_scm(ctx);
@@ -7881,13 +7617,14 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return ret;
}
- ref_node = alloc_fixed_file_ref_node(ctx);
+ ref_node = alloc_fixed_rsrc_ref_node(ctx);
if (!ref_node) {
io_sqe_files_unregister(ctx);
return -ENOMEM;
}
+ init_fixed_file_ref_node(ctx, ref_node);
- io_sqe_files_set_node(file_data, ref_node);
+ io_sqe_rsrc_set_node(ctx, file_data, ref_node);
return ret;
out_fput:
for (i = 0; i < ctx->nr_user_files; i++) {
@@ -7898,11 +7635,8 @@ out_fput:
for (i = 0; i < nr_tables; i++)
kfree(file_data->table[i].files);
ctx->nr_user_files = 0;
-out_ref:
- percpu_ref_exit(&file_data->refs);
out_free:
- kfree(file_data->table);
- kfree(file_data);
+ free_fixed_rsrc_data(ctx->file_data);
ctx->file_data = NULL;
return ret;
}
@@ -7950,29 +7684,34 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
#endif
}
-static int io_queue_file_removal(struct fixed_file_data *data,
- struct file *file)
+static int io_queue_rsrc_removal(struct fixed_rsrc_data *data, void *rsrc)
{
- struct io_file_put *pfile;
- struct fixed_file_ref_node *ref_node = data->node;
+ struct io_rsrc_put *prsrc;
+ struct fixed_rsrc_ref_node *ref_node = data->node;
- pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
- if (!pfile)
+ prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+ if (!prsrc)
return -ENOMEM;
- pfile->file = file;
- list_add(&pfile->list, &ref_node->file_list);
+ prsrc->rsrc = rsrc;
+ list_add(&prsrc->list, &ref_node->rsrc_list);
return 0;
}
+static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
+ struct file *file)
+{
+ return io_queue_rsrc_removal(data, (void *)file);
+}
+
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
- struct io_uring_files_update *up,
+ struct io_uring_rsrc_update *up,
unsigned nr_args)
{
- struct fixed_file_data *data = ctx->file_data;
- struct fixed_file_ref_node *ref_node;
- struct file *file;
+ struct fixed_rsrc_data *data = ctx->file_data;
+ struct fixed_rsrc_ref_node *ref_node;
+ struct file *file, **file_slot;
__s32 __user *fds;
int fd, i, err;
__u32 done;
@@ -7983,30 +7722,29 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
if (done > ctx->nr_user_files)
return -EINVAL;
- ref_node = alloc_fixed_file_ref_node(ctx);
+ ref_node = alloc_fixed_rsrc_ref_node(ctx);
if (!ref_node)
return -ENOMEM;
+ init_fixed_file_ref_node(ctx, ref_node);
- done = 0;
- fds = u64_to_user_ptr(up->fds);
- while (nr_args) {
- struct fixed_file_table *table;
- unsigned index;
-
+ fds = u64_to_user_ptr(up->data);
+ for (done = 0; done < nr_args; done++) {
err = 0;
if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
err = -EFAULT;
break;
}
- i = array_index_nospec(up->offset, ctx->nr_user_files);
- table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
- index = i & IORING_FILE_TABLE_MASK;
- if (table->files[index]) {
- file = table->files[index];
- err = io_queue_file_removal(data, file);
+ if (fd == IORING_REGISTER_FILES_SKIP)
+ continue;
+
+ i = array_index_nospec(up->offset + done, ctx->nr_user_files);
+ file_slot = io_fixed_file_slot(ctx->file_data, i);
+
+ if (*file_slot) {
+ err = io_queue_file_removal(data, *file_slot);
if (err)
break;
- table->files[index] = NULL;
+ *file_slot = NULL;
needs_switch = true;
}
if (fd != -1) {
@@ -8028,24 +7766,21 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
err = -EBADF;
break;
}
- table->files[index] = file;
+ *file_slot = file;
err = io_sqe_file_register(ctx, file, i);
if (err) {
- table->files[index] = NULL;
+ *file_slot = NULL;
fput(file);
break;
}
}
- nr_args--;
- done++;
- up->offset++;
}
if (needs_switch) {
percpu_ref_kill(&data->node->refs);
- io_sqe_files_set_node(data, ref_node);
+ io_sqe_rsrc_set_node(ctx, data, ref_node);
} else
- destroy_fixed_file_ref_node(ref_node);
+ destroy_fixed_rsrc_ref_node(ref_node);
return done ? done : err;
}
@@ -8053,7 +7788,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args)
{
- struct io_uring_files_update up;
+ struct io_uring_rsrc_update up;
if (!ctx->file_data)
return -ENXIO;
@@ -8067,62 +7802,42 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
return __io_sqe_files_update(ctx, &up, nr_args);
}
-static void io_free_work(struct io_wq_work *work)
+static struct io_wq_work *io_free_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- /* Consider that io_steal_work() relies on this ref */
- io_put_req(req);
+ req = io_put_req_find_next(req);
+ return req ? &req->work : NULL;
}
-static int io_init_wq_offload(struct io_ring_ctx *ctx,
- struct io_uring_params *p)
+static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx)
{
+ struct io_wq_hash *hash;
struct io_wq_data data;
- struct fd f;
- struct io_ring_ctx *ctx_attach;
unsigned int concurrency;
- int ret = 0;
- data.user = ctx->user;
- data.free_work = io_free_work;
- data.do_work = io_wq_submit_work;
-
- if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
- /* Do QD, or 4 * CPUS, whatever is smallest */
- concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
-
- ctx->io_wq = io_wq_create(concurrency, &data);
- if (IS_ERR(ctx->io_wq)) {
- ret = PTR_ERR(ctx->io_wq);
- ctx->io_wq = NULL;
- }
- return ret;
+ hash = ctx->hash_map;
+ if (!hash) {
+ hash = kzalloc(sizeof(*hash), GFP_KERNEL);
+ if (!hash)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&hash->refs, 1);
+ init_waitqueue_head(&hash->wait);
+ ctx->hash_map = hash;
}
- f = fdget(p->wq_fd);
- if (!f.file)
- return -EBADF;
-
- if (f.file->f_op != &io_uring_fops) {
- ret = -EINVAL;
- goto out_fput;
- }
+ data.hash = hash;
+ data.free_work = io_free_work;
+ data.do_work = io_wq_submit_work;
- ctx_attach = f.file->private_data;
- /* @io_wq is protected by holding the fd */
- if (!io_wq_get(ctx_attach->io_wq, &data)) {
- ret = -EINVAL;
- goto out_fput;
- }
+ /* Do QD, or 4 * CPUS, whatever is smallest */
+ concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
- ctx->io_wq = ctx_attach->io_wq;
-out_fput:
- fdput(f);
- return ret;
+ return io_wq_create(concurrency, &data);
}
-static int io_uring_alloc_task_context(struct task_struct *task)
+static int io_uring_alloc_task_context(struct task_struct *task,
+ struct io_ring_ctx *ctx)
{
struct io_uring_task *tctx;
int ret;
@@ -8137,14 +7852,23 @@ static int io_uring_alloc_task_context(struct task_struct *task)
return ret;
}
+ tctx->io_wq = io_init_wq_offload(ctx);
+ if (IS_ERR(tctx->io_wq)) {
+ ret = PTR_ERR(tctx->io_wq);
+ percpu_counter_destroy(&tctx->inflight);
+ kfree(tctx);
+ return ret;
+ }
+
xa_init(&tctx->xa);
init_waitqueue_head(&tctx->wait);
tctx->last = NULL;
atomic_set(&tctx->in_idle, 0);
- tctx->sqpoll = false;
- io_init_identity(&tctx->__identity);
- tctx->identity = &tctx->__identity;
task->io_uring = tctx;
+ spin_lock_init(&tctx->task_lock);
+ INIT_WQ_LIST(&tctx->task_list);
+ tctx->task_state = 0;
+ init_task_work(&tctx->task_work, tctx_task_work);
return 0;
}
@@ -8153,9 +7877,8 @@ void __io_uring_free(struct task_struct *tsk)
struct io_uring_task *tctx = tsk->io_uring;
WARN_ON_ONCE(!xa_empty(&tctx->xa));
- WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
- if (tctx->identity != &tctx->__identity)
- kfree(tctx->identity);
+ WARN_ON_ONCE(tctx->io_wq);
+
percpu_counter_destroy(&tctx->inflight);
kfree(tctx);
tsk->io_uring = NULL;
@@ -8166,54 +7889,80 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
{
int ret;
+ /* Retain compatibility with failing for an invalid attach attempt */
+ if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
+ IORING_SETUP_ATTACH_WQ) {
+ struct fd f;
+
+ f = fdget(p->wq_fd);
+ if (!f.file)
+ return -ENXIO;
+ if (f.file->f_op != &io_uring_fops) {
+ fdput(f);
+ return -EINVAL;
+ }
+ fdput(f);
+ }
if (ctx->flags & IORING_SETUP_SQPOLL) {
+ struct task_struct *tsk;
struct io_sq_data *sqd;
+ bool attached;
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
goto err;
- sqd = io_get_sq_data(p);
+ sqd = io_get_sq_data(p, &attached);
if (IS_ERR(sqd)) {
ret = PTR_ERR(sqd);
goto err;
}
+ ctx->sq_creds = get_current_cred();
ctx->sq_data = sqd;
- io_sq_thread_park(sqd);
- mutex_lock(&sqd->ctx_lock);
- list_add(&ctx->sqd_list, &sqd->ctx_new_list);
- mutex_unlock(&sqd->ctx_lock);
- io_sq_thread_unpark(sqd);
-
ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
if (!ctx->sq_thread_idle)
ctx->sq_thread_idle = HZ;
- if (sqd->thread)
- goto done;
+ ret = 0;
+ io_sq_thread_park(sqd);
+ list_add(&ctx->sqd_list, &sqd->ctx_list);
+ io_sqd_update_thread_idle(sqd);
+ /* don't attach to a dying SQPOLL thread, would be racy */
+ if (attached && !sqd->thread)
+ ret = -ENXIO;
+ io_sq_thread_unpark(sqd);
+
+ if (ret < 0)
+ goto err;
+ if (attached)
+ return 0;
if (p->flags & IORING_SETUP_SQ_AFF) {
int cpu = p->sq_thread_cpu;
ret = -EINVAL;
if (cpu >= nr_cpu_ids)
- goto err;
+ goto err_sqpoll;
if (!cpu_online(cpu))
- goto err;
+ goto err_sqpoll;
- sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
- cpu, "io_uring-sq");
+ sqd->sq_cpu = cpu;
} else {
- sqd->thread = kthread_create(io_sq_thread, sqd,
- "io_uring-sq");
+ sqd->sq_cpu = -1;
}
- if (IS_ERR(sqd->thread)) {
- ret = PTR_ERR(sqd->thread);
- sqd->thread = NULL;
- goto err;
+
+ sqd->task_pid = current->pid;
+ sqd->task_tgid = current->tgid;
+ tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
+ if (IS_ERR(tsk)) {
+ ret = PTR_ERR(tsk);
+ goto err_sqpoll;
}
- ret = io_uring_alloc_task_context(sqd->thread);
+
+ sqd->thread = tsk;
+ ret = io_uring_alloc_task_context(tsk, ctx);
+ wake_up_new_task(tsk);
if (ret)
goto err;
} else if (p->flags & IORING_SETUP_SQ_AFF) {
@@ -8222,23 +7971,13 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
goto err;
}
-done:
- ret = io_init_wq_offload(ctx, p);
- if (ret)
- goto err;
-
return 0;
err:
- io_finish_async(ctx);
+ io_sq_thread_finish(ctx);
return ret;
-}
-
-static void io_sq_offload_start(struct io_ring_ctx *ctx)
-{
- struct io_sq_data *sqd = ctx->sq_data;
-
- if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
- wake_up_process(sqd->thread);
+err_sqpoll:
+ complete(&ctx->sq_data->exited);
+ goto err;
}
static inline void __io_unaccount_mem(struct user_struct *user,
@@ -8266,43 +8005,27 @@ static inline int __io_account_mem(struct user_struct *user,
return 0;
}
-static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
- enum io_mem_account acct)
+static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
- if (ctx->limit_mem)
+ if (ctx->user)
__io_unaccount_mem(ctx->user, nr_pages);
- if (ctx->mm_account) {
- if (acct == ACCT_LOCKED) {
- mmap_write_lock(ctx->mm_account);
- ctx->mm_account->locked_vm -= nr_pages;
- mmap_write_unlock(ctx->mm_account);
- }else if (acct == ACCT_PINNED) {
- atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
- }
- }
+ if (ctx->mm_account)
+ atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}
-static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
- enum io_mem_account acct)
+static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
int ret;
- if (ctx->limit_mem) {
+ if (ctx->user) {
ret = __io_account_mem(ctx->user, nr_pages);
if (ret)
return ret;
}
- if (ctx->mm_account) {
- if (acct == ACCT_LOCKED) {
- mmap_write_lock(ctx->mm_account);
- ctx->mm_account->locked_vm += nr_pages;
- mmap_write_unlock(ctx->mm_account);
- } else if (acct == ACCT_PINNED) {
- atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
- }
- }
+ if (ctx->mm_account)
+ atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
return 0;
}
@@ -8322,7 +8045,7 @@ static void io_mem_free(void *ptr)
static void *io_mem_alloc(size_t size)
{
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
- __GFP_NORETRY;
+ __GFP_NORETRY | __GFP_ACCOUNT;
return (void *) __get_free_pages(gfp_flags, get_order(size));
}
@@ -8356,19 +8079,7 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
return off;
}
-static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
-{
- size_t pages;
-
- pages = (size_t)1 << get_order(
- rings_size(sq_entries, cq_entries, NULL));
- pages += (size_t)1 << get_order(
- array_size(sizeof(struct io_uring_sqe), sq_entries));
-
- return pages;
-}
-
-static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
+static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
int i, j;
@@ -8382,7 +8093,7 @@ static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
unpin_user_page(imu->bvec[j].bv_page);
if (imu->acct_pages)
- io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
+ io_unaccount_mem(ctx, imu->acct_pages);
kvfree(imu->bvec);
imu->nr_bvecs = 0;
}
@@ -8480,21 +8191,105 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
if (!imu->acct_pages)
return 0;
- ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED);
+ ret = io_account_mem(ctx, imu->acct_pages);
if (ret)
imu->acct_pages = 0;
return ret;
}
-static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
- unsigned nr_args)
+static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
+ struct io_mapped_ubuf *imu,
+ struct page **last_hpage)
{
struct vm_area_struct **vmas = NULL;
struct page **pages = NULL;
- struct page *last_hpage = NULL;
- int i, j, got_pages = 0;
- int ret = -EINVAL;
+ unsigned long off, start, end, ubuf;
+ size_t size;
+ int ret, pret, nr_pages, i;
+
+ ubuf = (unsigned long) iov->iov_base;
+ end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start = ubuf >> PAGE_SHIFT;
+ nr_pages = end - start;
+
+ ret = -ENOMEM;
+
+ pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto done;
+
+ vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
+ GFP_KERNEL);
+ if (!vmas)
+ goto done;
+
+ imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
+ GFP_KERNEL);
+ if (!imu->bvec)
+ goto done;
+
+ ret = 0;
+ mmap_read_lock(current->mm);
+ pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+ pages, vmas);
+ if (pret == nr_pages) {
+ /* don't support file backed memory */
+ for (i = 0; i < nr_pages; i++) {
+ struct vm_area_struct *vma = vmas[i];
+
+ if (vma->vm_file &&
+ !is_file_hugepages(vma->vm_file)) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ }
+ } else {
+ ret = pret < 0 ? pret : -EFAULT;
+ }
+ mmap_read_unlock(current->mm);
+ if (ret) {
+ /*
+ * if we did partial map, or found file backed vmas,
+ * release any pages we did get
+ */
+ if (pret > 0)
+ unpin_user_pages(pages, pret);
+ kvfree(imu->bvec);
+ goto done;
+ }
+ ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
+ if (ret) {
+ unpin_user_pages(pages, pret);
+ kvfree(imu->bvec);
+ goto done;
+ }
+
+ off = ubuf & ~PAGE_MASK;
+ size = iov->iov_len;
+ for (i = 0; i < nr_pages; i++) {
+ size_t vec_len;
+
+ vec_len = min_t(size_t, size, PAGE_SIZE - off);
+ imu->bvec[i].bv_page = pages[i];
+ imu->bvec[i].bv_len = vec_len;
+ imu->bvec[i].bv_offset = off;
+ off = 0;
+ size -= vec_len;
+ }
+ /* store original address for later verification */
+ imu->ubuf = ubuf;
+ imu->len = iov->iov_len;
+ imu->nr_bvecs = nr_pages;
+ ret = 0;
+done:
+ kvfree(pages);
+ kvfree(vmas);
+ return ret;
+}
+
+static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
+{
if (ctx->user_bufs)
return -EBUSY;
if (!nr_args || nr_args > UIO_MAXIOV)
@@ -8505,121 +8300,58 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
if (!ctx->user_bufs)
return -ENOMEM;
- for (i = 0; i < nr_args; i++) {
- struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
- unsigned long off, start, end, ubuf;
- int pret, nr_pages;
- struct iovec iov;
- size_t size;
+ return 0;
+}
- ret = io_copy_iov(ctx, &iov, arg, i);
- if (ret)
- goto err;
+static int io_buffer_validate(struct iovec *iov)
+{
+ /*
+ * Don't impose further limits on the size and buffer
+ * constraints here, we'll -EINVAL later when IO is
+ * submitted if they are wrong.
+ */
+ if (!iov->iov_base || !iov->iov_len)
+ return -EFAULT;
- /*
- * Don't impose further limits on the size and buffer
- * constraints here, we'll -EINVAL later when IO is
- * submitted if they are wrong.
- */
- ret = -EFAULT;
- if (!iov.iov_base || !iov.iov_len)
- goto err;
+ /* arbitrary limit, but we need something */
+ if (iov->iov_len > SZ_1G)
+ return -EFAULT;
- /* arbitrary limit, but we need something */
- if (iov.iov_len > SZ_1G)
- goto err;
+ return 0;
+}
- ubuf = (unsigned long) iov.iov_base;
- end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- start = ubuf >> PAGE_SHIFT;
- nr_pages = end - start;
+static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned int nr_args)
+{
+ int i, ret;
+ struct iovec iov;
+ struct page *last_hpage = NULL;
- ret = 0;
- if (!pages || nr_pages > got_pages) {
- kvfree(vmas);
- kvfree(pages);
- pages = kvmalloc_array(nr_pages, sizeof(struct page *),
- GFP_KERNEL);
- vmas = kvmalloc_array(nr_pages,
- sizeof(struct vm_area_struct *),
- GFP_KERNEL);
- if (!pages || !vmas) {
- ret = -ENOMEM;
- goto err;
- }
- got_pages = nr_pages;
- }
+ ret = io_buffers_map_alloc(ctx, nr_args);
+ if (ret)
+ return ret;
- imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
- GFP_KERNEL);
- ret = -ENOMEM;
- if (!imu->bvec)
- goto err;
+ for (i = 0; i < nr_args; i++) {
+ struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
- ret = 0;
- mmap_read_lock(current->mm);
- pret = pin_user_pages(ubuf, nr_pages,
- FOLL_WRITE | FOLL_LONGTERM,
- pages, vmas);
- if (pret == nr_pages) {
- /* don't support file backed memory */
- for (j = 0; j < nr_pages; j++) {
- struct vm_area_struct *vma = vmas[j];
-
- if (vma->vm_file &&
- !is_file_hugepages(vma->vm_file)) {
- ret = -EOPNOTSUPP;
- break;
- }
- }
- } else {
- ret = pret < 0 ? pret : -EFAULT;
- }
- mmap_read_unlock(current->mm);
- if (ret) {
- /*
- * if we did partial map, or found file backed vmas,
- * release any pages we did get
- */
- if (pret > 0)
- unpin_user_pages(pages, pret);
- kvfree(imu->bvec);
- goto err;
- }
+ ret = io_copy_iov(ctx, &iov, arg, i);
+ if (ret)
+ break;
- ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage);
- if (ret) {
- unpin_user_pages(pages, pret);
- kvfree(imu->bvec);
- goto err;
- }
+ ret = io_buffer_validate(&iov);
+ if (ret)
+ break;
- off = ubuf & ~PAGE_MASK;
- size = iov.iov_len;
- for (j = 0; j < nr_pages; j++) {
- size_t vec_len;
-
- vec_len = min_t(size_t, size, PAGE_SIZE - off);
- imu->bvec[j].bv_page = pages[j];
- imu->bvec[j].bv_len = vec_len;
- imu->bvec[j].bv_offset = off;
- off = 0;
- size -= vec_len;
- }
- /* store original address for later verification */
- imu->ubuf = ubuf;
- imu->len = iov.iov_len;
- imu->nr_bvecs = nr_pages;
+ ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
+ if (ret)
+ break;
ctx->nr_user_bufs++;
}
- kvfree(pages);
- kvfree(vmas);
- return 0;
-err:
- kvfree(pages);
- kvfree(vmas);
- io_sqe_buffer_unregister(ctx);
+
+ if (ret)
+ io_sqe_buffers_unregister(ctx);
+
return ret;
}
@@ -8655,42 +8387,75 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
return -ENXIO;
}
-static int __io_destroy_buffers(int id, void *p, void *data)
+static void io_destroy_buffers(struct io_ring_ctx *ctx)
{
- struct io_ring_ctx *ctx = data;
- struct io_buffer *buf = p;
+ struct io_buffer *buf;
+ unsigned long index;
- __io_remove_buffers(ctx, buf, id, -1U);
- return 0;
+ xa_for_each(&ctx->io_buffers, index, buf)
+ __io_remove_buffers(ctx, buf, index, -1U);
}
-static void io_destroy_buffers(struct io_ring_ctx *ctx)
+static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
{
- idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
- idr_destroy(&ctx->io_buffer_idr);
+ struct io_kiocb *req, *nxt;
+
+ list_for_each_entry_safe(req, nxt, list, compl.list) {
+ if (tsk && req->task != tsk)
+ continue;
+ list_del(&req->compl.list);
+ kmem_cache_free(req_cachep, req);
+ }
+}
+
+static void io_req_caches_free(struct io_ring_ctx *ctx)
+{
+ struct io_submit_state *submit_state = &ctx->submit_state;
+ struct io_comp_state *cs = &ctx->submit_state.comp;
+
+ mutex_lock(&ctx->uring_lock);
+
+ if (submit_state->free_reqs) {
+ kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
+ submit_state->reqs);
+ submit_state->free_reqs = 0;
+ }
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_splice_init(&cs->locked_free_list, &cs->free_list);
+ cs->locked_free_nr = 0;
+ spin_unlock_irq(&ctx->completion_lock);
+
+ io_req_cache_free(&cs->free_list, NULL);
+
+ mutex_unlock(&ctx->uring_lock);
}
static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
- io_finish_async(ctx);
- io_sqe_buffer_unregister(ctx);
+ /*
+ * Some may use context even when all refs and requests have been put,
+ * and they are free to do so while still holding uring_lock or
+ * completion_lock, see __io_req_task_submit(). Wait for them to finish.
+ */
+ mutex_lock(&ctx->uring_lock);
+ mutex_unlock(&ctx->uring_lock);
+ spin_lock_irq(&ctx->completion_lock);
+ spin_unlock_irq(&ctx->completion_lock);
- if (ctx->sqo_task) {
- put_task_struct(ctx->sqo_task);
- ctx->sqo_task = NULL;
+ io_sq_thread_finish(ctx);
+ io_sqe_buffers_unregister(ctx);
+
+ if (ctx->mm_account) {
mmdrop(ctx->mm_account);
ctx->mm_account = NULL;
}
-#ifdef CONFIG_BLK_CGROUP
- if (ctx->sqo_blkcg_css)
- css_put(ctx->sqo_blkcg_css);
-#endif
-
+ mutex_lock(&ctx->uring_lock);
io_sqe_files_unregister(ctx);
+ mutex_unlock(&ctx->uring_lock);
io_eventfd_unregister(ctx);
io_destroy_buffers(ctx);
- idr_destroy(&ctx->personality_idr);
#if defined(CONFIG_UNIX)
if (ctx->ring_sock) {
@@ -8704,9 +8469,10 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
percpu_ref_exit(&ctx->refs);
free_uid(ctx->user);
- put_cred(ctx->creds);
+ io_req_caches_free(ctx);
+ if (ctx->hash_map)
+ io_wq_put_hash(ctx->hash_map);
kfree(ctx->cancel_hash);
- kmem_cache_free(req_cachep, ctx->fallback_req);
kfree(ctx);
}
@@ -8723,8 +8489,21 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
smp_rmb();
if (!io_sqring_full(ctx))
mask |= EPOLLOUT | EPOLLWRNORM;
- io_cqring_overflow_flush(ctx, false, NULL, NULL);
- if (io_cqring_events(ctx))
+
+ /*
+ * Don't flush cqring overflow list here, just do a simple check.
+	 * Otherwise there could possibly be an ABBA deadlock:
+	 *      CPU0                    CPU1
+	 *      ----                    ----
+	 * lock(&ctx->uring_lock);
+	 *                              lock(&ep->mtx);
+	 *                              lock(&ctx->uring_lock);
+	 * lock(&ep->mtx);
+ *
+	 * Users may get EPOLLIN while seeing nothing in the cqring; this
+	 * pushes them to do the flush.
+ */
+ if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
@@ -8737,24 +8516,52 @@ static int io_uring_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &ctx->cq_fasync);
}
-static int io_remove_personalities(int id, void *p, void *data)
+static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
- struct io_ring_ctx *ctx = data;
- struct io_identity *iod;
+ const struct cred *creds;
- iod = idr_remove(&ctx->personality_idr, id);
- if (iod) {
- put_cred(iod->creds);
- if (refcount_dec_and_test(&iod->count))
- kfree(iod);
+ creds = xa_erase(&ctx->personalities, id);
+ if (creds) {
+ put_cred(creds);
+ return 0;
}
- return 0;
+
+ return -EINVAL;
+}
+
+static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
+{
+ return io_run_task_work_head(&ctx->exit_task_work);
+}
+
+struct io_tctx_exit {
+ struct callback_head task_work;
+ struct completion completion;
+ struct io_ring_ctx *ctx;
+};
+
+static void io_tctx_exit_cb(struct callback_head *cb)
+{
+ struct io_uring_task *tctx = current->io_uring;
+ struct io_tctx_exit *work;
+
+ work = container_of(cb, struct io_tctx_exit, task_work);
+ /*
+ * When @in_idle, we're in cancellation and it's racy to remove the
+ * node. It'll be removed by the end of cancellation, just ignore it.
+ */
+ if (!atomic_read(&tctx->in_idle))
+ io_uring_del_task_file((unsigned long)work->ctx);
+ complete(&work->completion);
}
static void io_ring_exit_work(struct work_struct *work)
{
- struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
- exit_work);
+ struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
+ unsigned long timeout = jiffies + HZ * 60 * 5;
+ struct io_tctx_exit exit;
+ struct io_tctx_node *node;
+ int ret;
/*
* If we're doing polled IO and end up having requests being
@@ -8763,49 +8570,63 @@ static void io_ring_exit_work(struct work_struct *work)
* as nobody else will be looking for them.
*/
do {
- __io_uring_cancel_task_requests(ctx, NULL);
+ io_uring_try_cancel_requests(ctx, NULL, NULL);
+
+ WARN_ON_ONCE(time_after(jiffies, timeout));
} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
- io_ring_ctx_free(ctx);
-}
-static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
-{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ mutex_lock(&ctx->uring_lock);
+ while (!list_empty(&ctx->tctx_list)) {
+ WARN_ON_ONCE(time_after(jiffies, timeout));
+
+ node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
+ ctx_node);
+ exit.ctx = ctx;
+ init_completion(&exit.completion);
+ init_task_work(&exit.task_work, io_tctx_exit_cb);
+ ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
+ if (WARN_ON_ONCE(ret))
+ continue;
+ wake_up_process(node->task);
- return req->ctx == data;
+ mutex_unlock(&ctx->uring_lock);
+ wait_for_completion(&exit.completion);
+ cond_resched();
+ mutex_lock(&ctx->uring_lock);
+ }
+ mutex_unlock(&ctx->uring_lock);
+
+ io_ring_ctx_free(ctx);
}
static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
+ unsigned long index;
+ struct creds *creds;
+
mutex_lock(&ctx->uring_lock);
percpu_ref_kill(&ctx->refs);
-
- if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
- ctx->sqo_dead = 1;
-
/* if force is set, the ring is going away. always drop after that */
ctx->cq_overflow_flushed = 1;
if (ctx->rings)
__io_cqring_overflow_flush(ctx, true, NULL, NULL);
+ xa_for_each(&ctx->personalities, index, creds)
+ io_unregister_personality(ctx, index);
mutex_unlock(&ctx->uring_lock);
+ /* prevent SQPOLL from submitting new requests */
+ if (ctx->sq_data) {
+ io_sq_thread_park(ctx->sq_data);
+ list_del_init(&ctx->sqd_list);
+ io_sqd_update_thread_idle(ctx->sq_data);
+ io_sq_thread_unpark(ctx->sq_data);
+ }
+
io_kill_timeouts(ctx, NULL, NULL);
io_poll_remove_all(ctx, NULL, NULL);
- if (ctx->io_wq)
- io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true);
-
/* if we failed setting up the ctx, we might not have any rings */
io_iopoll_try_reap_events(ctx);
- idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
-
- /*
- * Do this upfront, so we won't have a grace period where the ring
- * is closed but resources aren't reaped yet. This can cause
- * spurious failure in setting up a new ring.
- */
- io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
- ACCT_LOCKED);
INIT_WORK(&ctx->exit_work, io_ring_exit_work);
/*
@@ -8851,11 +8672,11 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
return ret;
}
-static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
struct task_struct *task,
struct files_struct *files)
{
- struct io_defer_entry *de = NULL;
+ struct io_defer_entry *de;
LIST_HEAD(list);
spin_lock_irq(&ctx->completion_lock);
@@ -8866,6 +8687,8 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
}
}
spin_unlock_irq(&ctx->completion_lock);
+ if (list_empty(&list))
+ return false;
while (!list_empty(&list)) {
de = list_first_entry(&list, struct io_defer_entry, list);
@@ -8875,226 +8698,292 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
io_req_complete(de->req, -ECANCELED);
kfree(de);
}
+ return true;
}
-static int io_uring_count_inflight(struct io_ring_ctx *ctx,
- struct task_struct *task,
- struct files_struct *files)
+static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
{
- struct io_kiocb *req;
- int cnt = 0;
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- spin_lock_irq(&ctx->inflight_lock);
- list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
- cnt += io_match_task(req, task, files);
- spin_unlock_irq(&ctx->inflight_lock);
- return cnt;
+ return req->ctx == data;
}
-static void io_uring_cancel_files(struct io_ring_ctx *ctx,
- struct task_struct *task,
- struct files_struct *files)
+static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
{
- while (!list_empty_careful(&ctx->inflight_list)) {
- struct io_task_cancel cancel = { .task = task, .files = files };
- DEFINE_WAIT(wait);
- int inflight;
+ struct io_tctx_node *node;
+ enum io_wq_cancel cret;
+ bool ret = false;
- inflight = io_uring_count_inflight(ctx, task, files);
- if (!inflight)
- break;
-
- io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
- io_poll_remove_all(ctx, task, files);
- io_kill_timeouts(ctx, task, files);
- io_cqring_overflow_flush(ctx, true, task, files);
- /* cancellations _may_ trigger task work */
- io_run_task_work();
+ mutex_lock(&ctx->uring_lock);
+ list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+ struct io_uring_task *tctx = node->task->io_uring;
- prepare_to_wait(&task->io_uring->wait, &wait,
- TASK_UNINTERRUPTIBLE);
- if (inflight == io_uring_count_inflight(ctx, task, files))
- schedule();
- finish_wait(&task->io_uring->wait, &wait);
+ /*
+ * io_wq will stay alive while we hold uring_lock, because it's
+ * killed after ctx nodes, which requires to take the lock.
+ */
+ if (!tctx || !tctx->io_wq)
+ continue;
+ cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
+ ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
}
+ mutex_unlock(&ctx->uring_lock);
+
+ return ret;
}
-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
- struct task_struct *task)
+static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ struct task_struct *task,
+ struct files_struct *files)
{
+ struct io_task_cancel cancel = { .task = task, .files = files, };
+ struct io_uring_task *tctx = task ? task->io_uring : NULL;
+
while (1) {
- struct io_task_cancel cancel = { .task = task, .files = NULL, };
enum io_wq_cancel cret;
bool ret = false;
- if (ctx->io_wq) {
- cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+ if (!task) {
+ ret |= io_uring_try_cancel_iowq(ctx);
+ } else if (tctx && tctx->io_wq) {
+ /*
+ * Cancels requests of all rings, not only @ctx, but
+ * it's fine as the task is in exit/exec.
+ */
+ cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
&cancel, true);
ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
}
/* SQPOLL thread does its own polling */
- if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+ if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) ||
+ (ctx->sq_data && ctx->sq_data->thread == current)) {
while (!list_empty_careful(&ctx->iopoll_list)) {
io_iopoll_try_reap_events(ctx);
ret = true;
}
}
- ret |= io_poll_remove_all(ctx, task, NULL);
- ret |= io_kill_timeouts(ctx, task, NULL);
+ ret |= io_cancel_defer_files(ctx, task, files);
+ ret |= io_poll_remove_all(ctx, task, files);
+ ret |= io_kill_timeouts(ctx, task, files);
ret |= io_run_task_work();
+ ret |= io_run_ctx_fallback(ctx);
+ io_cqring_overflow_flush(ctx, true, task, files);
if (!ret)
break;
cond_resched();
}
}
-static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
+static int io_uring_count_inflight(struct io_ring_ctx *ctx,
+ struct task_struct *task,
+ struct files_struct *files)
{
- mutex_lock(&ctx->uring_lock);
- ctx->sqo_dead = 1;
- mutex_unlock(&ctx->uring_lock);
+ struct io_kiocb *req;
+ int cnt = 0;
- /* make sure callers enter the ring to get error */
- if (ctx->rings)
- io_ring_set_wakeup_flag(ctx);
+ spin_lock_irq(&ctx->inflight_lock);
+ list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
+ cnt += io_match_task(req, task, files);
+ spin_unlock_irq(&ctx->inflight_lock);
+ return cnt;
}
-/*
- * We need to iteratively cancel requests, in case a request has dependent
- * hard links. These persist even for failure of cancelations, hence keep
- * looping until none are found.
- */
-static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
- struct files_struct *files)
+static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+ struct task_struct *task,
+ struct files_struct *files)
{
- struct task_struct *task = current;
-
- if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
- io_disable_sqo_submit(ctx);
- task = ctx->sq_data->thread;
- atomic_inc(&task->io_uring->in_idle);
- io_sq_thread_park(ctx->sq_data);
- }
+ while (!list_empty_careful(&ctx->inflight_list)) {
+ DEFINE_WAIT(wait);
+ int inflight;
- io_cancel_defer_files(ctx, task, files);
- io_cqring_overflow_flush(ctx, true, task, files);
+ inflight = io_uring_count_inflight(ctx, task, files);
+ if (!inflight)
+ break;
- io_uring_cancel_files(ctx, task, files);
- if (!files)
- __io_uring_cancel_task_requests(ctx, task);
+ io_uring_try_cancel_requests(ctx, task, files);
- if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
- atomic_dec(&task->io_uring->in_idle);
- io_sq_thread_unpark(ctx->sq_data);
+ prepare_to_wait(&task->io_uring->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (inflight == io_uring_count_inflight(ctx, task, files))
+ schedule();
+ finish_wait(&task->io_uring->wait, &wait);
}
}
/*
* Note that this task has used io_uring. We use it for cancelation purposes.
*/
-static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
+static int io_uring_add_task_file(struct io_ring_ctx *ctx)
{
struct io_uring_task *tctx = current->io_uring;
+ struct io_tctx_node *node;
int ret;
if (unlikely(!tctx)) {
- ret = io_uring_alloc_task_context(current);
+ ret = io_uring_alloc_task_context(current, ctx);
if (unlikely(ret))
return ret;
tctx = current->io_uring;
}
- if (tctx->last != file) {
- void *old = xa_load(&tctx->xa, (unsigned long)file);
+ if (tctx->last != ctx) {
+ void *old = xa_load(&tctx->xa, (unsigned long)ctx);
if (!old) {
- get_file(file);
- ret = xa_err(xa_store(&tctx->xa, (unsigned long)file,
- file, GFP_KERNEL));
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+ node->ctx = ctx;
+ node->task = current;
+
+ ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
+ node, GFP_KERNEL));
if (ret) {
- fput(file);
+ kfree(node);
return ret;
}
+
+ mutex_lock(&ctx->uring_lock);
+ list_add(&node->ctx_node, &ctx->tctx_list);
+ mutex_unlock(&ctx->uring_lock);
}
- tctx->last = file;
+ tctx->last = ctx;
}
-
- /*
- * This is race safe in that the task itself is doing this, hence it
- * cannot be going through the exit/cancel paths at the same time.
- * This cannot be modified while exit/cancel is running.
- */
- if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
- tctx->sqpoll = true;
-
return 0;
}
/*
* Remove this io_uring_file -> task mapping.
*/
-static void io_uring_del_task_file(struct file *file)
+static void io_uring_del_task_file(unsigned long index)
{
struct io_uring_task *tctx = current->io_uring;
+ struct io_tctx_node *node;
+
+ if (!tctx)
+ return;
+ node = xa_erase(&tctx->xa, index);
+ if (!node)
+ return;
+
+ WARN_ON_ONCE(current != node->task);
+ WARN_ON_ONCE(list_empty(&node->ctx_node));
- if (tctx->last == file)
+ mutex_lock(&node->ctx->uring_lock);
+ list_del(&node->ctx_node);
+ mutex_unlock(&node->ctx->uring_lock);
+
+ if (tctx->last == node->ctx)
tctx->last = NULL;
- file = xa_erase(&tctx->xa, (unsigned long)file);
- if (file)
- fput(file);
+ kfree(node);
}
-static void io_uring_remove_task_files(struct io_uring_task *tctx)
+static void io_uring_clean_tctx(struct io_uring_task *tctx)
{
- struct file *file;
+ struct io_tctx_node *node;
unsigned long index;
- xa_for_each(&tctx->xa, index, file)
- io_uring_del_task_file(file);
+ xa_for_each(&tctx->xa, index, node)
+ io_uring_del_task_file(index);
+ if (tctx->io_wq) {
+ io_wq_put_and_exit(tctx->io_wq);
+ tctx->io_wq = NULL;
+ }
+}
+
+static s64 tctx_inflight(struct io_uring_task *tctx)
+{
+ return percpu_counter_sum(&tctx->inflight);
+}
+
+static void io_sqpoll_cancel_cb(struct callback_head *cb)
+{
+ struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
+ struct io_ring_ctx *ctx = work->ctx;
+ struct io_sq_data *sqd = ctx->sq_data;
+
+ if (sqd->thread)
+ io_uring_cancel_sqpoll(ctx);
+ complete(&work->completion);
+}
+
+static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
+{
+ struct io_sq_data *sqd = ctx->sq_data;
+ struct io_tctx_exit work = { .ctx = ctx, };
+ struct task_struct *task;
+
+ io_sq_thread_park(sqd);
+ list_del_init(&ctx->sqd_list);
+ io_sqd_update_thread_idle(sqd);
+ task = sqd->thread;
+ if (task) {
+ init_completion(&work.completion);
+ init_task_work(&work.task_work, io_sqpoll_cancel_cb);
+ io_task_work_add_head(&sqd->park_task_work, &work.task_work);
+ wake_up_process(task);
+ }
+ io_sq_thread_unpark(sqd);
+
+ if (task)
+ wait_for_completion(&work.completion);
}
void __io_uring_files_cancel(struct files_struct *files)
{
struct io_uring_task *tctx = current->io_uring;
- struct file *file;
+ struct io_tctx_node *node;
unsigned long index;
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
- xa_for_each(&tctx->xa, index, file)
- io_uring_cancel_task_requests(file->private_data, files);
+ xa_for_each(&tctx->xa, index, node) {
+ struct io_ring_ctx *ctx = node->ctx;
+
+ if (ctx->sq_data) {
+ io_sqpoll_cancel_sync(ctx);
+ continue;
+ }
+ io_uring_cancel_files(ctx, current, files);
+ if (!files)
+ io_uring_try_cancel_requests(ctx, current, NULL);
+ }
atomic_dec(&tctx->in_idle);
if (files)
- io_uring_remove_task_files(tctx);
+ io_uring_clean_tctx(tctx);
}
-static s64 tctx_inflight(struct io_uring_task *tctx)
+/* should only be called by SQPOLL task */
+static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
{
- unsigned long index;
- struct file *file;
+ struct io_sq_data *sqd = ctx->sq_data;
+ struct io_uring_task *tctx = current->io_uring;
s64 inflight;
+ DEFINE_WAIT(wait);
- inflight = percpu_counter_sum(&tctx->inflight);
- if (!tctx->sqpoll)
- return inflight;
-
- /*
- * If we have SQPOLL rings, then we need to iterate and find them, and
- * add the pending count for those.
- */
- xa_for_each(&tctx->xa, index, file) {
- struct io_ring_ctx *ctx = file->private_data;
-
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- struct io_uring_task *__tctx = ctx->sqo_task->io_uring;
+ WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
- inflight += percpu_counter_sum(&__tctx->inflight);
- }
- }
+ atomic_inc(&tctx->in_idle);
+ do {
+ /* read completions before cancelations */
+ inflight = tctx_inflight(tctx);
+ if (!inflight)
+ break;
+ io_uring_try_cancel_requests(ctx, current, NULL);
- return inflight;
+ prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+ /*
+ * If we've seen completions, retry without waiting. This
+ * avoids a race where a completion comes in before we did
+ * prepare_to_wait().
+ */
+ if (inflight == tctx_inflight(tctx))
+ schedule();
+ finish_wait(&tctx->wait, &wait);
+ } while (1);
+ atomic_dec(&tctx->in_idle);
}
/*
@@ -9109,11 +8998,6 @@ void __io_uring_task_cancel(void)
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
-
- /* trigger io_disable_sqo_submit() */
- if (tctx->sqpoll)
- __io_uring_files_cancel(NULL);
-
do {
/* read completions before cancelations */
inflight = tctx_inflight(tctx);
@@ -9135,45 +9019,9 @@ void __io_uring_task_cancel(void)
atomic_dec(&tctx->in_idle);
- io_uring_remove_task_files(tctx);
-}
-
-static int io_uring_flush(struct file *file, void *data)
-{
- struct io_uring_task *tctx = current->io_uring;
- struct io_ring_ctx *ctx = file->private_data;
-
- if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
- io_uring_cancel_task_requests(ctx, NULL);
-
- if (!tctx)
- return 0;
-
- /* we should have cancelled and erased it before PF_EXITING */
- WARN_ON_ONCE((current->flags & PF_EXITING) &&
- xa_load(&tctx->xa, (unsigned long)file));
-
- /*
- * fput() is pending, will be 2 if the only other ref is our potential
- * task file note. If the task is exiting, drop regardless of count.
- */
- if (atomic_long_read(&file->f_count) != 2)
- return 0;
-
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- /* there is only one file note, which is owned by sqo_task */
- WARN_ON_ONCE(ctx->sqo_task != current &&
- xa_load(&tctx->xa, (unsigned long)file));
- /* sqo_dead check is for when this happens after cancellation */
- WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
- !xa_load(&tctx->xa, (unsigned long)file));
-
- io_disable_sqo_submit(ctx);
- }
-
- if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
- io_uring_del_task_file(file);
- return 0;
+ io_uring_clean_tctx(tctx);
+ /* all current's requests should be gone, we can kill tctx */
+ __io_uring_free(current);
}
static void *io_uring_validate_mmap_request(struct file *file,
@@ -9248,29 +9096,20 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
- int ret = 0;
DEFINE_WAIT(wait);
do {
if (!io_sqring_full(ctx))
break;
-
prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
- if (unlikely(ctx->sqo_dead)) {
- ret = -EOWNERDEAD;
- goto out;
- }
-
if (!io_sqring_full(ctx))
break;
-
schedule();
} while (!signal_pending(current));
finish_wait(&ctx->sqo_sq_wait, &wait);
-out:
- return ret;
+ return 0;
}
static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
@@ -9345,8 +9184,9 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
io_cqring_overflow_flush(ctx, false, NULL, NULL);
ret = -EOWNERDEAD;
- if (unlikely(ctx->sqo_dead))
+ if (unlikely(ctx->sq_data->thread == NULL)) {
goto out;
+ }
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sq_data->wait);
if (flags & IORING_ENTER_SQ_WAIT) {
@@ -9356,7 +9196,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
}
submitted = to_submit;
} else if (to_submit) {
- ret = io_uring_add_task_file(ctx, f.file);
+ ret = io_uring_add_task_file(ctx);
if (unlikely(ret))
goto out;
mutex_lock(&ctx->uring_lock);
@@ -9398,11 +9238,9 @@ out_fput:
}
#ifdef CONFIG_PROC_FS
-static int io_uring_show_cred(int id, void *p, void *data)
+static int io_uring_show_cred(struct seq_file *m, unsigned int id,
+ const struct cred *cred)
{
- struct io_identity *iod = p;
- const struct cred *cred = iod->creds;
- struct seq_file *m = data;
struct user_namespace *uns = seq_user_ns(m);
struct group_info *gi;
kernel_cap_t cap;
@@ -9446,18 +9284,18 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
*/
has_lock = mutex_trylock(&ctx->uring_lock);
- if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
+ if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
sq = ctx->sq_data;
+ if (!sq->thread)
+ sq = NULL;
+ }
seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
- struct fixed_file_table *table;
- struct file *f;
+ struct file *f = *io_fixed_file_slot(ctx->file_data, i);
- table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
- f = table->files[i & IORING_FILE_TABLE_MASK];
if (f)
seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
else
@@ -9470,9 +9308,13 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
(unsigned int) buf->len);
}
- if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
+ if (has_lock && !xa_empty(&ctx->personalities)) {
+ unsigned long index;
+ const struct cred *cred;
+
seq_printf(m, "Personalities:\n");
- idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
+ xa_for_each(&ctx->personalities, index, cred)
+ io_uring_show_cred(m, index, cred);
}
seq_printf(m, "PollList:\n");
spin_lock_irq(&ctx->completion_lock);
@@ -9502,7 +9344,6 @@ static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
static const struct file_operations io_uring_fops = {
.release = io_uring_release,
- .flush = io_uring_flush,
.mmap = io_uring_mmap,
#ifndef CONFIG_MMU
.get_unmapped_area = io_uring_nommu_get_unmapped_area,
@@ -9567,7 +9408,7 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
if (fd < 0)
return fd;
- ret = io_uring_add_task_file(ctx, file);
+ ret = io_uring_add_task_file(ctx);
if (ret) {
put_unused_fd(fd);
return ret;
@@ -9610,10 +9451,8 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
static int io_uring_create(unsigned entries, struct io_uring_params *p,
struct io_uring_params __user *params)
{
- struct user_struct *user = NULL;
struct io_ring_ctx *ctx;
struct file *file;
- bool limit_mem;
int ret;
if (!entries)
@@ -9653,34 +9492,12 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
p->cq_entries = 2 * p->sq_entries;
}
- user = get_uid(current_user());
- limit_mem = !capable(CAP_IPC_LOCK);
-
- if (limit_mem) {
- ret = __io_account_mem(user,
- ring_pages(p->sq_entries, p->cq_entries));
- if (ret) {
- free_uid(user);
- return ret;
- }
- }
-
ctx = io_ring_ctx_alloc(p);
- if (!ctx) {
- if (limit_mem)
- __io_unaccount_mem(user, ring_pages(p->sq_entries,
- p->cq_entries));
- free_uid(user);
+ if (!ctx)
return -ENOMEM;
- }
ctx->compat = in_compat_syscall();
- ctx->user = user;
- ctx->creds = get_current_cred();
-#ifdef CONFIG_AUDIT
- ctx->loginuid = current->loginuid;
- ctx->sessionid = current->sessionid;
-#endif
- ctx->sqo_task = get_task_struct(current);
+ if (!capable(CAP_IPC_LOCK))
+ ctx->user = get_uid(current_user());
/*
* This is just grabbed for accounting purposes. When a process exits,
@@ -9691,35 +9508,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
mmgrab(current->mm);
ctx->mm_account = current->mm;
-#ifdef CONFIG_BLK_CGROUP
- /*
- * The sq thread will belong to the original cgroup it was inited in.
- * If the cgroup goes offline (e.g. disabling the io controller), then
- * issued bios will be associated with the closest cgroup later in the
- * block layer.
- */
- rcu_read_lock();
- ctx->sqo_blkcg_css = blkcg_css();
- ret = css_tryget_online(ctx->sqo_blkcg_css);
- rcu_read_unlock();
- if (!ret) {
- /* don't init against a dying cgroup, have the user try again */
- ctx->sqo_blkcg_css = NULL;
- ret = -ENODEV;
- goto err;
- }
-#endif
-
- /*
- * Account memory _before_ installing the file descriptor. Once
- * the descriptor is installed, it can get closed at any time. Also
- * do this before hitting the general error path, as ring freeing
- * will un-account as well.
- */
- io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
- ACCT_LOCKED);
- ctx->limit_mem = limit_mem;
-
ret = io_allocate_scq_urings(ctx, p);
if (ret)
goto err;
@@ -9728,9 +9516,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
if (ret)
goto err;
- if (!(p->flags & IORING_SETUP_R_DISABLED))
- io_sq_offload_start(ctx);
-
memset(&p->sq_off, 0, sizeof(p->sq_off));
p->sq_off.head = offsetof(struct io_rings, sq.head);
p->sq_off.tail = offsetof(struct io_rings, sq.tail);
@@ -9753,7 +9538,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
- IORING_FEAT_EXT_ARG;
+ IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
@@ -9772,7 +9557,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
*/
ret = io_uring_install_fd(ctx, file);
if (ret < 0) {
- io_disable_sqo_submit(ctx);
/* fput will clean it up */
fput(file);
return ret;
@@ -9781,7 +9565,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
- io_disable_sqo_submit(ctx);
io_ring_ctx_wait_and_kill(ctx);
return ret;
}
@@ -9859,39 +9642,20 @@ out:
static int io_register_personality(struct io_ring_ctx *ctx)
{
- struct io_identity *id;
+ const struct cred *creds;
+ u32 id;
int ret;
- id = kmalloc(sizeof(*id), GFP_KERNEL);
- if (unlikely(!id))
- return -ENOMEM;
-
- io_init_identity(id);
- id->creds = get_current_cred();
+ creds = get_current_cred();
- ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL);
- if (ret < 0) {
- put_cred(id->creds);
- kfree(id);
- }
+ ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
+ XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
+ if (!ret)
+ return id;
+ put_cred(creds);
return ret;
}
-static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
-{
- struct io_identity *iod;
-
- iod = idr_remove(&ctx->personality_idr, id);
- if (iod) {
- put_cred(iod->creds);
- if (refcount_dec_and_test(&iod->count))
- kfree(iod);
- return 0;
- }
-
- return -EINVAL;
-}
-
static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
unsigned int nr_args)
{
@@ -9971,9 +9735,8 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
ctx->restricted = 1;
ctx->flags &= ~IORING_SETUP_R_DISABLED;
-
- io_sq_offload_start(ctx);
-
+ if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
+ wake_up(&ctx->sq_data->wait);
return 0;
}
@@ -10049,13 +9812,13 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
switch (opcode) {
case IORING_REGISTER_BUFFERS:
- ret = io_sqe_buffer_register(ctx, arg, nr_args);
+ ret = io_sqe_buffers_register(ctx, arg, nr_args);
break;
case IORING_UNREGISTER_BUFFERS:
ret = -EINVAL;
if (arg || nr_args)
break;
- ret = io_sqe_buffer_unregister(ctx);
+ ret = io_sqe_buffers_unregister(ctx);
break;
case IORING_REGISTER_FILES:
ret = io_sqe_files_register(ctx, arg, nr_args);
@@ -10147,6 +9910,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
ctx = f.file->private_data;
+ io_run_task_work();
+
mutex_lock(&ctx->uring_lock);
ret = __io_uring_register(ctx, opcode, arg, nr_args);
mutex_unlock(&ctx->uring_lock);
@@ -10198,7 +9963,8 @@ static int __init io_uring_init(void)
BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
- req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+ req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
+ SLAB_ACCOUNT);
return 0;
};
__initcall(io_uring_init);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 16a1e82e3aeb..414769a6ad11 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -278,14 +278,14 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (!is_contig || bio_full(ctx->bio, plen)) {
gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
gfp_t orig_gfp = gfp;
- int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
if (ctx->bio)
submit_bio(ctx->bio);
if (ctx->rac) /* same as readahead_gfp_mask */
gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
+ ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
/*
* If the bio_alloc fails, try it again for a single page to
* avoid having to deal with partial page reads. This emulates
@@ -1221,7 +1221,7 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
struct iomap_ioend *ioend;
struct bio *bio;
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
+ bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
bio_set_dev(bio, wpc->iomap.bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
@@ -1252,7 +1252,7 @@ iomap_chain_bio(struct bio *prev)
{
struct bio *new;
- new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
+ new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
bio_copy_dev(new, prev);/* also copies over blkcg information */
new->bi_iter.bi_sector = bio_end_sector(prev);
new->bi_opf = prev->bi_opf;
@@ -1459,13 +1459,6 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
goto redirty;
/*
- * Given that we do not allow direct reclaim to call us, we should
- * never be called in a recursive filesystem reclaim context.
- */
- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
- goto redirty;
-
- /*
* Is this page beyond the end of the file?
*
* The page index is less than the end_index, adjust the end_offset
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 933f234d5bec..bdd0d89bbf0a 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -201,6 +201,34 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
iomap_dio_submit_bio(dio, iomap, bio, pos);
}
+/*
+ * Figure out the bio's operation flags from the dio request, the
+ * mapping, and whether or not we want FUA. Note that we can end up
+ * clearing the WRITE_FUA flag in the dio request.
+ */
+static inline unsigned int
+iomap_dio_bio_opflags(struct iomap_dio *dio, struct iomap *iomap, bool use_fua)
+{
+ unsigned int opflags = REQ_SYNC | REQ_IDLE;
+
+ if (!(dio->flags & IOMAP_DIO_WRITE)) {
+ WARN_ON_ONCE(iomap->flags & IOMAP_F_ZONE_APPEND);
+ return REQ_OP_READ;
+ }
+
+ if (iomap->flags & IOMAP_F_ZONE_APPEND)
+ opflags |= REQ_OP_ZONE_APPEND;
+ else
+ opflags |= REQ_OP_WRITE;
+
+ if (use_fua)
+ opflags |= REQ_FUA;
+ else
+ dio->flags &= ~IOMAP_DIO_WRITE_FUA;
+
+ return opflags;
+}
+
static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
struct iomap_dio *dio, struct iomap *iomap)
@@ -208,6 +236,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
unsigned int fs_block_size = i_blocksize(inode), pad;
unsigned int align = iov_iter_alignment(dio->submit.iter);
+ unsigned int bio_opf;
struct bio *bio;
bool need_zeroout = false;
bool use_fua = false;
@@ -250,11 +279,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
orig_count = iov_iter_count(dio->submit.iter);
iov_iter_truncate(dio->submit.iter, length);
- nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
- if (nr_pages <= 0) {
- ret = nr_pages;
+ if (!iov_iter_count(dio->submit.iter))
goto out;
- }
if (need_zeroout) {
/* zero out from the start of the block to the write offset */
@@ -263,6 +289,14 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
iomap_dio_zero(dio, iomap, pos - pad, pad);
}
+ /*
+ * Set the operation flags early so that bio_iov_iter_get_pages
+ * can set up the page vector appropriately for a ZONE_APPEND
+ * operation.
+ */
+ bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);
+
+ nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
do {
size_t n;
if (dio->error) {
@@ -278,6 +312,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
bio->bi_ioprio = dio->iocb->ki_ioprio;
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
+ bio->bi_opf = bio_opf;
ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
if (unlikely(ret)) {
@@ -293,14 +328,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
n = bio->bi_iter.bi_size;
if (dio->flags & IOMAP_DIO_WRITE) {
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
- if (use_fua)
- bio->bi_opf |= REQ_FUA;
- else
- dio->flags &= ~IOMAP_DIO_WRITE_FUA;
task_io_account_write(n);
} else {
- bio->bi_opf = REQ_OP_READ;
if (dio->flags & IOMAP_DIO_DIRTY)
bio_set_pages_dirty(bio);
}
@@ -308,7 +337,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
dio->size += n;
copied += n;
- nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
+ nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
+ BIO_MAX_VECS);
iomap_dio_submit_bio(dio, iomap, bio, pos);
pos += n;
} while (nr_pages);
@@ -420,23 +450,22 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- bool wait_for_completion)
+ unsigned int dio_flags)
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = file_inode(iocb->ki_filp);
size_t count = iov_iter_count(iter);
loff_t pos = iocb->ki_pos;
loff_t end = iocb->ki_pos + count - 1, ret = 0;
- unsigned int flags = IOMAP_DIRECT;
+ bool wait_for_completion =
+ is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
+ unsigned int iomap_flags = IOMAP_DIRECT;
struct blk_plug plug;
struct iomap_dio *dio;
if (!count)
return NULL;
- if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion))
- return ERR_PTR(-EIO);
-
dio = kmalloc(sizeof(*dio), GFP_KERNEL);
if (!dio)
return ERR_PTR(-ENOMEM);
@@ -461,7 +490,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (iter_is_iovec(iter))
dio->flags |= IOMAP_DIO_DIRTY;
} else {
- flags |= IOMAP_WRITE;
+ iomap_flags |= IOMAP_WRITE;
dio->flags |= IOMAP_DIO_WRITE;
/* for data sync or sync, we need sync completion processing */
@@ -483,7 +512,14 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
ret = -EAGAIN;
goto out_free_dio;
}
- flags |= IOMAP_NOWAIT;
+ iomap_flags |= IOMAP_NOWAIT;
+ }
+
+ if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
+ ret = -EAGAIN;
+ if (pos >= dio->i_size || pos + count > dio->i_size)
+ goto out_free_dio;
+ iomap_flags |= IOMAP_OVERWRITE_ONLY;
}
ret = filemap_write_and_wait_range(mapping, pos, end);
@@ -514,7 +550,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
blk_start_plug(&plug);
do {
- ret = iomap_apply(inode, pos, count, flags, ops, dio,
+ ret = iomap_apply(inode, pos, count, iomap_flags, ops, dio,
iomap_dio_actor);
if (ret <= 0) {
/* magic error code to fall back to buffered I/O */
@@ -598,11 +634,11 @@ EXPORT_SYMBOL_GPL(__iomap_dio_rw);
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- bool wait_for_completion)
+ unsigned int dio_flags)
{
struct iomap_dio *dio;
- dio = __iomap_dio_rw(iocb, iter, ops, dops, wait_for_completion);
+ dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags);
if (IS_ERR_OR_NULL(dio))
return PTR_ERR_OR_ZERO(dio);
return iomap_dio_complete(dio);
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index 107ee80c3568..dab1b02eba5b 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -10,122 +10,17 @@
#include <linux/pagemap.h>
#include <linux/pagevec.h>
-/*
- * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
- * Returns true if found and updates @lastoff to the offset in file.
- */
-static bool
-page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
- int whence)
-{
- const struct address_space_operations *ops = inode->i_mapping->a_ops;
- unsigned int bsize = i_blocksize(inode), off;
- bool seek_data = whence == SEEK_DATA;
- loff_t poff = page_offset(page);
-
- if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
- return false;
-
- if (*lastoff < poff) {
- /*
- * Last offset smaller than the start of the page means we found
- * a hole:
- */
- if (whence == SEEK_HOLE)
- return true;
- *lastoff = poff;
- }
-
- /*
- * Just check the page unless we can and should check block ranges:
- */
- if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
- return PageUptodate(page) == seek_data;
-
- lock_page(page);
- if (unlikely(page->mapping != inode->i_mapping))
- goto out_unlock_not_found;
-
- for (off = 0; off < PAGE_SIZE; off += bsize) {
- if (offset_in_page(*lastoff) >= off + bsize)
- continue;
- if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
- unlock_page(page);
- return true;
- }
- *lastoff = poff + off + bsize;
- }
-
-out_unlock_not_found:
- unlock_page(page);
- return false;
-}
-
-/*
- * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
- *
- * Within unwritten extents, the page cache determines which parts are holes
- * and which are data: uptodate buffer heads count as data; everything else
- * counts as a hole.
- *
- * Returns the resulting offset on success, and -ENOENT otherwise.
- */
static loff_t
-page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
- int whence)
-{
- pgoff_t index = offset >> PAGE_SHIFT;
- pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
- loff_t lastoff = offset;
- struct pagevec pvec;
-
- if (length <= 0)
- return -ENOENT;
-
- pagevec_init(&pvec);
-
- do {
- unsigned nr_pages, i;
-
- nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
- end - 1);
- if (nr_pages == 0)
- break;
-
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
-
- if (page_seek_hole_data(inode, page, &lastoff, whence))
- goto check_range;
- lastoff = page_offset(page) + PAGE_SIZE;
- }
- pagevec_release(&pvec);
- } while (index < end);
-
- /* When no page at lastoff and we are not done, we found a hole. */
- if (whence != SEEK_HOLE)
- goto not_found;
-
-check_range:
- if (lastoff < offset + length)
- goto out;
-not_found:
- lastoff = -ENOENT;
-out:
- pagevec_release(&pvec);
- return lastoff;
-}
-
-
-static loff_t
-iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
+iomap_seek_hole_actor(struct inode *inode, loff_t start, loff_t length,
void *data, struct iomap *iomap, struct iomap *srcmap)
{
+ loff_t offset = start;
+
switch (iomap->type) {
case IOMAP_UNWRITTEN:
- offset = page_cache_seek_hole_data(inode, offset, length,
- SEEK_HOLE);
- if (offset < 0)
+ offset = mapping_seek_hole_data(inode->i_mapping, start,
+ start + length, SEEK_HOLE);
+ if (offset == start + length)
return length;
fallthrough;
case IOMAP_HOLE:
@@ -164,15 +59,17 @@ iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
EXPORT_SYMBOL_GPL(iomap_seek_hole);
static loff_t
-iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
+iomap_seek_data_actor(struct inode *inode, loff_t start, loff_t length,
void *data, struct iomap *iomap, struct iomap *srcmap)
{
+ loff_t offset = start;
+
switch (iomap->type) {
case IOMAP_HOLE:
return length;
case IOMAP_UNWRITTEN:
- offset = page_cache_seek_hole_data(inode, offset, length,
- SEEK_DATA);
+ offset = mapping_seek_hole_data(inode->i_mapping, start,
+ start + length, SEEK_DATA);
if (offset < 0)
return length;
fallthrough;
diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
index a648dbf6991e..a5e478de1417 100644
--- a/fs/iomap/swapfile.c
+++ b/fs/iomap/swapfile.c
@@ -170,6 +170,16 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
return ret;
}
+ /*
+ * If this swapfile doesn't contain even a single page-aligned
+ * contiguous range of blocks, reject this useless swapfile to
+ * prevent confusion later on.
+ */
+ if (isi.nr_pages == 0) {
+ pr_warn("swapon: Cannot find a single usable page in file.\n");
+ return -EINVAL;
+ }
+
*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
sis->max = isi.nr_pages;
sis->pages = isi.nr_pages - 1;
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index f0fe641893a5..b9e6a7ec78be 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -152,6 +152,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
printk(KERN_NOTICE "iso9660: Corrupted directory entry"
" in block %lu of inode %lu\n", block,
inode->i_ino);
+ brelse(bh);
return -EIO;
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index ec90773527ee..21edc423b79f 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -339,6 +339,7 @@ static int parse_options(char *options, struct iso9660_options *popt)
{
char *p;
int option;
+ unsigned int uv;
popt->map = 'n';
popt->rock = 1;
@@ -434,17 +435,17 @@ static int parse_options(char *options, struct iso9660_options *popt)
case Opt_ignore:
break;
case Opt_uid:
- if (match_int(&args[0], &option))
+ if (match_uint(&args[0], &uv))
return 0;
- popt->uid = make_kuid(current_user_ns(), option);
+ popt->uid = make_kuid(current_user_ns(), uv);
if (!uid_valid(popt->uid))
return 0;
popt->uid_set = 1;
break;
case Opt_gid:
- if (match_int(&args[0], &option))
+ if (match_uint(&args[0], &uv))
return 0;
- popt->gid = make_kgid(current_user_ns(), option);
+ popt->gid = make_kgid(current_user_ns(), uv);
if (!gid_valid(popt->gid))
return 0;
popt->gid_set = 1;
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 402769881c32..58f80e1b3ac0 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -102,6 +102,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
printk(KERN_NOTICE "iso9660: Corrupted directory entry"
" in block %lu of inode %lu\n", block,
dir->i_ino);
+ brelse(bh);
return 0;
}
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 472932b9e6bc..63b526d44886 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -416,7 +416,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
* jbd2_cleanup_journal_tail() doesn't get called all that often.
*/
if (journal->j_flags & JBD2_BARRIER)
- blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
+ blkdev_issue_flush(journal->j_fs_dev);
return __jbd2_update_log_tail(journal, first_tid, blocknr);
}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index b121d7d434c6..3cc4ab2ba7f4 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -825,7 +825,7 @@ start_journal_io:
if (commit_transaction->t_need_data_flush &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
- blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
+ blkdev_issue_flush(journal->j_fs_dev);
/* Done it all: now write the commit record asynchronously. */
if (jbd2_has_feature_async_commit(journal)) {
@@ -932,7 +932,7 @@ start_journal_io:
stats.run.rs_blocks_logged++;
if (jbd2_has_feature_async_commit(journal) &&
journal->j_flags & JBD2_BARRIER) {
- blkdev_issue_flush(journal->j_dev, GFP_NOFS);
+ blkdev_issue_flush(journal->j_dev);
}
if (err)
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index dc0694fcfcd1..69f18fe20923 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -326,7 +326,7 @@ int jbd2_journal_recover(journal_t *journal)
err = err2;
/* Make sure all replayed data is on permanent storage */
if (journal->j_flags & JBD2_BARRIER) {
- err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL);
+ err2 = blkdev_issue_flush(journal->j_fs_dev);
if (!err)
err = err2;
}
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 093ffbd82395..55a79df70d24 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -226,7 +226,8 @@ static int __jffs2_set_acl(struct inode *inode, int xprefix, struct posix_acl *a
return rc;
}
-int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int jffs2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
int rc, xprefix;
@@ -236,7 +237,8 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (acl) {
umode_t mode;
- rc = posix_acl_update_mode(inode, &mode, &acl);
+ rc = posix_acl_update_mode(&init_user_ns, inode, &mode,
+ &acl);
if (rc)
return rc;
if (inode->i_mode != mode) {
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 12d0271bdde3..62c50da9d493 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -28,7 +28,8 @@ struct jffs2_acl_header {
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
-int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+int jffs2_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
extern int jffs2_init_acl_pre(struct inode *, struct inode *, umode_t *);
extern int jffs2_init_acl_post(struct inode *);
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 406d9cc84ba8..79e771ab624f 100644
--- a/fs/jffs2/compr_rtime.c
+++ b/fs/jffs2/compr_rtime.c
@@ -37,6 +37,9 @@ static int jffs2_rtime_compress(unsigned char *data_in,
int outpos = 0;
int pos=0;
+ if (*dstlen <= 3)
+ return -1;
+
memset(positions,0,sizeof(positions));
while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 776493713153..c0aabbcbfd58 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -24,18 +24,21 @@
static int jffs2_readdir (struct file *, struct dir_context *);
-static int jffs2_create (struct inode *,struct dentry *,umode_t,
- bool);
+static int jffs2_create (struct user_namespace *, struct inode *,
+ struct dentry *, umode_t, bool);
static struct dentry *jffs2_lookup (struct inode *,struct dentry *,
unsigned int);
static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
static int jffs2_unlink (struct inode *,struct dentry *);
-static int jffs2_symlink (struct inode *,struct dentry *,const char *);
-static int jffs2_mkdir (struct inode *,struct dentry *,umode_t);
+static int jffs2_symlink (struct user_namespace *, struct inode *,
+ struct dentry *, const char *);
+static int jffs2_mkdir (struct user_namespace *, struct inode *,struct dentry *,
+ umode_t);
static int jffs2_rmdir (struct inode *,struct dentry *);
-static int jffs2_mknod (struct inode *,struct dentry *,umode_t,dev_t);
-static int jffs2_rename (struct inode *, struct dentry *,
- struct inode *, struct dentry *,
+static int jffs2_mknod (struct user_namespace *, struct inode *,struct dentry *,
+ umode_t,dev_t);
+static int jffs2_rename (struct user_namespace *, struct inode *,
+ struct dentry *, struct inode *, struct dentry *,
unsigned int);
const struct file_operations jffs2_dir_operations =
@@ -157,8 +160,8 @@ static int jffs2_readdir(struct file *file, struct dir_context *ctx)
/***********************************************************************/
-static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
- umode_t mode, bool excl)
+static int jffs2_create(struct user_namespace *mnt_userns, struct inode *dir_i,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct jffs2_raw_inode *ri;
struct jffs2_inode_info *f, *dir_f;
@@ -276,7 +279,8 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
/***********************************************************************/
-static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char *target)
+static int jffs2_symlink (struct user_namespace *mnt_userns, struct inode *dir_i,
+ struct dentry *dentry, const char *target)
{
struct jffs2_inode_info *f, *dir_f;
struct jffs2_sb_info *c;
@@ -438,7 +442,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
}
-static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode)
+static int jffs2_mkdir (struct user_namespace *mnt_userns, struct inode *dir_i,
+ struct dentry *dentry, umode_t mode)
{
struct jffs2_inode_info *f, *dir_f;
struct jffs2_sb_info *c;
@@ -609,7 +614,8 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
return ret;
}
-static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode, dev_t rdev)
+static int jffs2_mknod (struct user_namespace *mnt_userns, struct inode *dir_i,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct jffs2_inode_info *f, *dir_f;
struct jffs2_sb_info *c;
@@ -756,7 +762,8 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
return ret;
}
-static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
+static int jffs2_rename (struct user_namespace *mnt_userns,
+ struct inode *old_dir_i, struct dentry *old_dentry,
struct inode *new_dir_i, struct dentry *new_dentry,
unsigned int flags)
{
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 78858f6e9583..2ac410477c4f 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -190,18 +190,19 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
return 0;
}
-int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
+int jffs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int rc;
- rc = setattr_prepare(dentry, iattr);
+ rc = setattr_prepare(&init_user_ns, dentry, iattr);
if (rc)
return rc;
rc = jffs2_do_setattr(inode, iattr);
if (!rc && (iattr->ia_valid & ATTR_MODE))
- rc = posix_acl_chmod(inode, inode->i_mode);
+ rc = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
return rc;
}
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index ef1cfa61549e..173eccac691d 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -164,7 +164,7 @@ long jffs2_ioctl(struct file *, unsigned int, unsigned long);
extern const struct inode_operations jffs2_symlink_inode_operations;
/* fs.c */
-int jffs2_setattr (struct dentry *, struct iattr *);
+int jffs2_setattr (struct user_namespace *, struct dentry *, struct iattr *);
int jffs2_do_setattr (struct inode *, struct iattr *);
struct inode *jffs2_iget(struct super_block *, unsigned long);
void jffs2_evict_inode (struct inode *);
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index c2332e30f218..aef5522551db 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -57,6 +57,7 @@ static int jffs2_security_getxattr(const struct xattr_handler *handler,
}
static int jffs2_security_setxattr(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index be7c8a6a5748..4fe64519870f 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -783,6 +783,8 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
je16_to_cpu(temp->u.nodetype));
jffs2_sum_disable_collecting(c->summary);
+ /* The above call removes the list, nothing more to do */
+ goto bail_rwcompat;
} else {
BUG(); /* unknown node in summary information */
}
@@ -794,6 +796,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
c->summary->sum_num--;
}
+ bail_rwcompat:
jffs2_sum_reset_collected(c->summary);
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
index 5d6030826c52..cc3f24883e7d 100644
--- a/fs/jffs2/xattr_trusted.c
+++ b/fs/jffs2/xattr_trusted.c
@@ -25,6 +25,7 @@ static int jffs2_trusted_getxattr(const struct xattr_handler *handler,
}
static int jffs2_trusted_setxattr(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
index 9d027b4abcf9..fb945977c013 100644
--- a/fs/jffs2/xattr_user.c
+++ b/fs/jffs2/xattr_user.c
@@ -25,6 +25,7 @@ static int jffs2_user_getxattr(const struct xattr_handler *handler,
}
static int jffs2_user_setxattr(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 92cc0ac2d1fc..43c285c3d2a7 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -91,7 +91,8 @@ out:
return rc;
}
-int jfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int jfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
int rc;
tid_t tid;
@@ -101,7 +102,7 @@ int jfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
tid = txBegin(inode->i_sb, 0);
mutex_lock(&JFS_IP(inode)->commit_mutex);
if (type == ACL_TYPE_ACCESS && acl) {
- rc = posix_acl_update_mode(inode, &mode, &acl);
+ rc = posix_acl_update_mode(&init_user_ns, inode, &mode, &acl);
if (rc)
goto end_tx;
if (mode != inode->i_mode)
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 930d2701f206..28b70e7c7dd4 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -85,12 +85,13 @@ static int jfs_release(struct inode *inode, struct file *file)
return 0;
}
-int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
+int jfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int rc;
- rc = setattr_prepare(dentry, iattr);
+ rc = setattr_prepare(&init_user_ns, dentry, iattr);
if (rc)
return rc;
@@ -118,11 +119,11 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
jfs_truncate(inode);
}
- setattr_copy(inode, iattr);
+ setattr_copy(&init_user_ns, inode, iattr);
mark_inode_dirty(inode);
if (iattr->ia_valid & ATTR_MODE)
- rc = posix_acl_chmod(inode, inode->i_mode);
+ rc = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
return rc;
}
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index 10ee0ecca1a8..2581d4db58ff 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -76,7 +76,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (err)
return err;
- if (!inode_owner_or_capable(inode)) {
+ if (!inode_owner_or_capable(&init_user_ns, inode)) {
err = -EACCES;
goto setflags_out;
}
diff --git a/fs/jfs/jfs_acl.h b/fs/jfs/jfs_acl.h
index 9f8f92dd6f84..7ae389a7a366 100644
--- a/fs/jfs/jfs_acl.h
+++ b/fs/jfs/jfs_acl.h
@@ -8,7 +8,8 @@
#ifdef CONFIG_JFS_POSIX_ACL
struct posix_acl *jfs_get_acl(struct inode *inode, int type);
-int jfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+int jfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
int jfs_init_acl(tid_t, struct inode *, struct inode *);
#else
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 94b7c1cb5ceb..7aee15608619 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -1656,7 +1656,7 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
} else if (rc == -ENOSPC) {
/* search for next smaller log2 block */
l2nb = BLKSTOL2(nblocks) - 1;
- nblocks = 1 << l2nb;
+ nblocks = 1LL << l2nb;
} else {
/* Trim any already allocated blocks */
jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
index 1e899298f7f0..b5d702df7111 100644
--- a/fs/jfs/jfs_filsys.h
+++ b/fs/jfs/jfs_filsys.h
@@ -268,5 +268,6 @@
* fsck() must be run to repair
*/
#define FM_EXTENDFS 0x00000008 /* file system extendfs() in progress */
+#define FM_STATE_MAX 0x0000000f /* max value of s_state */
#endif /* _H_JFS_FILSYS */
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 4cef170630db..59379089e939 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -64,7 +64,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
goto fail_put;
}
- inode_init_owner(inode, parent, mode);
+ inode_init_owner(&init_user_ns, inode, parent, mode);
/*
* New inodes need to save sane values on disk when
* uid & gid mount options are used
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 70a0d12e427e..01daa0cb0ae5 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -26,7 +26,7 @@ extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type);
extern void jfs_set_inode_flags(struct inode *);
extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
-extern int jfs_setattr(struct dentry *, struct iattr *);
+extern int jfs_setattr(struct user_namespace *, struct dentry *, struct iattr *);
extern const struct address_space_operations jfs_aops;
extern const struct inode_operations jfs_dir_inode_operations;
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index 2935d4c776ec..5d7d7170c03c 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -37,6 +37,7 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
+#include <linux/log2.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
@@ -366,6 +367,15 @@ static int chkSuper(struct super_block *sb)
sbi->bsize = bsize;
sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize);
+ /* check some fields for possible corruption */
+ if (sbi->l2bsize != ilog2((u32)bsize) ||
+ j_sb->pad != 0 ||
+ le32_to_cpu(j_sb->s_state) > FM_STATE_MAX) {
+ rc = -EINVAL;
+ jfs_err("jfs_mount: Mount Failure: superblock is corrupt!");
+ goto out;
+ }
+
/*
* For now, ignore s_pbsize, l2bfactor. All I/O going through buffer
* cache.
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index dca8edd2378c..053295cd7bc6 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -148,10 +148,10 @@ static struct {
/*
* forward references
*/
-static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
- struct tlock * tlck, struct commit * cd);
-static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
- struct tlock * tlck);
+static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
+ struct tlock *tlck, struct commit *cd);
+static void dataLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
+ struct tlock *tlck);
static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
@@ -159,8 +159,8 @@ static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
struct tblock * tblk);
static void txForce(struct tblock * tblk);
-static int txLog(struct jfs_log * log, struct tblock * tblk,
- struct commit * cd);
+static void txLog(struct jfs_log *log, struct tblock *tblk,
+ struct commit *cd);
static void txUpdateMap(struct tblock * tblk);
static void txRelease(struct tblock * tblk);
static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
@@ -1256,8 +1256,7 @@ int txCommit(tid_t tid, /* transaction identifier */
*
* txUpdateMap() resets XAD_NEW in XAD.
*/
- if ((rc = txLog(log, tblk, &cd)))
- goto TheEnd;
+ txLog(log, tblk, &cd);
/*
* Ensure that inode isn't reused before
@@ -1365,9 +1364,8 @@ int txCommit(tid_t tid, /* transaction identifier */
*
* RETURN :
*/
-static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
+static void txLog(struct jfs_log *log, struct tblock *tblk, struct commit *cd)
{
- int rc = 0;
struct inode *ip;
lid_t lid;
struct tlock *tlck;
@@ -1414,7 +1412,7 @@ static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
}
}
- return rc;
+ return;
}
/*
@@ -1422,10 +1420,9 @@ static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
*
* function: log inode tlock and format maplock to update bmap;
*/
-static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
- struct tlock * tlck, struct commit * cd)
+static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
+ struct tlock *tlck, struct commit *cd)
{
- int rc = 0;
struct metapage *mp;
pxd_t *pxd;
struct pxd_lock *pxdlock;
@@ -1527,7 +1524,7 @@ static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
}
#endif /* _JFS_WIP */
- return rc;
+ return;
}
/*
@@ -1535,8 +1532,8 @@ static int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
*
* function: log data tlock
*/
-static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
- struct tlock * tlck)
+static void dataLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
+ struct tlock *tlck)
{
struct metapage *mp;
pxd_t *pxd;
@@ -1562,7 +1559,7 @@ static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
metapage_homeok(mp);
discard_metapage(mp);
tlck->mp = NULL;
- return 0;
+ return;
}
PXDaddress(pxd, mp->index);
@@ -1573,7 +1570,7 @@ static int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
/* mark page as homeward bound */
tlck->flag |= tlckWRITEPAGE;
- return 0;
+ return;
}
/*
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 7a55d14cc1af..9abed0d750e5 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -59,8 +59,8 @@ static inline void free_ea_wmap(struct inode *inode)
* RETURN: Errors from subroutines
*
*/
-static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
- bool excl)
+static int jfs_create(struct user_namespace *mnt_userns, struct inode *dip,
+ struct dentry *dentry, umode_t mode, bool excl)
{
int rc = 0;
tid_t tid; /* transaction id */
@@ -192,7 +192,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
* note:
* EACCES: user needs search+write permission on the parent directory
*/
-static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
+static int jfs_mkdir(struct user_namespace *mnt_userns, struct inode *dip,
+ struct dentry *dentry, umode_t mode)
{
int rc = 0;
tid_t tid; /* transaction id */
@@ -868,8 +869,8 @@ static int jfs_link(struct dentry *old_dentry,
* an intermediate result whose length exceeds PATH_MAX [XPG4.2]
*/
-static int jfs_symlink(struct inode *dip, struct dentry *dentry,
- const char *name)
+static int jfs_symlink(struct user_namespace *mnt_userns, struct inode *dip,
+ struct dentry *dentry, const char *name)
{
int rc;
tid_t tid;
@@ -1058,9 +1059,9 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
*
* FUNCTION: rename a file or directory
*/
-static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int jfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct btstack btstack;
ino_t ino;
@@ -1344,8 +1345,8 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
*
* FUNCTION: Create a special file (device)
*/
-static int jfs_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
+static int jfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct jfs_inode_info *jfs_ip;
struct btstack btstack;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index b2dc4d1f9dcc..1f0ffabbde56 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -551,7 +551,6 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
ret = -ENOMEM;
goto out_unload;
}
- inode->i_ino = 0;
inode->i_size = i_size_read(sb->s_bdev->bd_inode);
inode->i_mapping->a_ops = &jfs_metapage_aops;
inode_fake_hash(inode);
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index db41e7803163..f9273f6901c8 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -932,6 +932,7 @@ static int jfs_xattr_get(const struct xattr_handler *handler,
}
static int jfs_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -950,6 +951,7 @@ static int jfs_xattr_get_os2(const struct xattr_handler *handler,
}
static int jfs_xattr_set_os2(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 7a53eed69fef..7e0e62deab53 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -1110,7 +1110,8 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
return ret;
}
-static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
+static int kernfs_iop_mkdir(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry,
umode_t mode)
{
struct kernfs_node *parent = dir->i_private;
@@ -1147,7 +1148,8 @@ static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
return ret;
}
-static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
+static int kernfs_iop_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index fc2469a20fed..d73950fc3d57 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -112,7 +112,8 @@ int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
return ret;
}
-int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr)
+int kernfs_iop_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
struct kernfs_node *kn = inode->i_private;
@@ -122,7 +123,7 @@ int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr)
return -EINVAL;
mutex_lock(&kernfs_mutex);
- error = setattr_prepare(dentry, iattr);
+ error = setattr_prepare(&init_user_ns, dentry, iattr);
if (error)
goto out;
@@ -131,7 +132,7 @@ int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr)
goto out;
/* this ignores size changes */
- setattr_copy(inode, iattr);
+ setattr_copy(&init_user_ns, inode, iattr);
out:
mutex_unlock(&kernfs_mutex);
@@ -183,7 +184,8 @@ static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
set_nlink(inode, kn->dir.subdirs + 2);
}
-int kernfs_iop_getattr(const struct path *path, struct kstat *stat,
+int kernfs_iop_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
@@ -193,7 +195,7 @@ int kernfs_iop_getattr(const struct path *path, struct kstat *stat,
kernfs_refresh_inode(kn, inode);
mutex_unlock(&kernfs_mutex);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
return 0;
}
@@ -272,7 +274,8 @@ void kernfs_evict_inode(struct inode *inode)
kernfs_put(kn);
}
-int kernfs_iop_permission(struct inode *inode, int mask)
+int kernfs_iop_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
struct kernfs_node *kn;
@@ -285,7 +288,7 @@ int kernfs_iop_permission(struct inode *inode, int mask)
kernfs_refresh_inode(kn, inode);
mutex_unlock(&kernfs_mutex);
- return generic_permission(inode, mask);
+ return generic_permission(&init_user_ns, inode, mask);
}
int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
@@ -319,6 +322,7 @@ static int kernfs_vfs_xattr_get(const struct xattr_handler *handler,
}
static int kernfs_vfs_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *suffix, const void *value,
size_t size, int flags)
@@ -385,6 +389,7 @@ static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn,
}
static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *suffix, const void *value,
size_t size, int flags)
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 7ee97ef59184..ccc3b44f6306 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -89,9 +89,12 @@ extern struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache;
*/
extern const struct xattr_handler *kernfs_xattr_handlers[];
void kernfs_evict_inode(struct inode *inode);
-int kernfs_iop_permission(struct inode *inode, int mask);
-int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr);
-int kernfs_iop_getattr(const struct path *path, struct kstat *stat,
+int kernfs_iop_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask);
+int kernfs_iop_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr);
+int kernfs_iop_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags);
ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size);
int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
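Taken together, the kernfs hunks above and the libfs, minix and namei hunks below show the shape of the converted method signatures. As an orientation aid only (the authoritative declarations live in include/linux/fs.h and include/linux/xattr.h, which are outside this fs/ diffstat), the operations touched here end up looking roughly like this sketch; the struct name is illustrative, not a real kernel type:

struct inode_operations_excerpt {	/* illustrative name only */
	int (*permission)(struct user_namespace *mnt_userns, struct inode *inode,
			  int mask);
	int (*setattr)(struct user_namespace *mnt_userns, struct dentry *dentry,
		       struct iattr *iattr);
	int (*getattr)(struct user_namespace *mnt_userns, const struct path *path,
		       struct kstat *stat, u32 request_mask,
		       unsigned int query_flags);
	int (*create)(struct user_namespace *mnt_userns, struct inode *dir,
		      struct dentry *dentry, umode_t mode, bool excl);
	int (*mkdir)(struct user_namespace *mnt_userns, struct inode *dir,
		     struct dentry *dentry, umode_t mode);
	int (*mknod)(struct user_namespace *mnt_userns, struct inode *dir,
		     struct dentry *dentry, umode_t mode, dev_t rdev);
	int (*symlink)(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, const char *symname);
	int (*rename)(struct user_namespace *mnt_userns, struct inode *old_dir,
		      struct dentry *old_dentry, struct inode *new_dir,
		      struct dentry *new_dentry, unsigned int flags);
};

Filesystems that do not (yet) support being mounted with an idmapping simply forward &init_user_ns to the generic helpers, which keeps their behaviour identical to before; the kernfs and minix conversions in this diff follow that pattern, while the VFS-level helpers pass the mnt_userns they receive straight through.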
diff --git a/fs/libfs.c b/fs/libfs.c
index d1c3bade9f30..e2de5401abca 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -27,11 +27,12 @@
#include "internal.h"
-int simple_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
+int simple_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
return 0;
}
@@ -447,9 +448,9 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
}
EXPORT_SYMBOL(simple_rmdir);
-int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+int simple_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct inode *inode = d_inode(old_dentry);
int they_are_dirs = d_is_dir(old_dentry);
@@ -492,18 +493,19 @@ EXPORT_SYMBOL(simple_rename);
* on simple regular filesystems. Anything that needs to change on-disk
* or wire state on size changes needs its own setattr method.
*/
-int simple_setattr(struct dentry *dentry, struct iattr *iattr)
+int simple_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int error;
- error = setattr_prepare(dentry, iattr);
+ error = setattr_prepare(mnt_userns, dentry, iattr);
if (error)
return error;
if (iattr->ia_valid & ATTR_SIZE)
truncate_setsize(inode, iattr->ia_size);
- setattr_copy(inode, iattr);
+ setattr_copy(mnt_userns, inode, iattr);
mark_inode_dirty(inode);
return 0;
}
@@ -1117,7 +1119,7 @@ int generic_file_fsync(struct file *file, loff_t start, loff_t end,
err = __generic_file_fsync(file, start, end, datasync);
if (err)
return err;
- return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ return blkdev_issue_flush(inode->i_sb->s_bdev);
}
EXPORT_SYMBOL(generic_file_fsync);
@@ -1214,11 +1216,6 @@ static int anon_set_page_dirty(struct page *page)
return 0;
};
-/*
- * A single inode exists for all anon_inode files. Contrary to pipes,
- * anon_inode inodes have no associated per-instance data, so we need
- * only allocate one of them.
- */
struct inode *alloc_anon_inode(struct super_block *s)
{
static const struct address_space_operations anon_aops = {
@@ -1300,15 +1297,17 @@ static struct dentry *empty_dir_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(-ENOENT);
}
-static int empty_dir_getattr(const struct path *path, struct kstat *stat,
+static int empty_dir_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
return 0;
}
-static int empty_dir_setattr(struct dentry *dentry, struct iattr *attr)
+static int empty_dir_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
return -EPERM;
}
@@ -1388,8 +1387,8 @@ static bool needs_casefold(const struct inode *dir)
*
* Return: 0 if names match, 1 if mismatch, or -ERRNO
*/
-int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
- const char *str, const struct qstr *name)
+static int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name)
{
const struct dentry *parent = READ_ONCE(dentry->d_parent);
const struct inode *dir = READ_ONCE(parent->d_inode);
@@ -1426,7 +1425,6 @@ fallback:
return 1;
return !!memcmp(str, name->name, len);
}
-EXPORT_SYMBOL(generic_ci_d_compare);
/**
* generic_ci_d_hash - generic d_hash implementation for casefolding filesystems
@@ -1435,7 +1433,7 @@ EXPORT_SYMBOL(generic_ci_d_compare);
*
* Return: 0 if hash was successful or unchanged, and -EINVAL on error
*/
-int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str)
+static int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str)
{
const struct inode *dir = READ_ONCE(dentry->d_inode);
struct super_block *sb = dentry->d_sb;
@@ -1450,7 +1448,6 @@ int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str)
return -EINVAL;
return 0;
}
-EXPORT_SYMBOL(generic_ci_d_hash);
static const struct dentry_operations generic_ci_dentry_ops = {
.d_hash = generic_ci_d_hash,
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index fa41dda39925..4c10fb5138f1 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -512,6 +512,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "NULL",
},
[NLMPROC_TEST] = {
.pc_func = nlm4svc_proc_test,
@@ -520,6 +521,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+2+No+Rg,
+ .pc_name = "TEST",
},
[NLMPROC_LOCK] = {
.pc_func = nlm4svc_proc_lock,
@@ -528,6 +530,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
+ .pc_name = "LOCK",
},
[NLMPROC_CANCEL] = {
.pc_func = nlm4svc_proc_cancel,
@@ -536,6 +539,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
+ .pc_name = "CANCEL",
},
[NLMPROC_UNLOCK] = {
.pc_func = nlm4svc_proc_unlock,
@@ -544,6 +548,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
+ .pc_name = "UNLOCK",
},
[NLMPROC_GRANTED] = {
.pc_func = nlm4svc_proc_granted,
@@ -552,6 +557,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
+ .pc_name = "GRANTED",
},
[NLMPROC_TEST_MSG] = {
.pc_func = nlm4svc_proc_test_msg,
@@ -560,6 +566,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "TEST_MSG",
},
[NLMPROC_LOCK_MSG] = {
.pc_func = nlm4svc_proc_lock_msg,
@@ -568,6 +575,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "LOCK_MSG",
},
[NLMPROC_CANCEL_MSG] = {
.pc_func = nlm4svc_proc_cancel_msg,
@@ -576,6 +584,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "CANCEL_MSG",
},
[NLMPROC_UNLOCK_MSG] = {
.pc_func = nlm4svc_proc_unlock_msg,
@@ -584,6 +593,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "UNLOCK_MSG",
},
[NLMPROC_GRANTED_MSG] = {
.pc_func = nlm4svc_proc_granted_msg,
@@ -592,6 +602,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "GRANTED_MSG",
},
[NLMPROC_TEST_RES] = {
.pc_func = nlm4svc_proc_null,
@@ -600,6 +611,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "TEST_RES",
},
[NLMPROC_LOCK_RES] = {
.pc_func = nlm4svc_proc_null,
@@ -608,6 +620,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "LOCK_RES",
},
[NLMPROC_CANCEL_RES] = {
.pc_func = nlm4svc_proc_null,
@@ -616,6 +629,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "CANCEL_RES",
},
[NLMPROC_UNLOCK_RES] = {
.pc_func = nlm4svc_proc_null,
@@ -624,6 +638,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "UNLOCK_RES",
},
[NLMPROC_GRANTED_RES] = {
.pc_func = nlm4svc_proc_granted_res,
@@ -632,6 +647,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "GRANTED_RES",
},
[NLMPROC_NSM_NOTIFY] = {
.pc_func = nlm4svc_proc_sm_notify,
@@ -640,6 +656,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_reboot),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "SM_NOTIFY",
},
[17] = {
.pc_func = nlm4svc_proc_unused,
@@ -648,6 +665,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
+ .pc_name = "UNUSED",
},
[18] = {
.pc_func = nlm4svc_proc_unused,
@@ -656,6 +674,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
+ .pc_name = "UNUSED",
},
[19] = {
.pc_func = nlm4svc_proc_unused,
@@ -664,6 +683,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
+ .pc_name = "UNUSED",
},
[NLMPROC_SHARE] = {
.pc_func = nlm4svc_proc_share,
@@ -672,6 +692,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
+ .pc_name = "SHARE",
},
[NLMPROC_UNSHARE] = {
.pc_func = nlm4svc_proc_unshare,
@@ -680,6 +701,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
+ .pc_name = "UNSHARE",
},
[NLMPROC_NM_LOCK] = {
.pc_func = nlm4svc_proc_nm_lock,
@@ -688,6 +710,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
+ .pc_name = "NM_LOCK",
},
[NLMPROC_FREE_ALL] = {
.pc_func = nlm4svc_proc_free_all,
@@ -696,5 +719,6 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "FREE_ALL",
},
};
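The only change to these lockd procedure tables is the new .pc_name string per entry, which gives each NLM procedure a printable name for the shared SUNRPC dispatch and tracing code. The toy program below (plain userspace C with made-up names and handlers, not kernel code) simply illustrates why carrying a name next to the handler is convenient for a dispatcher:

#include <stdio.h>

struct proc_entry {
	const char *name;		/* analogous to .pc_name */
	int (*handler)(int arg);	/* analogous to .pc_func */
};

static int toy_null(int arg) { (void)arg; return 0; }
static int toy_test(int arg) { return arg ? 0 : -1; }

static const struct proc_entry procs[] = {
	[0] = { .name = "NULL", .handler = toy_null },
	[1] = { .name = "TEST", .handler = toy_test },
};

static int dispatch(unsigned int proc, int arg)
{
	if (proc >= sizeof(procs) / sizeof(procs[0]) || !procs[proc].handler)
		return -1;
	/* A name in the table lets the dispatcher report the call symbolically. */
	printf("dispatching %s\n", procs[proc].name);
	return procs[proc].handler(arg);
}

int main(void)
{
	return dispatch(1, 42);
}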
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 50855f2c1f4b..4ae4b63b5392 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -554,6 +554,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "NULL",
},
[NLMPROC_TEST] = {
.pc_func = nlmsvc_proc_test,
@@ -562,6 +563,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+2+No+Rg,
+ .pc_name = "TEST",
},
[NLMPROC_LOCK] = {
.pc_func = nlmsvc_proc_lock,
@@ -570,6 +572,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
+ .pc_name = "LOCK",
},
[NLMPROC_CANCEL] = {
.pc_func = nlmsvc_proc_cancel,
@@ -578,6 +581,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
+ .pc_name = "CANCEL",
},
[NLMPROC_UNLOCK] = {
.pc_func = nlmsvc_proc_unlock,
@@ -586,6 +590,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
+ .pc_name = "UNLOCK",
},
[NLMPROC_GRANTED] = {
.pc_func = nlmsvc_proc_granted,
@@ -594,6 +599,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
+ .pc_name = "GRANTED",
},
[NLMPROC_TEST_MSG] = {
.pc_func = nlmsvc_proc_test_msg,
@@ -602,6 +608,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "TEST_MSG",
},
[NLMPROC_LOCK_MSG] = {
.pc_func = nlmsvc_proc_lock_msg,
@@ -610,6 +617,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "LOCK_MSG",
},
[NLMPROC_CANCEL_MSG] = {
.pc_func = nlmsvc_proc_cancel_msg,
@@ -618,6 +626,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "CANCEL_MSG",
},
[NLMPROC_UNLOCK_MSG] = {
.pc_func = nlmsvc_proc_unlock_msg,
@@ -626,6 +635,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "UNLOCK_MSG",
},
[NLMPROC_GRANTED_MSG] = {
.pc_func = nlmsvc_proc_granted_msg,
@@ -634,6 +644,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "GRANTED_MSG",
},
[NLMPROC_TEST_RES] = {
.pc_func = nlmsvc_proc_null,
@@ -642,6 +653,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "TEST_RES",
},
[NLMPROC_LOCK_RES] = {
.pc_func = nlmsvc_proc_null,
@@ -650,6 +662,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "LOCK_RES",
},
[NLMPROC_CANCEL_RES] = {
.pc_func = nlmsvc_proc_null,
@@ -658,6 +671,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "CANCEL_RES",
},
[NLMPROC_UNLOCK_RES] = {
.pc_func = nlmsvc_proc_null,
@@ -666,6 +680,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "UNLOCK_RES",
},
[NLMPROC_GRANTED_RES] = {
.pc_func = nlmsvc_proc_granted_res,
@@ -674,6 +689,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "GRANTED_RES",
},
[NLMPROC_NSM_NOTIFY] = {
.pc_func = nlmsvc_proc_sm_notify,
@@ -682,6 +698,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_reboot),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "SM_NOTIFY",
},
[17] = {
.pc_func = nlmsvc_proc_unused,
@@ -690,6 +707,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "UNUSED",
},
[18] = {
.pc_func = nlmsvc_proc_unused,
@@ -698,6 +716,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "UNUSED",
},
[19] = {
.pc_func = nlmsvc_proc_unused,
@@ -706,6 +725,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
+ .pc_name = "UNUSED",
},
[NLMPROC_SHARE] = {
.pc_func = nlmsvc_proc_share,
@@ -714,6 +734,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
+ .pc_name = "SHARE",
},
[NLMPROC_UNSHARE] = {
.pc_func = nlmsvc_proc_unshare,
@@ -722,6 +743,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
+ .pc_name = "UNSHARE",
},
[NLMPROC_NM_LOCK] = {
.pc_func = nlmsvc_proc_nm_lock,
@@ -730,6 +752,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
+ .pc_name = "NM_LOCK",
},
[NLMPROC_FREE_ALL] = {
.pc_func = nlmsvc_proc_free_all,
@@ -738,5 +761,6 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
+ .pc_name = "FREE_ALL",
},
};
diff --git a/fs/locks.c b/fs/locks.c
index 99ca97e81b7a..6125d2de39b8 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1808,9 +1808,6 @@ check_conflicting_open(struct file *filp, const long arg, int flags)
if (flags & FL_LAYOUT)
return 0;
- if (flags & FL_DELEG)
- /* We leave these checks to the caller. */
- return 0;
if (arg == F_RDLCK)
return inode_is_open_for_write(inode) ? -EAGAIN : 0;
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index f4e5e5181a14..9115948c624e 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -252,7 +252,7 @@ struct inode *minix_new_inode(const struct inode *dir, umode_t mode, int *error)
iput(inode);
return NULL;
}
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
inode->i_ino = j;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
inode->i_blocks = 0;
diff --git a/fs/minix/file.c b/fs/minix/file.c
index c50b0a20fcd9..6a7bd2d9eec0 100644
--- a/fs/minix/file.c
+++ b/fs/minix/file.c
@@ -22,12 +22,13 @@ const struct file_operations minix_file_operations = {
.splice_read = generic_file_splice_read,
};
-static int minix_setattr(struct dentry *dentry, struct iattr *attr)
+static int minix_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
@@ -41,7 +42,7 @@ static int minix_setattr(struct dentry *dentry, struct iattr *attr)
minix_truncate(inode);
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 34f546404aa1..a532a99bbe81 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -652,13 +652,13 @@ static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
return err;
}
-int minix_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int minix_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct super_block *sb = path->dentry->d_sb;
struct inode *inode = d_inode(path->dentry);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
if (INODE_VERSION(inode) == MINIX_V1)
stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
else
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 168d45d3de73..202173368025 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -51,7 +51,8 @@ extern unsigned long minix_count_free_inodes(struct super_block *sb);
extern int minix_new_block(struct inode * inode);
extern void minix_free_block(struct inode *inode, unsigned long block);
extern unsigned long minix_count_free_blocks(struct super_block *sb);
-extern int minix_getattr(const struct path *, struct kstat *, u32, unsigned int);
+extern int minix_getattr(struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
extern void V1_minix_truncate(struct inode *);
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 1a6084d2b02e..937fa5fae2b8 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -33,7 +33,8 @@ static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, un
return d_splice_alias(inode, dentry);
}
-static int minix_mknod(struct inode * dir, struct dentry *dentry, umode_t mode, dev_t rdev)
+static int minix_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
int error;
struct inode *inode;
@@ -51,7 +52,8 @@ static int minix_mknod(struct inode * dir, struct dentry *dentry, umode_t mode,
return error;
}
-static int minix_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int minix_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int error;
struct inode *inode = minix_new_inode(dir, mode, &error);
@@ -63,14 +65,14 @@ static int minix_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
return error;
}
-static int minix_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int minix_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
- return minix_mknod(dir, dentry, mode, 0);
+ return minix_mknod(mnt_userns, dir, dentry, mode, 0);
}
-static int minix_symlink(struct inode * dir, struct dentry *dentry,
- const char * symname)
+static int minix_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
int err = -ENAMETOOLONG;
int i = strlen(symname)+1;
@@ -109,7 +111,8 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir,
return add_nondir(dentry, inode);
}
-static int minix_mkdir(struct inode * dir, struct dentry *dentry, umode_t mode)
+static int minix_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode * inode;
int err;
@@ -181,8 +184,9 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry)
return err;
}
-static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
- struct inode * new_dir, struct dentry *new_dentry,
+static int minix_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct inode * old_inode = d_inode(old_dentry);
diff --git a/fs/mount.h b/fs/mount.h
index ce6c376e0bc2..0b6e08cf8afb 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -124,16 +124,6 @@ static inline void get_mnt_ns(struct mnt_namespace *ns)
extern seqlock_t mount_lock;
-static inline void lock_mount_hash(void)
-{
- write_seqlock(&mount_lock);
-}
-
-static inline void unlock_mount_hash(void)
-{
- write_sequnlock(&mount_lock);
-}
-
struct proc_mounts {
struct mnt_namespace *ns;
struct path root;
diff --git a/fs/mpage.c b/fs/mpage.c
index 830e6cc2a9e7..334e7d09aa65 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -304,9 +304,7 @@ alloc_new:
goto out;
}
args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- min_t(int, args->nr_pages,
- BIO_MAX_PAGES),
- gfp);
+ bio_max_segs(args->nr_pages), gfp);
if (args->bio == NULL)
goto confused;
}
@@ -618,7 +616,7 @@ alloc_new:
goto out;
}
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
+ BIO_MAX_VECS, GFP_NOFS|__GFP_HIGH);
if (bio == NULL)
goto confused;
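The mpage hunks drop the open-coded min_t(int, nr_pages, BIO_MAX_PAGES) in favour of the bio_max_segs() helper and use the renamed BIO_MAX_VECS constant on the writeback path. Conceptually the helper is only a clamp on the number of bio segments; a toy stand-alone equivalent is shown below, where the constant's value is an assumption made purely for illustration:

#include <stdio.h>

#define TOY_BIO_MAX_VECS 256u	/* assumed value, for illustration only */

static unsigned int toy_bio_max_segs(unsigned int nr_segs)
{
	/* Never request more segments than a single bio can hold. */
	return nr_segs < TOY_BIO_MAX_VECS ? nr_segs : TOY_BIO_MAX_VECS;
}

int main(void)
{
	printf("%u %u\n", toy_bio_max_segs(10), toy_bio_max_segs(10000));
	return 0;
}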
diff --git a/fs/namei.c b/fs/namei.c
index 78443a85480a..216f16e74351 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -259,7 +259,24 @@ void putname(struct filename *name)
__putname(name);
}
-static int check_acl(struct inode *inode, int mask)
+/**
+ * check_acl - perform ACL permission checking
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode to check permissions on
+ * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC ...)
+ *
+ * This function performs the ACL permission checking. Since this function
+ * retrieves POSIX ACLs, it needs to know whether it is called from a blocking or
+ * non-blocking context and thus cares about the MAY_NOT_BLOCK bit.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode, simply pass init_user_ns.
+ */
+static int check_acl(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *acl;
@@ -271,14 +288,14 @@ static int check_acl(struct inode *inode, int mask)
/* no ->get_acl() calls in RCU mode... */
if (is_uncached_acl(acl))
return -ECHILD;
- return posix_acl_permission(inode, acl, mask);
+ return posix_acl_permission(mnt_userns, inode, acl, mask);
}
acl = get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl) {
- int error = posix_acl_permission(inode, acl, mask);
+ int error = posix_acl_permission(mnt_userns, inode, acl, mask);
posix_acl_release(acl);
return error;
}
@@ -287,18 +304,31 @@ static int check_acl(struct inode *inode, int mask)
return -EAGAIN;
}
-/*
- * This does the basic UNIX permission checking.
+/**
+ * acl_permission_check - perform basic UNIX permission checking
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode to check permissions on
+ * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC ...)
+ *
+ * This function performs the basic UNIX permission checking. Since this
+ * function may retrieve POSIX ACLs, it needs to know whether it is called from a
+ * blocking or non-blocking context and thus cares about the MAY_NOT_BLOCK bit.
*
- * Note that the POSIX ACL check cares about the MAY_NOT_BLOCK bit,
- * for RCU walking.
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode, simply pass init_user_ns.
*/
-static int acl_permission_check(struct inode *inode, int mask)
+static int acl_permission_check(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
unsigned int mode = inode->i_mode;
+ kuid_t i_uid;
/* Are we the owner? If so, ACL's don't matter */
- if (likely(uid_eq(current_fsuid(), inode->i_uid))) {
+ i_uid = i_uid_into_mnt(mnt_userns, inode);
+ if (likely(uid_eq(current_fsuid(), i_uid))) {
mask &= 7;
mode >>= 6;
return (mask & ~mode) ? -EACCES : 0;
@@ -306,7 +336,7 @@ static int acl_permission_check(struct inode *inode, int mask)
/* Do we have ACL's? */
if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
- int error = check_acl(inode, mask);
+ int error = check_acl(mnt_userns, inode, mask);
if (error != -EAGAIN)
return error;
}
@@ -320,7 +350,8 @@ static int acl_permission_check(struct inode *inode, int mask)
* about? Need to check group ownership if so.
*/
if (mask & (mode ^ (mode >> 3))) {
- if (in_group_p(inode->i_gid))
+ kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
+ if (in_group_p(kgid))
mode >>= 3;
}
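Stripped of ACLs, capability overrides and the new idmapping, the core of acl_permission_check() above is the classic owner/group/other test: pick the relevant three mode bits by shifting i_mode, then deny if the request asks for a bit that class does not grant. A stand-alone userspace model of just that arithmetic (toy code, not the kernel's implementation):

#include <stdio.h>

#define TOY_MAY_EXEC  1
#define TOY_MAY_WRITE 2
#define TOY_MAY_READ  4

/* 0 = allowed, -1 = denied; "in_group" stands in for in_group_p() */
static int toy_mode_check(unsigned int mode, unsigned int i_uid,
			  unsigned int fsuid, int in_group, unsigned int mask)
{
	if (fsuid == i_uid)
		mode >>= 6;		/* owner class */
	else if (in_group)
		mode >>= 3;		/* group class */
					/* else: "other", the low three bits */
	return (mask & ~mode & (TOY_MAY_READ | TOY_MAY_WRITE | TOY_MAY_EXEC))
		? -1 : 0;
}

int main(void)
{
	unsigned int mode = 0640;	/* rw-r----- owned by uid 1000 */

	printf("%d\n", toy_mode_check(mode, 1000, 1000, 0, TOY_MAY_WRITE)); /*  0 */
	printf("%d\n", toy_mode_check(mode, 1000, 2000, 1, TOY_MAY_READ));  /*  0 */
	printf("%d\n", toy_mode_check(mode, 1000, 2000, 0, TOY_MAY_READ));  /* -1 */
	return 0;
}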
@@ -330,6 +361,7 @@ static int acl_permission_check(struct inode *inode, int mask)
/**
* generic_permission - check for access rights on a Posix-like filesystem
+ * @mnt_userns: user namespace of the mount the inode was found from
* @inode: inode to check access rights for
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC,
* %MAY_NOT_BLOCK ...)
@@ -342,25 +374,33 @@ static int acl_permission_check(struct inode *inode, int mask)
* generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
* request cannot be satisfied (eg. requires blocking or too much complexity).
* It would then be called again in ref-walk mode.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode, simply pass init_user_ns.
*/
-int generic_permission(struct inode *inode, int mask)
+int generic_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask)
{
int ret;
/*
* Do the basic permission checks.
*/
- ret = acl_permission_check(inode, mask);
+ ret = acl_permission_check(mnt_userns, inode, mask);
if (ret != -EACCES)
return ret;
if (S_ISDIR(inode->i_mode)) {
/* DACs are overridable for directories */
if (!(mask & MAY_WRITE))
- if (capable_wrt_inode_uidgid(inode,
+ if (capable_wrt_inode_uidgid(mnt_userns, inode,
CAP_DAC_READ_SEARCH))
return 0;
- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
+ if (capable_wrt_inode_uidgid(mnt_userns, inode,
+ CAP_DAC_OVERRIDE))
return 0;
return -EACCES;
}
@@ -370,7 +410,8 @@ int generic_permission(struct inode *inode, int mask)
*/
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
if (mask == MAY_READ)
- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
+ if (capable_wrt_inode_uidgid(mnt_userns, inode,
+ CAP_DAC_READ_SEARCH))
return 0;
/*
* Read/write DACs are always overridable.
@@ -378,31 +419,38 @@ int generic_permission(struct inode *inode, int mask)
* at least one exec bit set.
*/
if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
+ if (capable_wrt_inode_uidgid(mnt_userns, inode,
+ CAP_DAC_OVERRIDE))
return 0;
return -EACCES;
}
EXPORT_SYMBOL(generic_permission);
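The i_uid_into_mnt()/i_gid_into_mnt() calls that recur throughout this file are where idmapped mounts plug in: the inode's ownership is translated according to the mapping attached to the vfsmount before it is compared against the caller's fsuid/fsgid, and init_user_ns acts as the identity, so non-idmapped mounts behave exactly as before. The toy below models the idea with a single mapping extent; it deliberately ignores the kernel's kuid_t/kgid_t machinery and multi-extent maps:

#include <stdio.h>
#include <sys/types.h>

/* One "filesystem id -> mount id" extent, like a line in a uid_map file. */
struct toy_idmap {
	uid_t lower_first;	/* id as stored/seen by the filesystem */
	uid_t mapped_first;	/* id as seen through the idmapped mount */
	unsigned int count;
};

static const uid_t TOY_INVALID_UID = (uid_t)-1;

static uid_t toy_uid_into_mnt(const struct toy_idmap *map, uid_t fs_uid)
{
	if (!map)		/* no idmapping: identity, like init_user_ns */
		return fs_uid;
	if (fs_uid >= map->lower_first &&
	    fs_uid < map->lower_first + map->count)
		return map->mapped_first + (fs_uid - map->lower_first);
	return TOY_INVALID_UID;	/* unmapped ids fail ownership checks */
}

int main(void)
{
	struct toy_idmap map = { .lower_first = 0, .mapped_first = 10000,
				 .count = 1000 };

	printf("%u\n", toy_uid_into_mnt(&map, 5));	/* 10005 */
	printf("%u\n", toy_uid_into_mnt(NULL, 5));	/* 5: identity */
	printf("%d\n", toy_uid_into_mnt(&map, 70000) == TOY_INVALID_UID); /* 1 */
	return 0;
}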
-/*
+/**
+ * do_inode_permission - UNIX permission checking
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: inode to check permissions on
+ * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC ...)
+ *
* We _really_ want to just do "generic_permission()" without
* even looking at the inode->i_op values. So we keep a cache
* flag in inode->i_opflags that says "this has no special
* permission function, use the fast case".
*/
-static inline int do_inode_permission(struct inode *inode, int mask)
+static inline int do_inode_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
if (likely(inode->i_op->permission))
- return inode->i_op->permission(inode, mask);
+ return inode->i_op->permission(mnt_userns, inode, mask);
/* This gets set once for the inode lifetime */
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_FASTPERM;
spin_unlock(&inode->i_lock);
}
- return generic_permission(inode, mask);
+ return generic_permission(mnt_userns, inode, mask);
}
/**
@@ -427,8 +475,9 @@ static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
/**
* inode_permission - Check for access rights to a given inode
- * @inode: Inode to check permission on
- * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
+ * @mnt_userns: User namespace of the mount the inode was found from
+ * @inode: Inode to check permission on
+ * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Check for read/write/execute permissions on an inode. We use fs[ug]id for
* this, letting us set arbitrary permissions for filesystem access without
@@ -436,7 +485,8 @@ static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
*
* When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
*/
-int inode_permission(struct inode *inode, int mask)
+int inode_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
int retval;
@@ -456,11 +506,11 @@ int inode_permission(struct inode *inode, int mask)
* written back improperly if their true value is unknown
* to the vfs.
*/
- if (HAS_UNMAPPED_ID(inode))
+ if (HAS_UNMAPPED_ID(mnt_userns, inode))
return -EACCES;
}
- retval = do_inode_permission(inode, mask);
+ retval = do_inode_permission(mnt_userns, inode, mask);
if (retval)
return retval;
@@ -630,6 +680,11 @@ static inline bool legitimize_path(struct nameidata *nd,
static bool legitimize_links(struct nameidata *nd)
{
int i;
+ if (unlikely(nd->flags & LOOKUP_CACHED)) {
+ drop_links(nd);
+ nd->depth = 0;
+ return false;
+ }
for (i = 0; i < nd->depth; i++) {
struct saved *last = nd->stack + i;
if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
@@ -669,17 +724,17 @@ static bool legitimize_root(struct nameidata *nd)
*/
/**
- * unlazy_walk - try to switch to ref-walk mode.
+ * try_to_unlazy - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
- * Returns: 0 on success, -ECHILD on failure
+ * Returns: true on success, false on failure
*
- * unlazy_walk attempts to legitimize the current nd->path and nd->root
+ * try_to_unlazy attempts to legitimize the current nd->path and nd->root
* for ref-walk mode.
* Must be called from rcu-walk context.
- * Nothing should touch nameidata between unlazy_walk() failure and
+ * Nothing should touch nameidata between try_to_unlazy() failure and
* terminate_walk().
*/
-static int unlazy_walk(struct nameidata *nd)
+static bool try_to_unlazy(struct nameidata *nd)
{
struct dentry *parent = nd->path.dentry;
@@ -694,30 +749,30 @@ static int unlazy_walk(struct nameidata *nd)
goto out;
rcu_read_unlock();
BUG_ON(nd->inode != parent->d_inode);
- return 0;
+ return true;
out1:
nd->path.mnt = NULL;
nd->path.dentry = NULL;
out:
rcu_read_unlock();
- return -ECHILD;
+ return false;
}
/**
- * unlazy_child - try to switch to ref-walk mode.
+ * try_to_unlazy_next - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
- * @dentry: child of nd->path.dentry
- * @seq: seq number to check dentry against
- * Returns: 0 on success, -ECHILD on failure
+ * @dentry: next dentry to step into
+ * @seq: seq number to check @dentry against
+ * Returns: true on success, false on failure
*
- * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry
- * for ref-walk mode. @dentry must be a path found by a do_lookup call on
- * @nd. Must be called from rcu-walk context.
- * Nothing should touch nameidata between unlazy_child() failure and
+ * Similar to try_to_unlazy(), but here we have the next dentry already
+ * picked by rcu-walk and want to legitimize that in addition to the current
+ * nd->path and nd->root for ref-walk mode. Must be called from rcu-walk context.
+ * Nothing should touch nameidata between try_to_unlazy_next() failure and
* terminate_walk().
*/
-static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq)
+static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry, unsigned seq)
{
BUG_ON(!(nd->flags & LOOKUP_RCU));
@@ -747,7 +802,7 @@ static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned se
if (unlikely(!legitimize_root(nd)))
goto out_dput;
rcu_read_unlock();
- return 0;
+ return true;
out2:
nd->path.mnt = NULL;
@@ -755,11 +810,11 @@ out1:
nd->path.dentry = NULL;
out:
rcu_read_unlock();
- return -ECHILD;
+ return false;
out_dput:
rcu_read_unlock();
dput(dentry);
- return -ECHILD;
+ return false;
}
static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
@@ -792,7 +847,8 @@ static int complete_walk(struct nameidata *nd)
*/
if (!(nd->flags & (LOOKUP_ROOT | LOOKUP_IS_SCOPED)))
nd->root.mnt = NULL;
- if (unlikely(unlazy_walk(nd)))
+ nd->flags &= ~LOOKUP_CACHED;
+ if (!try_to_unlazy(nd))
return -ECHILD;
}
@@ -954,11 +1010,16 @@ int sysctl_protected_regular __read_mostly;
*/
static inline int may_follow_link(struct nameidata *nd, const struct inode *inode)
{
+ struct user_namespace *mnt_userns;
+ kuid_t i_uid;
+
if (!sysctl_protected_symlinks)
return 0;
+ mnt_userns = mnt_user_ns(nd->path.mnt);
+ i_uid = i_uid_into_mnt(mnt_userns, inode);
/* Allowed if owner and follower match. */
- if (uid_eq(current_cred()->fsuid, inode->i_uid))
+ if (uid_eq(current_cred()->fsuid, i_uid))
return 0;
/* Allowed if parent directory not sticky and world-writable. */
@@ -966,7 +1027,7 @@ static inline int may_follow_link(struct nameidata *nd, const struct inode *inod
return 0;
/* Allowed if parent directory and link owner match. */
- if (uid_valid(nd->dir_uid) && uid_eq(nd->dir_uid, inode->i_uid))
+ if (uid_valid(nd->dir_uid) && uid_eq(nd->dir_uid, i_uid))
return 0;
if (nd->flags & LOOKUP_RCU)
@@ -979,6 +1040,7 @@ static inline int may_follow_link(struct nameidata *nd, const struct inode *inod
/**
* safe_hardlink_source - Check for safe hardlink conditions
+ * @mnt_userns: user namespace of the mount the inode was found from
* @inode: the source inode to hardlink from
*
* Return false if at least one of the following conditions:
@@ -989,7 +1051,8 @@ static inline int may_follow_link(struct nameidata *nd, const struct inode *inod
*
* Otherwise returns true.
*/
-static bool safe_hardlink_source(struct inode *inode)
+static bool safe_hardlink_source(struct user_namespace *mnt_userns,
+ struct inode *inode)
{
umode_t mode = inode->i_mode;
@@ -1006,7 +1069,7 @@ static bool safe_hardlink_source(struct inode *inode)
return false;
/* Hardlinking to unreadable or unwritable sources is dangerous. */
- if (inode_permission(inode, MAY_READ | MAY_WRITE))
+ if (inode_permission(mnt_userns, inode, MAY_READ | MAY_WRITE))
return false;
return true;
@@ -1014,6 +1077,7 @@ static bool safe_hardlink_source(struct inode *inode)
/**
* may_linkat - Check permissions for creating a hardlink
+ * @mnt_userns: user namespace of the mount the inode was found from
* @link: the source to hardlink from
*
* Block hardlink when all of:
@@ -1022,14 +1086,21 @@ static bool safe_hardlink_source(struct inode *inode)
* - hardlink source is unsafe (see safe_hardlink_source() above)
* - not CAP_FOWNER in a namespace with the inode owner uid mapped
*
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode, simply pass init_user_ns.
+ *
* Returns 0 if successful, -ve on error.
*/
-int may_linkat(struct path *link)
+int may_linkat(struct user_namespace *mnt_userns, struct path *link)
{
struct inode *inode = link->dentry->d_inode;
/* Inode writeback is not safe when the uid or gid are invalid. */
- if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
+ if (!uid_valid(i_uid_into_mnt(mnt_userns, inode)) ||
+ !gid_valid(i_gid_into_mnt(mnt_userns, inode)))
return -EOVERFLOW;
if (!sysctl_protected_hardlinks)
@@ -1038,7 +1109,8 @@ int may_linkat(struct path *link)
/* Source inode owner (or CAP_FOWNER) can hardlink all they like,
* otherwise, it must be a safe source.
*/
- if (safe_hardlink_source(inode) || inode_owner_or_capable(inode))
+ if (safe_hardlink_source(mnt_userns, inode) ||
+ inode_owner_or_capable(mnt_userns, inode))
return 0;
audit_log_path_denied(AUDIT_ANOM_LINK, "linkat");
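For context on what is being converted here: with sysctl_protected_hardlinks enabled, may_linkat() lets a caller hardlink a file it does not own only when the source is a plain regular file that is neither setuid nor group-executable setgid and that the caller could open for both reading and writing; owners and CAP_FOWNER holders are always allowed. A compact userspace model of that decision follows (toy code; the real checks also go through inode_permission() and, after this series, the mount's idmapping):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

struct toy_cred { unsigned int fsuid; bool cap_fowner; };

static bool toy_safe_hardlink_source(mode_t mode, bool can_read_write)
{
	if (!S_ISREG(mode))
		return false;			/* no pinning special files */
	if (mode & S_ISUID)
		return false;			/* no pinning setuid files  */
	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
		return false;			/* no executable setgid     */
	return can_read_write;			/* must be readable+writable */
}

static int toy_may_linkat(const struct toy_cred *cred, unsigned int i_uid,
			  mode_t mode, bool can_read_write,
			  bool protected_hardlinks)
{
	if (!protected_hardlinks)
		return 0;
	if (cred->fsuid == i_uid || cred->cap_fowner)
		return 0;			/* owner or CAP_FOWNER */
	return toy_safe_hardlink_source(mode, can_read_write) ? 0 : -1;
}

int main(void)
{
	struct toy_cred user = { .fsuid = 1000, .cap_fowner = false };

	/* Someone else's setuid binary: denied under protected_hardlinks. */
	printf("%d\n", toy_may_linkat(&user, 0, S_IFREG | S_ISUID | 0755,
				      true, true));
	return 0;
}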
@@ -1049,6 +1121,7 @@ int may_linkat(struct path *link)
* may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
* should be allowed, or not, on files that already
* exist.
+ * @mnt_userns: user namespace of the mount the inode was found from
* @dir_mode: mode bits of directory
* @dir_uid: owner of directory
* @inode: the inode of the file to open
@@ -1064,16 +1137,25 @@ int may_linkat(struct path *link)
* the directory doesn't have to be world writable: being group writable will
* be enough.
*
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode, simply pass init_user_ns.
+ *
* Returns 0 if the open is allowed, -ve on error.
*/
-static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
- struct inode * const inode)
+static int may_create_in_sticky(struct user_namespace *mnt_userns,
+ struct nameidata *nd, struct inode *const inode)
{
+ umode_t dir_mode = nd->dir_mode;
+ kuid_t dir_uid = nd->dir_uid;
+
if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
(!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
likely(!(dir_mode & S_ISVTX)) ||
- uid_eq(inode->i_uid, dir_uid) ||
- uid_eq(current_fsuid(), inode->i_uid))
+ uid_eq(i_uid_into_mnt(mnt_userns, inode), dir_uid) ||
+ uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)))
return 0;
if (likely(dir_mode & 0002) ||
@@ -1372,7 +1454,7 @@ static inline int handle_mounts(struct nameidata *nd, struct dentry *dentry,
return -ENOENT;
if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
return 0;
- if (unlazy_child(nd, dentry, seq))
+ if (!try_to_unlazy_next(nd, dentry, seq))
return -ECHILD;
// *path might've been clobbered by __follow_mount_rcu()
path->mnt = nd->path.mnt;
@@ -1466,7 +1548,7 @@ static struct dentry *lookup_fast(struct nameidata *nd,
unsigned seq;
dentry = __d_lookup_rcu(parent, &nd->last, &seq);
if (unlikely(!dentry)) {
- if (unlazy_walk(nd))
+ if (!try_to_unlazy(nd))
return ERR_PTR(-ECHILD);
return NULL;
}
@@ -1493,9 +1575,9 @@ static struct dentry *lookup_fast(struct nameidata *nd,
status = d_revalidate(dentry, nd->flags);
if (likely(status > 0))
return dentry;
- if (unlazy_child(nd, dentry, seq))
+ if (!try_to_unlazy_next(nd, dentry, seq))
return ERR_PTR(-ECHILD);
- if (unlikely(status == -ECHILD))
+ if (status == -ECHILD)
/* we'd been told to redo it in non-rcu mode */
status = d_revalidate(dentry, nd->flags);
} else {
@@ -1563,16 +1645,15 @@ static struct dentry *lookup_slow(const struct qstr *name,
return res;
}
-static inline int may_lookup(struct nameidata *nd)
+static inline int may_lookup(struct user_namespace *mnt_userns,
+ struct nameidata *nd)
{
if (nd->flags & LOOKUP_RCU) {
- int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
- if (err != -ECHILD)
+ int err = inode_permission(mnt_userns, nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
+ if (err != -ECHILD || !try_to_unlazy(nd))
return err;
- if (unlazy_walk(nd))
- return -ECHILD;
}
- return inode_permission(nd->inode, MAY_EXEC);
+ return inode_permission(mnt_userns, nd->inode, MAY_EXEC);
}
static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
@@ -1592,7 +1673,7 @@ static int reserve_stack(struct nameidata *nd, struct path *link, unsigned seq)
// unlazy even if we fail to grab the link - cleanup needs it
bool grabbed_link = legitimize_path(nd, link, seq);
- if (unlazy_walk(nd) != 0 || !grabbed_link)
+ if (!try_to_unlazy(nd) || !grabbed_link)
return -ECHILD;
if (nd_alloc_stack(nd))
@@ -1634,7 +1715,7 @@ static const char *pick_link(struct nameidata *nd, struct path *link,
touch_atime(&last->link);
cond_resched();
} else if (atime_needs_update(&last->link, inode)) {
- if (unlikely(unlazy_walk(nd)))
+ if (!try_to_unlazy(nd))
return ERR_PTR(-ECHILD);
touch_atime(&last->link);
}
@@ -1651,11 +1732,8 @@ static const char *pick_link(struct nameidata *nd, struct path *link,
get = inode->i_op->get_link;
if (nd->flags & LOOKUP_RCU) {
res = get(NULL, inode, &last->done);
- if (res == ERR_PTR(-ECHILD)) {
- if (unlikely(unlazy_walk(nd)))
- return ERR_PTR(-ECHILD);
+ if (res == ERR_PTR(-ECHILD) && try_to_unlazy(nd))
res = get(link->dentry, inode, &last->done);
- }
} else {
res = get(link->dentry, inode, &last->done);
}
@@ -2121,11 +2199,13 @@ static int link_path_walk(const char *name, struct nameidata *nd)
/* At this point we know we have a real path component. */
for(;;) {
+ struct user_namespace *mnt_userns;
const char *link;
u64 hash_len;
int type;
- err = may_lookup(nd);
+ mnt_userns = mnt_user_ns(nd->path.mnt);
+ err = may_lookup(mnt_userns, nd);
if (err)
return err;
@@ -2173,7 +2253,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
OK:
/* pathname or trailing symlink, done */
if (!depth) {
- nd->dir_uid = nd->inode->i_uid;
+ nd->dir_uid = i_uid_into_mnt(mnt_userns, nd->inode);
nd->dir_mode = nd->inode->i_mode;
nd->flags &= ~LOOKUP_PARENT;
return 0;
@@ -2195,7 +2275,7 @@ OK:
}
if (unlikely(!d_can_lookup(nd->path.dentry))) {
if (nd->flags & LOOKUP_RCU) {
- if (unlazy_walk(nd))
+ if (!try_to_unlazy(nd))
return -ECHILD;
}
return -ENOTDIR;
@@ -2209,6 +2289,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
int error;
const char *s = nd->name->name;
+ /* LOOKUP_CACHED requires RCU, ask caller to retry */
+ if ((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED)
+ return ERR_PTR(-EAGAIN);
+
if (!*s)
flags &= ~LOOKUP_RCU;
if (flags & LOOKUP_RCU)
@@ -2506,7 +2590,7 @@ static int lookup_one_len_common(const char *name, struct dentry *base,
return err;
}
- return inode_permission(base->d_inode, MAY_EXEC);
+ return inode_permission(&init_user_ns, base->d_inode, MAY_EXEC);
}
/**
@@ -2651,15 +2735,16 @@ int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
}
EXPORT_SYMBOL(user_path_at_empty);
-int __check_sticky(struct inode *dir, struct inode *inode)
+int __check_sticky(struct user_namespace *mnt_userns, struct inode *dir,
+ struct inode *inode)
{
kuid_t fsuid = current_fsuid();
- if (uid_eq(inode->i_uid, fsuid))
+ if (uid_eq(i_uid_into_mnt(mnt_userns, inode), fsuid))
return 0;
- if (uid_eq(dir->i_uid, fsuid))
+ if (uid_eq(i_uid_into_mnt(mnt_userns, dir), fsuid))
return 0;
- return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
+ return !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FOWNER);
}
EXPORT_SYMBOL(__check_sticky);
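__check_sticky() is the heart of the classic sticky-directory rule used by may_delete() and rename(): when the parent directory has S_ISVTX set, an entry may be removed only by the file's owner, the directory's owner, or a CAP_FOWNER holder (the check_sticky() wrapper skips the test entirely when the directory is not sticky). A toy model of the combined rule (userspace sketch; the kernel version additionally maps the uids through the mount's user namespace, as the hunk above now does):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

/* true = deletion allowed as far as the sticky rule is concerned */
static bool toy_sticky_delete_ok(mode_t dir_mode, unsigned int dir_uid,
				 unsigned int inode_uid, unsigned int fsuid,
				 bool cap_fowner)
{
	if (!(dir_mode & S_ISVTX))
		return true;		/* not sticky: only normal dir perms apply */
	if (fsuid == inode_uid)		/* caller owns the file      */
		return true;
	if (fsuid == dir_uid)		/* caller owns the directory */
		return true;
	return cap_fowner;		/* otherwise CAP_FOWNER is required */
}

int main(void)
{
	/* /tmp-like directory: root-owned, mode 1777 */
	printf("%d\n", toy_sticky_delete_ok(S_IFDIR | S_ISVTX | 0777,
					    0, 1000, 1000, false)); /* 1 */
	printf("%d\n", toy_sticky_delete_ok(S_IFDIR | S_ISVTX | 0777,
					    0, 1000, 2000, false)); /* 0 */
	return 0;
}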
@@ -2683,7 +2768,8 @@ EXPORT_SYMBOL(__check_sticky);
* 11. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
-static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
+static int may_delete(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *victim, bool isdir)
{
struct inode *inode = d_backing_inode(victim);
int error;
@@ -2695,19 +2781,21 @@ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
BUG_ON(victim->d_parent->d_inode != dir);
/* Inode writeback is not safe when the uid or gid are invalid. */
- if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
+ if (!uid_valid(i_uid_into_mnt(mnt_userns, inode)) ||
+ !gid_valid(i_gid_into_mnt(mnt_userns, inode)))
return -EOVERFLOW;
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
- error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
if (error)
return error;
if (IS_APPEND(dir))
return -EPERM;
- if (check_sticky(dir, inode) || IS_APPEND(inode) ||
- IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) || HAS_UNMAPPED_ID(inode))
+ if (check_sticky(mnt_userns, dir, inode) || IS_APPEND(inode) ||
+ IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) ||
+ HAS_UNMAPPED_ID(mnt_userns, inode))
return -EPERM;
if (isdir) {
if (!d_is_dir(victim))
@@ -2732,7 +2820,8 @@ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
* 4. We should have write and exec permissions on dir
* 5. We can't do it if dir is immutable (done in permission())
*/
-static inline int may_create(struct inode *dir, struct dentry *child)
+static inline int may_create(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *child)
{
struct user_namespace *s_user_ns;
audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
@@ -2741,10 +2830,10 @@ static inline int may_create(struct inode *dir, struct dentry *child)
if (IS_DEADDIR(dir))
return -ENOENT;
s_user_ns = dir->i_sb->s_user_ns;
- if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
- !kgid_has_mapping(s_user_ns, current_fsgid()))
+ if (!kuid_has_mapping(s_user_ns, fsuid_into_mnt(mnt_userns)) ||
+ !kgid_has_mapping(s_user_ns, fsgid_into_mnt(mnt_userns)))
return -EOVERFLOW;
- return inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ return inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
}
/*
@@ -2791,10 +2880,26 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
}
EXPORT_SYMBOL(unlock_rename);
-int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool want_excl)
+/**
+ * vfs_create - create new file
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the file to be created
+ * @mode: mode of the new file
+ * @want_excl: whether the file must not yet exist
+ *
+ * Create a new file.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode, simply pass init_user_ns.
+ */
+int vfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool want_excl)
{
- int error = may_create(dir, dentry);
+ int error = may_create(mnt_userns, dir, dentry);
if (error)
return error;
@@ -2805,7 +2910,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
- error = dir->i_op->create(dir, dentry, mode, want_excl);
+ error = dir->i_op->create(mnt_userns, dir, dentry, mode, want_excl);
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -2817,7 +2922,7 @@ int vfs_mkobj(struct dentry *dentry, umode_t mode,
void *arg)
{
struct inode *dir = dentry->d_parent->d_inode;
- int error = may_create(dir, dentry);
+ int error = may_create(&init_user_ns, dir, dentry);
if (error)
return error;
@@ -2839,7 +2944,8 @@ bool may_open_dev(const struct path *path)
!(path->mnt->mnt_sb->s_iflags & SB_I_NODEV);
}
-static int may_open(const struct path *path, int acc_mode, int flag)
+static int may_open(struct user_namespace *mnt_userns, const struct path *path,
+ int acc_mode, int flag)
{
struct dentry *dentry = path->dentry;
struct inode *inode = dentry->d_inode;
@@ -2874,7 +2980,7 @@ static int may_open(const struct path *path, int acc_mode, int flag)
break;
}
- error = inode_permission(inode, MAY_OPEN | acc_mode);
+ error = inode_permission(mnt_userns, inode, MAY_OPEN | acc_mode);
if (error)
return error;
@@ -2889,13 +2995,13 @@ static int may_open(const struct path *path, int acc_mode, int flag)
}
/* O_NOATIME can only be set by the owner or superuser */
- if (flag & O_NOATIME && !inode_owner_or_capable(inode))
+ if (flag & O_NOATIME && !inode_owner_or_capable(mnt_userns, inode))
return -EPERM;
return 0;
}
-static int handle_truncate(struct file *filp)
+static int handle_truncate(struct user_namespace *mnt_userns, struct file *filp)
{
const struct path *path = &filp->f_path;
struct inode *inode = path->dentry->d_inode;
@@ -2909,7 +3015,7 @@ static int handle_truncate(struct file *filp)
if (!error)
error = security_path_truncate(path);
if (!error) {
- error = do_truncate(path->dentry, 0,
+ error = do_truncate(mnt_userns, path->dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
filp);
}
@@ -2924,7 +3030,9 @@ static inline int open_to_namei_flags(int flag)
return flag;
}
-static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode)
+static int may_o_create(struct user_namespace *mnt_userns,
+ const struct path *dir, struct dentry *dentry,
+ umode_t mode)
{
struct user_namespace *s_user_ns;
int error = security_path_mknod(dir, dentry, mode, 0);
@@ -2932,11 +3040,12 @@ static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t m
return error;
s_user_ns = dir->dentry->d_sb->s_user_ns;
- if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
- !kgid_has_mapping(s_user_ns, current_fsgid()))
+ if (!kuid_has_mapping(s_user_ns, fsuid_into_mnt(mnt_userns)) ||
+ !kgid_has_mapping(s_user_ns, fsgid_into_mnt(mnt_userns)))
return -EOVERFLOW;
- error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
+ error = inode_permission(mnt_userns, dir->dentry->d_inode,
+ MAY_WRITE | MAY_EXEC);
if (error)
return error;
@@ -3015,6 +3124,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
const struct open_flags *op,
bool got_write)
{
+ struct user_namespace *mnt_userns;
struct dentry *dir = nd->path.dentry;
struct inode *dir_inode = dir->d_inode;
int open_flag = op->open_flag;
@@ -3062,13 +3172,15 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
*/
if (unlikely(!got_write))
open_flag &= ~O_TRUNC;
+ mnt_userns = mnt_user_ns(nd->path.mnt);
if (open_flag & O_CREAT) {
if (open_flag & O_EXCL)
open_flag &= ~O_TRUNC;
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
if (likely(got_write))
- create_error = may_o_create(&nd->path, dentry, mode);
+ create_error = may_o_create(mnt_userns, &nd->path,
+ dentry, mode);
else
create_error = -EROFS;
}
@@ -3103,8 +3215,9 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
error = -EACCES;
goto out_dput;
}
- error = dir_inode->i_op->create(dir_inode, dentry, mode,
- open_flag & O_EXCL);
+
+ error = dir_inode->i_op->create(mnt_userns, dir_inode, dentry,
+ mode, open_flag & O_EXCL);
if (error)
goto out_dput;
}
@@ -3129,7 +3242,6 @@ static const char *open_last_lookups(struct nameidata *nd,
struct inode *inode;
struct dentry *dentry;
const char *res;
- int error;
nd->flags |= op->intent;
@@ -3153,9 +3265,8 @@ static const char *open_last_lookups(struct nameidata *nd,
} else {
/* create side of things */
if (nd->flags & LOOKUP_RCU) {
- error = unlazy_walk(nd);
- if (unlikely(error))
- return ERR_PTR(error);
+ if (!try_to_unlazy(nd))
+ return ERR_PTR(-ECHILD);
}
audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
/* trailing slashes? */
@@ -3164,9 +3275,7 @@ static const char *open_last_lookups(struct nameidata *nd,
}
if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
- error = mnt_want_write(nd->path.mnt);
- if (!error)
- got_write = true;
+ got_write = !mnt_want_write(nd->path.mnt);
/*
* do _not_ fail yet - we might not need that or fail with
* a different error; let lookup_open() decide; we'll be
@@ -3212,6 +3321,7 @@ finish_lookup:
static int do_open(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
+ struct user_namespace *mnt_userns;
int open_flag = op->open_flag;
bool do_truncate;
int acc_mode;
@@ -3224,12 +3334,13 @@ static int do_open(struct nameidata *nd,
}
if (!(file->f_mode & FMODE_CREATED))
audit_inode(nd->name, nd->path.dentry, 0);
+ mnt_userns = mnt_user_ns(nd->path.mnt);
if (open_flag & O_CREAT) {
if ((open_flag & O_EXCL) && !(file->f_mode & FMODE_CREATED))
return -EEXIST;
if (d_is_dir(nd->path.dentry))
return -EISDIR;
- error = may_create_in_sticky(nd->dir_mode, nd->dir_uid,
+ error = may_create_in_sticky(mnt_userns, nd,
d_backing_inode(nd->path.dentry));
if (unlikely(error))
return error;
@@ -3249,13 +3360,13 @@ static int do_open(struct nameidata *nd,
return error;
do_truncate = true;
}
- error = may_open(&nd->path, acc_mode, open_flag);
+ error = may_open(mnt_userns, &nd->path, acc_mode, open_flag);
if (!error && !(file->f_mode & FMODE_OPENED))
error = vfs_open(&nd->path, file);
if (!error)
error = ima_file_check(file, op->acc_mode);
if (!error && do_truncate)
- error = handle_truncate(file);
+ error = handle_truncate(mnt_userns, file);
if (unlikely(error > 0)) {
WARN_ON(1);
error = -EINVAL;
@@ -3265,7 +3376,23 @@ static int do_open(struct nameidata *nd,
return error;
}
-struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
+/**
+ * vfs_tmpfile - create tmpfile
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dentry: pointer to dentry of the base directory
+ * @mode: mode of the new tmpfile
+ * @open_flag: flags
+ *
+ * Create a temporary file.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode simply pass init_user_ns.
+ */
+struct dentry *vfs_tmpfile(struct user_namespace *mnt_userns,
+ struct dentry *dentry, umode_t mode, int open_flag)
{
struct dentry *child = NULL;
struct inode *dir = dentry->d_inode;
@@ -3273,7 +3400,7 @@ struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
int error;
/* we want directory to be writable */
- error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
if (error)
goto out_err;
error = -EOPNOTSUPP;
@@ -3283,7 +3410,7 @@ struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
child = d_alloc(dentry, &slash_name);
if (unlikely(!child))
goto out_err;
- error = dir->i_op->tmpfile(dir, child, mode);
+ error = dir->i_op->tmpfile(mnt_userns, dir, child, mode);
if (error)
goto out_err;
error = -ENOENT;
@@ -3295,7 +3422,7 @@ struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
inode->i_state |= I_LINKABLE;
spin_unlock(&inode->i_lock);
}
- ima_post_create_tmpfile(inode);
+ ima_post_create_tmpfile(mnt_userns, inode);
return child;
out_err:
@@ -3308,6 +3435,7 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags,
const struct open_flags *op,
struct file *file)
{
+ struct user_namespace *mnt_userns;
struct dentry *child;
struct path path;
int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
@@ -3316,7 +3444,8 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags,
error = mnt_want_write(path.mnt);
if (unlikely(error))
goto out;
- child = vfs_tmpfile(path.dentry, op->mode, op->open_flag);
+ mnt_userns = mnt_user_ns(path.mnt);
+ child = vfs_tmpfile(mnt_userns, path.dentry, op->mode, op->open_flag);
error = PTR_ERR(child);
if (IS_ERR(child))
goto out2;
@@ -3324,11 +3453,9 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags,
path.dentry = child;
audit_inode(nd->name, child, 0);
/* Don't check for other permissions, the inode was just created */
- error = may_open(&path, 0, op->open_flag);
- if (error)
- goto out2;
- file->f_path.mnt = path.mnt;
- error = finish_open(file, child, NULL);
+ error = may_open(mnt_userns, &path, 0, op->open_flag);
+ if (!error)
+ error = vfs_open(&path, file);
out2:
mnt_drop_write(path.mnt);
out:
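
For context, the path above is what services O_TMPFILE opens; a minimal userspace sketch (assuming a filesystem with ->tmpfile support, e.g. tmpfs on /tmp) that ends up in do_tmpfile()/vfs_tmpfile():

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* unnamed temporary file, never visible in the directory */
		int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		write(fd, "scratch", 7);
		close(fd);
		return 0;
	}
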
@@ -3528,10 +3655,27 @@ inline struct dentry *user_path_create(int dfd, const char __user *pathname,
}
EXPORT_SYMBOL(user_path_create);
-int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+/**
+ * vfs_mknod - create device node or file
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: pointer to dentry of the new device node or file
+ * @mode: mode of the new device node or file
+ * @dev: device number of device to create
+ *
+ * Create a device node or file.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode simply pass init_user_ns.
+ */
+int vfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t dev)
{
bool is_whiteout = S_ISCHR(mode) && dev == WHITEOUT_DEV;
- int error = may_create(dir, dentry);
+ int error = may_create(mnt_userns, dir, dentry);
if (error)
return error;
@@ -3551,7 +3695,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
if (error)
return error;
- error = dir->i_op->mknod(dir, dentry, mode, dev);
+ error = dir->i_op->mknod(mnt_userns, dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -3578,6 +3722,7 @@ static int may_mknod(umode_t mode)
static long do_mknodat(int dfd, const char __user *filename, umode_t mode,
unsigned int dev)
{
+ struct user_namespace *mnt_userns;
struct dentry *dentry;
struct path path;
int error;
@@ -3596,18 +3741,22 @@ retry:
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
+
+ mnt_userns = mnt_user_ns(path.mnt);
switch (mode & S_IFMT) {
case 0: case S_IFREG:
- error = vfs_create(path.dentry->d_inode,dentry,mode,true);
+ error = vfs_create(mnt_userns, path.dentry->d_inode,
+ dentry, mode, true);
if (!error)
- ima_post_path_mknod(dentry);
+ ima_post_path_mknod(mnt_userns, dentry);
break;
case S_IFCHR: case S_IFBLK:
- error = vfs_mknod(path.dentry->d_inode,dentry,mode,
- new_decode_dev(dev));
+ error = vfs_mknod(mnt_userns, path.dentry->d_inode,
+ dentry, mode, new_decode_dev(dev));
break;
case S_IFIFO: case S_IFSOCK:
- error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
+ error = vfs_mknod(mnt_userns, path.dentry->d_inode,
+ dentry, mode, 0);
break;
}
out:
@@ -3630,9 +3779,25 @@ SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, d
return do_mknodat(AT_FDCWD, filename, mode, dev);
}
-int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+/**
+ * vfs_mkdir - create directory
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: pointer to dentry of the new directory
+ * @mode: mode of the new directory
+ *
+ * Create a directory.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode simply pass init_user_ns.
+ */
+int vfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- int error = may_create(dir, dentry);
+ int error = may_create(mnt_userns, dir, dentry);
unsigned max_links = dir->i_sb->s_max_links;
if (error)
@@ -3649,7 +3814,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (max_links && dir->i_nlink >= max_links)
return -EMLINK;
- error = dir->i_op->mkdir(dir, dentry, mode);
+ error = dir->i_op->mkdir(mnt_userns, dir, dentry, mode);
if (!error)
fsnotify_mkdir(dir, dentry);
return error;
@@ -3671,8 +3836,12 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
error = security_path_mkdir(&path, dentry, mode);
- if (!error)
- error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ if (!error) {
+ struct user_namespace *mnt_userns;
+ mnt_userns = mnt_user_ns(path.mnt);
+ error = vfs_mkdir(mnt_userns, path.dentry->d_inode, dentry,
+ mode);
+ }
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
@@ -3691,9 +3860,24 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
return do_mkdirat(AT_FDCWD, pathname, mode);
}
-int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+/**
+ * vfs_rmdir - remove directory
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: pointer to dentry of the directory to remove
+ *
+ * Remove a directory.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode simply pass init_user_ns.
+ */
+int vfs_rmdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry)
{
- int error = may_delete(dir, dentry, 1);
+ int error = may_delete(mnt_userns, dir, dentry, 1);
if (error)
return error;
@@ -3733,6 +3917,7 @@ EXPORT_SYMBOL(vfs_rmdir);
long do_rmdir(int dfd, struct filename *name)
{
+ struct user_namespace *mnt_userns;
int error = 0;
struct dentry *dentry;
struct path path;
@@ -3773,7 +3958,8 @@ retry:
error = security_path_rmdir(&path, dentry);
if (error)
goto exit3;
- error = vfs_rmdir(path.dentry->d_inode, dentry);
+ mnt_userns = mnt_user_ns(path.mnt);
+ error = vfs_rmdir(mnt_userns, path.dentry->d_inode, dentry);
exit3:
dput(dentry);
exit2:
@@ -3796,6 +3982,7 @@ SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
/**
* vfs_unlink - unlink a filesystem object
+ * @mnt_userns: user namespace of the mount the inode was found from
* @dir: parent directory
* @dentry: victim
* @delegated_inode: returns victim inode, if the inode is delegated.
@@ -3811,11 +3998,18 @@ SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode simply pass init_user_ns.
*/
-int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
+int vfs_unlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, struct inode **delegated_inode)
{
struct inode *target = dentry->d_inode;
- int error = may_delete(dir, dentry, 0);
+ int error = may_delete(mnt_userns, dir, dentry, 0);
if (error)
return error;
@@ -3886,6 +4080,8 @@ retry_deleg:
dentry = __lookup_hash(&last, path.dentry, lookup_flags);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
+ struct user_namespace *mnt_userns;
+
/* Why not before? Because we want correct error value */
if (last.name[last.len])
goto slashes;
@@ -3896,7 +4092,9 @@ retry_deleg:
error = security_path_unlink(&path, dentry);
if (error)
goto exit2;
- error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode);
+ mnt_userns = mnt_user_ns(path.mnt);
+ error = vfs_unlink(mnt_userns, path.dentry->d_inode, dentry,
+ &delegated_inode);
exit2:
dput(dentry);
}
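
The delegation handling described in the vfs_unlink() kernel-doc follows the retry pattern below, a simplified sketch of what do_unlinkat() itself does (lookup and error paths trimmed):

	struct inode *delegated_inode = NULL;
	struct user_namespace *mnt_userns = mnt_user_ns(path->mnt);
	int error;

retry_deleg:
	inode_lock_nested(dir, I_MUTEX_PARENT);
	/* ... look up the victim dentry under the parent lock ... */
	error = vfs_unlink(mnt_userns, dir, dentry, &delegated_inode);
	inode_unlock(dir);
	if (delegated_inode) {
		/* an outstanding NFS delegation was broken; wait and retry */
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}
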
@@ -3945,9 +4143,25 @@ SYSCALL_DEFINE1(unlink, const char __user *, pathname)
return do_unlinkat(AT_FDCWD, getname(pathname));
}
-int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+/**
+ * vfs_symlink - create symlink
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @dir: inode of the parent directory
+ * @dentry: pointer to dentry of the new symlink
+ * @oldname: name of the file to link to
+ *
+ * Create a symlink.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode simply pass init_user_ns.
+ */
+int vfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *oldname)
{
- int error = may_create(dir, dentry);
+ int error = may_create(mnt_userns, dir, dentry);
if (error)
return error;
@@ -3959,7 +4173,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
if (error)
return error;
- error = dir->i_op->symlink(dir, dentry, oldname);
+ error = dir->i_op->symlink(mnt_userns, dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -3985,8 +4199,13 @@ retry:
goto out_putname;
error = security_path_symlink(&path, dentry, from->name);
- if (!error)
- error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
+ if (!error) {
+ struct user_namespace *mnt_userns;
+
+ mnt_userns = mnt_user_ns(path.mnt);
+ error = vfs_symlink(mnt_userns, path.dentry->d_inode, dentry,
+ from->name);
+ }
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
@@ -4011,6 +4230,7 @@ SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newn
/**
* vfs_link - create a new link
* @old_dentry: object to be linked
+ * @mnt_userns: the user namespace of the mount
* @dir: new parent
* @new_dentry: where to create the new link
* @delegated_inode: returns inode needing a delegation break
@@ -4026,8 +4246,16 @@ SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newn
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then take
+ * care to map the inode according to @mnt_userns before checking permissions.
+ * On non-idmapped mounts or if permission checking is to be performed on the
+ * raw inode simply pass init_user_ns.
*/
-int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
+int vfs_link(struct dentry *old_dentry, struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *new_dentry,
+ struct inode **delegated_inode)
{
struct inode *inode = old_dentry->d_inode;
unsigned max_links = dir->i_sb->s_max_links;
@@ -4036,7 +4264,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
if (!inode)
return -ENOENT;
- error = may_create(dir, new_dentry);
+ error = may_create(mnt_userns, dir, new_dentry);
if (error)
return error;
@@ -4053,7 +4281,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
* be written back improperly if their true value is unknown to
* the vfs.
*/
- if (HAS_UNMAPPED_ID(inode))
+ if (HAS_UNMAPPED_ID(mnt_userns, inode))
return -EPERM;
if (!dir->i_op->link)
return -EPERM;
@@ -4100,6 +4328,7 @@ EXPORT_SYMBOL(vfs_link);
static int do_linkat(int olddfd, const char __user *oldname, int newdfd,
const char __user *newname, int flags)
{
+ struct user_namespace *mnt_userns;
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
@@ -4135,13 +4364,15 @@ retry:
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto out_dput;
- error = may_linkat(&old_path);
+ mnt_userns = mnt_user_ns(new_path.mnt);
+ error = may_linkat(mnt_userns, &old_path);
if (unlikely(error))
goto out_dput;
error = security_path_link(old_path.dentry, &new_path, new_dentry);
if (error)
goto out_dput;
- error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
+ error = vfs_link(old_path.dentry, mnt_userns, new_path.dentry->d_inode,
+ new_dentry, &delegated_inode);
out_dput:
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
@@ -4175,12 +4406,14 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
/**
* vfs_rename - rename a filesystem object
- * @old_dir: parent of source
- * @old_dentry: source
- * @new_dir: parent of destination
- * @new_dentry: destination
- * @delegated_inode: returns an inode needing a delegation break
- * @flags: rename flags
+ * @old_mnt_userns: old user namespace of the mount the inode was found from
+ * @old_dir: parent of source
+ * @old_dentry: source
+ * @new_mnt_userns: new user namespace of the mount the inode was found from
+ * @new_dir: parent of destination
+ * @new_dentry: destination
+ * @delegated_inode: returns an inode needing a delegation break
+ * @flags: rename flags
*
* The caller must hold multiple mutexes--see lock_rename().
*
@@ -4223,11 +4456,14 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
* ->i_mutex on parents, which works but leads to some truly excessive
* locking].
*/
-int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- struct inode **delegated_inode, unsigned int flags)
+int vfs_rename(struct renamedata *rd)
{
int error;
+ struct inode *old_dir = rd->old_dir, *new_dir = rd->new_dir;
+ struct dentry *old_dentry = rd->old_dentry;
+ struct dentry *new_dentry = rd->new_dentry;
+ struct inode **delegated_inode = rd->delegated_inode;
+ unsigned int flags = rd->flags;
bool is_dir = d_is_dir(old_dentry);
struct inode *source = old_dentry->d_inode;
struct inode *target = new_dentry->d_inode;
@@ -4238,19 +4474,21 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (source == target)
return 0;
- error = may_delete(old_dir, old_dentry, is_dir);
+ error = may_delete(rd->old_mnt_userns, old_dir, old_dentry, is_dir);
if (error)
return error;
if (!target) {
- error = may_create(new_dir, new_dentry);
+ error = may_create(rd->new_mnt_userns, new_dir, new_dentry);
} else {
new_is_dir = d_is_dir(new_dentry);
if (!(flags & RENAME_EXCHANGE))
- error = may_delete(new_dir, new_dentry, is_dir);
+ error = may_delete(rd->new_mnt_userns, new_dir,
+ new_dentry, is_dir);
else
- error = may_delete(new_dir, new_dentry, new_is_dir);
+ error = may_delete(rd->new_mnt_userns, new_dir,
+ new_dentry, new_is_dir);
}
if (error)
return error;
@@ -4264,12 +4502,14 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
if (new_dir != old_dir) {
if (is_dir) {
- error = inode_permission(source, MAY_WRITE);
+ error = inode_permission(rd->old_mnt_userns, source,
+ MAY_WRITE);
if (error)
return error;
}
if ((flags & RENAME_EXCHANGE) && new_is_dir) {
- error = inode_permission(target, MAY_WRITE);
+ error = inode_permission(rd->new_mnt_userns, target,
+ MAY_WRITE);
if (error)
return error;
}
@@ -4309,8 +4549,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (error)
goto out;
}
- error = old_dir->i_op->rename(old_dir, old_dentry,
- new_dir, new_dentry, flags);
+ error = old_dir->i_op->rename(rd->new_mnt_userns, old_dir, old_dentry,
+ new_dir, new_dentry, flags);
if (error)
goto out;
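
With vfs_rename() now taking a struct renamedata, converted callers fill in the structure instead of passing seven arguments. A hedged sketch for a rename within a single (possibly idmapped) mount, where old_parent and new_parent are assumed to be the already locked parent dentries:

	struct renamedata rd = {
		.old_mnt_userns  = mnt_user_ns(path->mnt),
		.old_dir         = d_inode(old_parent),
		.old_dentry      = old_dentry,
		.new_mnt_userns  = mnt_user_ns(path->mnt),
		.new_dir         = d_inode(new_parent),
		.new_dentry      = new_dentry,
		.delegated_inode = NULL,   /* or &delegated_inode, as for vfs_unlink() */
		.flags           = 0,
	};
	int error = vfs_rename(&rd);
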
@@ -4351,6 +4591,7 @@ EXPORT_SYMBOL(vfs_rename);
int do_renameat2(int olddfd, struct filename *from, int newdfd,
struct filename *to, unsigned int flags)
{
+ struct renamedata rd;
struct dentry *old_dentry, *new_dentry;
struct dentry *trap;
struct path old_path, new_path;
@@ -4454,9 +4695,16 @@ retry_deleg:
&new_path, new_dentry, flags);
if (error)
goto exit5;
- error = vfs_rename(old_path.dentry->d_inode, old_dentry,
- new_path.dentry->d_inode, new_dentry,
- &delegated_inode, flags);
+
+ rd.old_dir = old_path.dentry->d_inode;
+ rd.old_dentry = old_dentry;
+ rd.old_mnt_userns = mnt_user_ns(old_path.mnt);
+ rd.new_dir = new_path.dentry->d_inode;
+ rd.new_dentry = new_dentry;
+ rd.new_mnt_userns = mnt_user_ns(new_path.mnt);
+ rd.delegated_inode = &delegated_inode;
+ rd.flags = flags;
+ error = vfs_rename(&rd);
exit5:
dput(new_dentry);
exit4:
diff --git a/fs/namespace.c b/fs/namespace.c
index 9d33909d0f9e..56bb5a5fdc0d 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -25,6 +25,7 @@
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
+#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
@@ -73,6 +74,15 @@ static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
+struct mount_kattr {
+ unsigned int attr_set;
+ unsigned int attr_clr;
+ unsigned int propagation;
+ unsigned int lookup_flags;
+ bool recurse;
+ struct user_namespace *mnt_userns;
+};
+
/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
@@ -87,6 +97,16 @@ EXPORT_SYMBOL_GPL(fs_kobj);
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
+static inline void lock_mount_hash(void)
+{
+ write_seqlock(&mount_lock);
+}
+
+static inline void unlock_mount_hash(void)
+{
+ write_sequnlock(&mount_lock);
+}
+
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
@@ -210,6 +230,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_HLIST_NODE(&mnt->mnt_mp_list);
INIT_LIST_HEAD(&mnt->mnt_umounting);
INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
+ mnt->mnt.mnt_userns = &init_user_ns;
}
return mnt;
@@ -360,50 +381,36 @@ int mnt_want_write(struct vfsmount *m)
EXPORT_SYMBOL_GPL(mnt_want_write);
/**
- * mnt_clone_write - get write access to a mount
- * @mnt: the mount on which to take a write
- *
- * This is effectively like mnt_want_write, except
- * it must only be used to take an extra write reference
- * on a mountpoint that we already know has a write reference
- * on it. This allows some optimisation.
- *
- * After finished, mnt_drop_write must be called as usual to
- * drop the reference.
- */
-int mnt_clone_write(struct vfsmount *mnt)
-{
- /* superblock may be r/o */
- if (__mnt_is_readonly(mnt))
- return -EROFS;
- preempt_disable();
- mnt_inc_writers(real_mount(mnt));
- preempt_enable();
- return 0;
-}
-EXPORT_SYMBOL_GPL(mnt_clone_write);
-
-/**
* __mnt_want_write_file - get write access to a file's mount
* @file: the file whose mount to take a write on
*
- * This is like __mnt_want_write, but it takes a file and can
- * do some optimisations if the file is open for write already
+ * This is like __mnt_want_write, but if the file is already open for writing it
+ * skips incrementing mnt_writers (since the open file already has a reference)
+ * and instead only does the check for emergency r/o remounts. This must be
+ * paired with __mnt_drop_write_file.
*/
int __mnt_want_write_file(struct file *file)
{
- if (!(file->f_mode & FMODE_WRITER))
- return __mnt_want_write(file->f_path.mnt);
- else
- return mnt_clone_write(file->f_path.mnt);
+ if (file->f_mode & FMODE_WRITER) {
+ /*
+ * Superblock may have become readonly while there are still
+ * writable fd's, e.g. due to a fs error with errors=remount-ro
+ */
+ if (__mnt_is_readonly(file->f_path.mnt))
+ return -EROFS;
+ return 0;
+ }
+ return __mnt_want_write(file->f_path.mnt);
}
/**
* mnt_want_write_file - get write access to a file's mount
* @file: the file whose mount to take a write on
*
- * This is like mnt_want_write, but it takes a file and can
- * do some optimisations if the file is open for write already
+ * This is like mnt_want_write, but if the file is already open for writing it
+ * skips incrementing mnt_writers (since the open file already has a reference)
+ * and instead only does the freeze protection and the check for emergency r/o
+ * remounts. This must be paired with mnt_drop_write_file.
*/
int mnt_want_write_file(struct file *file)
{
@@ -449,7 +456,8 @@ EXPORT_SYMBOL_GPL(mnt_drop_write);
void __mnt_drop_write_file(struct file *file)
{
- __mnt_drop_write(file->f_path.mnt);
+ if (!(file->f_mode & FMODE_WRITER))
+ __mnt_drop_write(file->f_path.mnt);
}
void mnt_drop_write_file(struct file *file)
@@ -459,11 +467,8 @@ void mnt_drop_write_file(struct file *file)
}
EXPORT_SYMBOL(mnt_drop_write_file);
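
The pairing rules for the file-based helpers are unchanged by this rework; a short sketch of the intended usage in a hypothetical caller:

	static int example_update(struct file *file)
	{
		int err;

		err = mnt_want_write_file(file);   /* freeze protection + r/o checks */
		if (err)
			return err;
		/* ... modify the inode backing @file ... */
		mnt_drop_write_file(file);
		return 0;
	}
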
-static int mnt_make_readonly(struct mount *mnt)
+static inline int mnt_hold_writers(struct mount *mnt)
{
- int ret = 0;
-
- lock_mount_hash();
mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
/*
* After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -488,25 +493,30 @@ static int mnt_make_readonly(struct mount *mnt)
* we're counting up here.
*/
if (mnt_get_writers(mnt) > 0)
- ret = -EBUSY;
- else
- mnt->mnt.mnt_flags |= MNT_READONLY;
+ return -EBUSY;
+
+ return 0;
+}
+
+static inline void mnt_unhold_writers(struct mount *mnt)
+{
/*
* MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
* that become unheld will see MNT_READONLY.
*/
smp_wmb();
mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
- unlock_mount_hash();
- return ret;
}
-static int __mnt_unmake_readonly(struct mount *mnt)
+static int mnt_make_readonly(struct mount *mnt)
{
- lock_mount_hash();
- mnt->mnt.mnt_flags &= ~MNT_READONLY;
- unlock_mount_hash();
- return 0;
+ int ret;
+
+ ret = mnt_hold_writers(mnt);
+ if (!ret)
+ mnt->mnt.mnt_flags |= MNT_READONLY;
+ mnt_unhold_writers(mnt);
+ return ret;
}
int sb_prepare_remount_readonly(struct super_block *sb)
@@ -547,6 +557,11 @@ int sb_prepare_remount_readonly(struct super_block *sb)
static void free_vfsmnt(struct mount *mnt)
{
+ struct user_namespace *mnt_userns;
+
+ mnt_userns = mnt_user_ns(&mnt->mnt);
+ if (mnt_userns != &init_user_ns)
+ put_user_ns(mnt_userns);
kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
free_percpu(mnt->mnt_pcp);
@@ -1055,6 +1070,9 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
atomic_inc(&sb->s_active);
+ mnt->mnt.mnt_userns = mnt_user_ns(&old->mnt);
+ if (mnt->mnt.mnt_userns != &init_user_ns)
+ mnt->mnt.mnt_userns = get_user_ns(mnt->mnt.mnt_userns);
mnt->mnt.mnt_sb = sb;
mnt->mnt.mnt_root = dget(root);
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
@@ -2514,20 +2532,15 @@ static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
if (readonly_request)
return mnt_make_readonly(mnt);
- return __mnt_unmake_readonly(mnt);
+ mnt->mnt.mnt_flags &= ~MNT_READONLY;
+ return 0;
}
-/*
- * Update the user-settable attributes on a mount. The caller must hold
- * sb->s_umount for writing.
- */
static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
{
- lock_mount_hash();
mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
mnt->mnt.mnt_flags = mnt_flags;
touch_mnt_namespace(mnt->mnt_ns);
- unlock_mount_hash();
}
static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
@@ -2572,11 +2585,17 @@ static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
if (!can_change_locked_flags(mnt, mnt_flags))
return -EPERM;
- down_write(&sb->s_umount);
+ /*
+ * We're only checking whether the superblock is read-only not
+ * changing it, so only take down_read(&sb->s_umount).
+ */
+ down_read(&sb->s_umount);
+ lock_mount_hash();
ret = change_mount_ro_state(mnt, mnt_flags);
if (ret == 0)
set_mount_attributes(mnt, mnt_flags);
- up_write(&sb->s_umount);
+ unlock_mount_hash();
+ up_read(&sb->s_umount);
mnt_warn_timestamp_expiry(path, &mnt->mnt);
@@ -2616,8 +2635,11 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags,
err = -EPERM;
if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
err = reconfigure_super(fc);
- if (!err)
+ if (!err) {
+ lock_mount_hash();
set_mount_attributes(mnt, mnt_flags);
+ unlock_mount_hash();
+ }
}
up_write(&sb->s_umount);
}
@@ -3440,6 +3462,33 @@ out_type:
return ret;
}
+#define FSMOUNT_VALID_FLAGS \
+ (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
+ MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME)
+
+#define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
+
+#define MOUNT_SETATTR_PROPAGATION_FLAGS \
+ (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
+
+static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
+{
+ unsigned int mnt_flags = 0;
+
+ if (attr_flags & MOUNT_ATTR_RDONLY)
+ mnt_flags |= MNT_READONLY;
+ if (attr_flags & MOUNT_ATTR_NOSUID)
+ mnt_flags |= MNT_NOSUID;
+ if (attr_flags & MOUNT_ATTR_NODEV)
+ mnt_flags |= MNT_NODEV;
+ if (attr_flags & MOUNT_ATTR_NOEXEC)
+ mnt_flags |= MNT_NOEXEC;
+ if (attr_flags & MOUNT_ATTR_NODIRATIME)
+ mnt_flags |= MNT_NODIRATIME;
+
+ return mnt_flags;
+}
+
/*
* Create a kernel mount representation for a new, prepared superblock
* (specified by fs_fd) and attach to an open_tree-like file descriptor.
@@ -3462,24 +3511,10 @@ SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
return -EINVAL;
- if (attr_flags & ~(MOUNT_ATTR_RDONLY |
- MOUNT_ATTR_NOSUID |
- MOUNT_ATTR_NODEV |
- MOUNT_ATTR_NOEXEC |
- MOUNT_ATTR__ATIME |
- MOUNT_ATTR_NODIRATIME))
+ if (attr_flags & ~FSMOUNT_VALID_FLAGS)
return -EINVAL;
- if (attr_flags & MOUNT_ATTR_RDONLY)
- mnt_flags |= MNT_READONLY;
- if (attr_flags & MOUNT_ATTR_NOSUID)
- mnt_flags |= MNT_NOSUID;
- if (attr_flags & MOUNT_ATTR_NODEV)
- mnt_flags |= MNT_NODEV;
- if (attr_flags & MOUNT_ATTR_NOEXEC)
- mnt_flags |= MNT_NOEXEC;
- if (attr_flags & MOUNT_ATTR_NODIRATIME)
- mnt_flags |= MNT_NODIRATIME;
+ mnt_flags = attr_flags_to_mnt_flags(attr_flags);
switch (attr_flags & MOUNT_ATTR__ATIME) {
case MOUNT_ATTR_STRICTATIME:
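
From userspace the translated attr_flags arrive through fsmount(2); a hedged sketch using the new mount API (raw syscall wrappers assumed, since libc may not provide these, and error handling trimmed):

	int fsfd = fsopen("tmpfs", FSOPEN_CLOEXEC);

	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	int mfd = fsmount(fsfd, FSMOUNT_CLOEXEC,
			  MOUNT_ATTR_RDONLY | MOUNT_ATTR_NODEV | MOUNT_ATTR_NOEXEC);
	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
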
@@ -3787,6 +3822,362 @@ out0:
return error;
}
+static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
+{
+ unsigned int flags = mnt->mnt.mnt_flags;
+
+ /* flags to clear */
+ flags &= ~kattr->attr_clr;
+ /* flags to raise */
+ flags |= kattr->attr_set;
+
+ return flags;
+}
+
+static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
+{
+ struct vfsmount *m = &mnt->mnt;
+
+ if (!kattr->mnt_userns)
+ return 0;
+
+ /*
+ * Once a mount has been idmapped we don't allow it to change its
+ * mapping. It makes things simpler and callers can just create
+ * another bind-mount they can idmap if they want to.
+ */
+ if (mnt_user_ns(m) != &init_user_ns)
+ return -EPERM;
+
+ /* The underlying filesystem doesn't support idmapped mounts yet. */
+ if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
+ return -EINVAL;
+
+ /* We're not controlling the superblock. */
+ if (!ns_capable(m->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /* The mount is already visible in the filesystem hierarchy. */
+ if (!is_anon_ns(mnt->mnt_ns))
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct mount *mount_setattr_prepare(struct mount_kattr *kattr,
+ struct mount *mnt, int *err)
+{
+ struct mount *m = mnt, *last = NULL;
+
+ if (!is_mounted(&m->mnt)) {
+ *err = -EINVAL;
+ goto out;
+ }
+
+ if (!(mnt_has_parent(m) ? check_mnt(m) : is_anon_ns(m->mnt_ns))) {
+ *err = -EINVAL;
+ goto out;
+ }
+
+ do {
+ unsigned int flags;
+
+ flags = recalc_flags(kattr, m);
+ if (!can_change_locked_flags(m, flags)) {
+ *err = -EPERM;
+ goto out;
+ }
+
+ *err = can_idmap_mount(kattr, m);
+ if (*err)
+ goto out;
+
+ last = m;
+
+ if ((kattr->attr_set & MNT_READONLY) &&
+ !(m->mnt.mnt_flags & MNT_READONLY)) {
+ *err = mnt_hold_writers(m);
+ if (*err)
+ goto out;
+ }
+ } while (kattr->recurse && (m = next_mnt(m, mnt)));
+
+out:
+ return last;
+}
+
+static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
+{
+ struct user_namespace *mnt_userns;
+
+ if (!kattr->mnt_userns)
+ return;
+
+ mnt_userns = get_user_ns(kattr->mnt_userns);
+ /* Pairs with smp_load_acquire() in mnt_user_ns(). */
+ smp_store_release(&mnt->mnt.mnt_userns, mnt_userns);
+}
+
+static void mount_setattr_commit(struct mount_kattr *kattr,
+ struct mount *mnt, struct mount *last,
+ int err)
+{
+ struct mount *m = mnt;
+
+ do {
+ if (!err) {
+ unsigned int flags;
+
+ do_idmap_mount(kattr, m);
+ flags = recalc_flags(kattr, m);
+ WRITE_ONCE(m->mnt.mnt_flags, flags);
+ }
+
+ /*
+ * If we set MNT_READONLY above, make it visible before clearing
+ * MNT_WRITE_HOLD; otherwise we failed to recursively apply the
+ * mount options and need to drop the write hold we took.
+ */
+ if ((kattr->attr_set & MNT_READONLY) &&
+ (m->mnt.mnt_flags & MNT_WRITE_HOLD))
+ mnt_unhold_writers(m);
+
+ if (!err && kattr->propagation)
+ change_mnt_propagation(m, kattr->propagation);
+
+ /*
+ * On failure, only clean up mounts until we reach the first
+ * mount we failed to handle.
+ */
+ if (err && m == last)
+ break;
+ } while (kattr->recurse && (m = next_mnt(m, mnt)));
+
+ if (!err)
+ touch_mnt_namespace(mnt->mnt_ns);
+}
+
+static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
+{
+ struct mount *mnt = real_mount(path->mnt), *last = NULL;
+ int err = 0;
+
+ if (path->dentry != mnt->mnt.mnt_root)
+ return -EINVAL;
+
+ if (kattr->propagation) {
+ /*
+ * Only take namespace_lock() if we're actually changing
+ * propagation.
+ */
+ namespace_lock();
+ if (kattr->propagation == MS_SHARED) {
+ err = invent_group_ids(mnt, kattr->recurse);
+ if (err) {
+ namespace_unlock();
+ return err;
+ }
+ }
+ }
+
+ lock_mount_hash();
+
+ /*
+ * Get the mount tree in a shape where we can change mount
+ * properties without failure.
+ */
+ last = mount_setattr_prepare(kattr, mnt, &err);
+ if (last) /* Commit all changes or revert to the old state. */
+ mount_setattr_commit(kattr, mnt, last, err);
+
+ unlock_mount_hash();
+
+ if (kattr->propagation) {
+ namespace_unlock();
+ if (err)
+ cleanup_group_ids(mnt, NULL);
+ }
+
+ return err;
+}
+
+static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
+ struct mount_kattr *kattr, unsigned int flags)
+{
+ int err = 0;
+ struct ns_common *ns;
+ struct user_namespace *mnt_userns;
+ struct file *file;
+
+ if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
+ return 0;
+
+ /*
+ * We currently do not support clearing an idmapped mount. If this ever
+ * is a use-case we can revisit this but for now let's keep it simple
+ * and not allow it.
+ */
+ if (attr->attr_clr & MOUNT_ATTR_IDMAP)
+ return -EINVAL;
+
+ if (attr->userns_fd > INT_MAX)
+ return -EINVAL;
+
+ file = fget(attr->userns_fd);
+ if (!file)
+ return -EBADF;
+
+ if (!proc_ns_file(file)) {
+ err = -EINVAL;
+ goto out_fput;
+ }
+
+ ns = get_proc_ns(file_inode(file));
+ if (ns->ops->type != CLONE_NEWUSER) {
+ err = -EINVAL;
+ goto out_fput;
+ }
+
+ /*
+ * The init_user_ns is used to indicate that a vfsmount is not idmapped.
+ * This is simpler than just having to treat NULL as unmapped. Users
+ * wanting to idmap a mount to init_user_ns can just use a namespace
+ * with an identity mapping.
+ */
+ mnt_userns = container_of(ns, struct user_namespace, ns);
+ if (mnt_userns == &init_user_ns) {
+ err = -EPERM;
+ goto out_fput;
+ }
+ kattr->mnt_userns = get_user_ns(mnt_userns);
+
+out_fput:
+ fput(file);
+ return err;
+}
+
+static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
+ struct mount_kattr *kattr, unsigned int flags)
+{
+ unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
+
+ if (flags & AT_NO_AUTOMOUNT)
+ lookup_flags &= ~LOOKUP_AUTOMOUNT;
+ if (flags & AT_SYMLINK_NOFOLLOW)
+ lookup_flags &= ~LOOKUP_FOLLOW;
+ if (flags & AT_EMPTY_PATH)
+ lookup_flags |= LOOKUP_EMPTY;
+
+ *kattr = (struct mount_kattr) {
+ .lookup_flags = lookup_flags,
+ .recurse = !!(flags & AT_RECURSIVE),
+ };
+
+ if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
+ return -EINVAL;
+ if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
+ return -EINVAL;
+ kattr->propagation = attr->propagation;
+
+ if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
+ return -EINVAL;
+
+ kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
+ kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
+
+ /*
+ * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
+ * users wanting to transition to a different atime setting cannot
+ * simply specify the atime setting in @attr_set, but must also
+ * specify MOUNT_ATTR__ATIME in the @attr_clr field.
+ * So ensure that MOUNT_ATTR__ATIME can't be partially set in
+ * @attr_clr and that @attr_set can't have any atime bits set if
+ * MOUNT_ATTR__ATIME isn't set in @attr_clr.
+ */
+ if (attr->attr_clr & MOUNT_ATTR__ATIME) {
+ if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
+ return -EINVAL;
+
+ /*
+ * Clear all previous time settings as they are mutually
+ * exclusive.
+ */
+ kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
+ switch (attr->attr_set & MOUNT_ATTR__ATIME) {
+ case MOUNT_ATTR_RELATIME:
+ kattr->attr_set |= MNT_RELATIME;
+ break;
+ case MOUNT_ATTR_NOATIME:
+ kattr->attr_set |= MNT_NOATIME;
+ break;
+ case MOUNT_ATTR_STRICTATIME:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ if (attr->attr_set & MOUNT_ATTR__ATIME)
+ return -EINVAL;
+ }
+
+ return build_mount_idmapped(attr, usize, kattr, flags);
+}
+
+static void finish_mount_kattr(struct mount_kattr *kattr)
+{
+ put_user_ns(kattr->mnt_userns);
+ kattr->mnt_userns = NULL;
+}
+
+SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
+ unsigned int, flags, struct mount_attr __user *, uattr,
+ size_t, usize)
+{
+ int err;
+ struct path target;
+ struct mount_attr attr;
+ struct mount_kattr kattr;
+
+ BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
+
+ if (flags & ~(AT_EMPTY_PATH |
+ AT_RECURSIVE |
+ AT_SYMLINK_NOFOLLOW |
+ AT_NO_AUTOMOUNT))
+ return -EINVAL;
+
+ if (unlikely(usize > PAGE_SIZE))
+ return -E2BIG;
+ if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
+ return -EINVAL;
+
+ if (!may_mount())
+ return -EPERM;
+
+ err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
+ if (err)
+ return err;
+
+ /* Don't bother walking through the mounts if this is a nop. */
+ if (attr.attr_set == 0 &&
+ attr.attr_clr == 0 &&
+ attr.propagation == 0)
+ return 0;
+
+ err = build_mount_kattr(&attr, usize, &kattr, flags);
+ if (err)
+ return err;
+
+ err = user_path_at(dfd, path, kattr.lookup_flags, &target);
+ if (err)
+ return err;
+
+ err = do_mount_setattr(&target, &kattr);
+ finish_mount_kattr(&kattr);
+ path_put(&target);
+ return err;
+}
+
static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
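
Putting the new syscall together from userspace, a hedged sketch (raw syscall wrappers assumed, error handling trimmed) that clones a detached mount, idmaps it, and attaches it; userns_fd is assumed to be an fd already opened on /proc/<pid>/ns/user, and the detached-mount requirement comes from can_idmap_mount() above:

	struct mount_attr attr = {
		.attr_set  = MOUNT_ATTR_RDONLY | MOUNT_ATTR_IDMAP,
		.userns_fd = userns_fd,
	};
	int tree_fd;

	tree_fd = open_tree(AT_FDCWD, "/src",
			    OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
	mount_setattr(tree_fd, "", AT_EMPTY_PATH | AT_RECURSIVE,
		      &attr, sizeof(attr));
	move_mount(tree_fd, "", AT_FDCWD, "/dst", MOVE_MOUNT_F_EMPTY_PATH);
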
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index e2a488d403a6..14a72224b657 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -127,7 +127,7 @@ config PNFS_BLOCK
config PNFS_FLEXFILE_LAYOUT
tristate
depends on NFS_V4_1 && NFS_V3
- default m
+ default NFS_V4
config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
string "NFSv4.1 Implementation ID Domain"
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 3be6836074ae..fe860c538747 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -115,19 +115,14 @@ bl_submit_bio(struct bio *bio)
return NULL;
}
-static struct bio *
-bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
+static struct bio *bl_alloc_init_bio(unsigned int npg,
+ struct block_device *bdev, sector_t disk_sector,
bio_end_io_t end_io, struct parallel_io *par)
{
struct bio *bio;
- npg = min(npg, BIO_MAX_PAGES);
+ npg = bio_max_segs(npg);
bio = bio_alloc(GFP_NOIO, npg);
- if (!bio && (current->flags & PF_MEMALLOC)) {
- while (!bio && (npg /= 2))
- bio = bio_alloc(GFP_NOIO, npg);
- }
-
if (bio) {
bio->bi_iter.bi_sector = disk_sector;
bio_set_dev(bio, bdev);
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 79ff172eb1c8..c5348ba81129 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -1060,6 +1060,7 @@ static const struct svc_procedure nfs4_callback_procedures1[] = {
.pc_decode = nfs4_decode_void,
.pc_encode = nfs4_encode_void,
.pc_xdrressize = 1,
+ .pc_name = "NULL",
},
[CB_COMPOUND] = {
.pc_func = nfs4_callback_compound,
@@ -1067,6 +1068,7 @@ static const struct svc_procedure nfs4_callback_procedures1[] = {
.pc_argsize = 256,
.pc_ressize = 256,
.pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
+ .pc_name = "COMPOUND",
}
};
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ef827ae193d2..fc4f490f2d78 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -81,8 +81,9 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir
spin_lock(&dir->i_lock);
if (list_empty(&nfsi->open_files) &&
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
- nfsi->cache_validity |= NFS_INO_INVALID_DATA |
- NFS_INO_REVAL_FORCED;
+ nfs_set_cache_invalid(dir,
+ NFS_INO_INVALID_DATA |
+ NFS_INO_REVAL_FORCED);
list_add(&ctx->list, &nfsi->open_files);
spin_unlock(&dir->i_lock);
return ctx;
@@ -1401,6 +1402,13 @@ out_force:
goto out;
}
+static void nfs_mark_dir_for_revalidate(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ nfs_set_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE);
+ spin_unlock(&inode->i_lock);
+}
+
/*
* We judge how long we want to trust negative
* dentries by looking at the parent inode mtime.
@@ -1435,19 +1443,14 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
__func__, dentry);
return 1;
case 0:
- nfs_mark_for_revalidate(dir);
- if (inode && S_ISDIR(inode->i_mode)) {
- /* Purge readdir caches. */
- nfs_zap_caches(inode);
- /*
- * We can't d_drop the root of a disconnected tree:
- * its d_hash is on the s_anon list and d_drop() would hide
- * it from shrink_dcache_for_unmount(), leading to busy
- * inodes on unmount and further oopses.
- */
- if (IS_ROOT(dentry))
- return 1;
- }
+ /*
+ * We can't d_drop the root of a disconnected tree:
+ * its d_hash is on the s_anon list and d_drop() would hide
+ * it from shrink_dcache_for_unmount(), leading to busy
+ * inodes on unmount and further oopses.
+ */
+ if (inode && IS_ROOT(dentry))
+ return 1;
dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
__func__, dentry);
return 0;
@@ -1525,6 +1528,13 @@ out:
nfs_free_fattr(fattr);
nfs_free_fhandle(fhandle);
nfs4_label_free(label);
+
+ /*
+ * If the lookup failed despite the dentry change attribute being
+ * a match, then we should revalidate the directory cache.
+ */
+ if (!ret && nfs_verify_change_attribute(dir, dentry->d_time))
+ nfs_mark_dir_for_revalidate(dir);
return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
}
@@ -1567,7 +1577,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
error = nfs_lookup_verify_inode(inode, flags);
if (error) {
if (error == -ESTALE)
- nfs_zap_caches(dir);
+ nfs_mark_dir_for_revalidate(dir);
goto out_bad;
}
nfs_advise_use_readdirplus(dir);
@@ -1691,10 +1701,9 @@ static void nfs_drop_nlink(struct inode *inode)
if (inode->i_nlink > 0)
drop_nlink(inode);
NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter();
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
- | NFS_INO_INVALID_CTIME
- | NFS_INO_INVALID_OTHER
- | NFS_INO_REVAL_FORCED;
+ nfs_set_cache_invalid(
+ inode, NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME |
+ NFS_INO_INVALID_OTHER | NFS_INO_REVAL_FORCED);
spin_unlock(&inode->i_lock);
}
@@ -1706,7 +1715,7 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
{
if (S_ISDIR(inode->i_mode))
/* drop any readdir cache as it could easily be old */
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
nfs_complete_unlink(dentry, inode);
@@ -2064,7 +2073,6 @@ out:
dput(parent);
return d;
out_error:
- nfs_mark_for_revalidate(dir);
d = ERR_PTR(error);
goto out;
}
@@ -2095,8 +2103,8 @@ EXPORT_SYMBOL_GPL(nfs_instantiate);
* that the operation succeeded on the server, but an error in the
* reply path made it appear to have failed.
*/
-int nfs_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, bool excl)
+int nfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct iattr attr;
int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT;
@@ -2124,7 +2132,8 @@ EXPORT_SYMBOL_GPL(nfs_create);
* See comments for nfs_proc_create regarding failed operations.
*/
int
-nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
+nfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct iattr attr;
int status;
@@ -2150,7 +2159,8 @@ EXPORT_SYMBOL_GPL(nfs_mknod);
/*
* See comments for nfs_proc_create regarding failed operations.
*/
-int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+int nfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct iattr attr;
int error;
@@ -2295,7 +2305,8 @@ EXPORT_SYMBOL_GPL(nfs_unlink);
* now have a new file handle and can instantiate an in-core NFS inode
* and move the raw page into its mapping.
*/
-int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+int nfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
struct page *page;
char *kaddr;
@@ -2398,9 +2409,9 @@ EXPORT_SYMBOL_GPL(nfs_link);
* If these conditions are met, we can drop the dentries before doing
* the rename.
*/
-int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
@@ -2470,9 +2481,9 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (error == 0) {
spin_lock(&old_inode->i_lock);
NFS_I(old_inode)->attr_gencount = nfs_inc_attr_generation_counter();
- NFS_I(old_inode)->cache_validity |= NFS_INO_INVALID_CHANGE
- | NFS_INO_INVALID_CTIME
- | NFS_INO_REVAL_FORCED;
+ nfs_set_cache_invalid(old_inode, NFS_INO_INVALID_CHANGE |
+ NFS_INO_INVALID_CTIME |
+ NFS_INO_REVAL_FORCED);
spin_unlock(&old_inode->i_lock);
}
out:
@@ -2939,7 +2950,9 @@ static int nfs_execute_ok(struct inode *inode, int mask)
return ret;
}
-int nfs_permission(struct inode *inode, int mask)
+int nfs_permission(struct user_namespace *mnt_userns,
+ struct inode *inode,
+ int mask)
{
const struct cred *cred = current_cred();
int res = 0;
@@ -2987,7 +3000,7 @@ out_notsup:
res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (res == 0)
- res = generic_permission(inode, mask);
+ res = generic_permission(&init_user_ns, inode, mask);
goto out;
}
EXPORT_SYMBOL_GPL(nfs_permission);
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
index 7412bb164fa7..f2b34cfe286c 100644
--- a/fs/nfs/export.c
+++ b/fs/nfs/export.c
@@ -167,10 +167,28 @@ out:
return parent;
}
+static u64 nfs_fetch_iversion(struct inode *inode)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+
+ /* Is this the right call? */
+ nfs_revalidate_inode(server, inode);
+ /*
+ * Also, note we're ignoring any returned error. That seems to be
+ * the practice for cache consistency information elsewhere in
+ * the server, but I'm not sure why.
+ */
+ if (server->nfs_client->rpc_ops->version >= 4)
+ return inode_peek_iversion_raw(inode);
+ else
+ return time_to_chattr(&inode->i_ctime);
+}
+
const struct export_operations nfs_export_ops = {
.encode_fh = nfs_encode_fh,
.fh_to_dentry = nfs_fh_to_dentry,
.get_parent = nfs_get_parent,
+ .fetch_iversion = nfs_fetch_iversion,
.flags = EXPORT_OP_NOWCC|EXPORT_OP_NOSUBTREECHK|
EXPORT_OP_CLOSE_BEFORE_UNLINK|EXPORT_OP_REMOTE_FS|
EXPORT_OP_NOATOMIC_ATTR,
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 63940a7a70be..16ad5050e046 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -89,7 +89,7 @@ nfs_file_release(struct inode *inode, struct file *filp)
EXPORT_SYMBOL_GPL(nfs_file_release);
/**
- * nfs_revalidate_size - Revalidate the file size
+ * nfs_revalidate_file_size - Revalidate the file size
* @inode: pointer to inode struct
* @filp: pointer to struct file
*
@@ -606,8 +606,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- unsigned long written = 0;
- ssize_t result;
+ unsigned int mntflags = NFS_SERVER(inode)->flags;
+ ssize_t result, written;
errseq_t since;
int error;
@@ -626,13 +626,13 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
/*
* O_APPEND implies that we must revalidate the file length.
*/
- if (iocb->ki_flags & IOCB_APPEND) {
+ if (iocb->ki_flags & IOCB_APPEND || iocb->ki_pos > i_size_read(inode)) {
result = nfs_revalidate_file_size(inode, file);
if (result)
goto out;
}
- if (iocb->ki_pos > i_size_read(inode))
- nfs_revalidate_mapping(inode, file->f_mapping);
+
+ nfs_clear_invalid_mapping(file->f_mapping);
since = filemap_sample_wb_err(file->f_mapping);
nfs_start_io_write(inode);
@@ -648,6 +648,21 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
written = result;
iocb->ki_pos += written;
+
+ if (mntflags & NFS_MOUNT_WRITE_EAGER) {
+ result = filemap_fdatawrite_range(file->f_mapping,
+ iocb->ki_pos - written,
+ iocb->ki_pos - 1);
+ if (result < 0)
+ goto out;
+ }
+ if (mntflags & NFS_MOUNT_WRITE_WAIT) {
+ result = filemap_fdatawait_range(file->f_mapping,
+ iocb->ki_pos - written,
+ iocb->ki_pos - 1);
+ if (result < 0)
+ goto out;
+ }
result = generic_write_sync(iocb, written);
if (result < 0)
goto out;
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index 06894bcdea2d..971a9251c1d9 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -82,6 +82,7 @@ enum nfs_param {
Opt_v,
Opt_vers,
Opt_wsize,
+ Opt_write,
};
enum {
@@ -113,6 +114,19 @@ static const struct constant_table nfs_param_enums_lookupcache[] = {
{}
};
+enum {
+ Opt_write_lazy,
+ Opt_write_eager,
+ Opt_write_wait,
+};
+
+static const struct constant_table nfs_param_enums_write[] = {
+ { "lazy", Opt_write_lazy },
+ { "eager", Opt_write_eager },
+ { "wait", Opt_write_wait },
+ {}
+};
+
static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_flag_no("ac", Opt_ac),
fsparam_u32 ("acdirmax", Opt_acdirmax),
@@ -171,6 +185,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_flag ("v4.1", Opt_v),
fsparam_flag ("v4.2", Opt_v),
fsparam_string("vers", Opt_vers),
+ fsparam_enum ("write", Opt_write, nfs_param_enums_write),
fsparam_u32 ("wsize", Opt_wsize),
{}
};
@@ -770,6 +785,24 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
goto out_invalid_value;
}
break;
+ case Opt_write:
+ switch (result.uint_32) {
+ case Opt_write_lazy:
+ ctx->flags &=
+ ~(NFS_MOUNT_WRITE_EAGER | NFS_MOUNT_WRITE_WAIT);
+ break;
+ case Opt_write_eager:
+ ctx->flags |= NFS_MOUNT_WRITE_EAGER;
+ ctx->flags &= ~NFS_MOUNT_WRITE_WAIT;
+ break;
+ case Opt_write_wait:
+ ctx->flags |=
+ NFS_MOUNT_WRITE_EAGER | NFS_MOUNT_WRITE_WAIT;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ break;
/*
* Special options
@@ -1479,6 +1512,8 @@ static int nfs_init_fs_context(struct fs_context *fc)
ctx->selected_flavor = RPC_AUTH_MAXFLAVOR;
ctx->minorversion = 0;
ctx->need_mount = true;
+
+ fc->s_iflags |= SB_I_STABLE_WRITES;
}
fc->fs_private = ctx;
fc->ops = &nfs_fs_context_ops;
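
The new option is selected at mount time; a hedged sketch via the new mount API, roughly equivalent to "mount -t nfs4 -o write=eager server:/export /mnt" (address resolution and the other options mount.nfs normally supplies are omitted, and raw syscall wrappers are assumed):

	int fsfd = fsopen("nfs4", FSOPEN_CLOEXEC);

	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "server:/export", 0);
	fsconfig(fsfd, FSCONFIG_SET_STRING, "write", "eager", 0);
	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	int mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
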
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index a60df88efc40..c4c021c6ebbd 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -390,10 +390,6 @@ static void nfs_readpage_from_fscache_complete(struct page *page,
if (!error) {
SetPageUptodate(page);
unlock_page(page);
- } else {
- error = nfs_readpage_async(context, page->mapping->host, page);
- if (error)
- unlock_page(page);
}
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 522aa10a1a3e..a7fb076a5f44 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -195,7 +195,19 @@ bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags)
}
EXPORT_SYMBOL_GPL(nfs_check_cache_invalid);
-static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
+#ifdef CONFIG_NFS_V4_2
+static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi)
+{
+ return nfsi->xattr_cache != NULL;
+}
+#else
+static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi)
+{
+ return false;
+}
+#endif
+
+void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
{
struct nfs_inode *nfsi = NFS_I(inode);
bool have_delegation = NFS_PROTO(inode)->have_delegation(inode, FMODE_READ);
@@ -209,12 +221,15 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
| NFS_INO_INVALID_XATTR);
}
+ if (!nfs_has_xattr_cache(nfsi))
+ flags &= ~NFS_INO_INVALID_XATTR;
if (inode->i_mapping->nrpages == 0)
flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER);
nfsi->cache_validity |= flags;
if (flags & NFS_INO_INVALID_DATA)
nfs_fscache_invalidate(inode);
}
+EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
/*
* Invalidate the local caches
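
With nfs_set_cache_invalid() now exported, out-of-file callers follow the convention visible throughout this diff and hold the inode spinlock around the call; a short sketch:

	spin_lock(&inode->i_lock);
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA | NFS_INO_REVAL_FORCED);
	spin_unlock(&inode->i_lock);
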
@@ -594,7 +609,8 @@ EXPORT_SYMBOL_GPL(nfs_fhget);
#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET|ATTR_FILE|ATTR_OPEN)
int
-nfs_setattr(struct dentry *dentry, struct iattr *attr)
+nfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct nfs_fattr *fattr;
@@ -787,8 +803,8 @@ static bool nfs_need_revalidate_inode(struct inode *inode)
return false;
}
-int nfs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
+int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct nfs_server *server = NFS_SERVER(inode);
@@ -857,7 +873,7 @@ out_no_revalidate:
/* Only return attributes that were revalidated. */
stat->result_mask &= request_mask;
out_no_update:
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
if (S_ISDIR(inode->i_mode))
stat->blksize = NFS_SERVER(inode)->dtsize;
@@ -1052,8 +1068,8 @@ void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
spin_lock(&inode->i_lock);
if (list_empty(&nfsi->open_files) &&
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
- nfsi->cache_validity |= NFS_INO_INVALID_DATA |
- NFS_INO_REVAL_FORCED;
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA |
+ NFS_INO_REVAL_FORCED);
list_add_tail_rcu(&ctx->list, &nfsi->open_files);
spin_unlock(&inode->i_lock);
}
@@ -1257,55 +1273,19 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
return 0;
}
-bool nfs_mapping_need_revalidate_inode(struct inode *inode)
-{
- return nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE) ||
- NFS_STALE(inode);
-}
-
-int nfs_revalidate_mapping_rcu(struct inode *inode)
-{
- struct nfs_inode *nfsi = NFS_I(inode);
- unsigned long *bitlock = &nfsi->flags;
- int ret = 0;
-
- if (IS_SWAPFILE(inode))
- goto out;
- if (nfs_mapping_need_revalidate_inode(inode)) {
- ret = -ECHILD;
- goto out;
- }
- spin_lock(&inode->i_lock);
- if (test_bit(NFS_INO_INVALIDATING, bitlock) ||
- (nfsi->cache_validity & NFS_INO_INVALID_DATA))
- ret = -ECHILD;
- spin_unlock(&inode->i_lock);
-out:
- return ret;
-}
-
/**
- * nfs_revalidate_mapping - Revalidate the pagecache
- * @inode: pointer to host inode
+ * nfs_clear_invalid_mapping - Conditionally clear a mapping
* @mapping: pointer to mapping
+ *
+ * If the NFS_INO_INVALID_DATA inode flag is set, clear the mapping.
*/
-int nfs_revalidate_mapping(struct inode *inode,
- struct address_space *mapping)
+int nfs_clear_invalid_mapping(struct address_space *mapping)
{
+ struct inode *inode = mapping->host;
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long *bitlock = &nfsi->flags;
int ret = 0;
- /* swapfiles are not supposed to be shared. */
- if (IS_SWAPFILE(inode))
- goto out;
-
- if (nfs_mapping_need_revalidate_inode(inode)) {
- ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
- if (ret < 0)
- goto out;
- }
-
/*
* We must clear NFS_INO_INVALID_DATA first to ensure that
* invalidations that come in while we're shooting down the mappings
@@ -1336,8 +1316,8 @@ int nfs_revalidate_mapping(struct inode *inode,
set_bit(NFS_INO_INVALIDATING, bitlock);
smp_wmb();
- nfsi->cache_validity &= ~(NFS_INO_INVALID_DATA|
- NFS_INO_DATA_INVAL_DEFER);
+ nfsi->cache_validity &=
+ ~(NFS_INO_INVALID_DATA | NFS_INO_DATA_INVAL_DEFER);
spin_unlock(&inode->i_lock);
trace_nfs_invalidate_mapping_enter(inode);
ret = nfs_invalidate_mapping(inode, mapping);
@@ -1350,6 +1330,53 @@ out:
return ret;
}
+bool nfs_mapping_need_revalidate_inode(struct inode *inode)
+{
+ return nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE) ||
+ NFS_STALE(inode);
+}
+
+int nfs_revalidate_mapping_rcu(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ unsigned long *bitlock = &nfsi->flags;
+ int ret = 0;
+
+ if (IS_SWAPFILE(inode))
+ goto out;
+ if (nfs_mapping_need_revalidate_inode(inode)) {
+ ret = -ECHILD;
+ goto out;
+ }
+ spin_lock(&inode->i_lock);
+ if (test_bit(NFS_INO_INVALIDATING, bitlock) ||
+ (nfsi->cache_validity & NFS_INO_INVALID_DATA))
+ ret = -ECHILD;
+ spin_unlock(&inode->i_lock);
+out:
+ return ret;
+}
+
+/**
+ * nfs_revalidate_mapping - Revalidate the pagecache
+ * @inode: pointer to host inode
+ * @mapping: pointer to mapping
+ */
+int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
+{
+ /* swapfiles are not supposed to be shared. */
+ if (IS_SWAPFILE(inode))
+ return 0;
+
+ if (nfs_mapping_need_revalidate_inode(inode)) {
+ int ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (ret < 0)
+ return ret;
+ }
+
+ return nfs_clear_invalid_mapping(mapping);
+}
+
static bool nfs_file_has_writers(struct nfs_inode *nfsi)
{
struct inode *inode = &nfsi->vfs_inode;
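
nfs_set_cache_invalid() is now exported and filters the requested invalidation bits before merging them into cache_validity: a read delegation protects most attributes, a missing xattr cache drops the xattr bit, and an empty page cache drops the data bits. A minimal userspace sketch of that filtering, with invented flag values that only approximate the NFS_INO_* bits:

#include <stdio.h>
#include <stdbool.h>

/* Invented bit values; only the filtering logic is of interest. */
#define INVAL_DATA   0x01
#define INVAL_ACCESS 0x04
#define INVAL_ACL    0x08
#define INVAL_XATTR  0x10
#define DATA_DEFER   0x20

static unsigned long filter_invalid_flags(unsigned long flags,
					  bool have_delegation,
					  bool have_xattr_cache,
					  unsigned long nrpages)
{
	if (have_delegation)	/* delegation keeps most attributes valid */
		flags &= ~(INVAL_ACCESS | INVAL_ACL | INVAL_XATTR);
	if (!have_xattr_cache)	/* nothing cached, nothing to invalidate */
		flags &= ~INVAL_XATTR;
	if (nrpages == 0)	/* empty mapping: no data to shoot down */
		flags &= ~(INVAL_DATA | DATA_DEFER);
	return flags;
}

int main(void)
{
	unsigned long f = filter_invalid_flags(INVAL_DATA | INVAL_XATTR,
					       false, false, 0);
	printf("remaining flags: %#lx\n", f);	/* prints 0 */
	return 0;
}
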
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 62d3189745cd..7b644d6c09e4 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -378,14 +378,18 @@ extern unsigned long nfs_access_cache_count(struct shrinker *shrink,
extern unsigned long nfs_access_cache_scan(struct shrinker *shrink,
struct shrink_control *sc);
struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
-int nfs_create(struct inode *, struct dentry *, umode_t, bool);
-int nfs_mkdir(struct inode *, struct dentry *, umode_t);
+int nfs_create(struct user_namespace *, struct inode *, struct dentry *,
+ umode_t, bool);
+int nfs_mkdir(struct user_namespace *, struct inode *, struct dentry *,
+ umode_t);
int nfs_rmdir(struct inode *, struct dentry *);
int nfs_unlink(struct inode *, struct dentry *);
-int nfs_symlink(struct inode *, struct dentry *, const char *);
+int nfs_symlink(struct user_namespace *, struct inode *, struct dentry *,
+ const char *);
int nfs_link(struct dentry *, struct inode *, struct dentry *);
-int nfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
-int nfs_rename(struct inode *, struct dentry *,
+int nfs_mknod(struct user_namespace *, struct inode *, struct dentry *, umode_t,
+ dev_t);
+int nfs_rename(struct user_namespace *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
/* file.c */
@@ -407,7 +411,8 @@ extern int nfs_write_inode(struct inode *, struct writeback_control *);
extern int nfs_drop_inode(struct inode *);
extern void nfs_clear_inode(struct inode *);
extern void nfs_evict_inode(struct inode *);
-void nfs_zap_acl_cache(struct inode *inode);
+extern void nfs_zap_acl_cache(struct inode *inode);
+extern void nfs_set_cache_invalid(struct inode *inode, unsigned long flags);
extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode);
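
Many of the prototype changes in this series, including the nfs_setattr()/nfs_getattr() and nfs_create()/nfs_mkdir()/... declarations above, thread a struct user_namespace * (the mount's idmapping) through the inode operations; callers with no mount context, such as the generic_fillattr() sites earlier, simply pass &init_user_ns. A toy userspace sketch of what changes for an ops table; every type here is a simplified stand-in, not the kernel definition:

#include <stdio.h>

/* Toy stand-ins for kernel types. */
struct user_namespace { const char *name; };
static struct user_namespace init_user_ns = { "init" };

struct inode  { int ino; };
struct dentry { struct inode *inode; };
struct iattr  { int ia_mode; };

/* New-style op: the mount's idmapping is an explicit first argument. */
struct inode_ops {
	int (*setattr)(struct user_namespace *mnt_userns,
		       struct dentry *dentry, struct iattr *attr);
};

static int demo_setattr(struct user_namespace *mnt_userns,
			struct dentry *dentry, struct iattr *attr)
{
	printf("setattr on ino %d under %s userns, mode %o\n",
	       dentry->inode->ino, mnt_userns->name, attr->ia_mode);
	return 0;
}

static const struct inode_ops demo_ops = { .setattr = demo_setattr };

int main(void)
{
	struct inode ino = { 42 };
	struct dentry d = { &ino };
	struct iattr attr = { 0644 };

	/* Callers that have no mount context just pass &init_user_ns. */
	return demo_ops.setattr(&init_user_ns, &d, &attr);
}
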
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 2bcbe38afe2e..93e60e921f92 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -208,20 +208,23 @@ out_fc:
}
static int
-nfs_namespace_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
+nfs_namespace_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int query_flags)
{
if (NFS_FH(d_inode(path->dentry))->size != 0)
- return nfs_getattr(path, stat, request_mask, query_flags);
- generic_fillattr(d_inode(path->dentry), stat);
+ return nfs_getattr(mnt_userns, path, stat, request_mask,
+ query_flags);
+ generic_fillattr(&init_user_ns, d_inode(path->dentry), stat);
return 0;
}
static int
-nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr)
+nfs_namespace_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
if (NFS_FH(d_inode(dentry))->size != 0)
- return nfs_setattr(dentry, attr);
+ return nfs_setattr(mnt_userns, dentry, attr);
return -EACCES;
}
diff --git a/fs/nfs/nfs3_fs.h b/fs/nfs/nfs3_fs.h
index 1b950b66b3bb..c8a192802dda 100644
--- a/fs/nfs/nfs3_fs.h
+++ b/fs/nfs/nfs3_fs.h
@@ -12,7 +12,8 @@
*/
#ifdef CONFIG_NFS_V3_ACL
extern struct posix_acl *nfs3_get_acl(struct inode *inode, int type);
-extern int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int nfs3_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
extern int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *dfacl);
extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t);
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index c6c863382f37..bb386a691e69 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -111,6 +111,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type)
fallthrough;
case -ENOTSUPP:
status = -EOPNOTSUPP;
+ goto getout;
default:
goto getout;
}
@@ -251,7 +252,8 @@ int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
}
-int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int nfs3_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
struct posix_acl *orig = acl, *dfacl = NULL, *alloc;
int status;
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index ca10072644ff..ed1c83738c30 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -36,6 +36,7 @@
#define NFS3_pagepad_sz (1) /* Page padding */
#define NFS3_fhandle_sz (1+16)
#define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */
+#define NFS3_post_op_fh_sz (1+NFS3_fh_sz)
#define NFS3_sattr_sz (15)
#define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
#define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))
@@ -73,7 +74,7 @@
#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1+NFS3_pagepad_sz)
#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3+NFS3_pagepad_sz)
#define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
-#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
+#define NFS3_createres_sz (1+NFS3_post_op_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
#define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2+NFS3_pagepad_sz)
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index f3fd935620fc..094024b0aca1 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -357,13 +357,15 @@ static ssize_t _nfs42_proc_copy(struct file *src,
truncate_pagecache_range(dst_inode, pos_dst,
pos_dst + res->write_res.count);
spin_lock(&dst_inode->i_lock);
- NFS_I(dst_inode)->cache_validity |= (NFS_INO_REVAL_PAGECACHE |
- NFS_INO_REVAL_FORCED | NFS_INO_INVALID_SIZE |
- NFS_INO_INVALID_ATTR | NFS_INO_INVALID_DATA);
+ nfs_set_cache_invalid(
+ dst_inode, NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED |
+ NFS_INO_INVALID_SIZE | NFS_INO_INVALID_ATTR |
+ NFS_INO_INVALID_DATA);
spin_unlock(&dst_inode->i_lock);
spin_lock(&src_inode->i_lock);
- NFS_I(src_inode)->cache_validity |= (NFS_INO_REVAL_PAGECACHE |
- NFS_INO_REVAL_FORCED | NFS_INO_INVALID_ATIME);
+ nfs_set_cache_invalid(src_inode, NFS_INO_REVAL_PAGECACHE |
+ NFS_INO_REVAL_FORCED |
+ NFS_INO_INVALID_ATIME);
spin_unlock(&src_inode->i_lock);
status = res->write_res.count;
out:
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 86acffe7335c..889a9f4c0310 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -609,6 +609,7 @@ found:
* changed. Schedule recovery!
*/
nfs4_schedule_path_down_recovery(pos);
+ goto out;
default:
goto out;
}
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 57b3821d975a..441a2fa073c8 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -420,7 +420,9 @@ static const struct nfs4_ssc_client_ops nfs4_ssc_clnt_ops_tbl = {
*/
void nfs42_ssc_register_ops(void)
{
+#ifdef CONFIG_NFSD_V4
nfs42_ssc_register(&nfs4_ssc_clnt_ops_tbl);
+#endif
}
/**
@@ -431,7 +433,9 @@ void nfs42_ssc_register_ops(void)
*/
void nfs42_ssc_unregister_ops(void)
{
+#ifdef CONFIG_NFSD_V4
nfs42_ssc_unregister(&nfs4_ssc_clnt_ops_tbl);
+#endif
}
#endif /* CONFIG_NFS_V4_2 */
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 2f4679a62712..c65c4b41e2c1 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -71,10 +71,6 @@
#include "nfs4trace.h"
-#ifdef CONFIG_NFS_V4_2
-#include "nfs42.h"
-#endif /* CONFIG_NFS_V4_2 */
-
#define NFSDBG_FACILITY NFSDBG_PROC
#define NFS4_BITMASK_SZ 3
@@ -1173,14 +1169,14 @@ int nfs4_call_sync(struct rpc_clnt *clnt,
static void
nfs4_inc_nlink_locked(struct inode *inode)
{
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
inc_nlink(inode);
}
static void
nfs4_dec_nlink_locked(struct inode *inode)
{
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
drop_nlink(inode);
}
@@ -1191,35 +1187,31 @@ nfs4_update_changeattr_locked(struct inode *inode,
{
struct nfs_inode *nfsi = NFS_I(inode);
- nfsi->cache_validity |= NFS_INO_INVALID_CTIME
- | NFS_INO_INVALID_MTIME
- | cache_validity;
+ cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(inode)) {
nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
nfsi->attrtimeo_timestamp = jiffies;
} else {
if (S_ISDIR(inode->i_mode)) {
- nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+ cache_validity |= NFS_INO_INVALID_DATA;
nfs_force_lookup_revalidate(inode);
} else {
if (!NFS_PROTO(inode)->have_delegation(inode,
FMODE_READ))
- nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
+ cache_validity |= NFS_INO_REVAL_PAGECACHE;
}
if (cinfo->before != inode_peek_iversion_raw(inode))
- nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
- NFS_INO_INVALID_ACL |
- NFS_INO_INVALID_XATTR;
+ cache_validity |= NFS_INO_INVALID_ACCESS |
+ NFS_INO_INVALID_ACL |
+ NFS_INO_INVALID_XATTR;
}
inode_set_iversion_raw(inode, cinfo->after);
nfsi->read_cache_jiffies = timestamp;
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
+ nfs_set_cache_invalid(inode, cache_validity);
nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
-
- if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
- nfs_fscache_invalidate(inode);
}
void
@@ -2231,6 +2223,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
default:
printk(KERN_ERR "NFS: %s: unhandled error "
"%d.\n", __func__, err);
+ fallthrough;
case 0:
case -ENOENT:
case -EAGAIN:
@@ -5438,15 +5431,16 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
if (cache_validity & NFS_INO_INVALID_ATIME)
bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
- if (cache_validity & NFS_INO_INVALID_ACCESS)
- bitmask[0] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER |
- FATTR4_WORD1_OWNER_GROUP;
- if (cache_validity & NFS_INO_INVALID_ACL)
- bitmask[0] |= FATTR4_WORD0_ACL;
- if (cache_validity & NFS_INO_INVALID_LABEL)
+ if (cache_validity & NFS_INO_INVALID_OTHER)
+ bitmask[1] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER |
+ FATTR4_WORD1_OWNER_GROUP |
+ FATTR4_WORD1_NUMLINKS;
+ if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
- if (cache_validity & NFS_INO_INVALID_CTIME)
+ if (cache_validity & NFS_INO_INVALID_CHANGE)
bitmask[0] |= FATTR4_WORD0_CHANGE;
+ if (cache_validity & NFS_INO_INVALID_CTIME)
+ bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
if (cache_validity & NFS_INO_INVALID_MTIME)
bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
if (cache_validity & NFS_INO_INVALID_SIZE)
@@ -5895,6 +5889,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
int ret, i;
+ /* You can't remove system.nfs4_acl: */
+ if (buflen == 0)
+ return -EINVAL;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
if (npages > ARRAY_SIZE(pages))
@@ -5917,9 +5914,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
* so mark the attribute cache invalid.
*/
spin_lock(&inode->i_lock);
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
- | NFS_INO_INVALID_CTIME
- | NFS_INO_REVAL_FORCED;
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
+ NFS_INO_INVALID_CTIME |
+ NFS_INO_REVAL_FORCED);
spin_unlock(&inode->i_lock);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
@@ -5971,7 +5968,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
return ret;
if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
return -ENOENT;
- return 0;
+ return label.len;
}
static int nfs4_get_security_label(struct inode *inode, void *buf,
@@ -7491,6 +7488,7 @@ nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *key, const void *buf,
size_t buflen, int flags)
@@ -7513,6 +7511,7 @@ static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *key, const void *buf,
size_t buflen, int flags)
@@ -7563,6 +7562,7 @@ nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
#ifdef CONFIG_NFS_V4_2
static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *key, const void *buf,
size_t buflen, int flags)
@@ -9705,6 +9705,7 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
case -NFS4ERR_BADLAYOUT: /* no layout */
case -NFS4ERR_GRACE: /* loca_reclaim always false */
task->tk_status = 0;
+ break;
case 0:
break;
default:
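
nfs4_bitmask_adjust() translates the inode's stale-attribute bits into the FATTR4 words requested from the server; the hunk above also moves ctime to TIME_METADATA in word 1 and reserves word 0's CHANGE bit for the change attribute. A small sketch of the same table-style translation, using placeholder bit values rather than the real FATTR4_WORD* constants:

#include <stdio.h>

/* Placeholder validity bits and attribute-request bits. */
#define INVAL_OTHER  0x01
#define INVAL_CHANGE 0x02
#define INVAL_CTIME  0x04
#define INVAL_MTIME  0x08

#define REQ_CHANGE        0x0001	/* word 0: change attribute */
#define REQ_MODE_OWNER    0x0100	/* word 1: mode, owner, group, nlinks */
#define REQ_TIME_METADATA 0x0200	/* word 1: ctime */
#define REQ_TIME_MODIFY   0x0400	/* word 1: mtime */

static void adjust_bitmask(unsigned int validity, unsigned int bitmask[2])
{
	if (validity & INVAL_OTHER)
		bitmask[1] |= REQ_MODE_OWNER;
	if (validity & INVAL_CHANGE)
		bitmask[0] |= REQ_CHANGE;
	if (validity & INVAL_CTIME)
		bitmask[1] |= REQ_TIME_METADATA;
	if (validity & INVAL_MTIME)
		bitmask[1] |= REQ_TIME_MODIFY;
}

int main(void)
{
	unsigned int bm[2] = { 0, 0 };

	adjust_bitmask(INVAL_CTIME | INVAL_CHANGE, bm);
	printf("word0=%#x word1=%#x\n", bm[0], bm[1]);
	return 0;
}
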
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 4bf10792cb5b..3a51351bdc6a 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1125,6 +1125,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
" sequence-id error on an"
" unconfirmed sequence %p!\n",
seqid->sequence);
+ return;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_BAD_STATEID:
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index af64b4e6fd1f..102b66e0bdef 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2875,6 +2875,7 @@ pnfs_do_write(struct nfs_pageio_descriptor *desc,
switch (trypnfs) {
case PNFS_NOT_ATTEMPTED:
pnfs_write_through_mds(desc, hdr);
+ break;
case PNFS_ATTEMPTED:
break;
case PNFS_TRY_AGAIN:
@@ -3019,6 +3020,7 @@ pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
switch (trypnfs) {
case PNFS_NOT_ATTEMPTED:
pnfs_read_through_mds(desc, hdr);
+ break;
case PNFS_ATTEMPTED:
break;
case PNFS_TRY_AGAIN:
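
Several hunks in this series (nfs4client.c, nfs4state.c, nfs4proc.c and the two pnfs.c changes above) terminate switch cases that previously fell through into the following case, either to make the intent explicit or, as in the unconfirmed-sequence warning in nfs4state.c, to stop the next case's handling from running. A compilable illustration of the class of bug; the state update below is invented purely to make the difference observable:

#include <stdio.h>

enum seq_err { ERR_UNCONFIRMED, ERR_STALE, ERR_OK };

static void handle_buggy(enum seq_err e, int *seqid)
{
	switch (e) {
	case ERR_UNCONFIRMED:
		puts("warning: sequence-id error on an unconfirmed sequence");
		/* bug: missing return, falls into the ERR_STALE handling */
	case ERR_STALE:
		*seqid = 0;		/* resets state that should be untouched */
		break;
	case ERR_OK:
		*seqid += 1;
		break;
	}
}

static void handle_fixed(enum seq_err e, int *seqid)
{
	switch (e) {
	case ERR_UNCONFIRMED:
		puts("warning: sequence-id error on an unconfirmed sequence");
		return;			/* do not touch the sequence state */
	case ERR_STALE:
		*seqid = 0;
		break;
	case ERR_OK:
		*seqid += 1;
		break;
	}
}

int main(void)
{
	int a = 42, b = 42;

	handle_buggy(ERR_UNCONFIRMED, &a);
	handle_fixed(ERR_UNCONFIRMED, &b);
	printf("buggy seqid=%d, fixed seqid=%d\n", a, b);	/* 0 vs 42 */
	return 0;
}
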
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index eb854f1f86e2..d2b6dce1f99f 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -74,6 +74,24 @@ void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
+static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode)
+{
+ struct nfs_pgio_mirror *pgm;
+ unsigned long npages;
+
+ nfs_pageio_complete(pgio);
+
+ /* It doesn't make sense to do mirrored reads! */
+ WARN_ON_ONCE(pgio->pg_mirror_count != 1);
+
+ pgm = &pgio->pg_mirrors[0];
+ NFS_I(inode)->read_io += pgm->pg_bytes_written;
+ npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ nfs_add_stats(inode, NFSIOS_READPAGES, npages);
+}
+
+
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
struct nfs_pgio_mirror *mirror;
@@ -114,41 +132,10 @@ static void nfs_readpage_release(struct nfs_page *req, int error)
nfs_release_request(req);
}
-int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
- struct page *page)
-{
- struct nfs_page *new;
- unsigned int len;
+struct nfs_readdesc {
struct nfs_pageio_descriptor pgio;
- struct nfs_pgio_mirror *pgm;
-
- len = nfs_page_length(page);
- if (len == 0)
- return nfs_return_empty_page(page);
- new = nfs_create_request(ctx, page, 0, len);
- if (IS_ERR(new)) {
- unlock_page(page);
- return PTR_ERR(new);
- }
- if (len < PAGE_SIZE)
- zero_user_segment(page, len, PAGE_SIZE);
-
- nfs_pageio_init_read(&pgio, inode, false,
- &nfs_async_read_completion_ops);
- if (!nfs_pageio_add_request(&pgio, new)) {
- nfs_list_remove_request(new);
- nfs_readpage_release(new, pgio.pg_error);
- }
- nfs_pageio_complete(&pgio);
-
- /* It doesn't make sense to do mirrored reads! */
- WARN_ON_ONCE(pgio.pg_mirror_count != 1);
-
- pgm = &pgio.pg_mirrors[0];
- NFS_I(inode)->read_io += pgm->pg_bytes_written;
-
- return pgio.pg_error < 0 ? pgio.pg_error : 0;
-}
+ struct nfs_open_context *ctx;
+};
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
@@ -171,8 +158,7 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
/* note: regions of the page not covered by a
- * request are zeroed in nfs_readpage_async /
- * readpage_async_filler */
+ * request are zeroed in readpage_async_filler */
if (bytes > hdr->good_bytes) {
/* nothing in this request was good, so zero
* the full extent of the request */
@@ -304,6 +290,38 @@ static void nfs_readpage_result(struct rpc_task *task,
nfs_readpage_retry(task, hdr);
}
+static int
+readpage_async_filler(void *data, struct page *page)
+{
+ struct nfs_readdesc *desc = data;
+ struct nfs_page *new;
+ unsigned int len;
+ int error;
+
+ len = nfs_page_length(page);
+ if (len == 0)
+ return nfs_return_empty_page(page);
+
+ new = nfs_create_request(desc->ctx, page, 0, len);
+ if (IS_ERR(new))
+ goto out_error;
+
+ if (len < PAGE_SIZE)
+ zero_user_segment(page, len, PAGE_SIZE);
+ if (!nfs_pageio_add_request(&desc->pgio, new)) {
+ nfs_list_remove_request(new);
+ error = desc->pgio.pg_error;
+ nfs_readpage_release(new, error);
+ goto out;
+ }
+ return 0;
+out_error:
+ error = PTR_ERR(new);
+ unlock_page(page);
+out:
+ return error;
+}
+
/*
* Read a page over NFS.
* We read the page synchronously in the following case:
@@ -312,14 +330,13 @@ static void nfs_readpage_result(struct rpc_task *task,
*/
int nfs_readpage(struct file *file, struct page *page)
{
- struct nfs_open_context *ctx;
+ struct nfs_readdesc desc;
struct inode *inode = page_file_mapping(page)->host;
- int error;
+ int ret;
dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
page, PAGE_SIZE, page_index(page));
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
- nfs_add_stats(inode, NFSIOS_READPAGES, 1);
/*
* Try to flush any pending writes to the file..
@@ -328,93 +345,59 @@ int nfs_readpage(struct file *file, struct page *page)
* be any new pending writes generated at this point
* for this page (other pages can be written to).
*/
- error = nfs_wb_page(inode, page);
- if (error)
+ ret = nfs_wb_page(inode, page);
+ if (ret)
goto out_unlock;
if (PageUptodate(page))
goto out_unlock;
- error = -ESTALE;
+ ret = -ESTALE;
if (NFS_STALE(inode))
goto out_unlock;
if (file == NULL) {
- error = -EBADF;
- ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
- if (ctx == NULL)
+ ret = -EBADF;
+ desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
+ if (desc.ctx == NULL)
goto out_unlock;
} else
- ctx = get_nfs_open_context(nfs_file_open_context(file));
+ desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
if (!IS_SYNC(inode)) {
- error = nfs_readpage_from_fscache(ctx, inode, page);
- if (error == 0)
+ ret = nfs_readpage_from_fscache(desc.ctx, inode, page);
+ if (ret == 0)
goto out;
}
- xchg(&ctx->error, 0);
- error = nfs_readpage_async(ctx, inode, page);
- if (!error) {
- error = wait_on_page_locked_killable(page);
- if (!PageUptodate(page) && !error)
- error = xchg(&ctx->error, 0);
- }
-out:
- put_nfs_open_context(ctx);
- return error;
-out_unlock:
- unlock_page(page);
- return error;
-}
-
-struct nfs_readdesc {
- struct nfs_pageio_descriptor *pgio;
- struct nfs_open_context *ctx;
-};
-
-static int
-readpage_async_filler(void *data, struct page *page)
-{
- struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
- struct nfs_page *new;
- unsigned int len;
- int error;
+ xchg(&desc.ctx->error, 0);
+ nfs_pageio_init_read(&desc.pgio, inode, false,
+ &nfs_async_read_completion_ops);
- len = nfs_page_length(page);
- if (len == 0)
- return nfs_return_empty_page(page);
+ ret = readpage_async_filler(&desc, page);
- new = nfs_create_request(desc->ctx, page, 0, len);
- if (IS_ERR(new))
- goto out_error;
+ if (!ret)
+ nfs_pageio_complete_read(&desc.pgio, inode);
- if (len < PAGE_SIZE)
- zero_user_segment(page, len, PAGE_SIZE);
- if (!nfs_pageio_add_request(desc->pgio, new)) {
- nfs_list_remove_request(new);
- error = desc->pgio->pg_error;
- nfs_readpage_release(new, error);
- goto out;
+ ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
+ if (!ret) {
+ ret = wait_on_page_locked_killable(page);
+ if (!PageUptodate(page) && !ret)
+ ret = xchg(&desc.ctx->error, 0);
}
- return 0;
-out_error:
- error = PTR_ERR(new);
- unlock_page(page);
out:
- return error;
+ put_nfs_open_context(desc.ctx);
+ return ret;
+out_unlock:
+ unlock_page(page);
+ return ret;
}
-int nfs_readpages(struct file *filp, struct address_space *mapping,
+int nfs_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
- struct nfs_pageio_descriptor pgio;
- struct nfs_pgio_mirror *pgm;
- struct nfs_readdesc desc = {
- .pgio = &pgio,
- };
+ struct nfs_readdesc desc;
struct inode *inode = mapping->host;
- unsigned long npages;
- int ret = -ESTALE;
+ int ret;
dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
inode->i_sb->s_id,
@@ -422,15 +405,17 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
nr_pages);
nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
+ ret = -ESTALE;
if (NFS_STALE(inode))
goto out;
- if (filp == NULL) {
+ if (file == NULL) {
+ ret = -EBADF;
desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
if (desc.ctx == NULL)
- return -EBADF;
+ goto out;
} else
- desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));
+ desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
/* attempt to read as many of the pages as possible from the cache
* - this returns -ENOBUFS immediately if the cookie is negative
@@ -440,20 +425,13 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
goto read_complete; /* all pages were read */
- nfs_pageio_init_read(&pgio, inode, false,
+ nfs_pageio_init_read(&desc.pgio, inode, false,
&nfs_async_read_completion_ops);
ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
- nfs_pageio_complete(&pgio);
- /* It doesn't make sense to do mirrored reads! */
- WARN_ON_ONCE(pgio.pg_mirror_count != 1);
+ nfs_pageio_complete_read(&desc.pgio, inode);
- pgm = &pgio.pg_mirrors[0];
- NFS_I(inode)->read_io += pgm->pg_bytes_written;
- npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >>
- PAGE_SHIFT;
- nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
put_nfs_open_context(desc.ctx);
out:
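
The read-path rework above bundles the pageio descriptor and the open context into struct nfs_readdesc, so nfs_readpage() and nfs_readpages() can share readpage_async_filler() as a per-page callback that receives one opaque cookie. A compilable userspace sketch of the same pattern, with the kernel types reduced to trivial stand-ins:

#include <stdio.h>

/* Stand-ins for the pageio descriptor and the open context. */
struct pgio { int pages_added; int error; };
struct ctx  { const char *who; };

struct readdesc {
	struct pgio pgio;
	struct ctx *ctx;
};

/* Per-page callback: one opaque cookie carries all shared state. */
static int filler(void *data, int page)
{
	struct readdesc *desc = data;

	printf("%s: queueing page %d\n", desc->ctx->who, page);
	desc->pgio.pages_added++;
	return 0;
}

/* Caller sets up the descriptor once and drives the callback per page. */
static int read_pages(struct readdesc *desc, const int *pages, int n)
{
	for (int i = 0; i < n; i++) {
		int err = filler(desc, pages[i]);
		if (err)
			return err;
	}
	return desc->pgio.error;
}

int main(void)
{
	struct ctx c = { .who = "reader" };
	struct readdesc desc = { .pgio = { 0, 0 }, .ctx = &c };
	int pages[] = { 1, 2, 3 };

	return read_pages(&desc, pages, 3);
}
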
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 4034102010f0..94885c6f8f54 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -86,9 +86,11 @@ const struct super_operations nfs_sops = {
};
EXPORT_SYMBOL_GPL(nfs_sops);
+#ifdef CONFIG_NFS_V4_2
static const struct nfs_ssc_client_ops nfs_ssc_clnt_ops_tbl = {
.sco_sb_deactive = nfs_sb_deactive,
};
+#endif
#if IS_ENABLED(CONFIG_NFS_V4)
static int __init register_nfs4_fs(void)
@@ -111,15 +113,21 @@ static void unregister_nfs4_fs(void)
}
#endif
+#ifdef CONFIG_NFS_V4_2
static void nfs_ssc_register_ops(void)
{
+#ifdef CONFIG_NFSD_V4
nfs_ssc_register(&nfs_ssc_clnt_ops_tbl);
+#endif
}
static void nfs_ssc_unregister_ops(void)
{
+#ifdef CONFIG_NFSD_V4
nfs_ssc_unregister(&nfs_ssc_clnt_ops_tbl);
+#endif
}
+#endif /* CONFIG_NFS_V4_2 */
static struct shrinker acl_shrinker = {
.count_objects = nfs_access_cache_count,
@@ -148,7 +156,9 @@ int __init register_nfs_fs(void)
ret = register_shrinker(&acl_shrinker);
if (ret < 0)
goto error_3;
+#ifdef CONFIG_NFS_V4_2
nfs_ssc_register_ops();
+#endif
return 0;
error_3:
nfs_unregister_sysctl();
@@ -168,7 +178,9 @@ void __exit unregister_nfs_fs(void)
unregister_shrinker(&acl_shrinker);
nfs_unregister_sysctl();
unregister_nfs4_fs();
+#ifdef CONFIG_NFS_V4_2
nfs_ssc_unregister_ops();
+#endif
unregister_filesystem(&nfs_fs_type);
}
@@ -511,6 +523,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
seq_puts(m, ",local_lock=flock");
else
seq_puts(m, ",local_lock=posix");
+
+ if (nfss->flags & NFS_MOUNT_WRITE_EAGER) {
+ if (nfss->flags & NFS_MOUNT_WRITE_WAIT)
+ seq_puts(m, ",write=wait");
+ else
+ seq_puts(m, ",write=eager");
+ }
}
/*
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index b27ebdccef70..5fa11e1aca4c 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -500,9 +500,9 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
spin_lock(&inode->i_lock);
NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter();
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
- | NFS_INO_INVALID_CTIME
- | NFS_INO_REVAL_FORCED;
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
+ NFS_INO_INVALID_CTIME |
+ NFS_INO_REVAL_FORCED);
spin_unlock(&inode->i_lock);
d_move(dentry, sdentry);
break;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 639c34fec04a..f05a90338a76 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -303,9 +303,9 @@ static void nfs_set_pageerror(struct address_space *mapping)
nfs_zap_mapping(mapping->host, mapping);
/* Force file size revalidation */
spin_lock(&inode->i_lock);
- NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
- NFS_INO_REVAL_PAGECACHE |
- NFS_INO_INVALID_SIZE;
+ nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED |
+ NFS_INO_REVAL_PAGECACHE |
+ NFS_INO_INVALID_SIZE);
spin_unlock(&inode->i_lock);
}
@@ -712,16 +712,23 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
struct nfs_pageio_descriptor pgio;
- struct nfs_io_completion *ioc;
+ struct nfs_io_completion *ioc = NULL;
+ unsigned int mntflags = NFS_SERVER(inode)->flags;
+ int priority = 0;
int err;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
- ioc = nfs_io_completion_alloc(GFP_KERNEL);
- if (ioc)
- nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
+ if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate ||
+ wbc->for_background || wbc->for_sync || wbc->for_reclaim) {
+ ioc = nfs_io_completion_alloc(GFP_KERNEL);
+ if (ioc)
+ nfs_io_completion_init(ioc, nfs_io_completion_commit,
+ inode);
+ priority = wb_priority(wbc);
+ }
- nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
+ nfs_pageio_init_write(&pgio, inode, priority, false,
&nfs_async_write_completion_ops);
pgio.pg_io_completion = ioc;
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
@@ -1278,19 +1285,21 @@ bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
* the PageUptodate() flag. In this case, we will need to turn off
* write optimisations that depend on the page contents being correct.
*/
-static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
+static bool nfs_write_pageuptodate(struct page *page, struct inode *inode,
+ unsigned int pagelen)
{
struct nfs_inode *nfsi = NFS_I(inode);
if (nfs_have_delegated_attributes(inode))
goto out;
- if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ if (nfsi->cache_validity &
+ (NFS_INO_REVAL_PAGECACHE | NFS_INO_INVALID_SIZE))
return false;
smp_rmb();
- if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
+ if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0)
return false;
out:
- if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
+ if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0)
return false;
return PageUptodate(page) != 0;
}
@@ -1310,7 +1319,8 @@ is_whole_file_wrlock(struct file_lock *fl)
* If the file is opened for synchronous writes then we can just skip the rest
* of the checks.
*/
-static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
+static int nfs_can_extend_write(struct file *file, struct page *page,
+ struct inode *inode, unsigned int pagelen)
{
int ret;
struct file_lock_context *flctx = inode->i_flctx;
@@ -1318,7 +1328,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
if (file->f_flags & O_DSYNC)
return 0;
- if (!nfs_write_pageuptodate(page, inode))
+ if (!nfs_write_pageuptodate(page, inode, pagelen))
return 0;
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
return 1;
@@ -1356,6 +1366,7 @@ int nfs_updatepage(struct file *file, struct page *page,
struct nfs_open_context *ctx = nfs_file_open_context(file);
struct address_space *mapping = page_file_mapping(page);
struct inode *inode = mapping->host;
+ unsigned int pagelen = nfs_page_length(page);
int status = 0;
nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -1366,8 +1377,8 @@ int nfs_updatepage(struct file *file, struct page *page,
if (!count)
goto out;
- if (nfs_can_extend_write(file, page, inode)) {
- count = max(count + offset, nfs_page_length(page));
+ if (nfs_can_extend_write(file, page, inode, pagelen)) {
+ count = max(count + offset, pagelen);
offset = 0;
}
@@ -1593,7 +1604,7 @@ static int nfs_writeback_done(struct rpc_task *task,
/* Deal with the suid/sgid bit corner case */
if (nfs_should_remove_suid(inode)) {
spin_lock(&inode->i_lock);
- NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
spin_unlock(&inode->i_lock);
}
return 0;
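
In nfs_writepages(), a write=eager mount now skips the commit-batching io-completion and the writeback-priority boost when writeback was not initiated by kupdate, background flushing, sync or reclaim. The decision itself is just a boolean expression; here it is as a small sketch with the mount flag and wbc fields reduced to booleans:

#include <stdio.h>
#include <stdbool.h>

struct wbc { bool for_kupdate, for_background, for_sync, for_reclaim; };

/*
 * Returns true when the write should take the usual deferred-commit path;
 * false when write=eager lets it be pushed out immediately.
 */
static bool use_deferred_commit(bool mount_write_eager, const struct wbc *wbc)
{
	return !mount_write_eager ||
	       wbc->for_kupdate || wbc->for_background ||
	       wbc->for_sync || wbc->for_reclaim;
}

int main(void)
{
	struct wbc direct  = { 0 };			/* none of the wbc reasons set */
	struct wbc kupdate = { .for_kupdate = true };

	printf("eager mount, direct writeback  -> deferred=%d\n",
	       use_deferred_commit(true, &direct));	/* 0: eager path */
	printf("eager mount, kupdate writeback -> deferred=%d\n",
	       use_deferred_commit(true, &kupdate));	/* 1: normal path */
	return 0;
}
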
diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile
index fa82f5aaa6d9..119c75ab9fd0 100644
--- a/fs/nfs_common/Makefile
+++ b/fs/nfs_common/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
nfs_acl-objs := nfsacl.o
obj-$(CONFIG_GRACE_PERIOD) += grace.o
-obj-$(CONFIG_GRACE_PERIOD) += nfs_ssc.o
+obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o
diff --git a/fs/nfs_common/nfs_ssc.c b/fs/nfs_common/nfs_ssc.c
index f43bbb373913..7c1509e968c8 100644
--- a/fs/nfs_common/nfs_ssc.c
+++ b/fs/nfs_common/nfs_ssc.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * fs/nfs_common/nfs_ssc_comm.c
- *
* Helper for knfsd's SSC to access ops in NFS client modules
*
* Author: Dai Ngo <dai.ngo@oracle.com>
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c
index d056ad2fdefd..79c563c1a5e8 100644
--- a/fs/nfs_common/nfsacl.c
+++ b/fs/nfs_common/nfsacl.c
@@ -295,3 +295,55 @@ int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
nfsacl_desc.desc.array_len;
}
EXPORT_SYMBOL_GPL(nfsacl_decode);
+
+/**
+ * nfs_stream_decode_acl - Decode an NFSv3 ACL
+ *
+ * @xdr: an xdr_stream positioned at an encoded ACL
+ * @aclcnt: OUT: count of ACEs in decoded posix_acl
+ * @pacl: OUT: a dynamically-allocated buffer containing the decoded posix_acl
+ *
+ * Return values:
+ * %false: The encoded ACL is not valid
+ * %true: @pacl contains a decoded ACL, and @xdr is advanced
+ *
+ * On a successful return, caller must release *pacl using posix_acl_release().
+ */
+bool nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt,
+ struct posix_acl **pacl)
+{
+ const size_t elem_size = XDR_UNIT * 3;
+ struct nfsacl_decode_desc nfsacl_desc = {
+ .desc = {
+ .elem_size = elem_size,
+ .xcode = pacl ? xdr_nfsace_decode : NULL,
+ },
+ };
+ unsigned int base;
+ u32 entries;
+
+ if (xdr_stream_decode_u32(xdr, &entries) < 0)
+ return false;
+ if (entries > NFS_ACL_MAX_ENTRIES)
+ return false;
+
+ base = xdr_stream_pos(xdr);
+ if (!xdr_inline_decode(xdr, XDR_UNIT + elem_size * entries))
+ return false;
+ nfsacl_desc.desc.array_maxlen = entries;
+ if (xdr_decode_array2(xdr->buf, base, &nfsacl_desc.desc))
+ return false;
+
+ if (pacl) {
+ if (entries != nfsacl_desc.desc.array_len ||
+ posix_acl_from_nfsacl(nfsacl_desc.acl) != 0) {
+ posix_acl_release(nfsacl_desc.acl);
+ return false;
+ }
+ *pacl = nfsacl_desc.acl;
+ }
+ if (aclcnt)
+ *aclcnt = entries;
+ return true;
+}
+EXPORT_SYMBOL_GPL(nfs_stream_decode_acl);
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index dbbc583d6273..d6cff5fbe705 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -73,9 +73,11 @@ config NFSD_V4
select NFSD_V3
select FS_POSIX_ACL
select SUNRPC_GSS
+ select CRYPTO
select CRYPTO_MD5
select CRYPTO_SHA256
select GRACE_PERIOD
+ select NFS_V4_2_SSC_HELPER if NFS_V4_2
help
This option enables support in your system's NFS server for
version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index a07c39c94bbd..1058659a8d31 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -254,7 +254,7 @@ again:
req->cmd[4] = bufflen & 0xff;
req->cmd_len = COMMAND_SIZE(INQUIRY);
- blk_execute_rq(rq->q, NULL, rq, 1);
+ blk_execute_rq(NULL, rq, 1);
if (req->result) {
pr_err("pNFS: INQUIRY 0x83 failed with: %x\n",
req->result);
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 81e7bb12aca6..9421dae22737 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -331,12 +331,29 @@ static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
fsloc->locations = NULL;
}
+static int export_stats_init(struct export_stats *stats)
+{
+ stats->start_time = ktime_get_seconds();
+ return nfsd_percpu_counters_init(stats->counter, EXP_STATS_COUNTERS_NUM);
+}
+
+static void export_stats_reset(struct export_stats *stats)
+{
+ nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);
+}
+
+static void export_stats_destroy(struct export_stats *stats)
+{
+ nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);
+}
+
static void svc_export_put(struct kref *ref)
{
struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
+ export_stats_destroy(&exp->ex_stats);
kfree(exp->ex_uuid);
kfree_rcu(exp, ex_rcu);
}
@@ -369,8 +386,9 @@ static struct svc_export *svc_export_update(struct svc_export *new,
struct svc_export *old);
static struct svc_export *svc_export_lookup(struct svc_export *);
-static int check_export(struct inode *inode, int *flags, unsigned char *uuid)
+static int check_export(struct path *path, int *flags, unsigned char *uuid)
{
+ struct inode *inode = d_inode(path->dentry);
/*
* We currently export only dirs, regular files, and (for v4
@@ -394,6 +412,7 @@ static int check_export(struct inode *inode, int *flags, unsigned char *uuid)
* or an FSID number (so NFSEXP_FSID or ->uuid is needed).
* 2: We must be able to find an inode from a filehandle.
* This means that s_export_op must be set.
+ * 3: We must not currently be on an idmapped mount.
*/
if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) &&
!(*flags & NFSEXP_FSID) &&
@@ -408,6 +427,11 @@ static int check_export(struct inode *inode, int *flags, unsigned char *uuid)
return -EINVAL;
}
+ if (mnt_user_ns(path->mnt) != &init_user_ns) {
+ dprintk("exp_export: export of idmapped mounts not yet supported.\n");
+ return -EINVAL;
+ }
+
if (inode->i_sb->s_export_op->flags & EXPORT_OP_NOSUBTREECHK &&
!(*flags & NFSEXP_NOSUBTREECHECK)) {
dprintk("%s: %s does not support subtree checking!\n",
@@ -636,8 +660,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
goto out4;
}
- err = check_export(d_inode(exp.ex_path.dentry), &exp.ex_flags,
- exp.ex_uuid);
+ err = check_export(&exp.ex_path, &exp.ex_flags, exp.ex_uuid);
if (err)
goto out4;
/*
@@ -692,22 +715,47 @@ static void exp_flags(struct seq_file *m, int flag, int fsid,
kuid_t anonu, kgid_t anong, struct nfsd4_fs_locations *fslocs);
static void show_secinfo(struct seq_file *m, struct svc_export *exp);
+static int is_export_stats_file(struct seq_file *m)
+{
+ /*
+ * The export_stats file uses the same ops as the exports file.
+ * We use the file's name to determine the reported info per export.
+ * There is no rename in nfsdfs, so d_name.name is stable.
+ */
+ return !strcmp(m->file->f_path.dentry->d_name.name, "export_stats");
+}
+
static int svc_export_show(struct seq_file *m,
struct cache_detail *cd,
struct cache_head *h)
{
- struct svc_export *exp ;
+ struct svc_export *exp;
+ bool export_stats = is_export_stats_file(m);
- if (h ==NULL) {
- seq_puts(m, "#path domain(flags)\n");
+ if (h == NULL) {
+ if (export_stats)
+ seq_puts(m, "#path domain start-time\n#\tstats\n");
+ else
+ seq_puts(m, "#path domain(flags)\n");
return 0;
}
exp = container_of(h, struct svc_export, h);
seq_path(m, &exp->ex_path, " \t\n\\");
seq_putc(m, '\t');
seq_escape(m, exp->ex_client->name, " \t\n\\");
+ if (export_stats) {
+ seq_printf(m, "\t%lld\n", exp->ex_stats.start_time);
+ seq_printf(m, "\tfh_stale: %lld\n",
+ percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE]));
+ seq_printf(m, "\tio_read: %lld\n",
+ percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ]));
+ seq_printf(m, "\tio_write: %lld\n",
+ percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE]));
+ seq_putc(m, '\n');
+ return 0;
+ }
seq_putc(m, '(');
- if (test_bit(CACHE_VALID, &h->flags) &&
+ if (test_bit(CACHE_VALID, &h->flags) &&
!test_bit(CACHE_NEGATIVE, &h->flags)) {
exp_flags(m, exp->ex_flags, exp->ex_fsid,
exp->ex_anon_uid, exp->ex_anon_gid, &exp->ex_fslocs);
@@ -748,6 +796,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_layout_types = 0;
new->ex_uuid = NULL;
new->cd = item->cd;
+ export_stats_reset(&new->ex_stats);
}
static void export_update(struct cache_head *cnew, struct cache_head *citem)
@@ -780,10 +829,15 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
static struct cache_head *svc_export_alloc(void)
{
struct svc_export *i = kmalloc(sizeof(*i), GFP_KERNEL);
- if (i)
- return &i->h;
- else
+ if (!i)
+ return NULL;
+
+ if (export_stats_init(&i->ex_stats)) {
+ kfree(i);
return NULL;
+ }
+
+ return &i->h;
}
static const struct cache_detail svc_export_cache_template = {
@@ -1245,10 +1299,14 @@ static int e_show(struct seq_file *m, void *p)
struct cache_head *cp = p;
struct svc_export *exp = container_of(cp, struct svc_export, h);
struct cache_detail *cd = m->private;
+ bool export_stats = is_export_stats_file(m);
if (p == SEQ_START_TOKEN) {
seq_puts(m, "# Version 1.1\n");
- seq_puts(m, "# Path Client(Flags) # IPs\n");
+ if (export_stats)
+ seq_puts(m, "# Path Client Start-time\n#\tStats\n");
+ else
+ seq_puts(m, "# Path Client(Flags) # IPs\n");
return 0;
}
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index e7daa1f246f0..ee0e3aba4a6e 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -6,6 +6,7 @@
#define NFSD_EXPORT_H
#include <linux/sunrpc/cache.h>
+#include <linux/percpu_counter.h>
#include <uapi/linux/nfsd/export.h>
#include <linux/nfs4.h>
@@ -46,6 +47,19 @@ struct exp_flavor_info {
u32 flags;
};
+/* Per-export stats */
+enum {
+ EXP_STATS_FH_STALE,
+ EXP_STATS_IO_READ,
+ EXP_STATS_IO_WRITE,
+ EXP_STATS_COUNTERS_NUM
+};
+
+struct export_stats {
+ time64_t start_time;
+ struct percpu_counter counter[EXP_STATS_COUNTERS_NUM];
+};
+
struct svc_export {
struct cache_head h;
struct auth_domain * ex_client;
@@ -62,6 +76,7 @@ struct svc_export {
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
struct rcu_head ex_rcu;
+ struct export_stats ex_stats;
};
/* an "export key" (expkey) maps a filehandlefragement to an
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 53fcbf79bdca..7629248fdd53 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -898,6 +898,8 @@ nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
continue;
if (!nfsd_match_cred(nf->nf_cred, current_cred()))
continue;
+ if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
+ continue;
if (nfsd_file_get(nf) != NULL)
return nf;
}
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 7346acda9d76..c330f5bd0cf3 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -10,6 +10,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <linux/percpu_counter.h>
/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS 4
@@ -21,6 +22,14 @@
struct cld_net;
struct nfsd4_client_tracking_ops;
+enum {
+ /* cache misses due only to checksum comparison failures */
+ NFSD_NET_PAYLOAD_MISSES,
+ /* amount of memory (in bytes) currently consumed by the DRC */
+ NFSD_NET_DRC_MEM_USAGE,
+ NFSD_NET_COUNTERS_NUM
+};
+
/*
* Represents a nfsd "container". With respect to nfsv4 state tracking, the
* fields of interest are the *_id_hashtbls and the *_name_tree. These track
@@ -149,20 +158,16 @@ struct nfsd_net {
/*
* Stats and other tracking of on the duplicate reply cache.
- * These fields and the "rc" fields in nfsdstats are modified
- * with only the per-bucket cache lock, which isn't really safe
- * and should be fixed if we want the statistics to be
- * completely accurate.
+ * The longest_chain* fields are modified with only the per-bucket
+ * cache lock, which isn't really safe and should be fixed if we want
+ * these statistics to be completely accurate.
*/
/* total number of entries */
atomic_t num_drc_entries;
- /* cache misses due only to checksum comparison failures */
- unsigned int payload_misses;
-
- /* amount of memory (in bytes) currently consumed by the DRC */
- unsigned int drc_mem_usage;
+ /* Per-netns stats counters */
+ struct percpu_counter counter[NFSD_NET_COUNTERS_NUM];
/* longest hash chain seen */
unsigned int longest_chain;
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index b0f66604532a..855e17772eba 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -113,10 +113,12 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst *rqstp)
fh_lock(fh);
- error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
+ error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_ACCESS,
+ argp->acl_access);
if (error)
goto out_drop_lock;
- error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
+ error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_DEFAULT,
+ argp->acl_default);
if (error)
goto out_drop_lock;
@@ -188,63 +190,49 @@ out:
static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_getaclargs *argp = rqstp->rq_argp;
- p = nfs2svc_decode_fh(p, &argp->fh);
- if (!p)
+ if (!svcxdr_decode_fhandle(xdr, &argp->fh))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &argp->mask) < 0)
return 0;
- argp->mask = ntohl(*p); p++;
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
-
static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_setaclargs *argp = rqstp->rq_argp;
- struct kvec *head = rqstp->rq_arg.head;
- unsigned int base;
- int n;
- p = nfs2svc_decode_fh(p, &argp->fh);
- if (!p)
+ if (!svcxdr_decode_fhandle(xdr, &argp->fh))
return 0;
- argp->mask = ntohl(*p++);
- if (argp->mask & ~NFS_ACL_MASK ||
- !xdr_argsize_check(rqstp, p))
+ if (xdr_stream_decode_u32(xdr, &argp->mask) < 0)
return 0;
-
- base = (char *)p - (char *)head->iov_base;
- n = nfsacl_decode(&rqstp->rq_arg, base, NULL,
- (argp->mask & NFS_ACL) ?
- &argp->acl_access : NULL);
- if (n > 0)
- n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL,
- (argp->mask & NFS_DFACL) ?
- &argp->acl_default : NULL);
- return (n > 0);
-}
-
-static int nfsaclsvc_decode_fhandleargs(struct svc_rqst *rqstp, __be32 *p)
-{
- struct nfsd_fhandle *argp = rqstp->rq_argp;
-
- p = nfs2svc_decode_fh(p, &argp->fh);
- if (!p)
+ if (argp->mask & ~NFS_ACL_MASK)
+ return 0;
+ if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_ACL) ?
+ &argp->acl_access : NULL))
return 0;
- return xdr_argsize_check(rqstp, p);
+ if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_DFACL) ?
+ &argp->acl_default : NULL))
+ return 0;
+
+ return 1;
}
static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p)
{
- struct nfsd3_accessargs *argp = rqstp->rq_argp;
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
+ struct nfsd3_accessargs *args = rqstp->rq_argp;
- p = nfs2svc_decode_fh(p, &argp->fh);
- if (!p)
+ if (!svcxdr_decode_fhandle(xdr, &args->fh))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->access) < 0)
return 0;
- argp->access = ntohl(*p++);
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
/*
@@ -371,6 +359,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
+ .pc_name = "NULL",
},
[ACLPROC2_GETACL] = {
.pc_func = nfsacld_proc_getacl,
@@ -381,6 +370,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_ressize = sizeof(struct nfsd3_getaclres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+2*(1+ACL),
+ .pc_name = "GETACL",
},
[ACLPROC2_SETACL] = {
.pc_func = nfsacld_proc_setacl,
@@ -391,16 +381,18 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
+ .pc_name = "SETACL",
},
[ACLPROC2_GETATTR] = {
.pc_func = nfsacld_proc_getattr,
- .pc_decode = nfsaclsvc_decode_fhandleargs,
+ .pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfsaclsvc_encode_attrstatres,
.pc_release = nfsaclsvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
+ .pc_name = "GETATTR",
},
[ACLPROC2_ACCESS] = {
.pc_func = nfsacld_proc_access,
@@ -411,6 +403,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_ressize = sizeof(struct nfsd3_accessres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT+1,
+ .pc_name = "SETATTR",
},
};
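
The ACL argument decoders above move from raw __be32 pointer arithmetic to xdr_stream helpers, which bounds-check every item and fail cleanly instead of reading past the receive buffer. A compilable sketch contrasting the two styles over a toy big-endian word stream; the helper below is invented and is not one of the kernel's svcxdr_* functions:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>	/* ntohl, htonl */

struct stream {
	const uint32_t *p;
	size_t words_left;
};

/* Bounds-checked decode of one 32-bit word; returns 0 on success. */
static int stream_decode_u32(struct stream *s, uint32_t *out)
{
	if (s->words_left == 0)
		return -1;
	*out = ntohl(*s->p);
	s->p++;
	s->words_left--;
	return 0;
}

int main(void)
{
	uint32_t buf[] = { htonl(7) };	/* one-word message: mask = 7 */
	struct stream s = { buf, 1 };
	uint32_t mask, extra;

	/* The old style would do "mask = ntohl(*p++)" twice and read past buf. */
	if (stream_decode_u32(&s, &mask))
		return 1;
	if (stream_decode_u32(&s, &extra))
		printf("second word rejected: buffer exhausted (mask=%u)\n", mask);
	return 0;
}
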
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 7c30876a31a1..9a6f18d74d14 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -103,10 +103,12 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst *rqstp)
fh_lock(fh);
- error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
+ error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_ACCESS,
+ argp->acl_access);
if (error)
goto out_drop_lock;
- error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
+ error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_DEFAULT,
+ argp->acl_default);
out_drop_lock:
fh_unlock(fh);
@@ -124,43 +126,39 @@ out:
/*
* XDR decode functions
*/
+
static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_getaclargs *args = rqstp->rq_argp;
- p = nfs3svc_decode_fh(p, &args->fh);
- if (!p)
+ if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->mask) < 0)
return 0;
- args->mask = ntohl(*p); p++;
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
-
static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p)
{
- struct nfsd3_setaclargs *args = rqstp->rq_argp;
- struct kvec *head = rqstp->rq_arg.head;
- unsigned int base;
- int n;
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
+ struct nfsd3_setaclargs *argp = rqstp->rq_argp;
- p = nfs3svc_decode_fh(p, &args->fh);
- if (!p)
+ if (!svcxdr_decode_nfs_fh3(xdr, &argp->fh))
return 0;
- args->mask = ntohl(*p++);
- if (args->mask & ~NFS_ACL_MASK ||
- !xdr_argsize_check(rqstp, p))
+ if (xdr_stream_decode_u32(xdr, &argp->mask) < 0)
+ return 0;
+ if (argp->mask & ~NFS_ACL_MASK)
+ return 0;
+ if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_ACL) ?
+ &argp->acl_access : NULL))
+ return 0;
+ if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_DFACL) ?
+ &argp->acl_default : NULL))
return 0;
- base = (char *)p - (char *)head->iov_base;
- n = nfsacl_decode(&rqstp->rq_arg, base, NULL,
- (args->mask & NFS_ACL) ?
- &args->acl_access : NULL);
- if (n > 0)
- n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL,
- (args->mask & NFS_DFACL) ?
- &args->acl_default : NULL);
- return (n > 0);
+ return 1;
}
/*
@@ -251,6 +249,7 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
+ .pc_name = "NULL",
},
[ACLPROC3_GETACL] = {
.pc_func = nfsd3_proc_getacl,
@@ -261,6 +260,7 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
.pc_ressize = sizeof(struct nfsd3_getaclres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+2*(1+ACL),
+ .pc_name = "GETACL",
},
[ACLPROC3_SETACL] = {
.pc_func = nfsd3_proc_setacl,
@@ -271,6 +271,7 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
.pc_ressize = sizeof(struct nfsd3_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT,
+ .pc_name = "SETACL",
},
};
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 76931f4f57c3..8675851199f8 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -124,15 +124,16 @@ nfsd3_proc_access(struct svc_rqst *rqstp)
static __be32
nfsd3_proc_readlink(struct svc_rqst *rqstp)
{
- struct nfsd3_readlinkargs *argp = rqstp->rq_argp;
+ struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd3_readlinkres *resp = rqstp->rq_resp;
+ char *buffer = page_address(*(rqstp->rq_next_page++));
dprintk("nfsd: READLINK(3) %s\n", SVCFH_fmt(&argp->fh));
/* Read the symlink. */
fh_copy(&resp->fh, &argp->fh);
resp->len = NFS3_MAXPATHLEN;
- resp->status = nfsd_readlink(rqstp, &resp->fh, argp->buffer, &resp->len);
+ resp->status = nfsd_readlink(rqstp, &resp->fh, buffer, &resp->len);
return rpc_success;
}
@@ -144,25 +145,38 @@ nfsd3_proc_read(struct svc_rqst *rqstp)
{
struct nfsd3_readargs *argp = rqstp->rq_argp;
struct nfsd3_readres *resp = rqstp->rq_resp;
- u32 max_blocksize = svc_max_payload(rqstp);
- unsigned long cnt = min(argp->count, max_blocksize);
+ u32 max_blocksize = svc_max_payload(rqstp);
+ unsigned int len;
+ int v;
+
+ argp->count = min_t(u32, argp->count, max_blocksize);
dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
SVCFH_fmt(&argp->fh),
(unsigned long) argp->count,
(unsigned long long) argp->offset);
+ v = 0;
+ len = argp->count;
+ while (len > 0) {
+ struct page *page = *(rqstp->rq_next_page++);
+
+ rqstp->rq_vec[v].iov_base = page_address(page);
+ rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE);
+ len -= rqstp->rq_vec[v].iov_len;
+ v++;
+ }
+
/* Obtain buffer pointer for payload.
* 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof)
* + 1 (xdr opaque byte count) = 26
*/
- resp->count = cnt;
+ resp->count = argp->count;
svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4);
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
- rqstp->rq_vec, argp->vlen, &resp->count,
- &resp->eof);
+ rqstp->rq_vec, v, &resp->count, &resp->eof);
return rpc_success;
}
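
The READ handler above now builds rq_vec itself: the requested count is clamped to the maximum payload and then split into page-sized iovec segments taken from the reserved pages. A compilable sketch of that splitting loop, with PAGE_SIZE and the vector type as simplified stand-ins:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096u

struct iovec_ish { void *base; size_t len; };

/* Split "count" bytes across page-sized segments; returns segments used. */
static int build_read_vec(struct iovec_ish *vec, int max_segs,
			  unsigned int count)
{
	int v = 0;

	while (count > 0 && v < max_segs) {
		vec[v].base = NULL;	/* would be page_address(page) */
		vec[v].len = count < PAGE_SIZE ? count : PAGE_SIZE;
		count -= vec[v].len;
		v++;
	}
	return v;
}

int main(void)
{
	struct iovec_ish vec[8];
	int segs = build_read_vec(vec, 8, 10000);	/* roughly 2.44 pages */

	printf("segments: %d (last len %zu)\n", segs, vec[segs - 1].len);
	return 0;
}
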
@@ -421,6 +435,23 @@ nfsd3_proc_link(struct svc_rqst *rqstp)
return rpc_success;
}
+static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
+ struct nfsd3_readdirres *resp,
+ int count)
+{
+ count = min_t(u32, count, svc_max_payload(rqstp));
+
+ /* Convert byte count to number of words (i.e. >> 2),
+ * and reserve room for the NULL ptr & eof flag (-2 words) */
+ resp->buflen = (count >> 2) - 2;
+
+ resp->buffer = page_address(*rqstp->rq_next_page);
+ while (count > 0) {
+ rqstp->rq_next_page++;
+ count -= PAGE_SIZE;
+ }
+}
+
/*
* Read a portion of a directory.
*/
@@ -430,6 +461,7 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
struct nfsd3_readdirargs *argp = rqstp->rq_argp;
struct nfsd3_readdirres *resp = rqstp->rq_resp;
int count = 0;
+ loff_t offset;
struct page **p;
caddr_t page_addr = NULL;
@@ -437,18 +469,16 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
SVCFH_fmt(&argp->fh),
argp->count, (u32) argp->cookie);
- /* Make sure we've room for the NULL ptr & eof flag, and shrink to
- * client read size */
- count = (argp->count >> 2) - 2;
+ nfsd3_init_dirlist_pages(rqstp, resp, argp->count);
/* Read directory and encode entries on the fly */
fh_copy(&resp->fh, &argp->fh);
- resp->buflen = count;
resp->common.err = nfs_ok;
- resp->buffer = argp->buffer;
resp->rqstp = rqstp;
- resp->status = nfsd_readdir(rqstp, &resp->fh, (loff_t *)&argp->cookie,
+ offset = argp->cookie;
+
+ resp->status = nfsd_readdir(rqstp, &resp->fh, &offset,
&resp->common, nfs3svc_encode_entry);
memcpy(resp->verf, argp->verf, 8);
count = 0;
@@ -464,8 +494,6 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
}
resp->count = count >> 2;
if (resp->offset) {
- loff_t offset = argp->cookie;
-
if (unlikely(resp->offset1)) {
/* we ended up with offset on a page boundary */
*resp->offset = htonl(offset >> 32);
@@ -498,16 +526,12 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
SVCFH_fmt(&argp->fh),
argp->count, (u32) argp->cookie);
- /* Convert byte count to number of words (i.e. >> 2),
- * and reserve room for the NULL ptr & eof flag (-2 words) */
- resp->count = (argp->count >> 2) - 2;
+ nfsd3_init_dirlist_pages(rqstp, resp, argp->count);
/* Read directory and encode entries on the fly */
fh_copy(&resp->fh, &argp->fh);
resp->common.err = nfs_ok;
- resp->buffer = argp->buffer;
- resp->buflen = resp->count;
resp->rqstp = rqstp;
offset = argp->cookie;
@@ -683,7 +707,6 @@ out:
* NFSv3 Server procedures.
* Only the results of non-idempotent operations are cached.
*/
-#define nfs3svc_decode_fhandleargs nfs3svc_decode_fhandle
#define nfs3svc_encode_attrstatres nfs3svc_encode_attrstat
#define nfs3svc_encode_wccstatres nfs3svc_encode_wccstat
#define nfsd3_mkdirargs nfsd3_createargs
@@ -708,16 +731,18 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
+ .pc_name = "NULL",
},
[NFS3PROC_GETATTR] = {
.pc_func = nfsd3_proc_getattr,
.pc_decode = nfs3svc_decode_fhandleargs,
.pc_encode = nfs3svc_encode_attrstatres,
.pc_release = nfs3svc_release_fhandle,
- .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd3_attrstatres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
+ .pc_name = "GETATTR",
},
[NFS3PROC_SETATTR] = {
.pc_func = nfsd3_proc_setattr,
@@ -728,6 +753,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
+ .pc_name = "SETATTR",
},
[NFS3PROC_LOOKUP] = {
.pc_func = nfsd3_proc_lookup,
@@ -738,6 +764,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_diropres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+FH+pAT+pAT,
+ .pc_name = "LOOKUP",
},
[NFS3PROC_ACCESS] = {
.pc_func = nfsd3_proc_access,
@@ -748,16 +775,18 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_accessres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+1,
+ .pc_name = "ACCESS",
},
[NFS3PROC_READLINK] = {
.pc_func = nfsd3_proc_readlink,
- .pc_decode = nfs3svc_decode_readlinkargs,
+ .pc_decode = nfs3svc_decode_fhandleargs,
.pc_encode = nfs3svc_encode_readlinkres,
.pc_release = nfs3svc_release_fhandle,
- .pc_argsize = sizeof(struct nfsd3_readlinkargs),
+ .pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd3_readlinkres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+1+NFS3_MAXPATHLEN/4,
+ .pc_name = "READLINK",
},
[NFS3PROC_READ] = {
.pc_func = nfsd3_proc_read,
@@ -768,6 +797,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_readres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+4+NFSSVC_MAXBLKSIZE/4,
+ .pc_name = "READ",
},
[NFS3PROC_WRITE] = {
.pc_func = nfsd3_proc_write,
@@ -778,6 +808,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_writeres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC+4,
+ .pc_name = "WRITE",
},
[NFS3PROC_CREATE] = {
.pc_func = nfsd3_proc_create,
@@ -788,6 +819,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
+ .pc_name = "CREATE",
},
[NFS3PROC_MKDIR] = {
.pc_func = nfsd3_proc_mkdir,
@@ -798,6 +830,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
+ .pc_name = "MKDIR",
},
[NFS3PROC_SYMLINK] = {
.pc_func = nfsd3_proc_symlink,
@@ -808,6 +841,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
+ .pc_name = "SYMLINK",
},
[NFS3PROC_MKNOD] = {
.pc_func = nfsd3_proc_mknod,
@@ -818,6 +852,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_createres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+(1+FH+pAT)+WC,
+ .pc_name = "MKNOD",
},
[NFS3PROC_REMOVE] = {
.pc_func = nfsd3_proc_remove,
@@ -828,6 +863,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
+ .pc_name = "REMOVE",
},
[NFS3PROC_RMDIR] = {
.pc_func = nfsd3_proc_rmdir,
@@ -838,6 +874,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_wccstatres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC,
+ .pc_name = "RMDIR",
},
[NFS3PROC_RENAME] = {
.pc_func = nfsd3_proc_rename,
@@ -848,6 +885,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_renameres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+WC+WC,
+ .pc_name = "RENAME",
},
[NFS3PROC_LINK] = {
.pc_func = nfsd3_proc_link,
@@ -858,6 +896,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_linkres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+pAT+WC,
+ .pc_name = "LINK",
},
[NFS3PROC_READDIR] = {
.pc_func = nfsd3_proc_readdir,
@@ -867,6 +906,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_argsize = sizeof(struct nfsd3_readdirargs),
.pc_ressize = sizeof(struct nfsd3_readdirres),
.pc_cachetype = RC_NOCACHE,
+ .pc_name = "READDIR",
},
[NFS3PROC_READDIRPLUS] = {
.pc_func = nfsd3_proc_readdirplus,
@@ -876,6 +916,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_argsize = sizeof(struct nfsd3_readdirplusargs),
.pc_ressize = sizeof(struct nfsd3_readdirres),
.pc_cachetype = RC_NOCACHE,
+ .pc_name = "READDIRPLUS",
},
[NFS3PROC_FSSTAT] = {
.pc_func = nfsd3_proc_fsstat,
@@ -885,6 +926,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_fsstatres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+2*6+1,
+ .pc_name = "FSSTAT",
},
[NFS3PROC_FSINFO] = {
.pc_func = nfsd3_proc_fsinfo,
@@ -894,6 +936,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_fsinfores),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+12,
+ .pc_name = "FSINFO",
},
[NFS3PROC_PATHCONF] = {
.pc_func = nfsd3_proc_pathconf,
@@ -903,6 +946,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_pathconfres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT+6,
+ .pc_name = "PATHCONF",
},
[NFS3PROC_COMMIT] = {
.pc_func = nfsd3_proc_commit,
@@ -913,6 +957,7 @@ static const struct svc_procedure nfsd_procedures3[22] = {
.pc_ressize = sizeof(struct nfsd3_commitres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+WC+2,
+ .pc_name = "COMMIT",
},
};
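
[Editorial note] The nfsd_procedures3[] hunks above annotate each procedure slot with a .pc_name string, so the RPC layer can report per-procedure statistics and traces by name rather than by number. A rough userspace sketch of the same table-driven dispatch idea follows; the struct and helpers below are invented for illustration and are not the kernel's svc_procedure API.

#include <stdio.h>

/* Toy stand-in for a procedure slot: a name plus a handler callback. */
struct proc_slot {
	const char *name;          /* like the new .pc_name field */
	int (*handler)(int arg);   /* stand-in for .pc_func */
};

static int do_null(int arg)    { (void)arg; return 0; }
static int do_getattr(int arg) { return arg * 2; }

static const struct proc_slot procs[] = {
	[0] = { .name = "NULL",    .handler = do_null },
	[1] = { .name = "GETATTR", .handler = do_getattr },
};

int main(void)
{
	unsigned int procnum = 1;
	const struct proc_slot *p = &procs[procnum];

	/* A dispatcher can now log the procedure by name, not number. */
	printf("dispatching %s -> %d\n", p->name, p->handler(21));
	return 0;
}
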
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 34b880211e5e..9d9a01ce0b27 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -29,8 +29,9 @@ static u32 nfs3_ftypes[] = {
/*
- * XDR functions for basic NFS types
+ * Basic NFSv3 data types (RFC 1813 Sections 2.5 and 2.6)
*/
+
static __be32 *
encode_time3(__be32 *p, struct timespec64 *time)
{
@@ -38,32 +39,47 @@ encode_time3(__be32 *p, struct timespec64 *time)
return p;
}
-static __be32 *
-decode_time3(__be32 *p, struct timespec64 *time)
+static bool
+svcxdr_decode_nfstime3(struct xdr_stream *xdr, struct timespec64 *timep)
{
- time->tv_sec = ntohl(*p++);
- time->tv_nsec = ntohl(*p++);
- return p;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+ if (!p)
+ return false;
+ timep->tv_sec = be32_to_cpup(p++);
+ timep->tv_nsec = be32_to_cpup(p);
+
+ return true;
}
-static __be32 *
-decode_fh(__be32 *p, struct svc_fh *fhp)
+/**
+ * svcxdr_decode_nfs_fh3 - Decode an NFSv3 file handle
+ * @xdr: XDR stream positioned at an undecoded NFSv3 FH
+ * @fhp: OUT: filled-in server file handle
+ *
+ * Return values:
+ * %false: The encoded file handle was not valid
+ * %true: @fhp has been initialized
+ */
+bool
+svcxdr_decode_nfs_fh3(struct xdr_stream *xdr, struct svc_fh *fhp)
{
- unsigned int size;
+ __be32 *p;
+ u32 size;
+
+ if (xdr_stream_decode_u32(xdr, &size) < 0)
+ return false;
+ if (size == 0 || size > NFS3_FHSIZE)
+ return false;
+ p = xdr_inline_decode(xdr, size);
+ if (!p)
+ return false;
fh_init(fhp, NFS3_FHSIZE);
- size = ntohl(*p++);
- if (size > NFS3_FHSIZE)
- return NULL;
-
- memcpy(&fhp->fh_handle.fh_base, p, size);
fhp->fh_handle.fh_size = size;
- return p + XDR_QUADLEN(size);
-}
+ memcpy(&fhp->fh_handle.fh_base, p, size);
-/* Helper function for NFSv3 ACL code */
-__be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp)
-{
- return decode_fh(p, fhp);
+ return true;
}
static __be32 *
@@ -76,69 +92,165 @@ encode_fh(__be32 *p, struct svc_fh *fhp)
return p + XDR_QUADLEN(size);
}
-/*
- * Decode a file name and make sure that the path contains
- * no slashes or null bytes.
- */
-static __be32 *
-decode_filename(__be32 *p, char **namp, unsigned int *lenp)
+static bool
+svcxdr_decode_filename3(struct xdr_stream *xdr, char **name, unsigned int *len)
{
- char *name;
- unsigned int i;
+ u32 size, i;
+ __be32 *p;
+ char *c;
+
+ if (xdr_stream_decode_u32(xdr, &size) < 0)
+ return false;
+ if (size == 0 || size > NFS3_MAXNAMLEN)
+ return false;
+ p = xdr_inline_decode(xdr, size);
+ if (!p)
+ return false;
- if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS3_MAXNAMLEN)) != NULL) {
- for (i = 0, name = *namp; i < *lenp; i++, name++) {
- if (*name == '\0' || *name == '/')
- return NULL;
- }
+ *len = size;
+ *name = (char *)p;
+ for (i = 0, c = *name; i < size; i++, c++) {
+ if (*c == '\0' || *c == '/')
+ return false;
}
- return p;
+ return true;
}
-static __be32 *
-decode_sattr3(__be32 *p, struct iattr *iap, struct user_namespace *userns)
+static bool
+svcxdr_decode_diropargs3(struct xdr_stream *xdr, struct svc_fh *fhp,
+ char **name, unsigned int *len)
+{
+ return svcxdr_decode_nfs_fh3(xdr, fhp) &&
+ svcxdr_decode_filename3(xdr, name, len);
+}
+
+static bool
+svcxdr_decode_sattr3(struct svc_rqst *rqstp, struct xdr_stream *xdr,
+ struct iattr *iap)
{
- u32 tmp;
+ u32 set_it;
iap->ia_valid = 0;
- if (*p++) {
+ if (xdr_stream_decode_bool(xdr, &set_it) < 0)
+ return false;
+ if (set_it) {
+ u32 mode;
+
+ if (xdr_stream_decode_u32(xdr, &mode) < 0)
+ return false;
iap->ia_valid |= ATTR_MODE;
- iap->ia_mode = ntohl(*p++);
+ iap->ia_mode = mode;
}
- if (*p++) {
- iap->ia_uid = make_kuid(userns, ntohl(*p++));
+ if (xdr_stream_decode_bool(xdr, &set_it) < 0)
+ return false;
+ if (set_it) {
+ u32 uid;
+
+ if (xdr_stream_decode_u32(xdr, &uid) < 0)
+ return false;
+ iap->ia_uid = make_kuid(nfsd_user_namespace(rqstp), uid);
if (uid_valid(iap->ia_uid))
iap->ia_valid |= ATTR_UID;
}
- if (*p++) {
- iap->ia_gid = make_kgid(userns, ntohl(*p++));
+ if (xdr_stream_decode_bool(xdr, &set_it) < 0)
+ return false;
+ if (set_it) {
+ u32 gid;
+
+ if (xdr_stream_decode_u32(xdr, &gid) < 0)
+ return false;
+ iap->ia_gid = make_kgid(nfsd_user_namespace(rqstp), gid);
if (gid_valid(iap->ia_gid))
iap->ia_valid |= ATTR_GID;
}
- if (*p++) {
- u64 newsize;
+ if (xdr_stream_decode_bool(xdr, &set_it) < 0)
+ return false;
+ if (set_it) {
+ u64 newsize;
+ if (xdr_stream_decode_u64(xdr, &newsize) < 0)
+ return false;
iap->ia_valid |= ATTR_SIZE;
- p = xdr_decode_hyper(p, &newsize);
iap->ia_size = min_t(u64, newsize, NFS_OFFSET_MAX);
}
- if ((tmp = ntohl(*p++)) == 1) { /* set to server time */
+ if (xdr_stream_decode_u32(xdr, &set_it) < 0)
+ return false;
+ switch (set_it) {
+ case DONT_CHANGE:
+ break;
+ case SET_TO_SERVER_TIME:
iap->ia_valid |= ATTR_ATIME;
- } else if (tmp == 2) { /* set to client time */
+ break;
+ case SET_TO_CLIENT_TIME:
+ if (!svcxdr_decode_nfstime3(xdr, &iap->ia_atime))
+ return false;
iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
- iap->ia_atime.tv_sec = ntohl(*p++);
- iap->ia_atime.tv_nsec = ntohl(*p++);
+ break;
+ default:
+ return false;
}
- if ((tmp = ntohl(*p++)) == 1) { /* set to server time */
+ if (xdr_stream_decode_u32(xdr, &set_it) < 0)
+ return false;
+ switch (set_it) {
+ case DONT_CHANGE:
+ break;
+ case SET_TO_SERVER_TIME:
iap->ia_valid |= ATTR_MTIME;
- } else if (tmp == 2) { /* set to client time */
+ break;
+ case SET_TO_CLIENT_TIME:
+ if (!svcxdr_decode_nfstime3(xdr, &iap->ia_mtime))
+ return false;
iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET;
- iap->ia_mtime.tv_sec = ntohl(*p++);
- iap->ia_mtime.tv_nsec = ntohl(*p++);
+ break;
+ default:
+ return false;
}
- return p;
+
+ return true;
+}
+
+static bool
+svcxdr_decode_sattrguard3(struct xdr_stream *xdr, struct nfsd3_sattrargs *args)
+{
+ __be32 *p;
+ u32 check;
+
+ if (xdr_stream_decode_bool(xdr, &check) < 0)
+ return false;
+ if (check) {
+ p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+ if (!p)
+ return false;
+ args->check_guard = 1;
+ args->guardtime = be32_to_cpup(p);
+ } else
+ args->check_guard = 0;
+
+ return true;
+}
+
+static bool
+svcxdr_decode_specdata3(struct xdr_stream *xdr, struct nfsd3_mknodargs *args)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+ if (!p)
+ return false;
+ args->major = be32_to_cpup(p++);
+ args->minor = be32_to_cpup(p);
+
+ return true;
+}
+
+static bool
+svcxdr_decode_devicedata3(struct svc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfsd3_mknodargs *args)
+{
+ return svcxdr_decode_sattr3(rqstp, xdr, &args->attrs) &&
+ svcxdr_decode_specdata3(xdr, args);
}
static __be32 *encode_fsid(__be32 *p, struct svc_fh *fhp)
@@ -252,6 +364,11 @@ encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
return encode_post_op_attr(rqstp, p, fhp);
}
+static bool fs_supports_change_attribute(struct super_block *sb)
+{
+ return sb->s_flags & SB_I_VERSION || sb->s_export_op->fetch_iversion;
+}
+
/*
* Fill in the pre_op attr for the wcc data
*/
@@ -260,24 +377,26 @@ void fill_pre_wcc(struct svc_fh *fhp)
struct inode *inode;
struct kstat stat;
bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
- __be32 err;
if (fhp->fh_no_wcc || fhp->fh_pre_saved)
return;
inode = d_inode(fhp->fh_dentry);
- err = fh_getattr(fhp, &stat);
- if (err) {
- /* Grab the times from inode anyway */
- stat.mtime = inode->i_mtime;
- stat.ctime = inode->i_ctime;
- stat.size = inode->i_size;
+ if (fs_supports_change_attribute(inode->i_sb) || !v4) {
+ __be32 err = fh_getattr(fhp, &stat);
+
+ if (err) {
+ /* Grab the times from inode anyway */
+ stat.mtime = inode->i_mtime;
+ stat.ctime = inode->i_ctime;
+ stat.size = inode->i_size;
+ }
+ fhp->fh_pre_mtime = stat.mtime;
+ fhp->fh_pre_ctime = stat.ctime;
+ fhp->fh_pre_size = stat.size;
}
if (v4)
fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
- fhp->fh_pre_mtime = stat.mtime;
- fhp->fh_pre_ctime = stat.ctime;
- fhp->fh_pre_size = stat.size;
fhp->fh_pre_saved = true;
}
@@ -288,7 +407,6 @@ void fill_post_wcc(struct svc_fh *fhp)
{
bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
struct inode *inode = d_inode(fhp->fh_dentry);
- __be32 err;
if (fhp->fh_no_wcc)
return;
@@ -296,12 +414,16 @@ void fill_post_wcc(struct svc_fh *fhp)
if (fhp->fh_post_saved)
printk("nfsd: inode locked twice during operation.\n");
- err = fh_getattr(fhp, &fhp->fh_post_attr);
- if (err) {
- fhp->fh_post_saved = false;
- fhp->fh_post_attr.ctime = inode->i_ctime;
- } else
- fhp->fh_post_saved = true;
+ fhp->fh_post_saved = true;
+
+ if (fs_supports_change_attribute(inode->i_sb) || !v4) {
+ __be32 err = fh_getattr(fhp, &fhp->fh_post_attr);
+
+ if (err) {
+ fhp->fh_post_saved = false;
+ fhp->fh_post_attr.ctime = inode->i_ctime;
+ }
+ }
if (v4)
fhp->fh_post_change =
nfsd4_change_attribute(&fhp->fh_post_attr, inode);
@@ -312,331 +434,277 @@ void fill_post_wcc(struct svc_fh *fhp)
*/
int
-nfs3svc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p)
+nfs3svc_decode_fhandleargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_fhandle *args = rqstp->rq_argp;
- p = decode_fh(p, &args->fh);
- if (!p)
- return 0;
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_nfs_fh3(xdr, &args->fh);
}
int
nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_sattrargs *args = rqstp->rq_argp;
- p = decode_fh(p, &args->fh);
- if (!p)
- return 0;
- p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
-
- if ((args->check_guard = ntohl(*p++)) != 0) {
- struct timespec64 time;
- p = decode_time3(p, &time);
- args->guardtime = time.tv_sec;
- }
-
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_nfs_fh3(xdr, &args->fh) &&
+ svcxdr_decode_sattr3(rqstp, xdr, &args->attrs) &&
+ svcxdr_decode_sattrguard3(xdr, args);
}
int
nfs3svc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_diropargs *args = rqstp->rq_argp;
- if (!(p = decode_fh(p, &args->fh))
- || !(p = decode_filename(p, &args->name, &args->len)))
- return 0;
-
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_diropargs3(xdr, &args->fh, &args->name, &args->len);
}
int
nfs3svc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_accessargs *args = rqstp->rq_argp;
- p = decode_fh(p, &args->fh);
- if (!p)
+ if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->access) < 0)
return 0;
- args->access = ntohl(*p++);
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_readargs *args = rqstp->rq_argp;
- unsigned int len;
- int v;
- u32 max_blocksize = svc_max_payload(rqstp);
- p = decode_fh(p, &args->fh);
- if (!p)
+ if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
+ return 0;
+ if (xdr_stream_decode_u64(xdr, &args->offset) < 0)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return 0;
- p = xdr_decode_hyper(p, &args->offset);
-
- args->count = ntohl(*p++);
- len = min(args->count, max_blocksize);
-
- /* set up the kvec */
- v=0;
- while (len > 0) {
- struct page *p = *(rqstp->rq_next_page++);
- rqstp->rq_vec[v].iov_base = page_address(p);
- rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE);
- len -= rqstp->rq_vec[v].iov_len;
- v++;
- }
- args->vlen = v;
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_writeargs *args = rqstp->rq_argp;
- unsigned int len, hdr, dlen;
u32 max_blocksize = svc_max_payload(rqstp);
struct kvec *head = rqstp->rq_arg.head;
struct kvec *tail = rqstp->rq_arg.tail;
+ size_t remaining;
- p = decode_fh(p, &args->fh);
- if (!p)
+ if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
return 0;
- p = xdr_decode_hyper(p, &args->offset);
-
- args->count = ntohl(*p++);
- args->stable = ntohl(*p++);
- len = args->len = ntohl(*p++);
- if ((void *)p > head->iov_base + head->iov_len)
+ if (xdr_stream_decode_u64(xdr, &args->offset) < 0)
return 0;
- /*
- * The count must equal the amount of data passed.
- */
- if (args->count != args->len)
+ if (xdr_stream_decode_u32(xdr, &args->count) < 0)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->stable) < 0)
return 0;
- /*
- * Check to make sure that we got the right number of
- * bytes.
- */
- hdr = (void*)p - head->iov_base;
- dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;
- /*
- * Round the length of the data which was specified up to
- * the next multiple of XDR units and then compare that
- * against the length which was actually received.
- * Note that when RPCSEC/GSS (for example) is used, the
- * data buffer can be padded so dlen might be larger
- * than required. It must never be smaller.
- */
- if (dlen < XDR_QUADLEN(len)*4)
+ /* opaque data */
+ if (xdr_stream_decode_u32(xdr, &args->len) < 0)
return 0;
+ /* request sanity */
+ if (args->count != args->len)
+ return 0;
+ remaining = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len;
+ remaining -= xdr_stream_pos(xdr);
+ if (remaining < xdr_align_size(args->len))
+ return 0;
if (args->count > max_blocksize) {
args->count = max_blocksize;
- len = args->len = max_blocksize;
+ args->len = max_blocksize;
}
- args->first.iov_base = (void *)p;
- args->first.iov_len = head->iov_len - hdr;
+ args->first.iov_base = xdr->p;
+ args->first.iov_len = head->iov_len - xdr_stream_pos(xdr);
+
return 1;
}
int
nfs3svc_decode_createargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_createargs *args = rqstp->rq_argp;
- if (!(p = decode_fh(p, &args->fh))
- || !(p = decode_filename(p, &args->name, &args->len)))
+ if (!svcxdr_decode_diropargs3(xdr, &args->fh, &args->name, &args->len))
return 0;
-
- switch (args->createmode = ntohl(*p++)) {
+ if (xdr_stream_decode_u32(xdr, &args->createmode) < 0)
+ return 0;
+ switch (args->createmode) {
case NFS3_CREATE_UNCHECKED:
case NFS3_CREATE_GUARDED:
- p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
- break;
+ return svcxdr_decode_sattr3(rqstp, xdr, &args->attrs);
case NFS3_CREATE_EXCLUSIVE:
- args->verf = p;
- p += 2;
+ args->verf = xdr_inline_decode(xdr, NFS3_CREATEVERFSIZE);
+ if (!args->verf)
+ return 0;
break;
default:
return 0;
}
-
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
nfs3svc_decode_mkdirargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_createargs *args = rqstp->rq_argp;
- if (!(p = decode_fh(p, &args->fh)) ||
- !(p = decode_filename(p, &args->name, &args->len)))
- return 0;
- p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
-
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_diropargs3(xdr, &args->fh,
+ &args->name, &args->len) &&
+ svcxdr_decode_sattr3(rqstp, xdr, &args->attrs);
}
int
nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_symlinkargs *args = rqstp->rq_argp;
- char *base = (char *)p;
- size_t dlen;
+ struct kvec *head = rqstp->rq_arg.head;
+ struct kvec *tail = rqstp->rq_arg.tail;
+ size_t remaining;
- if (!(p = decode_fh(p, &args->ffh)) ||
- !(p = decode_filename(p, &args->fname, &args->flen)))
+ if (!svcxdr_decode_diropargs3(xdr, &args->ffh, &args->fname, &args->flen))
+ return 0;
+ if (!svcxdr_decode_sattr3(rqstp, xdr, &args->attrs))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->tlen) < 0)
return 0;
- p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
- args->tlen = ntohl(*p++);
+ /* request sanity */
+ remaining = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len;
+ remaining -= xdr_stream_pos(xdr);
+ if (remaining < xdr_align_size(args->tlen))
+ return 0;
- args->first.iov_base = p;
- args->first.iov_len = rqstp->rq_arg.head[0].iov_len;
- args->first.iov_len -= (char *)p - base;
+ args->first.iov_base = xdr->p;
+ args->first.iov_len = head->iov_len - xdr_stream_pos(xdr);
- dlen = args->first.iov_len + rqstp->rq_arg.page_len +
- rqstp->rq_arg.tail[0].iov_len;
- if (dlen < XDR_QUADLEN(args->tlen) << 2)
- return 0;
return 1;
}
int
nfs3svc_decode_mknodargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_mknodargs *args = rqstp->rq_argp;
- if (!(p = decode_fh(p, &args->fh))
- || !(p = decode_filename(p, &args->name, &args->len)))
+ if (!svcxdr_decode_diropargs3(xdr, &args->fh, &args->name, &args->len))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->ftype) < 0)
+ return 0;
+ switch (args->ftype) {
+ case NF3CHR:
+ case NF3BLK:
+ return svcxdr_decode_devicedata3(rqstp, xdr, args);
+ case NF3SOCK:
+ case NF3FIFO:
+ return svcxdr_decode_sattr3(rqstp, xdr, &args->attrs);
+ case NF3REG:
+ case NF3DIR:
+ case NF3LNK:
+ /* Valid XDR but illegal file types */
+ break;
+ default:
return 0;
-
- args->ftype = ntohl(*p++);
-
- if (args->ftype == NF3BLK || args->ftype == NF3CHR
- || args->ftype == NF3SOCK || args->ftype == NF3FIFO)
- p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
-
- if (args->ftype == NF3BLK || args->ftype == NF3CHR) {
- args->major = ntohl(*p++);
- args->minor = ntohl(*p++);
}
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
nfs3svc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_renameargs *args = rqstp->rq_argp;
- if (!(p = decode_fh(p, &args->ffh))
- || !(p = decode_filename(p, &args->fname, &args->flen))
- || !(p = decode_fh(p, &args->tfh))
- || !(p = decode_filename(p, &args->tname, &args->tlen)))
- return 0;
-
- return xdr_argsize_check(rqstp, p);
-}
-
-int
-nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p)
-{
- struct nfsd3_readlinkargs *args = rqstp->rq_argp;
-
- p = decode_fh(p, &args->fh);
- if (!p)
- return 0;
- args->buffer = page_address(*(rqstp->rq_next_page++));
-
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_diropargs3(xdr, &args->ffh,
+ &args->fname, &args->flen) &&
+ svcxdr_decode_diropargs3(xdr, &args->tfh,
+ &args->tname, &args->tlen);
}
int
nfs3svc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_linkargs *args = rqstp->rq_argp;
- if (!(p = decode_fh(p, &args->ffh))
- || !(p = decode_fh(p, &args->tfh))
- || !(p = decode_filename(p, &args->tname, &args->tlen)))
- return 0;
-
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_nfs_fh3(xdr, &args->ffh) &&
+ svcxdr_decode_diropargs3(xdr, &args->tfh,
+ &args->tname, &args->tlen);
}
int
nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_readdirargs *args = rqstp->rq_argp;
- int len;
- u32 max_blocksize = svc_max_payload(rqstp);
- p = decode_fh(p, &args->fh);
- if (!p)
+ if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
+ return 0;
+ if (xdr_stream_decode_u64(xdr, &args->cookie) < 0)
+ return 0;
+ args->verf = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
+ if (!args->verf)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return 0;
- p = xdr_decode_hyper(p, &args->cookie);
- args->verf = p; p += 2;
- args->dircount = ~0;
- args->count = ntohl(*p++);
- len = args->count = min_t(u32, args->count, max_blocksize);
-
- while (len > 0) {
- struct page *p = *(rqstp->rq_next_page++);
- if (!args->buffer)
- args->buffer = page_address(p);
- len -= PAGE_SIZE;
- }
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_readdirargs *args = rqstp->rq_argp;
- int len;
- u32 max_blocksize = svc_max_payload(rqstp);
+ u32 dircount;
- p = decode_fh(p, &args->fh);
- if (!p)
+ if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
+ return 0;
+ if (xdr_stream_decode_u64(xdr, &args->cookie) < 0)
+ return 0;
+ args->verf = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
+ if (!args->verf)
+ return 0;
+ /* dircount is ignored */
+ if (xdr_stream_decode_u32(xdr, &dircount) < 0)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return 0;
- p = xdr_decode_hyper(p, &args->cookie);
- args->verf = p; p += 2;
- args->dircount = ntohl(*p++);
- args->count = ntohl(*p++);
-
- len = args->count = min(args->count, max_blocksize);
- while (len > 0) {
- struct page *p = *(rqstp->rq_next_page++);
- if (!args->buffer)
- args->buffer = page_address(p);
- len -= PAGE_SIZE;
- }
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
nfs3svc_decode_commitargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_commitargs *args = rqstp->rq_argp;
- p = decode_fh(p, &args->fh);
- if (!p)
+
+ if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
+ return 0;
+ if (xdr_stream_decode_u64(xdr, &args->offset) < 0)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return 0;
- p = xdr_decode_hyper(p, &args->offset);
- args->count = ntohl(*p++);
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
/*
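
[Editorial note] The nfs3xdr.c changes above replace open-coded pointer walking (decode_fh, decode_sattr3, and friends) with bounds-checked xdr_stream helpers: every field is pulled from the stream with an explicit length check and each decoder returns a plain true/false. Below is a standalone sketch of the same defensive pattern for the file-handle case; the wire layout follows RFC 1813's opaque<NFS3_FHSIZE>, but xdr_stream is a kernel facility, so this uses a hand-rolled cursor and is illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define NFS3_FHSIZE 64

/* Minimal cursor over an XDR-encoded buffer (4-byte units). */
struct xdr_cursor {
	const uint8_t *p;
	size_t remaining;
};

static int xdr_get_u32(struct xdr_cursor *c, uint32_t *val)
{
	if (c->remaining < 4)
		return 0;
	memcpy(val, c->p, 4);
	*val = ntohl(*val);
	c->p += 4;
	c->remaining -= 4;
	return 1;
}

/* Decode nfs_fh3: u32 length followed by that many opaque bytes. */
static int decode_nfs_fh3(struct xdr_cursor *c, uint8_t *fh, uint32_t *len)
{
	uint32_t size, padded;

	if (!xdr_get_u32(c, &size))
		return 0;
	if (size == 0 || size > NFS3_FHSIZE)
		return 0;            /* reject bogus lengths up front */
	padded = (size + 3) & ~3u;   /* XDR rounds opaques up to 4 bytes */
	if (c->remaining < padded)
		return 0;
	memcpy(fh, c->p, size);
	c->p += padded;
	c->remaining -= padded;
	*len = size;
	return 1;
}

int main(void)
{
	/* length = 4, handle bytes = de ad be ef */
	uint8_t buf[] = { 0, 0, 0, 4, 0xde, 0xad, 0xbe, 0xef };
	struct xdr_cursor c = { buf, sizeof(buf) };
	uint8_t fh[NFS3_FHSIZE];
	uint32_t len;

	if (decode_nfs_fh3(&c, fh, &len))
		printf("decoded %u-byte file handle\n", len);
	return 0;
}
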
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 71292a0d6f09..eaa3a0cf38f1 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -781,12 +781,13 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
fh_lock(fhp);
- host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
+ host_error = set_posix_acl(&init_user_ns, inode, ACL_TYPE_ACCESS, pacl);
if (host_error < 0)
goto out_drop_lock;
if (S_ISDIR(inode->i_mode)) {
- host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
+ host_error = set_posix_acl(&init_user_ns, inode,
+ ACL_TYPE_DEFAULT, dpacl);
}
out_drop_lock:
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 052be5bf9ef5..7325592b456e 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1189,6 +1189,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
switch (task->tk_status) {
case -EIO:
case -ETIMEDOUT:
+ case -EACCES:
nfsd4_mark_cb_down(clp, task->tk_status);
}
break;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 8d6d2678abad..dd9f38d072dd 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -378,8 +378,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* Before RECLAIM_COMPLETE done, server should deny new lock
*/
if (nfsd4_has_session(cstate) &&
- !test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
- &cstate->session->se_client->cl_flags) &&
+ !test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags) &&
open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
return nfserr_grace;
@@ -428,8 +427,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
- status = nfs4_check_open_reclaim(&open->op_clientid,
- cstate, nn);
+ status = nfs4_check_open_reclaim(cstate->clp);
if (status)
goto out;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
@@ -1304,7 +1302,7 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
struct nfsd_file *dst)
{
nfs42_ssc_close(src->nf_file);
- /* 'src' is freed by nfsd4_do_async_copy */
+ fput(src->nf_file);
nfsd_file_put(dst);
mntput(ss_mnt);
}
@@ -1888,7 +1886,7 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
nfserr = nfs_ok;
if (gdp->gd_maxcount != 0) {
nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb,
- rqstp, cstate->session->se_client, gdp);
+ rqstp, cstate->clp, gdp);
}
gdp->gd_notify_types &= ops->notify_types;
@@ -2174,7 +2172,7 @@ nfsd4_proc_null(struct svc_rqst *rqstp)
static inline void nfsd4_increment_op_stats(u32 opnum)
{
if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
- nfsdstats.nfs4_opcount[opnum]++;
+ percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]);
}
static const struct nfsd4_operation nfsd4_ops[];
@@ -3305,6 +3303,7 @@ static const struct svc_procedure nfsd_procedures4[2] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 1,
+ .pc_name = "NULL",
},
[NFSPROC4_COMPOUND] = {
.pc_func = nfsd4_proc_compound,
@@ -3315,6 +3314,7 @@ static const struct svc_procedure nfsd_procedures4[2] = {
.pc_release = nfsd4_release_compoundargs,
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = NFSD_BUFSIZE/4,
+ .pc_name = "COMPOUND",
},
};
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 186fa2c2c6ba..891395c6c7d3 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -233,7 +233,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
* as well be forgiving and just succeed silently.
*/
goto out_put;
- status = vfs_mkdir(d_inode(dir), dentry, S_IRWXU);
+ status = vfs_mkdir(&init_user_ns, d_inode(dir), dentry, S_IRWXU);
out_put:
dput(dentry);
out_unlock:
@@ -353,7 +353,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen, struct nfsd_net *nn)
status = -ENOENT;
if (d_really_is_negative(dentry))
goto out;
- status = vfs_rmdir(d_inode(dir), dentry);
+ status = vfs_rmdir(&init_user_ns, d_inode(dir), dentry);
out:
dput(dentry);
out_unlock:
@@ -443,7 +443,7 @@ purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
if (nfs4_has_reclaimed_state(name, nn))
goto out_free;
- status = vfs_rmdir(d_inode(parent), child);
+ status = vfs_rmdir(&init_user_ns, d_inode(parent), child);
if (status)
printk("failed to remove client recovery directory %pd\n",
child);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 1d2cd6a88f61..97447a64bad0 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3891,6 +3891,7 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
+ struct nfs4_client *clp = cstate->clp;
__be32 status = 0;
if (rc->rca_one_fs) {
@@ -3904,12 +3905,11 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp,
}
status = nfserr_complete_already;
- if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
- &cstate->session->se_client->cl_flags))
+ if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
goto out;
status = nfserr_stale_clientid;
- if (is_client_expired(cstate->session->se_client))
+ if (is_client_expired(clp))
/*
* The following error isn't really legal.
* But we only get here if the client just explicitly
@@ -3920,8 +3920,8 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp,
goto out;
status = nfs_ok;
- nfsd4_client_record_create(cstate->session->se_client);
- inc_reclaim_complete(cstate->session->se_client);
+ nfsd4_client_record_create(clp);
+ inc_reclaim_complete(clp);
out:
return status;
}
@@ -4633,40 +4633,37 @@ static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4
return nfserr_bad_seqid;
}
-static __be32 lookup_clientid(clientid_t *clid,
- struct nfsd4_compound_state *cstate,
- struct nfsd_net *nn,
- bool sessions)
+static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
+ struct nfsd_net *nn)
{
struct nfs4_client *found;
+ spin_lock(&nn->client_lock);
+ found = find_confirmed_client(clid, sessions, nn);
+ if (found)
+ atomic_inc(&found->cl_rpc_users);
+ spin_unlock(&nn->client_lock);
+ return found;
+}
+
+static __be32 set_client(clientid_t *clid,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd_net *nn)
+{
if (cstate->clp) {
- found = cstate->clp;
- if (!same_clid(&found->cl_clientid, clid))
+ if (!same_clid(&cstate->clp->cl_clientid, clid))
return nfserr_stale_clientid;
return nfs_ok;
}
-
if (STALE_CLIENTID(clid, nn))
return nfserr_stale_clientid;
-
/*
- * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
- * cached already then we know this is for is for v4.0 and "sessions"
- * will be false.
+ * We're in the 4.0 case (otherwise the SEQUENCE op would have
+ * set cstate->clp), so session = false:
*/
- WARN_ON_ONCE(cstate->session);
- spin_lock(&nn->client_lock);
- found = find_confirmed_client(clid, sessions, nn);
- if (!found) {
- spin_unlock(&nn->client_lock);
+ cstate->clp = lookup_clientid(clid, false, nn);
+ if (!cstate->clp)
return nfserr_expired;
- }
- atomic_inc(&found->cl_rpc_users);
- spin_unlock(&nn->client_lock);
-
- /* Cache the nfs4_client in cstate! */
- cstate->clp = found;
return nfs_ok;
}
@@ -4680,8 +4677,6 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
struct nfs4_openowner *oo = NULL;
__be32 status;
- if (STALE_CLIENTID(&open->op_clientid, nn))
- return nfserr_stale_clientid;
/*
* In case we need it later, after we've already created the
* file and don't want to risk a further failure:
@@ -4690,7 +4685,7 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
if (open->op_file == NULL)
return nfserr_jukebox;
- status = lookup_clientid(clientid, cstate, nn, false);
+ status = set_client(clientid, cstate, nn);
if (status)
return status;
clp = cstate->clp;
@@ -4945,31 +4940,6 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
return fl;
}
-static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
- struct nfs4_file *fp)
-{
- struct nfs4_clnt_odstate *co;
- struct file *f = fp->fi_deleg_file->nf_file;
- struct inode *ino = locks_inode(f);
- int writes = atomic_read(&ino->i_writecount);
-
- if (fp->fi_fds[O_WRONLY])
- writes--;
- if (fp->fi_fds[O_RDWR])
- writes--;
- if (writes > 0)
- return -EAGAIN;
- spin_lock(&fp->fi_lock);
- list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
- if (co->co_client != clp) {
- spin_unlock(&fp->fi_lock);
- return -EAGAIN;
- }
- }
- spin_unlock(&fp->fi_lock);
- return 0;
-}
-
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
@@ -4989,12 +4959,9 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
nf = find_readable_file(fp);
if (!nf) {
- /*
- * We probably could attempt another open and get a read
- * delegation, but for now, don't bother until the
- * client actually sends us one.
- */
- return ERR_PTR(-EAGAIN);
+ /* We should always have a readable file here */
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EBADF);
}
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
@@ -5024,19 +4991,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
if (!fl)
goto out_clnt_odstate;
- status = nfsd4_check_conflicting_opens(clp, fp);
- if (status) {
- locks_free_lock(fl);
- goto out_clnt_odstate;
- }
status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
if (fl)
locks_free_lock(fl);
if (status)
goto out_clnt_odstate;
- status = nfsd4_check_conflicting_opens(clp, fp);
- if (status)
- goto out_clnt_odstate;
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
@@ -5118,6 +5077,17 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
goto out_no_deleg;
if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
goto out_no_deleg;
+ /*
+ * Also, if the file was opened for write or
+ * create, there's a good chance the client's
+ * about to write to it, resulting in an
+ * immediate recall (since we don't support
+ * write delegations):
+ */
+ if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
+ goto out_no_deleg;
+ if (open->op_create == NFS4_OPEN_CREATE)
+ goto out_no_deleg;
break;
default:
goto out_no_deleg;
@@ -5300,17 +5270,14 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
trace_nfsd_clid_renew(clid);
- status = lookup_clientid(clid, cstate, nn, false);
+ status = set_client(clid, cstate, nn);
if (status)
- goto out;
+ return status;
clp = cstate->clp;
- status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
&& clp->cl_cb_state != NFSD4_CB_UP)
- goto out;
- status = nfs_ok;
-out:
- return status;
+ return nfserr_cb_path_down;
+ return nfs_ok;
}
void
@@ -5397,7 +5364,7 @@ nfs4_laundromat(struct nfsd_net *nn)
idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
- cps->cpntf_time > cutoff)
+ cps->cpntf_time < cutoff)
_free_cpntf_state_locked(nn, cps);
}
spin_unlock(&nn->s2s_cp_lock);
@@ -5686,8 +5653,7 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return nfserr_bad_stateid;
- status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn,
- false);
+ status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
if (status == nfserr_stale_clientid) {
if (cstate->session)
return nfserr_bad_stateid;
@@ -5818,21 +5784,27 @@ static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
{
__be32 status;
struct nfs4_cpntf_state *cps = NULL;
- struct nfsd4_compound_state cstate;
+ struct nfs4_client *found;
status = manage_cpntf_state(nn, st, NULL, &cps);
if (status)
return status;
cps->cpntf_time = ktime_get_boottime_seconds();
- memset(&cstate, 0, sizeof(cstate));
- status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true);
- if (status)
+
+ status = nfserr_expired;
+ found = lookup_clientid(&cps->cp_p_clid, true, nn);
+ if (!found)
goto out;
- status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid,
- NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
- stid, nn);
- put_client_renew(cstate.clp);
+
+ *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
+ NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
+ if (*stid)
+ status = nfs_ok;
+ else
+ status = nfserr_bad_stateid;
+
+ put_client_renew(found);
out:
nfs4_put_cpntf_state(nn, cps);
return status;
@@ -5921,7 +5893,7 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
struct nfsd4_test_stateid_id *stateid;
- struct nfs4_client *cl = cstate->session->se_client;
+ struct nfs4_client *cl = cstate->clp;
list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
stateid->ts_id_status =
@@ -5967,7 +5939,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
stateid_t *stateid = &free_stateid->fr_stateid;
struct nfs4_stid *s;
struct nfs4_delegation *dp;
- struct nfs4_client *cl = cstate->session->se_client;
+ struct nfs4_client *cl = cstate->clp;
__be32 ret = nfserr_bad_stateid;
spin_lock(&cl->cl_lock);
@@ -6696,13 +6668,9 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (nfsd4_has_session(cstate))
/* See rfc 5661 18.10.3: given clientid is ignored: */
memcpy(&lock->lk_new_clientid,
- &cstate->session->se_client->cl_clientid,
+ &cstate->clp->cl_clientid,
sizeof(clientid_t));
- status = nfserr_stale_clientid;
- if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
- goto out;
-
/* validate and update open stateid and open seqid */
status = nfs4_preprocess_confirmed_seqid_op(cstate,
lock->lk_new_open_seqid,
@@ -6909,8 +6877,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_inval;
if (!nfsd4_has_session(cstate)) {
- status = lookup_clientid(&lockt->lt_clientid, cstate, nn,
- false);
+ status = set_client(&lockt->lt_clientid, cstate, nn);
if (status)
goto out;
}
@@ -7094,7 +7061,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
clid->cl_boot, clid->cl_id);
- status = lookup_clientid(clid, cstate, nn, false);
+ status = set_client(clid, cstate, nn);
if (status)
return status;
@@ -7230,25 +7197,13 @@ nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
return NULL;
}
-/*
-* Called from OPEN. Look for clientid in reclaim list.
-*/
__be32
-nfs4_check_open_reclaim(clientid_t *clid,
- struct nfsd4_compound_state *cstate,
- struct nfsd_net *nn)
+nfs4_check_open_reclaim(struct nfs4_client *clp)
{
- __be32 status;
-
- /* find clientid in conf_id_hashtbl */
- status = lookup_clientid(clid, cstate, nn, false);
- if (status)
- return nfserr_reclaim_bad;
-
- if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
+ if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
return nfserr_no_grace;
- if (nfsd4_client_record_check(cstate->clp))
+ if (nfsd4_client_record_check(clp))
return nfserr_reclaim_bad;
return nfs_ok;
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 80c90fc231a5..96cdf77925f3 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -121,14 +121,14 @@ nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
struct nfsd_net *nn)
{
if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
- nn->drc_mem_usage -= rp->c_replvec.iov_len;
+ nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
kfree(rp->c_replvec.iov_base);
}
if (rp->c_state != RC_UNUSED) {
rb_erase(&rp->c_node, &b->rb_head);
list_del(&rp->c_lru);
atomic_dec(&nn->num_drc_entries);
- nn->drc_mem_usage -= sizeof(*rp);
+ nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
}
kmem_cache_free(drc_slab, rp);
}
@@ -154,6 +154,16 @@ void nfsd_drc_slab_free(void)
kmem_cache_destroy(drc_slab);
}
+static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
+{
+ return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
+}
+
+static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
+{
+ nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
+}
+
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
unsigned int hashsize;
@@ -165,12 +175,16 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
hashsize = nfsd_hashsize(nn->max_drc_entries);
nn->maskbits = ilog2(hashsize);
+ status = nfsd_reply_cache_stats_init(nn);
+ if (status)
+ goto out_nomem;
+
nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
nn->nfsd_reply_cache_shrinker.seeks = 1;
status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
if (status)
- goto out_nomem;
+ goto out_stats_destroy;
nn->drc_hashtbl = kvzalloc(array_size(hashsize,
sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
@@ -186,6 +200,8 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
return 0;
out_shrinker:
unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
+out_stats_destroy:
+ nfsd_reply_cache_stats_destroy(nn);
out_nomem:
printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
return -ENOMEM;
@@ -196,6 +212,7 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
struct svc_cacherep *rp;
unsigned int i;
+ nfsd_reply_cache_stats_destroy(nn);
unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
for (i = 0; i < nn->drc_hashsize; i++) {
@@ -324,7 +341,7 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key,
{
if (key->c_key.k_xid == rp->c_key.k_xid &&
key->c_key.k_csum != rp->c_key.k_csum) {
- ++nn->payload_misses;
+ nfsd_stats_payload_misses_inc(nn);
trace_nfsd_drc_mismatch(nn, key, rp);
}
@@ -407,7 +424,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
rqstp->rq_cacherep = NULL;
if (type == RC_NOCACHE) {
- nfsdstats.rcnocache++;
+ nfsd_stats_rc_nocache_inc();
goto out;
}
@@ -429,12 +446,12 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
goto found_entry;
}
- nfsdstats.rcmisses++;
+ nfsd_stats_rc_misses_inc();
rqstp->rq_cacherep = rp;
rp->c_state = RC_INPROG;
atomic_inc(&nn->num_drc_entries);
- nn->drc_mem_usage += sizeof(*rp);
+ nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
/* go ahead and prune the cache */
prune_bucket(b, nn);
@@ -446,7 +463,7 @@ out:
found_entry:
/* We found a matching entry which is either in progress or done. */
- nfsdstats.rchits++;
+ nfsd_stats_rc_hits_inc();
rtn = RC_DROPIT;
/* Request being processed */
@@ -548,7 +565,7 @@ void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
return;
}
spin_lock(&b->cache_lock);
- nn->drc_mem_usage += bufsize;
+ nfsd_stats_drc_mem_usage_add(nn, bufsize);
lru_put_end(b, rp);
rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
rp->c_type = cachetype;
@@ -588,13 +605,18 @@ static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
seq_printf(m, "num entries: %u\n",
- atomic_read(&nn->num_drc_entries));
+ atomic_read(&nn->num_drc_entries));
seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
- seq_printf(m, "mem usage: %u\n", nn->drc_mem_usage);
- seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
- seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
- seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
- seq_printf(m, "payload misses: %u\n", nn->payload_misses);
+ seq_printf(m, "mem usage: %lld\n",
+ percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
+ seq_printf(m, "cache hits: %lld\n",
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
+ seq_printf(m, "cache misses: %lld\n",
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
+ seq_printf(m, "not cached: %lld\n",
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
+ seq_printf(m, "payload misses: %lld\n",
+ percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
return 0;
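
[Editorial note] The nfscache.c hunks above move the reply-cache statistics from plain integers to per-CPU counters: hot-path increments stay local to the executing CPU, and only the stats reader pays the cost of summing. The pthread sketch below illustrates the same split between cheap per-slot increments and an occasional summing reader; percpu_counter is a kernel API, so the layout here is an assumption made purely for illustration.

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4
#define PER_THREAD_INCS 100000

/* One counter slot per thread: increments never contend with each other. */
static long slots[NTHREADS];

struct worker_arg { int id; };

static void *worker(void *argp)
{
	struct worker_arg *arg = argp;

	for (int i = 0; i < PER_THREAD_INCS; i++)
		slots[arg->id]++;      /* cheap, cache-local increment */
	return NULL;
}

/* The reader sums all slots, like percpu_counter_sum_positive(). */
static long sum_counter(void)
{
	long total = 0;

	for (int i = 0; i < NTHREADS; i++)
		total += slots[i];
	return total;
}

int main(void)
{
	pthread_t tids[NTHREADS];
	struct worker_arg args[NTHREADS];

	for (int i = 0; i < NTHREADS; i++) {
		args[i].id = i;
		pthread_create(&tids[i], NULL, worker, &args[i]);
	}
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tids[i], NULL);

	printf("total = %ld\n", sum_counter());
	return 0;
}
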
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index f6d5d783f4a4..ef86ed23af82 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -32,6 +32,7 @@
enum {
NFSD_Root = 1,
NFSD_List,
+ NFSD_Export_Stats,
NFSD_Export_features,
NFSD_Fh,
NFSD_FO_UnlockIP,
@@ -1348,6 +1349,8 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
static const struct tree_descr nfsd_files[] = {
[NFSD_List] = {"exports", &exports_nfsd_operations, S_IRUGO},
+ /* Per-export io stats use same ops as exports file */
+ [NFSD_Export_Stats] = {"export_stats", &exports_nfsd_operations, S_IRUGO},
[NFSD_Export_features] = {"export_features",
&export_features_operations, S_IRUGO},
[NFSD_FO_UnlockIP] = {"unlock_ip",
@@ -1522,19 +1525,18 @@ static int __init init_nfsd(void)
int retval;
printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
- retval = register_pernet_subsys(&nfsd_net_ops);
- if (retval < 0)
- return retval;
retval = register_cld_notifier();
if (retval)
- goto out_unregister_pernet;
+ return retval;
retval = nfsd4_init_slabs();
if (retval)
goto out_unregister_notifier;
retval = nfsd4_init_pnfs();
if (retval)
goto out_free_slabs;
- nfsd_stat_init(); /* Statistics */
+ retval = nfsd_stat_init(); /* Statistics */
+ if (retval)
+ goto out_free_pnfs;
retval = nfsd_drc_slab_create();
if (retval)
goto out_free_stat;
@@ -1544,9 +1546,14 @@ static int __init init_nfsd(void)
goto out_free_lockd;
retval = register_filesystem(&nfsd_fs_type);
if (retval)
+ goto out_free_exports;
+ retval = register_pernet_subsys(&nfsd_net_ops);
+ if (retval < 0)
goto out_free_all;
return 0;
out_free_all:
+ unregister_pernet_subsys(&nfsd_net_ops);
+out_free_exports:
remove_proc_entry("fs/nfs/exports", NULL);
remove_proc_entry("fs/nfs", NULL);
out_free_lockd:
@@ -1554,18 +1561,18 @@ out_free_lockd:
nfsd_drc_slab_free();
out_free_stat:
nfsd_stat_shutdown();
+out_free_pnfs:
nfsd4_exit_pnfs();
out_free_slabs:
nfsd4_free_slabs();
out_unregister_notifier:
unregister_cld_notifier();
-out_unregister_pernet:
- unregister_pernet_subsys(&nfsd_net_ops);
return retval;
}
static void __exit exit_nfsd(void)
{
+ unregister_pernet_subsys(&nfsd_net_ops);
nfsd_drc_slab_free();
remove_proc_entry("fs/nfs/exports", NULL);
remove_proc_entry("fs/nfs", NULL);
@@ -1575,7 +1582,6 @@ static void __exit exit_nfsd(void)
nfsd4_exit_pnfs();
unregister_filesystem(&nfsd_fs_type);
unregister_cld_notifier();
- unregister_pernet_subsys(&nfsd_net_ops);
}
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index d63cf8196fed..8bdc37aa2c2e 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -24,8 +24,8 @@
#include <uapi/linux/nfsd/debug.h>
#include "netns.h"
-#include "stats.h"
#include "export.h"
+#include "stats.h"
#undef ifdebug
#ifdef CONFIG_SUNRPC_DEBUG
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 66f2ef67792a..10b44421eace 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -40,7 +40,8 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
/* make sure parents give x permission to user */
int err;
parent = dget_parent(tdentry);
- err = inode_permission(d_inode(parent), MAY_EXEC);
+ err = inode_permission(&init_user_ns,
+ d_inode(parent), MAY_EXEC);
if (err < 0) {
dput(parent);
break;
@@ -349,7 +350,7 @@ out:
__be32
fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
{
- struct svc_export *exp;
+ struct svc_export *exp = NULL;
struct dentry *dentry;
__be32 error;
@@ -422,7 +423,7 @@ skip_pseudoflavor_check:
}
out:
if (error == nfserr_stale)
- nfsdstats.fh_stale++;
+ nfsd_stats_fh_stale_inc(exp);
return error;
}
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index cb20c2cd3469..f58933519f38 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -12,6 +12,7 @@
#include <linux/sunrpc/svc.h>
#include <uapi/linux/nfsd/nfsfh.h>
#include <linux/iversion.h>
+#include <linux/exportfs.h>
static inline __u32 ino_t_to_u32(ino_t ino)
{
@@ -264,7 +265,9 @@ fh_clear_wcc(struct svc_fh *fhp)
static inline u64 nfsd4_change_attribute(struct kstat *stat,
struct inode *inode)
{
- if (IS_I_VERSION(inode)) {
+ if (inode->i_sb->s_export_op->fetch_iversion)
+ return inode->i_sb->s_export_op->fetch_iversion(inode);
+ else if (IS_I_VERSION(inode)) {
u64 chattr;
chattr = stat->ctime.tv_sec;
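
[Editorial note] The nfsfh.h hunk above lets an export supply its own NFSv4 change attribute via fetch_iversion before falling back to IS_I_VERSION or a ctime-derived value. The sketch below shows the ctime-based fallback idea only; the exact packing is an assumption chosen for illustration and is not presented as the nfsd formula.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/*
 * Illustrative only: derive a 64-bit "change attribute" from ctime when
 * the filesystem has no native change counter. The 30-bit shift leaves
 * room for the nanosecond part; treat it as a sketch, not nfsd's code.
 */
static uint64_t change_attr_from_ctime(const struct timespec *ts)
{
	uint64_t chattr = (uint64_t)ts->tv_sec;

	chattr <<= 30;                  /* seconds in the high bits */
	chattr += (uint64_t)ts->tv_nsec;
	return chattr;
}

int main(void)
{
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);
	printf("change attribute: %llu\n",
	       (unsigned long long)change_attr_from_ctime(&now));
	return 0;
}
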
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 9473d048efec..a8d5449dd0e9 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -90,7 +90,7 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
if (delta < 0)
delta = -delta;
if (delta < MAX_TOUCH_TIME_ERROR &&
- setattr_prepare(fhp->fh_dentry, iap) != 0) {
+ setattr_prepare(&init_user_ns, fhp->fh_dentry, iap) != 0) {
/*
* Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
* This will cause notify_change to set these times
@@ -149,14 +149,15 @@ out:
static __be32
nfsd_proc_readlink(struct svc_rqst *rqstp)
{
- struct nfsd_readlinkargs *argp = rqstp->rq_argp;
+ struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd_readlinkres *resp = rqstp->rq_resp;
+ char *buffer = page_address(*(rqstp->rq_next_page++));
dprintk("nfsd: READLINK %s\n", SVCFH_fmt(&argp->fh));
/* Read the symlink. */
resp->len = NFS_MAXPATHLEN;
- resp->status = nfsd_readlink(rqstp, &argp->fh, argp->buffer, &resp->len);
+ resp->status = nfsd_readlink(rqstp, &argp->fh, buffer, &resp->len);
fh_put(&argp->fh);
return rpc_success;
@@ -171,32 +172,36 @@ nfsd_proc_read(struct svc_rqst *rqstp)
{
struct nfsd_readargs *argp = rqstp->rq_argp;
struct nfsd_readres *resp = rqstp->rq_resp;
+ unsigned int len;
u32 eof;
+ int v;
dprintk("nfsd: READ %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->count, argp->offset);
+ argp->count = min_t(u32, argp->count, NFSSVC_MAXBLKSIZE_V2);
+
+ v = 0;
+ len = argp->count;
+ while (len > 0) {
+ struct page *page = *(rqstp->rq_next_page++);
+
+ rqstp->rq_vec[v].iov_base = page_address(page);
+ rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE);
+ len -= rqstp->rq_vec[v].iov_len;
+ v++;
+ }
+
/* Obtain buffer pointer for payload. 19 is 1 word for
* status, 17 words for fattr, and 1 word for the byte count.
*/
-
- if (NFSSVC_MAXBLKSIZE_V2 < argp->count) {
- char buf[RPC_MAX_ADDRBUFLEN];
- printk(KERN_NOTICE
- "oversized read request from %s (%d bytes)\n",
- svc_print_addr(rqstp, buf, sizeof(buf)),
- argp->count);
- argp->count = NFSSVC_MAXBLKSIZE_V2;
- }
svc_reserve_auth(rqstp, (19<<2) + argp->count + 4);
resp->count = argp->count;
- resp->status = nfsd_read(rqstp, fh_copy(&resp->fh, &argp->fh),
- argp->offset,
- rqstp->rq_vec, argp->vlen,
- &resp->count,
- &eof);
+ fh_copy(&resp->fh, &argp->fh);
+ resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
+ rqstp->rq_vec, v, &resp->count, &eof);
if (resp->status == nfs_ok)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
@@ -548,6 +553,20 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp)
return rpc_success;
}
+static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
+ struct nfsd_readdirres *resp,
+ int count)
+{
+ count = min_t(u32, count, PAGE_SIZE);
+
+ /* Convert byte count to number of words (i.e. >> 2),
+ * and reserve room for the NULL ptr & eof flag (-2 words) */
+ resp->buflen = (count >> 2) - 2;
+
+ resp->buffer = page_address(*rqstp->rq_next_page);
+ rqstp->rq_next_page++;
+}
+
/*
* Read a portion of a directory.
*/
@@ -556,31 +575,24 @@ nfsd_proc_readdir(struct svc_rqst *rqstp)
{
struct nfsd_readdirargs *argp = rqstp->rq_argp;
struct nfsd_readdirres *resp = rqstp->rq_resp;
- int count;
loff_t offset;
+ __be32 *buffer;
dprintk("nfsd: READDIR %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->count, argp->cookie);
- /* Shrink to the client read size */
- count = (argp->count >> 2) - 2;
-
- /* Make sure we've room for the NULL ptr & eof flag */
- count -= 2;
- if (count < 0)
- count = 0;
+ nfsd_init_dirlist_pages(rqstp, resp, argp->count);
+ buffer = resp->buffer;
- resp->buffer = argp->buffer;
resp->offset = NULL;
- resp->buflen = count;
resp->common.err = nfs_ok;
/* Read directory and encode entries on the fly */
offset = argp->cookie;
resp->status = nfsd_readdir(rqstp, &argp->fh, &offset,
&resp->common, nfssvc_encode_entry);
- resp->count = resp->buffer - argp->buffer;
+ resp->count = resp->buffer - buffer;
if (resp->offset)
*resp->offset = htonl(offset);
@@ -623,16 +635,18 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
+ .pc_name = "NULL",
},
[NFSPROC_GETATTR] = {
.pc_func = nfsd_proc_getattr,
- .pc_decode = nfssvc_decode_fhandle,
+ .pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_attrstat,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
+ .pc_name = "GETATTR",
},
[NFSPROC_SETATTR] = {
.pc_func = nfsd_proc_setattr,
@@ -643,6 +657,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+AT,
+ .pc_name = "SETATTR",
},
[NFSPROC_ROOT] = {
.pc_func = nfsd_proc_root,
@@ -652,6 +667,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
+ .pc_name = "ROOT",
},
[NFSPROC_LOOKUP] = {
.pc_func = nfsd_proc_lookup,
@@ -662,15 +678,17 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+FH+AT,
+ .pc_name = "LOOKUP",
},
[NFSPROC_READLINK] = {
.pc_func = nfsd_proc_readlink,
- .pc_decode = nfssvc_decode_readlinkargs,
+ .pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_readlinkres,
- .pc_argsize = sizeof(struct nfsd_readlinkargs),
+ .pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_readlinkres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+NFS_MAXPATHLEN/4,
+ .pc_name = "READLINK",
},
[NFSPROC_READ] = {
.pc_func = nfsd_proc_read,
@@ -681,6 +699,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_readres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
+ .pc_name = "READ",
},
[NFSPROC_WRITECACHE] = {
.pc_func = nfsd_proc_writecache,
@@ -690,6 +709,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
+ .pc_name = "WRITECACHE",
},
[NFSPROC_WRITE] = {
.pc_func = nfsd_proc_write,
@@ -700,6 +720,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+AT,
+ .pc_name = "WRITE",
},
[NFSPROC_CREATE] = {
.pc_func = nfsd_proc_create,
@@ -710,6 +731,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+FH+AT,
+ .pc_name = "CREATE",
},
[NFSPROC_REMOVE] = {
.pc_func = nfsd_proc_remove,
@@ -719,6 +741,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
+ .pc_name = "REMOVE",
},
[NFSPROC_RENAME] = {
.pc_func = nfsd_proc_rename,
@@ -728,6 +751,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
+ .pc_name = "RENAME",
},
[NFSPROC_LINK] = {
.pc_func = nfsd_proc_link,
@@ -737,6 +761,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
+ .pc_name = "LINK",
},
[NFSPROC_SYMLINK] = {
.pc_func = nfsd_proc_symlink,
@@ -746,6 +771,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
+ .pc_name = "SYMLINK",
},
[NFSPROC_MKDIR] = {
.pc_func = nfsd_proc_mkdir,
@@ -756,6 +782,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+FH+AT,
+ .pc_name = "MKDIR",
},
[NFSPROC_RMDIR] = {
.pc_func = nfsd_proc_rmdir,
@@ -765,6 +792,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
+ .pc_name = "RMDIR",
},
[NFSPROC_READDIR] = {
.pc_func = nfsd_proc_readdir,
@@ -773,15 +801,17 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_argsize = sizeof(struct nfsd_readdirargs),
.pc_ressize = sizeof(struct nfsd_readdirres),
.pc_cachetype = RC_NOCACHE,
+ .pc_name = "READDIR",
},
[NFSPROC_STATFS] = {
.pc_func = nfsd_proc_statfs,
- .pc_decode = nfssvc_decode_fhandle,
+ .pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_statfsres,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_statfsres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+5,
+ .pc_name = "STATFS",
},
};
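Each procedure entry in the table above now carries a human-readable .pc_name. Purely as a hypothetical illustration of how such a label could be consumed in debugging output (this helper is not part of the patch or of the sunrpc API; it only assumes the fields shown in the table):

#include <linux/sunrpc/svc.h>

/* Hypothetical only: label a dispatched request with its procedure name. */
static void example_log_proc(const struct svc_rqst *rqstp,
			     const struct svc_procedure *proc)
{
	dprintk("nfsd: %s request, xid 0x%08x\n",
		proc->pc_name, be32_to_cpu(rqstp->rq_xid));
}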
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index f9c9f4c63cc7..6de406322106 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -955,37 +955,6 @@ out:
return 0;
}
-/*
- * A write procedure can have a large argument, and a read procedure can
- * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
- * reply that can both be larger than a page. The xdr code has taken
- * advantage of this assumption to be sloppy about bounds checking in
- * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that
- * problem, we enforce these assumptions here:
- */
-static bool nfs_request_too_big(struct svc_rqst *rqstp,
- const struct svc_procedure *proc)
-{
- /*
- * The ACL code has more careful bounds-checking and is not
- * susceptible to this problem:
- */
- if (rqstp->rq_prog != NFS_PROGRAM)
- return false;
- /*
- * Ditto NFSv4 (which can in theory have argument and reply both
- * more than a page):
- */
- if (rqstp->rq_vers >= 4)
- return false;
- /* The reply will be small, we're OK: */
- if (proc->pc_xdrressize > 0 &&
- proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
- return false;
-
- return rqstp->rq_arg.len > PAGE_SIZE;
-}
-
/**
* nfsd_dispatch - Process an NFS or NFSACL Request
* @rqstp: incoming request
@@ -1004,9 +973,6 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
struct kvec *resv = &rqstp->rq_res.head[0];
__be32 *p;
- if (nfs_request_too_big(rqstp, proc))
- goto out_decode_err;
-
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 7aa6e8aca2c1..5d79ef6a0c7f 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -23,24 +23,31 @@ static u32 nfs_ftypes[] = {
/*
- * XDR functions for basic NFS types
+ * Basic NFSv2 data types (RFC 1094 Section 2.3)
*/
-static __be32 *
-decode_fh(__be32 *p, struct svc_fh *fhp)
+
+/**
+ * svcxdr_decode_fhandle - Decode an NFSv2 file handle
+ * @xdr: XDR stream positioned at an encoded NFSv2 FH
+ * @fhp: OUT: filled-in server file handle
+ *
+ * Return values:
+ * %false: The encoded file handle was not valid
+ * %true: @fhp has been initialized
+ */
+bool
+svcxdr_decode_fhandle(struct xdr_stream *xdr, struct svc_fh *fhp)
{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS_FHSIZE);
+ if (!p)
+ return false;
fh_init(fhp, NFS_FHSIZE);
memcpy(&fhp->fh_handle.fh_base, p, NFS_FHSIZE);
fhp->fh_handle.fh_size = NFS_FHSIZE;
- /* FIXME: Look up export pointer here and verify
- * Sun Secure RPC if requested */
- return p + (NFS_FHSIZE >> 2);
-}
-
-/* Helper function for NFSv2 ACL code */
-__be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp)
-{
- return decode_fh(p, fhp);
+ return true;
}
static __be32 *
@@ -50,66 +57,95 @@ encode_fh(__be32 *p, struct svc_fh *fhp)
 	return p + (NFS_FHSIZE >> 2);
}
-/*
- * Decode a file name and make sure that the path contains
- * no slashes or null bytes.
- */
-static __be32 *
-decode_filename(__be32 *p, char **namp, unsigned int *lenp)
+static bool
+svcxdr_decode_filename(struct xdr_stream *xdr, char **name, unsigned int *len)
{
- char *name;
- unsigned int i;
-
- if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXNAMLEN)) != NULL) {
- for (i = 0, name = *namp; i < *lenp; i++, name++) {
- if (*name == '\0' || *name == '/')
- return NULL;
- }
- }
+ u32 size, i;
+ __be32 *p;
+ char *c;
+
+ if (xdr_stream_decode_u32(xdr, &size) < 0)
+ return false;
+ if (size == 0 || size > NFS_MAXNAMLEN)
+ return false;
+ p = xdr_inline_decode(xdr, size);
+ if (!p)
+ return false;
- return p;
+ *len = size;
+ *name = (char *)p;
+ for (i = 0, c = *name; i < size; i++, c++)
+ if (*c == '\0' || *c == '/')
+ return false;
+
+ return true;
}
-static __be32 *
-decode_sattr(__be32 *p, struct iattr *iap, struct user_namespace *userns)
+static bool
+svcxdr_decode_diropargs(struct xdr_stream *xdr, struct svc_fh *fhp,
+ char **name, unsigned int *len)
{
- u32 tmp, tmp1;
+ return svcxdr_decode_fhandle(xdr, fhp) &&
+ svcxdr_decode_filename(xdr, name, len);
+}
+
+static bool
+svcxdr_decode_sattr(struct svc_rqst *rqstp, struct xdr_stream *xdr,
+ struct iattr *iap)
+{
+ u32 tmp1, tmp2;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, XDR_UNIT * 8);
+ if (!p)
+ return false;
iap->ia_valid = 0;
- /* Sun client bug compatibility check: some sun clients seem to
- * put 0xffff in the mode field when they mean 0xffffffff.
- * Quoting the 4.4BSD nfs server code: Nah nah nah nah na nah.
+ /*
+ * Some Sun clients put 0xffff in the mode field when they
+ * mean 0xffffffff.
*/
- if ((tmp = ntohl(*p++)) != (u32)-1 && tmp != 0xffff) {
+ tmp1 = be32_to_cpup(p++);
+ if (tmp1 != (u32)-1 && tmp1 != 0xffff) {
iap->ia_valid |= ATTR_MODE;
- iap->ia_mode = tmp;
+ iap->ia_mode = tmp1;
}
- if ((tmp = ntohl(*p++)) != (u32)-1) {
- iap->ia_uid = make_kuid(userns, tmp);
+
+ tmp1 = be32_to_cpup(p++);
+ if (tmp1 != (u32)-1) {
+ iap->ia_uid = make_kuid(nfsd_user_namespace(rqstp), tmp1);
if (uid_valid(iap->ia_uid))
iap->ia_valid |= ATTR_UID;
}
- if ((tmp = ntohl(*p++)) != (u32)-1) {
- iap->ia_gid = make_kgid(userns, tmp);
+
+ tmp1 = be32_to_cpup(p++);
+ if (tmp1 != (u32)-1) {
+ iap->ia_gid = make_kgid(nfsd_user_namespace(rqstp), tmp1);
if (gid_valid(iap->ia_gid))
iap->ia_valid |= ATTR_GID;
}
- if ((tmp = ntohl(*p++)) != (u32)-1) {
+
+ tmp1 = be32_to_cpup(p++);
+ if (tmp1 != (u32)-1) {
iap->ia_valid |= ATTR_SIZE;
- iap->ia_size = tmp;
+ iap->ia_size = tmp1;
}
- tmp = ntohl(*p++); tmp1 = ntohl(*p++);
- if (tmp != (u32)-1 && tmp1 != (u32)-1) {
+
+ tmp1 = be32_to_cpup(p++);
+ tmp2 = be32_to_cpup(p++);
+ if (tmp1 != (u32)-1 && tmp2 != (u32)-1) {
iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
- iap->ia_atime.tv_sec = tmp;
- iap->ia_atime.tv_nsec = tmp1 * 1000;
+ iap->ia_atime.tv_sec = tmp1;
+ iap->ia_atime.tv_nsec = tmp2 * NSEC_PER_USEC;
}
- tmp = ntohl(*p++); tmp1 = ntohl(*p++);
- if (tmp != (u32)-1 && tmp1 != (u32)-1) {
+
+ tmp1 = be32_to_cpup(p++);
+ tmp2 = be32_to_cpup(p++);
+ if (tmp1 != (u32)-1 && tmp2 != (u32)-1) {
iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET;
- iap->ia_mtime.tv_sec = tmp;
- iap->ia_mtime.tv_nsec = tmp1 * 1000;
+ iap->ia_mtime.tv_sec = tmp1;
+ iap->ia_mtime.tv_nsec = tmp2 * NSEC_PER_USEC;
/*
* Passing the invalid value useconds=1000000 for mtime
* is a Sun convention for "set both mtime and atime to
@@ -119,10 +155,11 @@ decode_sattr(__be32 *p, struct iattr *iap, struct user_namespace *userns)
* sattr in section 6.1 of "NFS Illustrated" by
* Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5
*/
- if (tmp1 == 1000000)
+ if (tmp2 == 1000000)
iap->ia_valid &= ~(ATTR_ATIME_SET|ATTR_MTIME_SET);
}
- return p;
+
+ return true;
}
static __be32 *
@@ -194,225 +231,158 @@ __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *f
*/
int
-nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p)
+nfssvc_decode_fhandleargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_fhandle *args = rqstp->rq_argp;
- p = decode_fh(p, &args->fh);
- if (!p)
- return 0;
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_fhandle(xdr, &args->fh);
}
int
nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_sattrargs *args = rqstp->rq_argp;
- p = decode_fh(p, &args->fh);
- if (!p)
- return 0;
- p = decode_sattr(p, &args->attrs, nfsd_user_namespace(rqstp));
-
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_fhandle(xdr, &args->fh) &&
+ svcxdr_decode_sattr(rqstp, xdr, &args->attrs);
}
int
nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_diropargs *args = rqstp->rq_argp;
- if (!(p = decode_fh(p, &args->fh))
- || !(p = decode_filename(p, &args->name, &args->len)))
- return 0;
-
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_diropargs(xdr, &args->fh, &args->name, &args->len);
}
int
nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_readargs *args = rqstp->rq_argp;
- unsigned int len;
- int v;
- p = decode_fh(p, &args->fh);
- if (!p)
- return 0;
-
- args->offset = ntohl(*p++);
- len = args->count = ntohl(*p++);
- p++; /* totalcount - unused */
+ u32 totalcount;
- len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2);
+ if (!svcxdr_decode_fhandle(xdr, &args->fh))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->offset) < 0)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->count) < 0)
+ return 0;
+ /* totalcount is ignored */
+ if (xdr_stream_decode_u32(xdr, &totalcount) < 0)
+ return 0;
- /* set up somewhere to store response.
- * We take pages, put them on reslist and include in iovec
- */
- v=0;
- while (len > 0) {
- struct page *p = *(rqstp->rq_next_page++);
-
- rqstp->rq_vec[v].iov_base = page_address(p);
- rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE);
- len -= rqstp->rq_vec[v].iov_len;
- v++;
- }
- args->vlen = v;
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
int
nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_writeargs *args = rqstp->rq_argp;
- unsigned int len, hdr, dlen;
struct kvec *head = rqstp->rq_arg.head;
+ struct kvec *tail = rqstp->rq_arg.tail;
+ u32 beginoffset, totalcount;
+ size_t remaining;
- p = decode_fh(p, &args->fh);
- if (!p)
+ if (!svcxdr_decode_fhandle(xdr, &args->fh))
return 0;
-
- p++; /* beginoffset */
- args->offset = ntohl(*p++); /* offset */
- p++; /* totalcount */
- len = args->len = ntohl(*p++);
- /*
- * The protocol specifies a maximum of 8192 bytes.
- */
- if (len > NFSSVC_MAXBLKSIZE_V2)
+ /* beginoffset is ignored */
+ if (xdr_stream_decode_u32(xdr, &beginoffset) < 0)
return 0;
-
- /*
- * Check to make sure that we got the right number of
- * bytes.
- */
- hdr = (void*)p - head->iov_base;
- if (hdr > head->iov_len)
+ if (xdr_stream_decode_u32(xdr, &args->offset) < 0)
+ return 0;
+ /* totalcount is ignored */
+ if (xdr_stream_decode_u32(xdr, &totalcount) < 0)
return 0;
- dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
- /*
- * Round the length of the data which was specified up to
- * the next multiple of XDR units and then compare that
- * against the length which was actually received.
- * Note that when RPCSEC/GSS (for example) is used, the
- * data buffer can be padded so dlen might be larger
- * than required. It must never be smaller.
- */
- if (dlen < XDR_QUADLEN(len)*4)
+ /* opaque data */
+ if (xdr_stream_decode_u32(xdr, &args->len) < 0)
return 0;
+ if (args->len > NFSSVC_MAXBLKSIZE_V2)
+ return 0;
+ remaining = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len;
+ remaining -= xdr_stream_pos(xdr);
+ if (remaining < xdr_align_size(args->len))
+ return 0;
+ args->first.iov_base = xdr->p;
+ args->first.iov_len = head->iov_len - xdr_stream_pos(xdr);
- args->first.iov_base = (void *)p;
- args->first.iov_len = head->iov_len - hdr;
return 1;
}
int
nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_createargs *args = rqstp->rq_argp;
- if ( !(p = decode_fh(p, &args->fh))
- || !(p = decode_filename(p, &args->name, &args->len)))
- return 0;
- p = decode_sattr(p, &args->attrs, nfsd_user_namespace(rqstp));
-
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_diropargs(xdr, &args->fh,
+ &args->name, &args->len) &&
+ svcxdr_decode_sattr(rqstp, xdr, &args->attrs);
}
int
nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_renameargs *args = rqstp->rq_argp;
- if (!(p = decode_fh(p, &args->ffh))
- || !(p = decode_filename(p, &args->fname, &args->flen))
- || !(p = decode_fh(p, &args->tfh))
- || !(p = decode_filename(p, &args->tname, &args->tlen)))
- return 0;
-
- return xdr_argsize_check(rqstp, p);
-}
-
-int
-nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p)
-{
- struct nfsd_readlinkargs *args = rqstp->rq_argp;
-
- p = decode_fh(p, &args->fh);
- if (!p)
- return 0;
- args->buffer = page_address(*(rqstp->rq_next_page++));
-
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_diropargs(xdr, &args->ffh,
+ &args->fname, &args->flen) &&
+ svcxdr_decode_diropargs(xdr, &args->tfh,
+ &args->tname, &args->tlen);
}
int
nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_linkargs *args = rqstp->rq_argp;
- if (!(p = decode_fh(p, &args->ffh))
- || !(p = decode_fh(p, &args->tfh))
- || !(p = decode_filename(p, &args->tname, &args->tlen)))
- return 0;
-
- return xdr_argsize_check(rqstp, p);
+ return svcxdr_decode_fhandle(xdr, &args->ffh) &&
+ svcxdr_decode_diropargs(xdr, &args->tfh,
+ &args->tname, &args->tlen);
}
int
nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_symlinkargs *args = rqstp->rq_argp;
- char *base = (char *)p;
- size_t xdrlen;
+ struct kvec *head = rqstp->rq_arg.head;
- if ( !(p = decode_fh(p, &args->ffh))
- || !(p = decode_filename(p, &args->fname, &args->flen)))
+ if (!svcxdr_decode_diropargs(xdr, &args->ffh, &args->fname, &args->flen))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->tlen) < 0)
return 0;
-
- args->tlen = ntohl(*p++);
if (args->tlen == 0)
return 0;
- args->first.iov_base = p;
- args->first.iov_len = rqstp->rq_arg.head[0].iov_len;
- args->first.iov_len -= (char *)p - base;
-
- /* This request is never larger than a page. Therefore,
- * transport will deliver either:
- * 1. pathname in the pagelist -> sattr is in the tail.
- * 2. everything in the head buffer -> sattr is in the head.
- */
- if (rqstp->rq_arg.page_len) {
- if (args->tlen != rqstp->rq_arg.page_len)
- return 0;
- p = rqstp->rq_arg.tail[0].iov_base;
- } else {
- xdrlen = XDR_QUADLEN(args->tlen);
- if (xdrlen > args->first.iov_len - (8 * sizeof(__be32)))
- return 0;
- p += xdrlen;
- }
- decode_sattr(p, &args->attrs, nfsd_user_namespace(rqstp));
-
- return 1;
+ args->first.iov_len = head->iov_len - xdr_stream_pos(xdr);
+ args->first.iov_base = xdr_inline_decode(xdr, args->tlen);
+ if (!args->first.iov_base)
+ return 0;
+ return svcxdr_decode_sattr(rqstp, xdr, &args->attrs);
}
int
nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
{
+ struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd_readdirargs *args = rqstp->rq_argp;
- p = decode_fh(p, &args->fh);
- if (!p)
+ if (!svcxdr_decode_fhandle(xdr, &args->fh))
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->cookie) < 0)
+ return 0;
+ if (xdr_stream_decode_u32(xdr, &args->count) < 0)
return 0;
- args->cookie = ntohl(*p++);
- args->count = ntohl(*p++);
- args->count = min_t(u32, args->count, PAGE_SIZE);
- args->buffer = page_address(*(rqstp->rq_next_page++));
- return xdr_argsize_check(rqstp, p);
+ return 1;
}
/*
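The conversions in this file replace hand-rolled __be32 pointer arithmetic with the xdr_stream helpers, which bound-check every access against the received buffer. A minimal sketch of the underlying pattern, assuming a stream already initialized over the request's argument buffer (the function and field names here are illustrative only):

#include <linux/sunrpc/xdr.h>

/* Illustrative only: a u32 length followed by that many opaque bytes. */
static bool example_decode_blob(struct xdr_stream *xdr, u32 *len, void **data)
{
	__be32 *p;

	if (xdr_stream_decode_u32(xdr, len) < 0)	/* buffer too short */
		return false;
	p = xdr_inline_decode(xdr, *len);		/* NULL if it would overrun */
	if (!p)
		return false;
	*data = p;
	return true;
}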
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 9eae11a9d21c..73deea353169 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -649,8 +649,7 @@ void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *)
extern void nfs4_release_reclaim(struct nfsd_net *);
extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(struct xdr_netobj name,
struct nfsd_net *nn);
-extern __be32 nfs4_check_open_reclaim(clientid_t *clid,
- struct nfsd4_compound_state *cstate, struct nfsd_net *nn);
+extern __be32 nfs4_check_open_reclaim(struct nfs4_client *);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index b1bc582b0493..1d3b881e7382 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -7,16 +7,14 @@
* Format:
* rc <hits> <misses> <nocache>
 *			Statistics for the reply cache
- * fh <stale> <total-lookups> <anonlookups> <dir-not-in-dcache> <nondir-not-in-dcache>
+ * fh <stale> <deprecated filehandle cache stats>
* statistics for filehandle lookup
* io <bytes-read> <bytes-written>
* statistics for IO throughput
- * th <threads> <fullcnt> <10%-20%> <20%-30%> ... <90%-100%> <100%>
- * time (seconds) when nfsd thread usage above thresholds
- * and number of times that all threads were in use
- * ra cache-size <10% <20% <30% ... <100% not-found
- * number of times that read-ahead entry was found that deep in
- * the cache.
+ * th <threads> <deprecated thread usage histogram stats>
+ * number of threads
+ * ra <deprecated ra-cache stats>
+ *
* plus generic RPC stats (see net/sunrpc/stats.c)
*
* Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
@@ -38,31 +36,24 @@ static int nfsd_proc_show(struct seq_file *seq, void *v)
{
int i;
- seq_printf(seq, "rc %u %u %u\nfh %u %u %u %u %u\nio %u %u\n",
- nfsdstats.rchits,
- nfsdstats.rcmisses,
- nfsdstats.rcnocache,
- nfsdstats.fh_stale,
- nfsdstats.fh_lookup,
- nfsdstats.fh_anon,
- nfsdstats.fh_nocache_dir,
- nfsdstats.fh_nocache_nondir,
- nfsdstats.io_read,
- nfsdstats.io_write);
+ seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n",
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]),
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]),
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]),
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_FH_STALE]),
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_READ]),
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE]));
+
/* thread usage: */
- seq_printf(seq, "th %u %u", nfsdstats.th_cnt, nfsdstats.th_fullcnt);
- for (i=0; i<10; i++) {
- unsigned int jifs = nfsdstats.th_usage[i];
- unsigned int sec = jifs / HZ, msec = (jifs % HZ)*1000/HZ;
- seq_printf(seq, " %u.%03u", sec, msec);
- }
+ seq_printf(seq, "th %u 0", nfsdstats.th_cnt);
+
+ /* deprecated thread usage histogram stats */
+ for (i = 0; i < 10; i++)
+ seq_puts(seq, " 0.000");
+
+ /* deprecated ra-cache stats */
+ seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n");
- /* newline and ra-cache */
- seq_printf(seq, "\nra %u", nfsdstats.ra_size);
- for (i=0; i<11; i++)
- seq_printf(seq, " %u", nfsdstats.ra_depth[i]);
- seq_putc(seq, '\n');
-
/* show my rpc info */
svc_seq_show(seq, &nfsd_svcstats);
@@ -70,8 +61,10 @@ static int nfsd_proc_show(struct seq_file *seq, void *v)
/* Show count for individual nfsv4 operations */
/* Writing operation numbers 0 1 2 also for maintaining uniformity */
seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1);
- for (i = 0; i <= LAST_NFS4_OP; i++)
- seq_printf(seq, " %u", nfsdstats.nfs4_opcount[i]);
+ for (i = 0; i <= LAST_NFS4_OP; i++) {
+ seq_printf(seq, " %lld",
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
+ }
seq_putc(seq, '\n');
#endif
@@ -91,14 +84,63 @@ static const struct proc_ops nfsd_proc_ops = {
.proc_release = single_release,
};
-void
-nfsd_stat_init(void)
+int nfsd_percpu_counters_init(struct percpu_counter counters[], int num)
+{
+ int i, err = 0;
+
+ for (i = 0; !err && i < num; i++)
+ err = percpu_counter_init(&counters[i], 0, GFP_KERNEL);
+
+ if (!err)
+ return 0;
+
+ for (; i > 0; i--)
+ percpu_counter_destroy(&counters[i-1]);
+
+ return err;
+}
+
+void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ percpu_counter_set(&counters[i], 0);
+}
+
+void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num)
{
+ int i;
+
+ for (i = 0; i < num; i++)
+ percpu_counter_destroy(&counters[i]);
+}
+
+static int nfsd_stat_counters_init(void)
+{
+ return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
+}
+
+static void nfsd_stat_counters_destroy(void)
+{
+ nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
+}
+
+int nfsd_stat_init(void)
+{
+ int err;
+
+ err = nfsd_stat_counters_init();
+ if (err)
+ return err;
+
svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
+
+ return 0;
}
-void
-nfsd_stat_shutdown(void)
+void nfsd_stat_shutdown(void)
{
+ nfsd_stat_counters_destroy();
svc_proc_unregister(&init_net, "nfsd");
}
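The helpers above follow the usual percpu_counter life cycle: allocate per-cpu state for each counter, unwind on partial failure, and fold the per-cpu deltas when reporting. A minimal sketch of how the new counters are exercised, assuming fs/nfsd/stats.h from this series is in scope (the pr_info() reporting line is illustrative, not something the patch adds):

#include <linux/percpu_counter.h>

/* Illustrative only: initialize the array, bump a counter, read it back. */
static int example_counters(void)
{
	int err;

	err = nfsd_percpu_counters_init(nfsdstats.counter,
					NFSD_STATS_COUNTERS_NUM);
	if (err)
		return err;

	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_HITS]);

	/* Fold the per-cpu deltas; negative transients are clamped to zero. */
	pr_info("rc hits: %lld\n",
		percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));

	nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
	return 0;
}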
diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
index b23fdac69820..51ecda852e23 100644
--- a/fs/nfsd/stats.h
+++ b/fs/nfsd/stats.h
@@ -8,37 +8,91 @@
#define _NFSD_STATS_H
#include <uapi/linux/nfsd/stats.h>
+#include <linux/percpu_counter.h>
-struct nfsd_stats {
- unsigned int rchits; /* repcache hits */
- unsigned int rcmisses; /* repcache hits */
- unsigned int rcnocache; /* uncached reqs */
- unsigned int fh_stale; /* FH stale error */
- unsigned int fh_lookup; /* dentry cached */
- unsigned int fh_anon; /* anon file dentry returned */
- unsigned int fh_nocache_dir; /* filehandle not found in dcache */
- unsigned int fh_nocache_nondir; /* filehandle not found in dcache */
- unsigned int io_read; /* bytes returned to read requests */
- unsigned int io_write; /* bytes passed in write requests */
- unsigned int th_cnt; /* number of available threads */
- unsigned int th_usage[10]; /* number of ticks during which n perdeciles
- * of available threads were in use */
- unsigned int th_fullcnt; /* number of times last free thread was used */
- unsigned int ra_size; /* size of ra cache */
- unsigned int ra_depth[11]; /* number of times ra entry was found that deep
- * in the cache (10percentiles). [10] = not found */
+enum {
+ NFSD_STATS_RC_HITS, /* repcache hits */
+ NFSD_STATS_RC_MISSES, /* repcache misses */
+ NFSD_STATS_RC_NOCACHE, /* uncached reqs */
+ NFSD_STATS_FH_STALE, /* FH stale error */
+ NFSD_STATS_IO_READ, /* bytes returned to read requests */
+ NFSD_STATS_IO_WRITE, /* bytes passed in write requests */
#ifdef CONFIG_NFSD_V4
- unsigned int nfs4_opcount[LAST_NFS4_OP + 1]; /* count of individual nfsv4 operations */
+ NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */
+ NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
+#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op))
#endif
+ NFSD_STATS_COUNTERS_NUM
+};
+
+struct nfsd_stats {
+ struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM];
+ /* Protected by nfsd_mutex */
+ unsigned int th_cnt; /* number of available threads */
};
extern struct nfsd_stats nfsdstats;
+
extern struct svc_stat nfsd_svcstats;
-void nfsd_stat_init(void);
-void nfsd_stat_shutdown(void);
+int nfsd_percpu_counters_init(struct percpu_counter counters[], int num);
+void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num);
+void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num);
+int nfsd_stat_init(void);
+void nfsd_stat_shutdown(void);
+
+static inline void nfsd_stats_rc_hits_inc(void)
+{
+ percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_HITS]);
+}
+
+static inline void nfsd_stats_rc_misses_inc(void)
+{
+ percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_MISSES]);
+}
+
+static inline void nfsd_stats_rc_nocache_inc(void)
+{
+ percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]);
+}
+
+static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp)
+{
+ percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
+ if (exp)
+ percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]);
+}
+
+static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount)
+{
+ percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount);
+ if (exp)
+ percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_READ], amount);
+}
+
+static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount)
+{
+ percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount);
+ if (exp)
+ percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_WRITE], amount);
+}
+
+static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn)
+{
+ percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]);
+}
+
+static inline void nfsd_stats_drc_mem_usage_add(struct nfsd_net *nn, s64 amount)
+{
+ percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
+}
+
+static inline void nfsd_stats_drc_mem_usage_sub(struct nfsd_net *nn, s64 amount)
+{
+ percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
+}
#endif /* _NFSD_STATS_H */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 04937e51de56..fd6be35a1642 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -448,7 +448,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
.ia_size = iap->ia_size,
};
- host_err = notify_change(dentry, &size_attr, NULL);
+ host_err = notify_change(&init_user_ns, dentry, &size_attr, NULL);
if (host_err)
goto out_unlock;
iap->ia_valid &= ~ATTR_SIZE;
@@ -463,7 +463,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
}
iap->ia_valid |= ATTR_CTIME;
- host_err = notify_change(dentry, iap, NULL);
+ host_err = notify_change(&init_user_ns, dentry, iap, NULL);
out_unlock:
fh_unlock(fhp);
@@ -499,7 +499,8 @@ int nfsd4_is_junction(struct dentry *dentry)
return 0;
if (!(inode->i_mode & S_ISVTX))
return 0;
- if (vfs_getxattr(dentry, NFSD_JUNCTION_XATTR_NAME, NULL, 0) <= 0)
+ if (vfs_getxattr(&init_user_ns, dentry, NFSD_JUNCTION_XATTR_NAME,
+ NULL, 0) <= 0)
return 0;
return 1;
}
@@ -889,7 +890,7 @@ static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned long *count, u32 *eof, ssize_t host_err)
{
if (host_err >= 0) {
- nfsdstats.io_read += host_err;
+ nfsd_stats_io_read_add(fhp->fh_export, host_err);
*eof = nfsd_eof_on_read(file, offset, host_err, *count);
*count = host_err;
fsnotify_access(file);
@@ -1040,7 +1041,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
goto out_nfserr;
}
*cnt = host_err;
- nfsdstats.io_write += *cnt;
+ nfsd_stats_io_write_add(exp, *cnt);
fsnotify_modify(file);
if (stable && use_wgather) {
@@ -1254,12 +1255,12 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
host_err = 0;
switch (type) {
case S_IFREG:
- host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
+ host_err = vfs_create(&init_user_ns, dirp, dchild, iap->ia_mode, true);
if (!host_err)
nfsd_check_ignore_resizing(iap);
break;
case S_IFDIR:
- host_err = vfs_mkdir(dirp, dchild, iap->ia_mode);
+ host_err = vfs_mkdir(&init_user_ns, dirp, dchild, iap->ia_mode);
if (!host_err && unlikely(d_unhashed(dchild))) {
struct dentry *d;
d = lookup_one_len(dchild->d_name.name,
@@ -1287,7 +1288,8 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
- host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
+ host_err = vfs_mknod(&init_user_ns, dirp, dchild,
+ iap->ia_mode, rdev);
break;
default:
printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
@@ -1485,7 +1487,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (!IS_POSIXACL(dirp))
iap->ia_mode &= ~current_umask();
- host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
+ host_err = vfs_create(&init_user_ns, dirp, dchild, iap->ia_mode, true);
if (host_err < 0) {
fh_drop_write(fhp);
goto out_nfserr;
@@ -1609,7 +1611,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (IS_ERR(dnew))
goto out_nfserr;
- host_err = vfs_symlink(d_inode(dentry), dnew, path);
+ host_err = vfs_symlink(&init_user_ns, d_inode(dentry), dnew, path);
err = nfserrno(host_err);
if (!err)
err = nfserrno(commit_metadata(fhp));
@@ -1677,7 +1679,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
err = nfserr_noent;
if (d_really_is_negative(dold))
goto out_dput;
- host_err = vfs_link(dold, dirp, dnew, NULL);
+ host_err = vfs_link(dold, &init_user_ns, dirp, dnew, NULL);
if (!host_err) {
err = nfserrno(commit_metadata(ffhp));
if (!err)
@@ -1797,7 +1799,15 @@ retry:
close_cached = true;
goto out_dput_old;
} else {
- host_err = vfs_rename(fdir, odentry, tdir, ndentry, NULL, 0);
+ struct renamedata rd = {
+ .old_mnt_userns = &init_user_ns,
+ .old_dir = fdir,
+ .old_dentry = odentry,
+ .new_mnt_userns = &init_user_ns,
+ .new_dir = tdir,
+ .new_dentry = ndentry,
+ };
+ host_err = vfs_rename(&rd);
if (!host_err) {
host_err = commit_metadata(tfhp);
if (!host_err)
@@ -1884,9 +1894,9 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
if (type != S_IFDIR) {
if (rdentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK)
nfsd_close_cached_files(rdentry);
- host_err = vfs_unlink(dirp, rdentry, NULL);
+ host_err = vfs_unlink(&init_user_ns, dirp, rdentry, NULL);
} else {
- host_err = vfs_rmdir(dirp, rdentry);
+ host_err = vfs_rmdir(&init_user_ns, dirp, rdentry);
}
if (!host_err)
@@ -2149,7 +2159,7 @@ nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
inode_lock_shared(inode);
- len = vfs_getxattr(dentry, name, NULL, 0);
+ len = vfs_getxattr(&init_user_ns, dentry, name, NULL, 0);
/*
* Zero-length attribute, just return.
@@ -2176,7 +2186,7 @@ nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
goto out;
}
- len = vfs_getxattr(dentry, name, buf, len);
+ len = vfs_getxattr(&init_user_ns, dentry, name, buf, len);
if (len <= 0) {
kvfree(buf);
buf = NULL;
@@ -2283,7 +2293,8 @@ nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name)
fh_lock(fhp);
- ret = __vfs_removexattr_locked(fhp->fh_dentry, name, NULL);
+ ret = __vfs_removexattr_locked(&init_user_ns, fhp->fh_dentry,
+ name, NULL);
fh_unlock(fhp);
fh_drop_write(fhp);
@@ -2307,8 +2318,8 @@ nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
return nfserrno(ret);
fh_lock(fhp);
- ret = __vfs_setxattr_locked(fhp->fh_dentry, name, buf, len, flags,
- NULL);
+ ret = __vfs_setxattr_locked(&init_user_ns, fhp->fh_dentry, name, buf,
+ len, flags, NULL);
fh_unlock(fhp);
fh_drop_write(fhp);
@@ -2391,13 +2402,14 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
return 0;
/* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */
- err = inode_permission(inode, acc & (MAY_READ|MAY_WRITE|MAY_EXEC));
+ err = inode_permission(&init_user_ns, inode,
+ acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
/* Allow read access to binaries even when mode 111 */
if (err == -EACCES && S_ISREG(inode->i_mode) &&
(acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
- err = inode_permission(inode, MAY_EXEC);
+ err = inode_permission(&init_user_ns, inode, MAY_EXEC);
return err? nfserrno(err) : 0;
}
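vfs_rename() now takes a single struct renamedata rather than a long argument list, and nfsd passes &init_user_ns on both sides because it does not operate on idmapped mounts. A minimal sketch of the new calling convention (error handling is illustrative; members not shown are left zero):

#include <linux/fs.h>
#include <linux/namei.h>

/* Illustrative only: rename old_dentry in old_dir to new_dentry in new_dir. */
static int example_rename(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	struct renamedata rd = {
		.old_mnt_userns	= &init_user_ns,
		.old_dir	= old_dir,
		.old_dentry	= old_dentry,
		.new_mnt_userns	= &init_user_ns,
		.new_dir	= new_dir,
		.new_dentry	= new_dentry,
	};

	return vfs_rename(&rd);
}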
diff --git a/fs/nfsd/xdr.h b/fs/nfsd/xdr.h
index ad77387734cc..3018b52b6d5e 100644
--- a/fs/nfsd/xdr.h
+++ b/fs/nfsd/xdr.h
@@ -27,7 +27,6 @@ struct nfsd_readargs {
struct svc_fh fh;
__u32 offset;
__u32 count;
- int vlen;
};
struct nfsd_writeargs {
@@ -53,11 +52,6 @@ struct nfsd_renameargs {
unsigned int tlen;
};
-struct nfsd_readlinkargs {
- struct svc_fh fh;
- char * buffer;
-};
-
struct nfsd_linkargs {
struct svc_fh ffh;
struct svc_fh tfh;
@@ -79,7 +73,6 @@ struct nfsd_readdirargs {
struct svc_fh fh;
__u32 cookie;
__u32 count;
- __be32 * buffer;
};
struct nfsd_stat {
@@ -144,14 +137,13 @@ union nfsd_xdrstore {
#define NFS2_SVC_XDRSIZE sizeof(union nfsd_xdrstore)
-int nfssvc_decode_fhandle(struct svc_rqst *, __be32 *);
+int nfssvc_decode_fhandleargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_sattrargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_diropargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_readargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_writeargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_createargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_renameargs(struct svc_rqst *, __be32 *);
-int nfssvc_decode_readlinkargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_linkargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_symlinkargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_readdirargs(struct svc_rqst *, __be32 *);
@@ -172,6 +164,6 @@ void nfssvc_release_readres(struct svc_rqst *rqstp);
/* Helper functions for NFSv2 ACL code */
__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat);
-__be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp);
+bool svcxdr_decode_fhandle(struct xdr_stream *xdr, struct svc_fh *fhp);
#endif /* LINUX_NFSD_H */
diff --git a/fs/nfsd/xdr3.h b/fs/nfsd/xdr3.h
index 456fcd7a1038..3e1578953f54 100644
--- a/fs/nfsd/xdr3.h
+++ b/fs/nfsd/xdr3.h
@@ -25,14 +25,13 @@ struct nfsd3_diropargs {
struct nfsd3_accessargs {
struct svc_fh fh;
- unsigned int access;
+ __u32 access;
};
struct nfsd3_readargs {
struct svc_fh fh;
__u64 offset;
__u32 count;
- int vlen;
};
struct nfsd3_writeargs {
@@ -71,11 +70,6 @@ struct nfsd3_renameargs {
unsigned int tlen;
};
-struct nfsd3_readlinkargs {
- struct svc_fh fh;
- char * buffer;
-};
-
struct nfsd3_linkargs {
struct svc_fh ffh;
struct svc_fh tfh;
@@ -96,10 +90,8 @@ struct nfsd3_symlinkargs {
struct nfsd3_readdirargs {
struct svc_fh fh;
__u64 cookie;
- __u32 dircount;
__u32 count;
__be32 * verf;
- __be32 * buffer;
};
struct nfsd3_commitargs {
@@ -110,13 +102,13 @@ struct nfsd3_commitargs {
struct nfsd3_getaclargs {
struct svc_fh fh;
- int mask;
+ __u32 mask;
};
struct posix_acl;
struct nfsd3_setaclargs {
struct svc_fh fh;
- int mask;
+ __u32 mask;
struct posix_acl *acl_access;
struct posix_acl *acl_default;
};
@@ -273,7 +265,7 @@ union nfsd3_xdrstore {
#define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore)
-int nfs3svc_decode_fhandle(struct svc_rqst *, __be32 *);
+int nfs3svc_decode_fhandleargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_sattrargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_diropargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_accessargs(struct svc_rqst *, __be32 *);
@@ -283,7 +275,6 @@ int nfs3svc_decode_createargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_mkdirargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_mknodargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_renameargs(struct svc_rqst *, __be32 *);
-int nfs3svc_decode_readlinkargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_linkargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_symlinkargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_readdirargs(struct svc_rqst *, __be32 *);
@@ -316,7 +307,6 @@ int nfs3svc_encode_entry_plus(void *, const char *name,
/* Helper functions for NFSv3 ACL code */
__be32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p,
struct svc_fh *fhp);
-__be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp);
-
+bool svcxdr_decode_nfs_fh3(struct xdr_stream *xdr, struct svc_fh *fhp);
#endif /* _LINUX_NFSD_XDR3_H */
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 745d371d6fea..2e8eb263cf0f 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -348,7 +348,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
/* reference count of i_bh inherits from nilfs_mdt_read_block() */
atomic64_inc(&root->inodes_count);
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
inode->i_ino = ino;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
@@ -805,14 +805,15 @@ void nilfs_evict_inode(struct inode *inode)
*/
}
-int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
+int nilfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr)
{
struct nilfs_transaction_info ti;
struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
int err;
- err = setattr_prepare(dentry, iattr);
+ err = setattr_prepare(&init_user_ns, dentry, iattr);
if (err)
return err;
@@ -827,7 +828,7 @@ int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
nilfs_truncate(inode);
}
- setattr_copy(inode, iattr);
+ setattr_copy(&init_user_ns, inode, iattr);
mark_inode_dirty(inode);
if (iattr->ia_valid & ATTR_MODE) {
@@ -843,7 +844,8 @@ out_err:
return err;
}
-int nilfs_permission(struct inode *inode, int mask)
+int nilfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask)
{
struct nilfs_root *root = NILFS_I(inode)->i_root;
@@ -851,7 +853,7 @@ int nilfs_permission(struct inode *inode, int mask)
root->cno != NILFS_CPTREE_CURRENT_CNO)
return -EROFS; /* snapshot is not writable */
- return generic_permission(inode, mask);
+ return generic_permission(&init_user_ns, inode, mask);
}
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
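The same mechanical conversion repeats across nilfs2, ntfs, ocfs2 and dlmfs below: each ->setattr/->getattr/->permission method gains a struct user_namespace * argument, which is forwarded to the VFS helpers. A minimal sketch of the converted ->setattr shape, without truncation handling (the function name is illustrative):

#include <linux/fs.h>

/* Illustrative only: the post-conversion shape of a simple ->setattr. */
static int example_setattr(struct user_namespace *mnt_userns,
			   struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	err = setattr_prepare(mnt_userns, dentry, iattr);	/* permission checks */
	if (err)
		return err;

	setattr_copy(mnt_userns, inode, iattr);			/* apply the attributes */
	mark_inode_dirty(inode);
	return 0;
}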
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 07d26f61f22a..b053b40315bf 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -132,7 +132,7 @@ static int nilfs_ioctl_setflags(struct inode *inode, struct file *filp,
unsigned int flags, oldflags;
int ret;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
if (get_user(flags, (int __user *)argp))
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index a6ec7961d4f5..ecace5f96a95 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -72,8 +72,8 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int nilfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int nilfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode;
struct nilfs_transaction_info ti;
@@ -100,7 +100,8 @@ static int nilfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
}
static int
-nilfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
+nilfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode *inode;
struct nilfs_transaction_info ti;
@@ -124,8 +125,8 @@ nilfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
return err;
}
-static int nilfs_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
+static int nilfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
struct nilfs_transaction_info ti;
struct super_block *sb = dir->i_sb;
@@ -201,7 +202,8 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
return err;
}
-static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int nilfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct nilfs_transaction_info ti;
@@ -338,8 +340,9 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
return err;
}
-static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
+static int nilfs_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct inode *old_inode = d_inode(old_dentry);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index f8450ee3fd06..c4a45a081ade 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -267,9 +267,11 @@ extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
extern void nilfs_truncate(struct inode *);
extern void nilfs_evict_inode(struct inode *);
-extern int nilfs_setattr(struct dentry *, struct iattr *);
+extern int nilfs_setattr(struct user_namespace *, struct dentry *,
+ struct iattr *);
extern void nilfs_write_failed(struct address_space *mapping, loff_t to);
-int nilfs_permission(struct inode *inode, int mask);
+int nilfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask);
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh);
extern int nilfs_inode_dirty(struct inode *);
int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty);
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 1a8729eded8b..56872e93823d 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -386,10 +386,6 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
struct bio *bio;
bio = bio_alloc(GFP_NOIO, nr_vecs);
- if (bio == NULL) {
- while (!bio && (nr_vecs >>= 1))
- bio = bio_alloc(GFP_NOIO, nr_vecs);
- }
if (likely(bio)) {
bio_set_dev(bio, nilfs->ns_bdev);
bio->bi_iter.bi_sector =
@@ -403,7 +399,7 @@ static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
{
wi->bio = NULL;
wi->rest_blocks = segbuf->sb_sum.nblocks;
- wi->max_pages = BIO_MAX_PAGES;
+ wi->max_pages = BIO_MAX_VECS;
wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
wi->start = wi->end = 0;
wi->blocknr = segbuf->sb_pseg_start;
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index b55cdeb4d169..987c8ab02aee 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -375,7 +375,7 @@ static inline int nilfs_flush_device(struct the_nilfs *nilfs)
*/
smp_wmb();
- err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL);
+ err = blkdev_issue_flush(nilfs->ns_bdev);
if (err != -EIO)
err = 0;
return err;
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index dcab112e1f00..9e0c1afac8bd 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -702,7 +702,7 @@ static int fanotify_find_path(int dfd, const char __user *filename,
}
/* you can only watch an inode if you have read permissions on it */
- ret = inode_permission(path->dentry->d_inode, MAY_READ);
+ ret = path_permission(path, MAY_READ);
if (ret) {
path_put(path);
goto out;
@@ -976,7 +976,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
f_flags |= O_NONBLOCK;
/* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
- group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
+ group = fsnotify_alloc_user_group(&fanotify_fsnotify_ops);
if (IS_ERR(group)) {
free_uid(user);
return PTR_ERR(group);
diff --git a/fs/notify/group.c b/fs/notify/group.c
index a4a4b1c64d32..ffd723ffe46d 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -111,14 +111,12 @@ void fsnotify_put_group(struct fsnotify_group *group)
}
EXPORT_SYMBOL_GPL(fsnotify_put_group);
-/*
- * Create a new fsnotify_group and hold a reference for the group returned.
- */
-struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
+static struct fsnotify_group *__fsnotify_alloc_group(
+ const struct fsnotify_ops *ops, gfp_t gfp)
{
struct fsnotify_group *group;
- group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
+ group = kzalloc(sizeof(struct fsnotify_group), gfp);
if (!group)
return ERR_PTR(-ENOMEM);
@@ -139,8 +137,25 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
return group;
}
+
+/*
+ * Create a new fsnotify_group and hold a reference for the group returned.
+ */
+struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
+{
+ return __fsnotify_alloc_group(ops, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(fsnotify_alloc_group);
+/*
+ * Create a new fsnotify_group and hold a reference for the group returned.
+ */
+struct fsnotify_group *fsnotify_alloc_user_group(const struct fsnotify_ops *ops)
+{
+ return __fsnotify_alloc_group(ops, GFP_KERNEL_ACCOUNT);
+}
+EXPORT_SYMBOL_GPL(fsnotify_alloc_user_group);
+
int fsnotify_fasync(int fd, struct file *file, int on)
{
struct fsnotify_group *group = file->private_data;
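fsnotify_alloc_user_group() exists so that groups created from the user-facing syscalls (inotify_init, fanotify_init) are charged to the caller's memory cgroup via GFP_KERNEL_ACCOUNT, while kernel-internal callers keep the unaccounted variant. A minimal sketch of the distinction (the wrapper names are illustrative):

#include <linux/slab.h>

/* Illustrative only: unaccounted vs. memcg-accounted allocation. */
static void *example_alloc_internal(size_t size)
{
	return kzalloc(size, GFP_KERNEL);		/* not charged to a memcg */
}

static void *example_alloc_user_triggered(size_t size)
{
	return kzalloc(size, GFP_KERNEL_ACCOUNT);	/* charged to the caller's memcg */
}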
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 59c177011a0f..c71be4fb7dc5 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -352,7 +352,7 @@ static int inotify_find_inode(const char __user *dirname, struct path *path,
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
- error = inode_permission(path->dentry->d_inode, MAY_READ);
+ error = path_permission(path, MAY_READ);
if (error) {
path_put(path);
return error;
@@ -632,11 +632,11 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
struct fsnotify_group *group;
struct inotify_event_info *oevent;
- group = fsnotify_alloc_group(&inotify_fsnotify_ops);
+ group = fsnotify_alloc_user_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
- oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
+ oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL_ACCOUNT);
if (unlikely(!oevent)) {
fsnotify_destroy_group(group);
return ERR_PTR(-ENOMEM);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index f7e4cbc26eaf..f5c058b3192c 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -629,6 +629,12 @@ static int ntfs_read_locked_inode(struct inode *vi)
}
a = ctx->attr;
/* Get the standard information attribute value. */
+ if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset)
+ + le32_to_cpu(a->data.resident.value_length) >
+ (u8 *)ctx->mrec + vol->mft_record_size) {
+ ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode.");
+ goto unm_err_out;
+ }
si = (STANDARD_INFORMATION*)((u8*)a +
le16_to_cpu(a->data.resident.value_offset));
@@ -2848,6 +2854,7 @@ void ntfs_truncate_vfs(struct inode *vi) {
/**
* ntfs_setattr - called from notify_change() when an attribute is being changed
+ * @mnt_userns: user namespace of the mount the inode was found from
* @dentry: dentry whose attributes to change
* @attr: structure describing the attributes and the changes
*
@@ -2860,13 +2867,14 @@ void ntfs_truncate_vfs(struct inode *vi) {
*
* Called with ->i_mutex held.
*/
-int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
+int ntfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *vi = d_inode(dentry);
int err;
unsigned int ia_valid = attr->ia_valid;
- err = setattr_prepare(dentry, attr);
+ err = setattr_prepare(&init_user_ns, dentry, attr);
if (err)
goto out;
/* We do not support NTFS ACLs yet. */
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
index 363e4e820673..6f78ee00f57f 100644
--- a/fs/ntfs/inode.h
+++ b/fs/ntfs/inode.h
@@ -289,7 +289,8 @@ extern int ntfs_show_options(struct seq_file *sf, struct dentry *root);
extern int ntfs_truncate(struct inode *vi);
extern void ntfs_truncate_vfs(struct inode *vi);
-extern int ntfs_setattr(struct dentry *dentry, struct iattr *attr);
+extern int ntfs_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr);
extern int __ntfs_write_inode(struct inode *vi, int sync);
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index 85422761ff43..5d4bf7a3259f 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -703,7 +703,7 @@ typedef struct {
/* 14*/ le16 instance; /* The instance of this attribute record. This
number is unique within this mft record (see
MFT_RECORD/next_attribute_instance notes in
- in mft.h for more details). */
+ mft.h for more details). */
/* 16*/ union {
/* Resident attributes. */
struct {
@@ -1838,7 +1838,7 @@ typedef struct {
* Also, each security descriptor is stored twice in the $SDS stream with a
* fixed offset of 0x40000 bytes (256kib, the Windows cache manager's max size)
* between them; i.e. if a SDS_ENTRY specifies an offset of 0x51d0, then the
- * the first copy of the security descriptor will be at offset 0x51d0 in the
+ * first copy of the security descriptor will be at offset 0x51d0 in the
* $SDS data stream and the second copy will be at offset 0x451d0.
*/
typedef struct {
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 7b07f5df3a29..5259badabb56 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -262,7 +262,8 @@ static int ocfs2_set_acl(handle_t *handle,
return ret;
}
-int ocfs2_iop_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int ocfs2_iop_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
struct buffer_head *bh = NULL;
int status, had_lock;
@@ -274,7 +275,8 @@ int ocfs2_iop_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (type == ACL_TYPE_ACCESS && acl) {
umode_t mode;
- status = posix_acl_update_mode(inode, &mode, &acl);
+ status = posix_acl_update_mode(&init_user_ns, inode, &mode,
+ &acl);
if (status)
goto unlock;
diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
index 127b13432146..4e86450917b2 100644
--- a/fs/ocfs2/acl.h
+++ b/fs/ocfs2/acl.h
@@ -19,7 +19,8 @@ struct ocfs2_acl_entry {
};
struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type);
-int ocfs2_iop_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+int ocfs2_iop_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
struct buffer_head *, struct buffer_head *,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 0179a73a3fa2..12a7590601dd 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -2042,7 +2042,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
o2hb_nego_timeout_handler,
reg, NULL, &reg->hr_handler_list);
if (ret)
- goto free;
+ goto remove_item;
ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
sizeof(struct o2hb_nego_msg),
@@ -2057,6 +2057,12 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
unregister_handler:
o2net_unregister_handler_list(&reg->hr_handler_list);
+remove_item:
+ spin_lock(&o2hb_live_lock);
+ list_del(&reg->hr_all_item);
+ if (o2hb_global_heartbeat_active())
+ clear_bit(reg->hr_region_num, o2hb_region_bitmap);
+ spin_unlock(&o2hb_live_lock);
free:
kfree(reg);
return ERR_PTR(ret);
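The new remove_item label restores the usual rule for goto-based error unwinding: each label undoes the setup steps in reverse order, so a failure after the region has been linked under o2hb_live_lock now unlinks it before the final kfree(). A minimal, self-contained sketch of the general pattern (all names are illustrative stubs):

/* Illustrative only: unwind completed steps in reverse order on failure. */
static int step_one(void)   { return 0; }	/* e.g. link into a global list */
static int step_two(void)   { return 0; }	/* e.g. register a message handler */
static int step_three(void) { return 0; }
static void undo_step_two(void) { }		/* unregister the handler */
static void undo_step_one(void) { }		/* unlink from the list */

static int example_setup(void)
{
	int err;

	err = step_one();
	if (err)
		return err;
	err = step_two();
	if (err)
		goto out_undo_one;
	err = step_three();
	if (err)
		goto out_undo_two;
	return 0;

out_undo_two:
	undo_step_two();
out_undo_one:
	undo_step_one();
	return err;
}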
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 6abaded3ff6b..70a10764f249 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -165,16 +165,6 @@ void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
spin_unlock(&lock->spinlock);
}
-void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
-{
- BUG_ON(!dlm);
- BUG_ON(!lock);
-
- spin_lock(&dlm->ast_lock);
- __dlm_queue_bast(dlm, lock);
- spin_unlock(&dlm->ast_lock);
-}
-
static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_lock *lock)
{
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index c8a444622faa..58d57e25d384 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -17,10 +17,7 @@
#define DLM_LOCKID_NAME_MAX 32
-#define DLM_DOMAIN_NAME_MAX_LEN 255
#define DLM_LOCK_RES_OWNER_UNKNOWN O2NM_MAX_NODES
-#define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes
-#define DLM_THREAD_MS 200 // flush at least every 200 ms
#define DLM_HASH_SIZE_DEFAULT (1 << 17)
#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
@@ -902,7 +899,6 @@ void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res);
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
-void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 583820ec63e2..b2870f1a31df 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -190,17 +190,18 @@ static int dlmfs_file_release(struct inode *inode,
* We do ->setattr() just to override size changes. Our size is the size
* of the LVB and nothing else.
*/
-static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
+static int dlmfs_file_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
int error;
struct inode *inode = d_inode(dentry);
attr->ia_valid &= ~ATTR_SIZE;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
@@ -329,7 +330,7 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
if (inode) {
inode->i_ino = get_next_ino();
- inode_init_owner(inode, NULL, mode);
+ inode_init_owner(&init_user_ns, inode, NULL, mode);
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
inc_nlink(inode);
@@ -352,7 +353,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
return NULL;
inode->i_ino = get_next_ino();
- inode_init_owner(inode, parent, mode);
+ inode_init_owner(&init_user_ns, inode, parent, mode);
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
ip = DLMFS_I(inode);
@@ -395,7 +396,8 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
* File creation. Allocate an inode, and we're done..
*/
/* SMP-safe */
-static int dlmfs_mkdir(struct inode * dir,
+static int dlmfs_mkdir(struct user_namespace * mnt_userns,
+ struct inode * dir,
struct dentry * dentry,
umode_t mode)
{
@@ -443,7 +445,8 @@ bail:
return status;
}
-static int dlmfs_create(struct inode *dir,
+static int dlmfs_create(struct user_namespace *mnt_userns,
+ struct inode *dir,
struct dentry *dentry,
umode_t mode,
bool excl)
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 85979e2214b3..6611c64ca0be 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -194,7 +194,7 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
needs_barrier = true;
err = jbd2_complete_transaction(journal, commit_tid);
if (needs_barrier) {
- ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev);
if (!err)
err = ret;
}
@@ -1112,7 +1112,8 @@ out:
return ret;
}
-int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
int status = 0, size_change;
int inode_locked = 0;
@@ -1142,7 +1143,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
return 0;
- status = setattr_prepare(dentry, attr);
+ status = setattr_prepare(&init_user_ns, dentry, attr);
if (status)
return status;
@@ -1263,7 +1264,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
}
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
status = ocfs2_mark_inode_dirty(handle, inode, bh);
@@ -1298,8 +1299,8 @@ bail:
return status;
}
-int ocfs2_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int ocfs2_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
struct super_block *sb = path->dentry->d_sb;
@@ -1313,7 +1314,7 @@ int ocfs2_getattr(const struct path *path, struct kstat *stat,
goto bail;
}
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
/*
* If there is inline data in the inode, the inode will normally not
* have data blocks allocated (it may have an external xattr block).
@@ -1330,7 +1331,8 @@ bail:
return err;
}
-int ocfs2_permission(struct inode *inode, int mask)
+int ocfs2_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask)
{
int ret, had_lock;
struct ocfs2_lock_holder oh;
@@ -1355,7 +1357,7 @@ int ocfs2_permission(struct inode *inode, int mask)
dump_stack();
}
- ret = generic_permission(inode, mask);
+ ret = generic_permission(&init_user_ns, inode, mask);
ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
out:
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index 4832cbceba5b..8536cec5f122 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -51,10 +51,13 @@ int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
u64 new_i_size, u64 zero_to);
int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
loff_t zero_to);
-int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
-int ocfs2_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags);
-int ocfs2_permission(struct inode *inode, int mask);
+int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr);
+int ocfs2_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags);
+int ocfs2_permission(struct user_namespace *mnt_userns,
+ struct inode *inode,
+ int mask);
int ocfs2_should_update_atime(struct inode *inode,
struct vfsmount *vfsmnt);
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 89984172fc4a..50c9b30ee9f6 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -96,7 +96,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
}
status = -EACCES;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
goto bail_unlock;
if (!S_ISDIR(inode->i_mode))
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 2a237ab00453..3abdd36da2e2 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -198,7 +198,7 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
* callers. */
if (S_ISDIR(mode))
set_nlink(inode, 2);
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
status = dquot_initialize(inode);
if (status)
return ERR_PTR(status);
@@ -221,7 +221,8 @@ static void ocfs2_cleanup_add_entry_failure(struct ocfs2_super *osb,
iput(inode);
}
-static int ocfs2_mknod(struct inode *dir,
+static int ocfs2_mknod(struct user_namespace *mnt_userns,
+ struct inode *dir,
struct dentry *dentry,
umode_t mode,
dev_t dev)
@@ -645,7 +646,8 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
return status;
}
-static int ocfs2_mkdir(struct inode *dir,
+static int ocfs2_mkdir(struct user_namespace *mnt_userns,
+ struct inode *dir,
struct dentry *dentry,
umode_t mode)
{
@@ -653,14 +655,15 @@ static int ocfs2_mkdir(struct inode *dir,
trace_ocfs2_mkdir(dir, dentry, dentry->d_name.len, dentry->d_name.name,
OCFS2_I(dir)->ip_blkno, mode);
- ret = ocfs2_mknod(dir, dentry, mode | S_IFDIR, 0);
+ ret = ocfs2_mknod(&init_user_ns, dir, dentry, mode | S_IFDIR, 0);
if (ret)
mlog_errno(ret);
return ret;
}
-static int ocfs2_create(struct inode *dir,
+static int ocfs2_create(struct user_namespace *mnt_userns,
+ struct inode *dir,
struct dentry *dentry,
umode_t mode,
bool excl)
@@ -669,7 +672,7 @@ static int ocfs2_create(struct inode *dir,
trace_ocfs2_create(dir, dentry, dentry->d_name.len, dentry->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno, mode);
- ret = ocfs2_mknod(dir, dentry, mode | S_IFREG, 0);
+ ret = ocfs2_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
if (ret)
mlog_errno(ret);
@@ -1195,7 +1198,8 @@ static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2)
ocfs2_inode_unlock(inode2, 1);
}
-static int ocfs2_rename(struct inode *old_dir,
+static int ocfs2_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry,
@@ -1784,7 +1788,8 @@ bail:
return status;
}
-static int ocfs2_symlink(struct inode *dir,
+static int ocfs2_symlink(struct user_namespace *mnt_userns,
+ struct inode *dir,
struct dentry *dentry,
const char *symname)
{
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 3b397fa9c9e8..c19a463fac55 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -978,7 +978,7 @@ static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
return 0;
}
- if (!eb || (eb && !eb->h_next_leaf_blk)) {
+ if (!eb || !eb->h_next_leaf_blk) {
/*
* We are the last extent rec, so any high cpos should
* be stored in this leaf refcount block.
@@ -4346,7 +4346,7 @@ static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
- return inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ return inode_permission(&init_user_ns, dir, MAY_WRITE | MAY_EXEC);
}
/**
@@ -4400,7 +4400,7 @@ static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
* file.
*/
if (!preserve) {
- error = inode_permission(inode, MAY_READ);
+ error = inode_permission(&init_user_ns, inode, MAY_READ);
if (error)
return error;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 2febc76e9de7..079f8826993e 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -973,8 +973,6 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
* quota files */
dquot_disable(sb, type, DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED);
- if (!inode)
- continue;
iput(inode);
}
}
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 9ccd19d8f7b1..36ae47a4aef6 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7249,6 +7249,7 @@ static int ocfs2_xattr_security_get(const struct xattr_handler *handler,
}
static int ocfs2_xattr_security_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -7321,6 +7322,7 @@ static int ocfs2_xattr_trusted_get(const struct xattr_handler *handler,
}
static int ocfs2_xattr_trusted_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -7351,6 +7353,7 @@ static int ocfs2_xattr_user_get(const struct xattr_handler *handler,
}
static int ocfs2_xattr_user_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index a0f45651f3b7..c219f91f44e9 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -279,13 +279,14 @@ out_free_inode:
return err;
}
-static int omfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int omfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
return omfs_add_node(dir, dentry, mode | S_IFDIR);
}
-static int omfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int omfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
return omfs_add_node(dir, dentry, mode | S_IFREG);
}
@@ -369,9 +370,9 @@ static bool omfs_fill_chain(struct inode *dir, struct dir_context *ctx,
return true;
}
-static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int omfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct inode *new_inode = d_inode(new_dentry);
struct inode *old_inode = d_inode(old_dentry);
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 2c7b70ee1388..11e733aab25d 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -343,12 +343,13 @@ const struct file_operations omfs_file_operations = {
.splice_read = generic_file_splice_read,
};
-static int omfs_setattr(struct dentry *dentry, struct iattr *attr)
+static int omfs_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
@@ -361,7 +362,7 @@ static int omfs_setattr(struct dentry *dentry, struct iattr *attr)
omfs_truncate(inode);
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index ce93ccca8639..2a0e83236c01 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -48,7 +48,7 @@ struct inode *omfs_new_inode(struct inode *dir, umode_t mode)
goto fail;
inode->i_ino = new_block;
- inode_init_owner(inode, NULL, mode);
+ inode_init_owner(&init_user_ns, inode, NULL, mode);
inode->i_mapping->a_ops = &omfs_aops;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
diff --git a/fs/open.c b/fs/open.c
index 1e06e443a565..e53af13b5835 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -35,8 +35,8 @@
#include "internal.h"
-int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
- struct file *filp)
+int do_truncate(struct user_namespace *mnt_userns, struct dentry *dentry,
+ loff_t length, unsigned int time_attrs, struct file *filp)
{
int ret;
struct iattr newattrs;
@@ -61,13 +61,14 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
inode_lock(dentry->d_inode);
/* Note any delegations or leases have already been broken: */
- ret = notify_change(dentry, &newattrs, NULL);
+ ret = notify_change(mnt_userns, dentry, &newattrs, NULL);
inode_unlock(dentry->d_inode);
return ret;
}
long vfs_truncate(const struct path *path, loff_t length)
{
+ struct user_namespace *mnt_userns;
struct inode *inode;
long error;
@@ -83,7 +84,8 @@ long vfs_truncate(const struct path *path, loff_t length)
if (error)
goto out;
- error = inode_permission(inode, MAY_WRITE);
+ mnt_userns = mnt_user_ns(path->mnt);
+ error = inode_permission(mnt_userns, inode, MAY_WRITE);
if (error)
goto mnt_drop_write_and_out;
@@ -107,7 +109,7 @@ long vfs_truncate(const struct path *path, loff_t length)
if (!error)
error = security_path_truncate(path);
if (!error)
- error = do_truncate(path->dentry, length, 0, NULL);
+ error = do_truncate(mnt_userns, path->dentry, length, 0, NULL);
put_write_and_out:
put_write_access(inode);
@@ -186,13 +188,13 @@ long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
/* Check IS_APPEND on real upper inode */
if (IS_APPEND(file_inode(f.file)))
goto out_putf;
-
sb_start_write(inode->i_sb);
error = locks_verify_truncate(inode, f.file, length);
if (!error)
error = security_path_truncate(&f.file->f_path);
if (!error)
- error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
+ error = do_truncate(file_mnt_user_ns(f.file), dentry, length,
+ ATTR_MTIME | ATTR_CTIME, f.file);
sb_end_write(inode->i_sb);
out_putf:
fdput(f);
@@ -436,7 +438,7 @@ retry:
goto out_path_release;
}
- res = inode_permission(inode, mode | MAY_ACCESS);
+ res = inode_permission(mnt_user_ns(path.mnt), inode, mode | MAY_ACCESS);
/* SuS v2 requires we report a read only fs too */
if (res || !(mode & S_IWOTH) || special_file(inode->i_mode))
goto out_path_release;
@@ -492,7 +494,7 @@ retry:
if (error)
goto out;
- error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
+ error = path_permission(&path, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
@@ -521,7 +523,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
if (!d_can_lookup(f.file->f_path.dentry))
goto out_putf;
- error = inode_permission(file_inode(f.file), MAY_EXEC | MAY_CHDIR);
+ error = file_permission(f.file, MAY_EXEC | MAY_CHDIR);
if (!error)
set_fs_pwd(current->fs, &f.file->f_path);
out_putf:
@@ -540,7 +542,7 @@ retry:
if (error)
goto out;
- error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
+ error = path_permission(&path, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
@@ -580,7 +582,8 @@ retry_deleg:
goto out_unlock;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- error = notify_change(path->dentry, &newattrs, &delegated_inode);
+ error = notify_change(mnt_user_ns(path->mnt), path->dentry,
+ &newattrs, &delegated_inode);
out_unlock:
inode_unlock(inode);
if (delegated_inode) {
@@ -641,6 +644,7 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode)
int chown_common(const struct path *path, uid_t user, gid_t group)
{
+ struct user_namespace *mnt_userns;
struct inode *inode = path->dentry->d_inode;
struct inode *delegated_inode = NULL;
int error;
@@ -651,6 +655,10 @@ int chown_common(const struct path *path, uid_t user, gid_t group)
uid = make_kuid(current_user_ns(), user);
gid = make_kgid(current_user_ns(), group);
+ mnt_userns = mnt_user_ns(path->mnt);
+ uid = kuid_from_mnt(mnt_userns, uid);
+ gid = kgid_from_mnt(mnt_userns, gid);
+
retry_deleg:
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
@@ -671,7 +679,8 @@ retry_deleg:
inode_lock(inode);
error = security_path_chown(path, uid, gid);
if (!error)
- error = notify_change(path->dentry, &newattrs, &delegated_inode);
+ error = notify_change(mnt_userns, path->dentry, &newattrs,
+ &delegated_inode);
inode_unlock(inode);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
@@ -1091,6 +1100,12 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op)
lookup_flags |= LOOKUP_BENEATH;
if (how->resolve & RESOLVE_IN_ROOT)
lookup_flags |= LOOKUP_IN_ROOT;
+ if (how->resolve & RESOLVE_CACHED) {
+ /* Don't bother even trying for create/truncate/tmpfile open */
+ if (flags & (O_TRUNC | O_CREAT | O_TMPFILE))
+ return -EAGAIN;
+ lookup_flags |= LOOKUP_CACHED;
+ }
op->lookup_flags = lookup_flags;
return 0;
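
The RESOLVE_CACHED handling added to build_open_flags() above is driven from userspace through openat2(2). A minimal sketch of the expected calling pattern follows; it assumes kernel headers that provide <linux/openat2.h> and __NR_openat2, and the RESOLVE_CACHED fallback define is only there for older headers. On -EAGAIN the caller simply retries without the cached-only restriction, which is also what happens when the flags imply create/truncate/tmpfile.

/* Userspace sketch (not part of the patch): exercise the RESOLVE_CACHED
 * path handled by build_open_flags() above.  Assumes a 5.12+ kernel. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>

#ifndef RESOLVE_CACHED
#define RESOLVE_CACHED 0x20	/* from include/uapi/linux/openat2.h */
#endif

static int openat2_cached(const char *path, unsigned long long flags)
{
	struct open_how how = {
		.flags = flags,
		.resolve = RESOLVE_CACHED,	/* dcache-only lookup */
	};

	return syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));
}

int main(void)
{
	int fd = openat2_cached("/etc/hostname", O_RDONLY);

	if (fd < 0 && errno == EAGAIN) {
		/* Not in the dcache (or flags force O_CREAT/O_TRUNC/O_TMPFILE):
		 * the kernel refuses a cached-only lookup, so fall back. */
		fd = open("/etc/hostname", O_RDONLY);
	}
	if (fd < 0) {
		perror("open");
		return 1;
	}
	printf("opened fd %d\n", fd);
	close(fd);
	return 0;
}
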
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
index a25e6c890975..18852b9ed82b 100644
--- a/fs/orangefs/acl.c
+++ b/fs/orangefs/acl.c
@@ -116,7 +116,8 @@ out:
return error;
}
-int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int orangefs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
int error;
struct iattr iattr;
@@ -132,7 +133,8 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
* and "mode" to the new desired value. It is up to
* us to propagate the new mode back to the server...
*/
- error = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
+ error = posix_acl_update_mode(&init_user_ns, inode,
+ &iattr.ia_mode, &acl);
if (error) {
gossip_err("%s: posix_acl_update_mode err: %d\n",
__func__,
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index ec8ae4257975..9b28a7132466 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -487,10 +487,7 @@ static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
gossip_debug(GOSSIP_FILE_DEBUG,
- "orangefs_file_mmap: called on %s\n",
- (file ?
- (char *)file->f_path.dentry->d_name.name :
- (char *)"Unknown"));
+ "orangefs_file_mmap: called on %pD\n", file);
/* set the sequential readahead hint */
vma->vm_flags |= VM_SEQ_READ;
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 48f0547d4850..5079cfafa8d7 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -855,13 +855,13 @@ again:
ORANGEFS_I(inode)->attr_uid = current_fsuid();
ORANGEFS_I(inode)->attr_gid = current_fsgid();
}
- setattr_copy(inode, iattr);
+ setattr_copy(&init_user_ns, inode, iattr);
spin_unlock(&inode->i_lock);
mark_inode_dirty(inode);
if (iattr->ia_valid & ATTR_MODE)
/* change mode on a file that has ACLs */
- ret = posix_acl_chmod(inode, inode->i_mode);
+ ret = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
ret = 0;
out:
@@ -871,12 +871,13 @@ out:
/*
* Change attributes of an object referenced by dentry.
*/
-int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
+int orangefs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr)
{
int ret;
gossip_debug(GOSSIP_INODE_DEBUG, "__orangefs_setattr: called on %pd\n",
dentry);
- ret = setattr_prepare(dentry, iattr);
+ ret = setattr_prepare(&init_user_ns, dentry, iattr);
if (ret)
goto out;
ret = __orangefs_setattr(d_inode(dentry), iattr);
@@ -890,8 +891,8 @@ out:
/*
* Obtain attributes of an object given a dentry
*/
-int orangefs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int orangefs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
int ret;
struct inode *inode = path->dentry->d_inode;
@@ -903,7 +904,7 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
ret = orangefs_inode_getattr(inode,
request_mask & STATX_SIZE ? ORANGEFS_GETATTR_SIZE : 0);
if (ret == 0) {
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
/* override block size reported to stat */
if (!(request_mask & STATX_SIZE))
@@ -919,7 +920,8 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
return ret;
}
-int orangefs_permission(struct inode *inode, int mask)
+int orangefs_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
int ret;
@@ -933,7 +935,7 @@ int orangefs_permission(struct inode *inode, int mask)
if (ret < 0)
return ret;
- return generic_permission(inode, mask);
+ return generic_permission(&init_user_ns, inode, mask);
}
int orangefs_update_time(struct inode *inode, struct timespec64 *time, int flags)
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 3e7cf3d0a494..600e8eee541f 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -15,7 +15,8 @@
/*
* Get a newly allocated inode to go with a negative dentry.
*/
-static int orangefs_create(struct inode *dir,
+static int orangefs_create(struct user_namespace *mnt_userns,
+ struct inode *dir,
struct dentry *dentry,
umode_t mode,
bool exclusive)
@@ -215,7 +216,8 @@ static int orangefs_unlink(struct inode *dir, struct dentry *dentry)
return ret;
}
-static int orangefs_symlink(struct inode *dir,
+static int orangefs_symlink(struct user_namespace *mnt_userns,
+ struct inode *dir,
struct dentry *dentry,
const char *symname)
{
@@ -303,7 +305,8 @@ out:
return ret;
}
-static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int orangefs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct orangefs_inode_s *parent = ORANGEFS_I(dir);
struct orangefs_kernel_op_s *new_op;
@@ -372,7 +375,8 @@ out:
return ret;
}
-static int orangefs_rename(struct inode *old_dir,
+static int orangefs_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry,
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index e12aeb9623d6..0e6b97682e41 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -107,7 +107,9 @@ extern int orangefs_init_acl(struct inode *inode, struct inode *dir);
extern const struct xattr_handler *orangefs_xattr_handlers[];
extern struct posix_acl *orangefs_get_acl(struct inode *inode, int type);
-extern int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int orangefs_set_acl(struct user_namespace *mnt_userns,
+ struct inode *inode, struct posix_acl *acl,
+ int type);
/*
* orangefs data structures
@@ -359,12 +361,13 @@ struct inode *orangefs_new_inode(struct super_block *sb,
struct orangefs_object_kref *ref);
int __orangefs_setattr(struct inode *, struct iattr *);
-int orangefs_setattr(struct dentry *, struct iattr *);
+int orangefs_setattr(struct user_namespace *, struct dentry *, struct iattr *);
-int orangefs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags);
+int orangefs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags);
-int orangefs_permission(struct inode *inode, int mask);
+int orangefs_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask);
int orangefs_update_time(struct inode *, struct timespec64 *, int);
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index bdc285aea360..9a5b757fbd2f 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -526,6 +526,7 @@ out_unlock:
}
static int orangefs_xattr_set_default(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *unused,
struct inode *inode,
const char *name,
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 0fed532efa68..0b2891c6c71e 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -93,9 +93,9 @@ int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
continue; /* Discard */
}
retry:
- size = vfs_getxattr(old, name, value, value_size);
+ size = vfs_getxattr(&init_user_ns, old, name, value, value_size);
if (size == -ERANGE)
- size = vfs_getxattr(old, name, NULL, 0);
+ size = vfs_getxattr(&init_user_ns, old, name, NULL, 0);
if (size < 0) {
error = size;
@@ -115,7 +115,7 @@ retry:
goto retry;
}
- error = vfs_setxattr(new, name, value, size, 0);
+ error = vfs_setxattr(&init_user_ns, new, name, value, size, 0);
if (error) {
if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
break;
@@ -236,7 +236,7 @@ static int ovl_set_size(struct dentry *upperdentry, struct kstat *stat)
.ia_size = stat->size,
};
- return notify_change(upperdentry, &attr, NULL);
+ return notify_change(&init_user_ns, upperdentry, &attr, NULL);
}
static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
@@ -248,7 +248,7 @@ static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
.ia_mtime = stat->mtime,
};
- return notify_change(upperdentry, &attr, NULL);
+ return notify_change(&init_user_ns, upperdentry, &attr, NULL);
}
int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
@@ -260,7 +260,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
.ia_valid = ATTR_MODE,
.ia_mode = stat->mode,
};
- err = notify_change(upperdentry, &attr, NULL);
+ err = notify_change(&init_user_ns, upperdentry, &attr, NULL);
}
if (!err) {
struct iattr attr = {
@@ -268,7 +268,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
.ia_uid = stat->uid,
.ia_gid = stat->gid,
};
- err = notify_change(upperdentry, &attr, NULL);
+ err = notify_change(&init_user_ns, upperdentry, &attr, NULL);
}
if (!err)
ovl_set_timestamps(upperdentry, stat);
@@ -796,7 +796,7 @@ static ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value)
ssize_t res;
char *buf;
- res = vfs_getxattr(dentry, name, NULL, 0);
+ res = vfs_getxattr(&init_user_ns, dentry, name, NULL, 0);
if (res == -ENODATA || res == -EOPNOTSUPP)
res = 0;
@@ -805,7 +805,7 @@ static ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value)
if (!buf)
return -ENOMEM;
- res = vfs_getxattr(dentry, name, buf, res);
+ res = vfs_getxattr(&init_user_ns, dentry, name, buf, res);
if (res < 0)
kfree(buf);
else
@@ -847,8 +847,8 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
* don't want that to happen for normal copy-up operation.
*/
if (capability) {
- err = vfs_setxattr(upperpath.dentry, XATTR_NAME_CAPS,
- capability, cap_size, 0);
+ err = vfs_setxattr(&init_user_ns, upperpath.dentry,
+ XATTR_NAME_CAPS, capability, cap_size, 0);
if (err)
goto out_free;
}
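
The vfs_getxattr() size-probe-and-retry in ovl_copy_xattr() above mirrors the usual userspace getxattr(2) idiom: query with a zero-length buffer to learn the value size, allocate, fetch, and probe again on ERANGE if the value grew in between. A minimal sketch; the default attribute name used here is only an example and may not exist on every system.

/* Userspace sketch of the size-probe/retry idiom used by ovl_copy_xattr(). */
#include <sys/xattr.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static void *read_xattr(const char *path, const char *name, ssize_t *len)
{
	void *buf = NULL;
	ssize_t size;

retry:
	size = getxattr(path, name, NULL, 0);	/* size probe */
	if (size < 0) {
		free(buf);
		return NULL;
	}
	buf = realloc(buf, size ? size : 1);
	if (!buf)
		return NULL;
	size = getxattr(path, name, buf, size);
	if (size < 0) {
		if (errno == ERANGE)		/* value grew, probe again */
			goto retry;
		free(buf);
		return NULL;
	}
	*len = size;
	return buf;
}

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	const char *name = argc > 2 ? argv[2] : "security.selinux";
	ssize_t len;
	void *val = read_xattr(path, name, &len);

	if (!val) {
		perror("getxattr");
		return 1;
	}
	printf("%s on %s: %zd bytes\n", name, path, len);
	free(val);
	return 0;
}
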
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index d1efa3a5a503..836f14b9d3a6 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -449,7 +449,7 @@ static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
if (err < 0)
goto out_free;
- err = vfs_setxattr(upperdentry, name, buffer, size, XATTR_CREATE);
+ err = vfs_setxattr(&init_user_ns, upperdentry, name, buffer, size, XATTR_CREATE);
out_free:
kfree(buffer);
return err;
@@ -508,7 +508,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
.ia_mode = cattr->mode,
};
inode_lock(newdentry->d_inode);
- err = notify_change(newdentry, &attr, NULL);
+ err = notify_change(&init_user_ns, newdentry, &attr, NULL);
inode_unlock(newdentry->d_inode);
if (err)
goto out_cleanup;
@@ -636,7 +636,7 @@ static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
inode->i_state |= I_CREATING;
spin_unlock(&inode->i_lock);
- inode_init_owner(inode, dentry->d_parent->d_inode, mode);
+ inode_init_owner(&init_user_ns, inode, dentry->d_parent->d_inode, mode);
attr.mode = inode->i_mode;
err = ovl_create_or_link(dentry, inode, &attr, false);
@@ -650,19 +650,20 @@ out:
return err;
}
-static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int ovl_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL);
}
-static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int ovl_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL);
}
-static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t rdev)
+static int ovl_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
/* Don't allow creation of "whiteout" on overlay */
if (S_ISCHR(mode) && rdev == WHITEOUT_DEV)
@@ -671,8 +672,8 @@ static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
return ovl_create_object(dentry, mode, rdev, NULL);
}
-static int ovl_symlink(struct inode *dir, struct dentry *dentry,
- const char *link)
+static int ovl_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *link)
{
return ovl_create_object(dentry, S_IFLNK, 0, link);
}
@@ -821,9 +822,9 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir,
goto out_dput_upper;
if (is_dir)
- err = vfs_rmdir(dir, upper);
+ err = vfs_rmdir(&init_user_ns, dir, upper);
else
- err = vfs_unlink(dir, upper, NULL);
+ err = vfs_unlink(&init_user_ns, dir, upper, NULL);
ovl_dir_modified(dentry->d_parent, ovl_type_origin(dentry));
/*
@@ -1069,9 +1070,9 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
return err;
}
-static int ovl_rename(struct inode *olddir, struct dentry *old,
- struct inode *newdir, struct dentry *new,
- unsigned int flags)
+static int ovl_rename(struct user_namespace *mnt_userns, struct inode *olddir,
+ struct dentry *old, struct inode *newdir,
+ struct dentry *new, unsigned int flags)
{
int err;
struct dentry *old_upperdir;
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 077d3ad343f6..dbfb35fb0ff7 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -50,11 +50,11 @@ static struct file *ovl_open_realfile(const struct file *file,
acc_mode |= MAY_APPEND;
old_cred = ovl_override_creds(inode->i_sb);
- err = inode_permission(realinode, MAY_OPEN | acc_mode);
+ err = inode_permission(&init_user_ns, realinode, MAY_OPEN | acc_mode);
if (err) {
realfile = ERR_PTR(err);
} else {
- if (!inode_owner_or_capable(realinode))
+ if (!inode_owner_or_capable(&init_user_ns, realinode))
flags &= ~O_NOATIME;
realfile = open_with_fake_path(&file->f_path, flags, realinode,
@@ -521,7 +521,7 @@ static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd,
long ret;
struct inode *inode = file_inode(file);
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
ret = mnt_want_write_file(file);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index cf41bcb664bc..003cf83bf78a 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -14,14 +14,15 @@
#include "overlayfs.h"
-int ovl_setattr(struct dentry *dentry, struct iattr *attr)
+int ovl_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
int err;
bool full_copy_up = false;
struct dentry *upperdentry;
const struct cred *old_cred;
- err = setattr_prepare(dentry, attr);
+ err = setattr_prepare(&init_user_ns, dentry, attr);
if (err)
return err;
@@ -79,7 +80,7 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
inode_lock(upperdentry->d_inode);
old_cred = ovl_override_creds(dentry->d_sb);
- err = notify_change(upperdentry, attr, NULL);
+ err = notify_change(&init_user_ns, upperdentry, attr, NULL);
revert_creds(old_cred);
if (!err)
ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
@@ -154,8 +155,8 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
return 0;
}
-int ovl_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int ovl_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
enum ovl_path_type type;
@@ -277,7 +278,8 @@ out:
return err;
}
-int ovl_permission(struct inode *inode, int mask)
+int ovl_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
struct inode *upperinode = ovl_inode_upper(inode);
struct inode *realinode = upperinode ?: ovl_inode_lower(inode);
@@ -294,7 +296,7 @@ int ovl_permission(struct inode *inode, int mask)
* Check overlay inode with the creds of task and underlying inode
* with creds of mounter
*/
- err = generic_permission(inode, mask);
+ err = generic_permission(&init_user_ns, inode, mask);
if (err)
return err;
@@ -305,7 +307,7 @@ int ovl_permission(struct inode *inode, int mask)
/* Make sure mounter can read file for copy up later */
mask |= MAY_READ;
}
- err = inode_permission(realinode, mask);
+ err = inode_permission(&init_user_ns, realinode, mask);
revert_creds(old_cred);
return err;
@@ -353,7 +355,7 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
if (!value && !upperdentry) {
old_cred = ovl_override_creds(dentry->d_sb);
- err = vfs_getxattr(realdentry, name, NULL, 0);
+ err = vfs_getxattr(&init_user_ns, realdentry, name, NULL, 0);
revert_creds(old_cred);
if (err < 0)
goto out_drop_write;
@@ -369,10 +371,11 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
old_cred = ovl_override_creds(dentry->d_sb);
if (value)
- err = vfs_setxattr(realdentry, name, value, size, flags);
+ err = vfs_setxattr(&init_user_ns, realdentry, name, value, size,
+ flags);
else {
WARN_ON(flags != XATTR_REPLACE);
- err = vfs_removexattr(realdentry, name);
+ err = vfs_removexattr(&init_user_ns, realdentry, name);
}
revert_creds(old_cred);
@@ -394,7 +397,7 @@ int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
ovl_i_dentry_upper(inode) ?: ovl_dentry_lower(dentry);
old_cred = ovl_override_creds(dentry->d_sb);
- res = vfs_getxattr(realdentry, name, value, size);
+ res = vfs_getxattr(&init_user_ns, realdentry, name, value, size);
revert_creds(old_cred);
return res;
}
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index cb4e2d60ecf9..95cff83786a5 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -123,7 +123,7 @@ static inline const char *ovl_xattr(struct ovl_fs *ofs, enum ovl_xattr ox)
static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
{
- int err = vfs_rmdir(dir, dentry);
+ int err = vfs_rmdir(&init_user_ns, dir, dentry);
pr_debug("rmdir(%pd2) = %i\n", dentry, err);
return err;
@@ -131,7 +131,7 @@ static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
static inline int ovl_do_unlink(struct inode *dir, struct dentry *dentry)
{
- int err = vfs_unlink(dir, dentry, NULL);
+ int err = vfs_unlink(&init_user_ns, dir, dentry, NULL);
pr_debug("unlink(%pd2) = %i\n", dentry, err);
return err;
@@ -140,7 +140,7 @@ static inline int ovl_do_unlink(struct inode *dir, struct dentry *dentry)
static inline int ovl_do_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
- int err = vfs_link(old_dentry, dir, new_dentry, NULL);
+ int err = vfs_link(old_dentry, &init_user_ns, dir, new_dentry, NULL);
pr_debug("link(%pd2, %pd2) = %i\n", old_dentry, new_dentry, err);
return err;
@@ -149,7 +149,7 @@ static inline int ovl_do_link(struct dentry *old_dentry, struct inode *dir,
static inline int ovl_do_create(struct inode *dir, struct dentry *dentry,
umode_t mode)
{
- int err = vfs_create(dir, dentry, mode, true);
+ int err = vfs_create(&init_user_ns, dir, dentry, mode, true);
pr_debug("create(%pd2, 0%o) = %i\n", dentry, mode, err);
return err;
@@ -158,7 +158,7 @@ static inline int ovl_do_create(struct inode *dir, struct dentry *dentry,
static inline int ovl_do_mkdir(struct inode *dir, struct dentry *dentry,
umode_t mode)
{
- int err = vfs_mkdir(dir, dentry, mode);
+ int err = vfs_mkdir(&init_user_ns, dir, dentry, mode);
pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, err);
return err;
}
@@ -166,7 +166,7 @@ static inline int ovl_do_mkdir(struct inode *dir, struct dentry *dentry,
static inline int ovl_do_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t dev)
{
- int err = vfs_mknod(dir, dentry, mode, dev);
+ int err = vfs_mknod(&init_user_ns, dir, dentry, mode, dev);
pr_debug("mknod(%pd2, 0%o, 0%o) = %i\n", dentry, mode, dev, err);
return err;
@@ -175,7 +175,7 @@ static inline int ovl_do_mknod(struct inode *dir, struct dentry *dentry,
static inline int ovl_do_symlink(struct inode *dir, struct dentry *dentry,
const char *oldname)
{
- int err = vfs_symlink(dir, dentry, oldname);
+ int err = vfs_symlink(&init_user_ns, dir, dentry, oldname);
pr_debug("symlink(\"%s\", %pd2) = %i\n", oldname, dentry, err);
return err;
@@ -186,7 +186,7 @@ static inline ssize_t ovl_do_getxattr(struct ovl_fs *ofs, struct dentry *dentry,
size_t size)
{
const char *name = ovl_xattr(ofs, ox);
- return vfs_getxattr(dentry, name, value, size);
+ return vfs_getxattr(&init_user_ns, dentry, name, value, size);
}
static inline int ovl_do_setxattr(struct ovl_fs *ofs, struct dentry *dentry,
@@ -194,7 +194,7 @@ static inline int ovl_do_setxattr(struct ovl_fs *ofs, struct dentry *dentry,
size_t size)
{
const char *name = ovl_xattr(ofs, ox);
- int err = vfs_setxattr(dentry, name, value, size, 0);
+ int err = vfs_setxattr(&init_user_ns, dentry, name, value, size, 0);
pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0) = %i\n",
dentry, name, min((int)size, 48), value, size, err);
return err;
@@ -204,7 +204,7 @@ static inline int ovl_do_removexattr(struct ovl_fs *ofs, struct dentry *dentry,
enum ovl_xattr ox)
{
const char *name = ovl_xattr(ofs, ox);
- int err = vfs_removexattr(dentry, name);
+ int err = vfs_removexattr(&init_user_ns, dentry, name);
pr_debug("removexattr(%pd2, \"%s\") = %i\n", dentry, name, err);
return err;
}
@@ -214,9 +214,18 @@ static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry,
unsigned int flags)
{
int err;
+ struct renamedata rd = {
+ .old_mnt_userns = &init_user_ns,
+ .old_dir = olddir,
+ .old_dentry = olddentry,
+ .new_mnt_userns = &init_user_ns,
+ .new_dir = newdir,
+ .new_dentry = newdentry,
+ .flags = flags,
+ };
pr_debug("rename(%pd2, %pd2, 0x%x)\n", olddentry, newdentry, flags);
- err = vfs_rename(olddir, olddentry, newdir, newdentry, NULL, flags);
+ err = vfs_rename(&rd);
if (err) {
pr_debug("...rename(%pd2, %pd2, ...) = %i\n",
olddentry, newdentry, err);
@@ -226,14 +235,14 @@ static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry,
static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
{
- int err = vfs_whiteout(dir, dentry);
+ int err = vfs_whiteout(&init_user_ns, dir, dentry);
pr_debug("whiteout(%pd2) = %i\n", dentry, err);
return err;
}
static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode)
{
- struct dentry *ret = vfs_tmpfile(dentry, mode, 0);
+ struct dentry *ret = vfs_tmpfile(&init_user_ns, dentry, mode, 0);
int err = PTR_ERR_OR_ZERO(ret);
pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err);
@@ -436,10 +445,12 @@ int ovl_set_nlink_lower(struct dentry *dentry);
unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry,
struct dentry *upperdentry,
unsigned int fallback);
-int ovl_setattr(struct dentry *dentry, struct iattr *attr);
-int ovl_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags);
-int ovl_permission(struct inode *inode, int mask);
+int ovl_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr);
+int ovl_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags);
+int ovl_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask);
int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
const void *value, size_t size, int flags);
int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index d58b8f2bf9d0..fdd72f1a9c5e 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -803,17 +803,19 @@ retry:
* allowed as upper are limited to "normal" ones, where checking
* for the above two errors is sufficient.
*/
- err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
+ err = vfs_removexattr(&init_user_ns, work,
+ XATTR_NAME_POSIX_ACL_DEFAULT);
if (err && err != -ENODATA && err != -EOPNOTSUPP)
goto out_dput;
- err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
+ err = vfs_removexattr(&init_user_ns, work,
+ XATTR_NAME_POSIX_ACL_ACCESS);
if (err && err != -ENODATA && err != -EOPNOTSUPP)
goto out_dput;
/* Clear any inherited mode bits */
inode_lock(work->d_inode);
- err = notify_change(work, &attr, NULL);
+ err = notify_change(&init_user_ns, work, &attr, NULL);
inode_unlock(work->d_inode);
if (err)
goto out_dput;
@@ -865,6 +867,10 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
pr_err("filesystem on '%s' not supported\n", name);
goto out_put;
}
+ if (mnt_user_ns(path->mnt) != &init_user_ns) {
+ pr_err("idmapped layers are currently not supported\n");
+ goto out_put;
+ }
if (!d_is_dir(path->dentry)) {
pr_err("'%s' not a directory\n", name);
goto out_put;
@@ -989,6 +995,7 @@ ovl_posix_acl_xattr_get(const struct xattr_handler *handler,
static int __maybe_unused
ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -1014,7 +1021,7 @@ ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
goto out_acl_release;
}
err = -EPERM;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
goto out_acl_release;
posix_acl_release(acl);
@@ -1026,10 +1033,10 @@ ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
if (unlikely(inode->i_mode & S_ISGID) &&
handler->flags == ACL_TYPE_ACCESS &&
!in_group_p(inode->i_gid) &&
- !capable_wrt_inode_uidgid(inode, CAP_FSETID)) {
+ !capable_wrt_inode_uidgid(&init_user_ns, inode, CAP_FSETID)) {
struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };
- err = ovl_setattr(dentry, &iattr);
+ err = ovl_setattr(&init_user_ns, dentry, &iattr);
if (err)
return err;
}
@@ -1053,6 +1060,7 @@ static int ovl_own_xattr_get(const struct xattr_handler *handler,
}
static int ovl_own_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
@@ -1068,6 +1076,7 @@ static int ovl_other_xattr_get(const struct xattr_handler *handler,
}
static int ovl_other_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 9826b003f1d2..7f5a01a11f97 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -479,12 +479,12 @@ struct file *ovl_path_open(struct path *path, int flags)
BUG();
}
- err = inode_permission(inode, acc_mode | MAY_OPEN);
+ err = inode_permission(&init_user_ns, inode, acc_mode | MAY_OPEN);
if (err)
return ERR_PTR(err);
/* O_NOATIME is an optimization, don't fail if not permitted */
- if (inode_owner_or_capable(inode))
+ if (inode_owner_or_capable(&init_user_ns, inode))
flags |= O_NOATIME;
return dentry_open(path, flags, current_cred());
diff --git a/fs/pipe.c b/fs/pipe.c
index 39c96845a72f..bfd946a9ad01 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -171,7 +171,7 @@ EXPORT_SYMBOL(generic_pipe_buf_try_steal);
*
* Description:
* This function grabs an extra reference to @buf. It's used in
- * in the tee() system call, when we duplicate the buffers in one
+ * the tee() system call, when we duplicate the buffers in one
* pipe into another.
*/
bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
diff --git a/fs/pnode.h b/fs/pnode.h
index 26f74e092bd9..988f1aa9b02a 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -12,7 +12,7 @@
#define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED)
#define IS_MNT_SLAVE(m) ((m)->mnt_master)
-#define IS_MNT_NEW(m) (!(m)->mnt_ns)
+#define IS_MNT_NEW(m) (!(m)->mnt_ns || is_anon_ns((m)->mnt_ns))
#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 95882b3f5f62..f3309a7edb49 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -345,10 +345,13 @@ EXPORT_SYMBOL(posix_acl_from_mode);
* by the acl. Returns -E... otherwise.
*/
int
-posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want)
+posix_acl_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ const struct posix_acl *acl, int want)
{
const struct posix_acl_entry *pa, *pe, *mask_obj;
int found = 0;
+ kuid_t uid;
+ kgid_t gid;
want &= MAY_READ | MAY_WRITE | MAY_EXEC;
@@ -356,22 +359,26 @@ posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want)
switch(pa->e_tag) {
case ACL_USER_OBJ:
/* (May have been checked already) */
- if (uid_eq(inode->i_uid, current_fsuid()))
+ uid = i_uid_into_mnt(mnt_userns, inode);
+ if (uid_eq(uid, current_fsuid()))
goto check_perm;
break;
case ACL_USER:
- if (uid_eq(pa->e_uid, current_fsuid()))
+ uid = kuid_into_mnt(mnt_userns, pa->e_uid);
+ if (uid_eq(uid, current_fsuid()))
goto mask;
break;
case ACL_GROUP_OBJ:
- if (in_group_p(inode->i_gid)) {
+ gid = i_gid_into_mnt(mnt_userns, inode);
+ if (in_group_p(gid)) {
found = 1;
if ((pa->e_perm & want) == want)
goto mask;
}
break;
case ACL_GROUP:
- if (in_group_p(pa->e_gid)) {
+ gid = kgid_into_mnt(mnt_userns, pa->e_gid);
+ if (in_group_p(gid)) {
found = 1;
if ((pa->e_perm & want) == want)
goto mask;
@@ -551,8 +558,22 @@ __posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode)
}
EXPORT_SYMBOL(__posix_acl_chmod);
+/**
+ * posix_acl_chmod - chmod a posix acl
+ *
+ * @mnt_userns: user namespace of the mount @inode was found from
+ * @inode: inode to check permissions on
+ * @mode: the new mode of @inode
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode simply pass init_user_ns.
+ */
int
-posix_acl_chmod(struct inode *inode, umode_t mode)
+ posix_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode,
+ umode_t mode)
{
struct posix_acl *acl;
int ret = 0;
@@ -572,7 +593,7 @@ posix_acl_chmod(struct inode *inode, umode_t mode)
ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode);
if (ret)
return ret;
- ret = inode->i_op->set_acl(inode, acl, ACL_TYPE_ACCESS);
+ ret = inode->i_op->set_acl(mnt_userns, inode, acl, ACL_TYPE_ACCESS);
posix_acl_release(acl);
return ret;
}
@@ -631,9 +652,10 @@ EXPORT_SYMBOL_GPL(posix_acl_create);
/**
* posix_acl_update_mode - update mode in set_acl
- * @inode: target inode
- * @mode_p: mode (pointer) for update
- * @acl: acl pointer
+ * @mnt_userns: user namespace of the mount @inode was found from
+ * @inode: target inode
+ * @mode_p: mode (pointer) for update
+ * @acl: acl pointer
*
* Update the file mode when setting an ACL: compute the new file permission
* bits based on the ACL. In addition, if the ACL is equivalent to the new
@@ -642,9 +664,16 @@ EXPORT_SYMBOL_GPL(posix_acl_create);
* As with chmod, clear the setgid bit if the caller is not in the owning group
* or capable of CAP_FSETID (see inode_change_ok).
*
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before checking
+ * permissions. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode simply pass init_user_ns.
+ *
* Called from set_acl inode operations.
*/
-int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
+int posix_acl_update_mode(struct user_namespace *mnt_userns,
+ struct inode *inode, umode_t *mode_p,
struct posix_acl **acl)
{
umode_t mode = inode->i_mode;
@@ -655,8 +684,8 @@ int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
return error;
if (error == 0)
*acl = NULL;
- if (!in_group_p(inode->i_gid) &&
- !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ if (!in_group_p(i_gid_into_mnt(mnt_userns, inode)) &&
+ !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
mode &= ~S_ISGID;
*mode_p = mode;
return 0;
@@ -668,7 +697,8 @@ EXPORT_SYMBOL(posix_acl_update_mode);
*/
static void posix_acl_fix_xattr_userns(
struct user_namespace *to, struct user_namespace *from,
- void *value, size_t size)
+ struct user_namespace *mnt_userns,
+ void *value, size_t size, bool from_user)
{
struct posix_acl_xattr_header *header = value;
struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
@@ -693,10 +723,18 @@ static void posix_acl_fix_xattr_userns(
switch(le16_to_cpu(entry->e_tag)) {
case ACL_USER:
uid = make_kuid(from, le32_to_cpu(entry->e_id));
+ if (from_user)
+ uid = kuid_from_mnt(mnt_userns, uid);
+ else
+ uid = kuid_into_mnt(mnt_userns, uid);
entry->e_id = cpu_to_le32(from_kuid(to, uid));
break;
case ACL_GROUP:
gid = make_kgid(from, le32_to_cpu(entry->e_id));
+ if (from_user)
+ gid = kgid_from_mnt(mnt_userns, gid);
+ else
+ gid = kgid_into_mnt(mnt_userns, gid);
entry->e_id = cpu_to_le32(from_kgid(to, gid));
break;
default:
@@ -705,20 +743,24 @@ static void posix_acl_fix_xattr_userns(
}
}
-void posix_acl_fix_xattr_from_user(void *value, size_t size)
+void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+ void *value, size_t size)
{
struct user_namespace *user_ns = current_user_ns();
- if (user_ns == &init_user_ns)
+ if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
return;
- posix_acl_fix_xattr_userns(&init_user_ns, user_ns, value, size);
+ posix_acl_fix_xattr_userns(&init_user_ns, user_ns, mnt_userns, value,
+ size, true);
}
-void posix_acl_fix_xattr_to_user(void *value, size_t size)
+void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+ void *value, size_t size)
{
struct user_namespace *user_ns = current_user_ns();
- if (user_ns == &init_user_ns)
+ if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
return;
- posix_acl_fix_xattr_userns(user_ns, &init_user_ns, value, size);
+ posix_acl_fix_xattr_userns(user_ns, &init_user_ns, mnt_userns, value,
+ size, false);
}
/*
@@ -858,7 +900,8 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
}
int
-set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
+set_posix_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ int type, struct posix_acl *acl)
{
if (!IS_POSIXACL(inode))
return -EOPNOTSUPP;
@@ -867,7 +910,7 @@ set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
return acl ? -EACCES : 0;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(mnt_userns, inode))
return -EPERM;
if (acl) {
@@ -875,15 +918,16 @@ set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
if (ret)
return ret;
}
- return inode->i_op->set_acl(inode, acl, type);
+ return inode->i_op->set_acl(mnt_userns, inode, acl, type);
}
EXPORT_SYMBOL(set_posix_acl);
static int
posix_acl_xattr_set(const struct xattr_handler *handler,
- struct dentry *unused, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
+ struct user_namespace *mnt_userns,
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value, size_t size,
+ int flags)
{
struct posix_acl *acl = NULL;
int ret;
@@ -893,7 +937,7 @@ posix_acl_xattr_set(const struct xattr_handler *handler,
if (IS_ERR(acl))
return PTR_ERR(acl);
}
- ret = set_posix_acl(inode, handler->flags, acl);
+ ret = set_posix_acl(mnt_userns, inode, handler->flags, acl);
posix_acl_release(acl);
return ret;
}
@@ -922,12 +966,13 @@ const struct xattr_handler posix_acl_default_xattr_handler = {
};
EXPORT_SYMBOL_GPL(posix_acl_default_xattr_handler);
-int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int simple_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
int error;
if (type == ACL_TYPE_ACCESS) {
- error = posix_acl_update_mode(inode,
+ error = posix_acl_update_mode(mnt_userns, inode,
&inode->i_mode, &acl);
if (error)
return error;
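
The posix_acl_fix_xattr_from_user()/..._to_user() changes above remap only the ACL_USER and ACL_GROUP entries of the system.posix_acl_* xattr blob, since those are the only entries that carry a uid or gid. Below is a standalone sketch of that layout; the tag and version values follow the uapi headers, the blob is purely synthetic, and the little-endian conversions the kernel performs are omitted for brevity.

/* Userspace sketch of the system.posix_acl_* xattr layout walked by
 * posix_acl_fix_xattr_userns().  Only ACL_USER and ACL_GROUP entries
 * carry a translatable id; the *_OBJ, MASK and OTHER entries use
 * ACL_UNDEFINED_ID.  On disk the fields are little-endian; byte-order
 * handling is skipped here. */
#include <stdint.h>
#include <stdio.h>

#define POSIX_ACL_XATTR_VERSION	0x0002
#define ACL_UNDEFINED_ID	((uint32_t)-1)

#define ACL_USER_OBJ	0x01
#define ACL_USER	0x02
#define ACL_GROUP_OBJ	0x04
#define ACL_GROUP	0x08
#define ACL_MASK	0x10
#define ACL_OTHER	0x20

struct acl_xattr_entry {
	uint16_t e_tag;
	uint16_t e_perm;
	uint32_t e_id;
};

int main(void)
{
	/* Synthetic "user::rw-,user:1000:r--,group::r--,mask::r--,other::r--". */
	struct acl_xattr_entry entries[] = {
		{ ACL_USER_OBJ,  06, ACL_UNDEFINED_ID },
		{ ACL_USER,      04, 1000 },
		{ ACL_GROUP_OBJ, 04, ACL_UNDEFINED_ID },
		{ ACL_MASK,      04, ACL_UNDEFINED_ID },
		{ ACL_OTHER,     04, ACL_UNDEFINED_ID },
	};
	size_t i;

	printf("version 0x%04x\n", (unsigned)POSIX_ACL_XATTR_VERSION);
	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		struct acl_xattr_entry *e = &entries[i];
		int has_id = (e->e_tag == ACL_USER || e->e_tag == ACL_GROUP);

		printf("tag 0x%02x perm 0%o id %s%u\n",
		       (unsigned)e->e_tag, (unsigned)e->e_perm,
		       has_id ? "" : "(unused) ", (unsigned)e->e_id);
	}
	return 0;
}
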
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b3422cda2a91..3851bfcdba56 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -67,7 +67,6 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/rcupdate.h>
-#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/resource.h>
#include <linux/module.h>
@@ -386,19 +385,17 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
unsigned long wchan;
- char symname[KSYM_NAME_LEN];
- if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
- goto print0;
+ if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+ wchan = get_wchan(task);
+ else
+ wchan = 0;
- wchan = get_wchan(task);
- if (wchan && !lookup_symbol_name(wchan, symname)) {
- seq_puts(m, symname);
- return 0;
- }
+ if (wchan)
+ seq_printf(m, "%ps", (void *) wchan);
+ else
+ seq_putc(m, '0');
-print0:
- seq_putc(m, '0');
return 0;
}
#endif /* CONFIG_KALLSYMS */
@@ -685,7 +682,8 @@ static int proc_fd_access_allowed(struct inode *inode)
return allowed;
}
-int proc_setattr(struct dentry *dentry, struct iattr *attr)
+int proc_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
int error;
struct inode *inode = d_inode(dentry);
@@ -693,11 +691,11 @@ int proc_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid & ATTR_MODE)
return -EPERM;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
@@ -726,7 +724,8 @@ static bool has_pid_permissions(struct proc_fs_info *fs_info,
}
-static int proc_pid_permission(struct inode *inode, int mask)
+static int proc_pid_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
struct task_struct *task;
@@ -751,7 +750,7 @@ static int proc_pid_permission(struct inode *inode, int mask)
return -EPERM;
}
- return generic_permission(inode, mask);
+ return generic_permission(&init_user_ns, inode, mask);
}
@@ -1927,14 +1926,14 @@ out_unlock:
return NULL;
}
-int pid_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int query_flags)
+int pid_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
struct task_struct *task;
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
stat->uid = GLOBAL_ROOT_UID;
stat->gid = GLOBAL_ROOT_GID;
@@ -3473,7 +3472,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
* This function makes sure that the node is always accessible for members of
* same thread group.
*/
-static int proc_tid_comm_permission(struct inode *inode, int mask)
+static int proc_tid_comm_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
bool is_same_tgroup;
struct task_struct *task;
@@ -3492,7 +3492,7 @@ static int proc_tid_comm_permission(struct inode *inode, int mask)
return 0;
}
- return generic_permission(inode, mask);
+ return generic_permission(&init_user_ns, inode, mask);
}
static const struct inode_operations proc_tid_comm_inode_operations = {
@@ -3798,12 +3798,13 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx)
return 0;
}
-static int proc_task_getattr(const struct path *path, struct kstat *stat,
+static int proc_task_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
struct task_struct *p = get_proc_task(inode);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
if (p) {
stat->nlink += get_nr_threads(p);
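
The proc_pid_wchan() rework above prints the wait-channel symbol through the %ps vsprintf format instead of an explicit kallsyms lookup buffer, or a literal '0' when the task is runnable or ptrace access is denied. A small reader sketch follows; pid 1 is just a convenient default argument.

/* Userspace sketch: read /proc/<pid>/wchan, whose contents the reworked
 * proc_pid_wchan() above now formats with "%ps" (or a literal '0'). */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], buf[128];
	const char *pid = argc > 1 ? argv[1] : "1";
	FILE *f;
	size_t n;

	snprintf(path, sizeof(path), "/proc/%s/wchan", pid);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	n = fread(buf, 1, sizeof(buf) - 1, f);
	buf[n] = '\0';
	fclose(f);

	/* "0" means the task is running or ptrace access was denied. */
	printf("%s: %s\n", path, buf);
	return 0;
}
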
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index cb51763ed554..07fc4fad2602 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -276,12 +276,13 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
* /proc/pid/fd needs a special permission handler so that a process can still
* access /proc/self/fd after it has executed a setuid().
*/
-int proc_fd_permission(struct inode *inode, int mask)
+int proc_fd_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
struct task_struct *p;
int rv;
- rv = generic_permission(inode, mask);
+ rv = generic_permission(&init_user_ns, inode, mask);
if (rv == 0)
return rv;
diff --git a/fs/proc/fd.h b/fs/proc/fd.h
index f371a602bf58..c5a921a06a0b 100644
--- a/fs/proc/fd.h
+++ b/fs/proc/fd.h
@@ -10,7 +10,8 @@ extern const struct inode_operations proc_fd_inode_operations;
extern const struct file_operations proc_fdinfo_operations;
extern const struct inode_operations proc_fdinfo_inode_operations;
-extern int proc_fd_permission(struct inode *inode, int mask);
+extern int proc_fd_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask);
static inline unsigned int proc_fd(struct inode *inode)
{
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 6c0a05f55d6b..bc86aa87cc41 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -115,17 +115,18 @@ static bool pde_subdir_insert(struct proc_dir_entry *dir,
return true;
}
-static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
+static int proc_notify_change(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
struct proc_dir_entry *de = PDE(inode);
int error;
- error = setattr_prepare(dentry, iattr);
+ error = setattr_prepare(&init_user_ns, dentry, iattr);
if (error)
return error;
- setattr_copy(inode, iattr);
+ setattr_copy(&init_user_ns, inode, iattr);
mark_inode_dirty(inode);
proc_set_user(de, inode->i_uid, inode->i_gid);
@@ -133,7 +134,8 @@ static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
return 0;
}
-static int proc_getattr(const struct path *path, struct kstat *stat,
+static int proc_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
@@ -145,7 +147,7 @@ static int proc_getattr(const struct path *path, struct kstat *stat,
}
}
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
return 0;
}
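
Editorial note, not part of the diff: the proc_notify_change() hunk above shows the other half of the conversion — ->setattr implementations now receive the mount's user namespace and pass a namespace on to setattr_prepare() and setattr_copy(). A minimal sketch in the new calling convention (example_setattr is hypothetical; like the hunks here, it passes &init_user_ns because the filesystem is not idmapped-mount aware):

#include <linux/fs.h>
#include <linux/user_namespace.h>

static int example_setattr(struct user_namespace *mnt_userns,
			   struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	/* Permission and sanity checks against the (un-idmapped) inode. */
	error = setattr_prepare(&init_user_ns, dentry, iattr);
	if (error)
		return error;

	/* Copy the validated attributes into the inode and mark it dirty. */
	setattr_copy(&init_user_ns, inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}
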
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index f60b379dcdc7..03415f3fb3a8 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -162,8 +162,10 @@ extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
* base.c
*/
extern const struct dentry_operations pid_dentry_operations;
-extern int pid_getattr(const struct path *, struct kstat *, u32, unsigned int);
-extern int proc_setattr(struct dentry *, struct iattr *);
+extern int pid_getattr(struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
+extern int proc_setattr(struct user_namespace *, struct dentry *,
+ struct iattr *);
extern void proc_pid_evict_inode(struct proc_inode *);
extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t);
extern void pid_update_inode(struct task_struct *, struct inode *);
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index d6fc74619625..6fa761c9cc78 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -129,15 +129,15 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
show_val_kb(m, "AnonHugePages: ",
- global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR);
+ global_node_page_state(NR_ANON_THPS));
show_val_kb(m, "ShmemHugePages: ",
- global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
+ global_node_page_state(NR_SHMEM_THPS));
show_val_kb(m, "ShmemPmdMapped: ",
- global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
+ global_node_page_state(NR_SHMEM_PMDMAPPED));
show_val_kb(m, "FileHugePages: ",
- global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);
+ global_node_page_state(NR_FILE_THPS));
show_val_kb(m, "FilePmdMapped: ",
- global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);
+ global_node_page_state(NR_FILE_PMDMAPPED));
#endif
#ifdef CONFIG_CMA
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 18601042af99..15c2e55d2ed2 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -289,7 +289,8 @@ static struct dentry *proc_tgid_net_lookup(struct inode *dir,
return de;
}
-static int proc_tgid_net_getattr(const struct path *path, struct kstat *stat,
+static int proc_tgid_net_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
@@ -297,7 +298,7 @@ static int proc_tgid_net_getattr(const struct path *path, struct kstat *stat,
net = get_proc_task_net(inode);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
if (net != NULL) {
stat->nlink = net->proc_net->nlink;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d2018f70d1fa..984e42f8cb11 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -571,7 +571,7 @@ static ssize_t proc_sys_call_handler(struct kiocb *iocb, struct iov_iter *iter,
error = -ENOMEM;
if (count >= KMALLOC_MAX_SIZE)
goto out;
- kbuf = kzalloc(count + 1, GFP_KERNEL);
+ kbuf = kvzalloc(count + 1, GFP_KERNEL);
if (!kbuf)
goto out;
@@ -600,7 +600,7 @@ static ssize_t proc_sys_call_handler(struct kiocb *iocb, struct iov_iter *iter,
error = count;
out_free_buf:
- kfree(kbuf);
+ kvfree(kbuf);
out:
sysctl_head_finish(head);
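
Editorial note, not part of the diff: the two hunks above switch the sysctl write buffer from kzalloc()/kfree() to kvzalloc()/kvfree(), so a large user-supplied count can fall back to vmalloc instead of failing a high-order kmalloc. The important pairing rule is that memory obtained from the kv* allocators must be released with kvfree(), which handles both backing allocators. A small sketch of that pattern (helper names are made up for illustration):

#include <linux/mm.h>
#include <linux/slab.h>

/* kvzalloc() tries kmalloc first and falls back to vzalloc for larger
 * sizes; the buffer must therefore be freed with kvfree(), never kfree().
 */
static void *example_alloc_user_buf(size_t count)
{
	return kvzalloc(count + 1, GFP_KERNEL);
}

static void example_free_user_buf(void *buf)
{
	kvfree(buf);
}
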
@@ -785,7 +785,8 @@ out:
return 0;
}
-static int proc_sys_permission(struct inode *inode, int mask)
+static int proc_sys_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask)
{
/*
* sysctl entries that are not writeable,
@@ -813,7 +814,8 @@ static int proc_sys_permission(struct inode *inode, int mask)
return error;
}
-static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr)
+static int proc_sys_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error;
@@ -821,16 +823,17 @@ static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
return -EPERM;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
-static int proc_sys_getattr(const struct path *path, struct kstat *stat,
+static int proc_sys_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
@@ -840,7 +843,7 @@ static int proc_sys_getattr(const struct path *path, struct kstat *stat,
if (IS_ERR(head))
return PTR_ERR(head);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
if (table)
stat->mode = (stat->mode & S_IFMT) | table->mode;
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 5e444d4f9717..c7e3b1350ef8 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -308,10 +308,11 @@ void __init proc_root_init(void)
register_filesystem(&proc_fs_type);
}
-static int proc_root_getattr(const struct path *path, struct kstat *stat,
+static int proc_root_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
- generic_fillattr(d_inode(path->dentry), stat);
+ generic_fillattr(&init_user_ns, d_inode(path->dentry), stat);
stat->nlink = proc_root.nlink + nr_processes();
return 0;
}
diff --git a/fs/proc/self.c b/fs/proc/self.c
index cc71ce3466dc..72cd69bcaf4a 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -16,13 +16,6 @@ static const char *proc_self_get_link(struct dentry *dentry,
pid_t tgid = task_tgid_nr_ns(current, ns);
char *name;
- /*
- * Not currently supported. Once we can inherit all of struct pid,
- * we can allow this.
- */
- if (current->flags & PF_KTHREAD)
- return ERR_PTR(-EOPNOTSUPP);
-
if (!tgid)
return ERR_PTR(-ENOENT);
/* max length of unsigned int in decimal + NULL term */
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 602e3a52884d..e862cab69583 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1036,8 +1036,6 @@ struct clear_refs_private {
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define is_cow_mapping(flags) (((flags) & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE)
-
static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
struct page *page;
@@ -1210,7 +1208,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
struct mm_struct *mm;
struct vm_area_struct *vma;
enum clear_refs_types type;
- struct mmu_gather tlb;
int itype;
int rv;
@@ -1249,7 +1246,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
goto out_unlock;
}
- tlb_gather_mmu(&tlb, mm, 0, -1);
if (type == CLEAR_REFS_SOFT_DIRTY) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -1258,15 +1254,18 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
vma_set_page_prot(vma);
}
+ inc_tlb_flush_pending(mm);
mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
0, NULL, mm, 0, -1UL);
mmu_notifier_invalidate_range_start(&range);
}
walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
&cp);
- if (type == CLEAR_REFS_SOFT_DIRTY)
+ if (type == CLEAR_REFS_SOFT_DIRTY) {
mmu_notifier_invalidate_range_end(&range);
- tlb_finish_mmu(&tlb, 0, -1);
+ flush_tlb_mm(mm);
+ dec_tlb_flush_pending(mm);
+ }
out_unlock:
mmap_write_unlock(mm);
out_mm:
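
Editorial note, not part of the diff: the clear_refs hunk above drops the struct mmu_gather and instead brackets the soft-dirty walk with inc_tlb_flush_pending()/dec_tlb_flush_pending() and a single flush_tlb_mm() at the end. A sketch of that bracket, under the assumption that the caller holds mmap_lock for write as clear_refs_write() does (the helper name is hypothetical):

#include <linux/mm_types.h>
#include <asm/tlbflush.h>

static void example_soft_dirty_flush(struct mm_struct *mm)
{
	/* Tell concurrent readers a TLB flush is pending for this mm. */
	inc_tlb_flush_pending(mm);

	/* ... walk the VMAs and clear soft-dirty / write-protect PTEs ... */

	/* One flush for the whole mm, then clear the pending state. */
	flush_tlb_mm(mm);
	dec_tlb_flush_pending(mm);
}
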
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index c3a345c28a93..9a15334da208 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -1503,11 +1503,8 @@ int vmcore_add_device_dump(struct vmcoredd_data *data)
return 0;
out_err:
- if (buf)
- vfree(buf);
-
- if (dump)
- vfree(dump);
+ vfree(buf);
+ vfree(dump);
return ret;
}
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index eafb75755fa3..392ef5162655 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -79,6 +79,9 @@ static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
if (mnt->mnt_flags & fs_infop->flag)
seq_puts(m, fs_infop->str);
}
+
+ if (mnt_user_ns(mnt) != &init_user_ns)
+ seq_puts(m, ",idmapped");
}
static inline void mangle(struct seq_file *m, const char *s)
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 93a217e4f563..14658b009f1b 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -467,7 +467,7 @@ static struct dentry *pstore_mount(struct file_system_type *fs_type,
static void pstore_kill_sb(struct super_block *sb)
{
mutex_lock(&pstore_sb_lock);
- WARN_ON(pstore_sb != sb);
+ WARN_ON(pstore_sb && pstore_sb != sb);
kill_litter_super(sb);
pstore_sb = NULL;
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 32f64abc277c..d963ae7902f9 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -269,7 +269,7 @@ static int pstore_compress(const void *in, void *out,
{
int ret;
- if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION))
+ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
return -EINVAL;
ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
@@ -671,7 +671,7 @@ static void decompress_record(struct pstore_record *record)
int unzipped_len;
char *unzipped, *workspace;
- if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed)
+ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
return;
/* Only PSTORE_TYPE_DMESG support compression. */
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index aa8e0b65ff1a..fff363bfd484 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -246,7 +246,7 @@ static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
pr_info("error in header, %d\n", numerr);
prz->corrected_bytes += numerr;
} else if (numerr < 0) {
- pr_info("uncorrectable error in header\n");
+ pr_info_ratelimited("uncorrectable error in header\n");
prz->bad_blocks++;
}
diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c
index 5266ccbec007..7c8f8feac6c3 100644
--- a/fs/pstore/zone.c
+++ b/fs/pstore/zone.c
@@ -23,7 +23,7 @@
#include "internal.h"
/**
- * struct psz_head - header of zone to flush to storage
+ * struct psz_buffer - header of zone to flush to storage
*
* @sig: signature to indicate header (PSZ_SIG xor PSZONE-type value)
* @datalen: length of data in @data
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index c21106557a37..b1467f3921c2 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -164,19 +164,24 @@ static int v2_read_file_info(struct super_block *sb, int type)
quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).",
(loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits,
i_size_read(sb_dqopt(sb)->files[type]));
- goto out;
+ goto out_free;
}
if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
quota_error(sb, "Free block number too big (%u >= %u).",
qinfo->dqi_free_blk, qinfo->dqi_blocks);
- goto out;
+ goto out_free;
}
if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
quota_error(sb, "Block with free entry too big (%u >= %u).",
qinfo->dqi_free_entry, qinfo->dqi_blocks);
- goto out;
+ goto out_free;
}
ret = 0;
+out_free:
+ if (ret) {
+ kfree(info->dqi_priv);
+ info->dqi_priv = NULL;
+ }
out:
up_read(&dqopt->dqio_sem);
return ret;
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 355523f4a4bf..ba3525ccc27e 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -22,7 +22,7 @@
#include <linux/uaccess.h>
#include "internal.h"
-static int ramfs_nommu_setattr(struct dentry *, struct iattr *);
+static int ramfs_nommu_setattr(struct user_namespace *, struct dentry *, struct iattr *);
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
unsigned long addr,
unsigned long len,
@@ -158,14 +158,15 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
* handle a change of attributes
* - we're specifically interested in a change of size
*/
-static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
+static int ramfs_nommu_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *ia)
{
struct inode *inode = d_inode(dentry);
unsigned int old_ia_valid = ia->ia_valid;
int ret = 0;
/* POSIX UID/GID verification for setting inode attributes */
- ret = setattr_prepare(dentry, ia);
+ ret = setattr_prepare(&init_user_ns, dentry, ia);
if (ret)
return ret;
@@ -185,7 +186,7 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
}
}
- setattr_copy(inode, ia);
+ setattr_copy(&init_user_ns, inode, ia);
out:
ia->ia_valid = old_ia_valid;
return ret;
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index ee179a81b3da..9ebd17d7befb 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -67,7 +67,7 @@ struct inode *ramfs_get_inode(struct super_block *sb,
if (inode) {
inode->i_ino = get_next_ino();
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
inode->i_mapping->a_ops = &ramfs_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
mapping_set_unevictable(inode->i_mapping);
@@ -101,7 +101,8 @@ struct inode *ramfs_get_inode(struct super_block *sb,
*/
/* SMP-safe */
static int
-ramfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+ramfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode * inode = ramfs_get_inode(dir->i_sb, dir, mode, dev);
int error = -ENOSPC;
@@ -115,20 +116,23 @@ ramfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
return error;
}
-static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+static int ramfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- int retval = ramfs_mknod(dir, dentry, mode | S_IFDIR, 0);
+ int retval = ramfs_mknod(&init_user_ns, dir, dentry, mode | S_IFDIR, 0);
if (!retval)
inc_nlink(dir);
return retval;
}
-static int ramfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
+static int ramfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
- return ramfs_mknod(dir, dentry, mode | S_IFREG, 0);
+ return ramfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}
-static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char * symname)
+static int ramfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
struct inode *inode;
int error = -ENOSPC;
@@ -147,6 +151,18 @@ static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char *
return error;
}
+static int ramfs_tmpfile(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct inode *inode;
+
+ inode = ramfs_get_inode(dir->i_sb, dir, mode, 0);
+ if (!inode)
+ return -ENOSPC;
+ d_tmpfile(dentry, inode);
+ return 0;
+}
+
static const struct inode_operations ramfs_dir_inode_operations = {
.create = ramfs_create,
.lookup = simple_lookup,
@@ -157,6 +173,7 @@ static const struct inode_operations ramfs_dir_inode_operations = {
.rmdir = simple_rmdir,
.mknod = ramfs_mknod,
.rename = simple_rename,
+ .tmpfile = ramfs_tmpfile,
};
/*
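
Editorial note, not part of the diff: the new ramfs_tmpfile() hook wired into ramfs_dir_inode_operations above is what backs O_TMPFILE opens on ramfs/tmpfs-style mounts. A small user-space illustration (the path /tmp is an assumption; any directory on a filesystem providing ->tmpfile works):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* O_TMPFILE asks the filesystem's ->tmpfile hook for an unnamed
	 * inode in the given directory; the file vanishes on close unless
	 * it is linked in later.
	 */
	int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);

	if (fd < 0) {
		perror("O_TMPFILE");
		return 1;
	}
	if (write(fd, "scratch data\n", 13) < 0)
		perror("write");
	close(fd);
	return 0;
}
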
diff --git a/fs/read_write.c b/fs/read_write.c
index 75f764b43418..9db7adf160d2 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1188,6 +1188,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
{
struct fd in, out;
struct inode *in_inode, *out_inode;
+ struct pipe_inode_info *opipe;
loff_t pos;
loff_t out_pos;
ssize_t retval;
@@ -1228,9 +1229,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
in_inode = file_inode(in.file);
out_inode = file_inode(out.file);
out_pos = out.file->f_pos;
- retval = rw_verify_area(WRITE, out.file, &out_pos, count);
- if (retval < 0)
- goto fput_out;
if (!max)
max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
@@ -1253,9 +1251,18 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
if (in.file->f_flags & O_NONBLOCK)
fl = SPLICE_F_NONBLOCK;
#endif
- file_start_write(out.file);
- retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
- file_end_write(out.file);
+ opipe = get_pipe_info(out.file, true);
+ if (!opipe) {
+ retval = rw_verify_area(WRITE, out.file, &out_pos, count);
+ if (retval < 0)
+ goto fput_out;
+ file_start_write(out.file);
+ retval = do_splice_direct(in.file, &pos, out.file, &out_pos,
+ count, fl);
+ file_end_write(out.file);
+ } else {
+ retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl);
+ }
if (retval > 0) {
add_rchar(current, retval);
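
Editorial note, not part of the diff: with the do_sendfile() hunk above, output to a pipe is routed through the new splice_file_to_pipe() helper (added further down in the fs/splice.c hunks) instead of do_splice_direct(). A user-space illustration of what this enables — sendfile(2) with a pipe as the output descriptor (file name is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/sendfile.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	int in = open("/etc/hostname", O_RDONLY);	/* assumed input file */

	if (in < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}
	/* After this change the kernel splices the file into the pipe. */
	if (sendfile(pfd[1], in, NULL, 4096) < 0) {
		perror("sendfile to pipe");
		return 1;
	}
	close(pfd[1]);
	/* ... read the data back from pfd[0] ... */
	return 0;
}
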
diff --git a/fs/reiserfs/acl.h b/fs/reiserfs/acl.h
index 0c1c847f992f..fd58618da360 100644
--- a/fs/reiserfs/acl.h
+++ b/fs/reiserfs/acl.h
@@ -49,7 +49,8 @@ static inline int reiserfs_acl_count(size_t size)
#ifdef CONFIG_REISERFS_FS_POSIX_ACL
struct posix_acl *reiserfs_get_acl(struct inode *inode, int type);
-int reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+int reiserfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
int reiserfs_acl_chmod(struct inode *inode);
int reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
struct inode *dir, struct dentry *dentry,
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 0b641ae694f1..1db0254bc38b 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -159,7 +159,7 @@ static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end,
barrier_done = reiserfs_commit_for_inode(inode);
reiserfs_write_unlock(inode->i_sb);
if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ blkdev_issue_flush(inode->i_sb->s_bdev);
inode_unlock(inode);
if (barrier_done < 0)
return barrier_done;
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index c76d563dec0e..780bb90c1804 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3282,13 +3282,14 @@ static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
return ret;
}
-int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
+int reiserfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
unsigned int ia_valid;
int error;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
@@ -3413,7 +3414,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
}
if (!error) {
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
}
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index adb21bea3d60..4f1cbd930179 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -59,7 +59,7 @@ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (err)
break;
- if (!inode_owner_or_capable(inode)) {
+ if (!inode_owner_or_capable(&init_user_ns, inode)) {
err = -EPERM;
goto setflags_out;
}
@@ -101,7 +101,7 @@ setflags_out:
err = put_user(inode->i_generation, (int __user *)arg);
break;
case REISERFS_IOC_SETVERSION:
- if (!inode_owner_or_capable(inode)) {
+ if (!inode_owner_or_capable(&init_user_ns, inode)) {
err = -EPERM;
break;
}
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 1594687582f0..e6eb05e2b2f1 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -615,12 +615,12 @@ static int new_inode_init(struct inode *inode, struct inode *dir, umode_t mode)
* the quota init calls have to know who to charge the quota to, so
* we have to set uid and gid here
*/
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
return dquot_initialize(inode);
}
-static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int reiserfs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
int retval;
struct inode *inode;
@@ -698,8 +698,8 @@ out_failed:
return retval;
}
-static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t rdev)
+static int reiserfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
int retval;
struct inode *inode;
@@ -781,7 +781,8 @@ out_failed:
return retval;
}
-static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int reiserfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int retval;
struct inode *inode;
@@ -1094,8 +1095,9 @@ out_unlink:
return retval;
}
-static int reiserfs_symlink(struct inode *parent_dir,
- struct dentry *dentry, const char *symname)
+static int reiserfs_symlink(struct user_namespace *mnt_userns,
+ struct inode *parent_dir, struct dentry *dentry,
+ const char *symname)
{
int retval;
struct inode *inode;
@@ -1304,7 +1306,8 @@ static void set_ino_in_dir_entry(struct reiserfs_dir_entry *de,
* one path. If it holds 2 or more, it can get into endless waiting in
* get_empty_nodes or its clones
*/
-static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+static int reiserfs_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index f69871516167..0ca2ac62e534 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -3102,7 +3102,8 @@ static inline void reiserfs_update_sd(struct reiserfs_transaction_handle *th,
}
void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
-int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
+int reiserfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr);
int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len);
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index fe63a7c3e0da..bd073836e141 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -66,14 +66,14 @@
static int xattr_create(struct inode *dir, struct dentry *dentry, int mode)
{
BUG_ON(!inode_is_locked(dir));
- return dir->i_op->create(dir, dentry, mode, true);
+ return dir->i_op->create(&init_user_ns, dir, dentry, mode, true);
}
#endif
static int xattr_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
BUG_ON(!inode_is_locked(dir));
- return dir->i_op->mkdir(dir, dentry, mode);
+ return dir->i_op->mkdir(&init_user_ns, dir, dentry, mode);
}
/*
@@ -352,7 +352,7 @@ static int chown_one_xattr(struct dentry *dentry, void *data)
* ATTR_MODE is set.
*/
attrs->ia_valid &= (ATTR_UID|ATTR_GID);
- err = reiserfs_setattr(dentry, attrs);
+ err = reiserfs_setattr(&init_user_ns, dentry, attrs);
attrs->ia_valid = ia_valid;
return err;
@@ -604,7 +604,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
inode_lock_nested(d_inode(dentry), I_MUTEX_XATTR);
inode_dio_wait(d_inode(dentry));
- err = reiserfs_setattr(dentry, &newattrs);
+ err = reiserfs_setattr(&init_user_ns, dentry, &newattrs);
inode_unlock(d_inode(dentry));
} else
update_ctime(inode);
@@ -948,7 +948,8 @@ static int xattr_mount_check(struct super_block *s)
return 0;
}
-int reiserfs_permission(struct inode *inode, int mask)
+int reiserfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ int mask)
{
/*
* We don't do permission checks on the internal objects.
@@ -957,7 +958,7 @@ int reiserfs_permission(struct inode *inode, int mask)
if (IS_PRIVATE(inode))
return 0;
- return generic_permission(inode, mask);
+ return generic_permission(&init_user_ns, inode, mask);
}
static int xattr_hide_revalidate(struct dentry *dentry, unsigned int flags)
diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
index c764352447ba..9b3b06da568c 100644
--- a/fs/reiserfs/xattr.h
+++ b/fs/reiserfs/xattr.h
@@ -16,7 +16,8 @@ int reiserfs_xattr_init(struct super_block *sb, int mount_flags);
int reiserfs_lookup_privroot(struct super_block *sb);
int reiserfs_delete_xattrs(struct inode *inode);
int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
-int reiserfs_permission(struct inode *inode, int mask);
+int reiserfs_permission(struct user_namespace *mnt_userns,
+ struct inode *inode, int mask);
#ifdef CONFIG_REISERFS_FS_XATTR
#define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir)
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index ccd40df6eb45..a9547144a099 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -18,7 +18,8 @@ static int __reiserfs_set_acl(struct reiserfs_transaction_handle *th,
int
-reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+reiserfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
int error, error2;
struct reiserfs_transaction_handle th;
@@ -40,7 +41,8 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
reiserfs_write_unlock(inode->i_sb);
if (error == 0) {
if (type == ACL_TYPE_ACCESS && acl) {
- error = posix_acl_update_mode(inode, &mode, &acl);
+ error = posix_acl_update_mode(&init_user_ns, inode,
+ &mode, &acl);
if (error)
goto unlock;
update_mode = 1;
@@ -399,5 +401,5 @@ int reiserfs_acl_chmod(struct inode *inode)
!reiserfs_posixacl(inode->i_sb))
return 0;
- return posix_acl_chmod(inode, inode->i_mode);
+ return posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
}
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index 20be9a0e5870..8965c8e5e172 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -21,7 +21,8 @@ security_get(const struct xattr_handler *handler, struct dentry *unused,
}
static int
-security_set(const struct xattr_handler *handler, struct dentry *unused,
+security_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns, struct dentry *unused,
struct inode *inode, const char *name, const void *buffer,
size_t size, int flags)
{
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
index 5ed48da3d02b..d853cea2afcd 100644
--- a/fs/reiserfs/xattr_trusted.c
+++ b/fs/reiserfs/xattr_trusted.c
@@ -20,7 +20,8 @@ trusted_get(const struct xattr_handler *handler, struct dentry *unused,
}
static int
-trusted_set(const struct xattr_handler *handler, struct dentry *unused,
+trusted_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns, struct dentry *unused,
struct inode *inode, const char *name, const void *buffer,
size_t size, int flags)
{
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
index a573ca45bacc..65d9cd10a5ea 100644
--- a/fs/reiserfs/xattr_user.c
+++ b/fs/reiserfs/xattr_user.c
@@ -18,7 +18,8 @@ user_get(const struct xattr_handler *handler, struct dentry *unused,
}
static int
-user_set(const struct xattr_handler *handler, struct dentry *unused,
+user_set(const struct xattr_handler *handler, struct user_namespace *mnt_userns,
+ struct dentry *unused,
struct inode *inode, const char *name, const void *buffer,
size_t size, int flags)
{
diff --git a/fs/remap_range.c b/fs/remap_range.c
index 77dba3a49e65..e4a5fdd7ad7b 100644
--- a/fs/remap_range.c
+++ b/fs/remap_range.c
@@ -432,13 +432,16 @@ EXPORT_SYMBOL(vfs_clone_file_range);
/* Check whether we are allowed to dedupe the destination file */
static bool allow_file_dedupe(struct file *file)
{
+ struct user_namespace *mnt_userns = file_mnt_user_ns(file);
+ struct inode *inode = file_inode(file);
+
if (capable(CAP_SYS_ADMIN))
return true;
if (file->f_mode & FMODE_WRITE)
return true;
- if (uid_eq(current_fsuid(), file_inode(file)->i_uid))
+ if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)))
return true;
- if (!inode_permission(file_inode(file), MAY_WRITE))
+ if (!inode_permission(mnt_userns, inode, MAY_WRITE))
return true;
return false;
}
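
Editorial note, not part of the diff: the allow_file_dedupe() hunk above illustrates how ownership checks must behave on idmapped mounts — current_fsuid() is compared against the inode's uid as translated through the mount's user namespace, and inode_permission() receives that namespace too. A minimal sketch of such an ownership check (the helper name is hypothetical; i_uid_into_mnt() and file_mnt_user_ns() are the helpers used in the hunk):

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/user_namespace.h>

static bool example_owns_inode(struct user_namespace *mnt_userns,
			       const struct inode *inode)
{
	/* Compare the caller's fsuid with the uid as seen through the
	 * mount's idmapping; on non-idmapped mounts this degenerates to
	 * a plain comparison with inode->i_uid.
	 */
	return uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode));
}
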
diff --git a/fs/select.c b/fs/select.c
index 37aaa8317f3a..945896d0ac9e 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -1055,10 +1055,9 @@ static long do_restart_poll(struct restart_block *restart_block)
ret = do_sys_poll(ufds, nfds, to);
- if (ret == -ERESTARTNOHAND) {
- restart_block->fn = do_restart_poll;
- ret = -ERESTART_RESTARTBLOCK;
- }
+ if (ret == -ERESTARTNOHAND)
+ ret = set_restart_fn(restart_block, do_restart_poll);
+
return ret;
}
@@ -1080,7 +1079,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
struct restart_block *restart_block;
restart_block = &current->restart_block;
- restart_block->fn = do_restart_poll;
restart_block->poll.ufds = ufds;
restart_block->poll.nfds = nfds;
@@ -1091,7 +1089,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
} else
restart_block->poll.has_timeout = 0;
- ret = -ERESTART_RESTARTBLOCK;
+ ret = set_restart_fn(restart_block, do_restart_poll);
}
return ret;
}
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 03a369ccd28c..cb11a34fb871 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -669,7 +669,8 @@ void seq_puts(struct seq_file *m, const char *s)
EXPORT_SYMBOL(seq_puts);
/**
- * A helper routine for putting decimal numbers without rich format of printf().
+ * seq_put_decimal_ull_width - A helper routine for putting decimal numbers
+ * without rich format of printf().
* only 'unsigned long long' is supported.
* @m: seq_file identifying the buffer to which data should be written
* @delimiter: a string which is printed before the number
@@ -1044,7 +1045,7 @@ struct hlist_node *seq_hlist_next_rcu(void *v,
EXPORT_SYMBOL(seq_hlist_next_rcu);
/**
- * seq_hlist_start_precpu - start an iteration of a percpu hlist array
+ * seq_hlist_start_percpu - start an iteration of a percpu hlist array
* @head: pointer to percpu array of struct hlist_heads
* @cpu: pointer to cpu "cursor"
* @pos: start position of sequence
diff --git a/fs/splice.c b/fs/splice.c
index 866d5c2367b2..5dbce4dcc1a7 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -662,12 +662,14 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
/* build the vector */
left = sd.total_len;
- for (n = 0; !pipe_empty(head, tail) && left && n < nbufs; tail++, n++) {
+ for (n = 0; !pipe_empty(head, tail) && left && n < nbufs; tail++) {
struct pipe_buffer *buf = &pipe->bufs[tail & mask];
size_t this_len = buf->len;
- if (this_len > left)
- this_len = left;
+ /* zero-length bvecs are not supported, skip them */
+ if (!this_len)
+ continue;
+ this_len = min(this_len, left);
ret = pipe_buf_confirm(pipe, buf);
if (unlikely(ret)) {
@@ -680,6 +682,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
array[n].bv_len = this_len;
array[n].bv_offset = buf->offset;
left -= this_len;
+ n++;
}
iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left);
@@ -771,11 +774,16 @@ static long do_splice_to(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
+ unsigned int p_space;
int ret;
if (unlikely(!(in->f_mode & FMODE_READ)))
return -EBADF;
+ /* Don't try to read more than the pipe has space for. */
+ p_space = pipe->max_usage - pipe_occupancy(pipe->head, pipe->tail);
+ len = min_t(size_t, len, p_space << PAGE_SHIFT);
+
ret = rw_verify_area(READ, in, ppos, len);
if (unlikely(ret < 0))
return ret;
@@ -856,15 +864,10 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
WARN_ON_ONCE(!pipe_empty(pipe->head, pipe->tail));
while (len) {
- unsigned int p_space;
size_t read_len;
loff_t pos = sd->pos, prev_pos = pos;
- /* Don't try to read more the pipe has space for. */
- p_space = pipe->max_usage -
- pipe_occupancy(pipe->head, pipe->tail);
- read_len = min_t(size_t, len, p_space << PAGE_SHIFT);
- ret = do_splice_to(in, &pos, pipe, read_len, flags);
+ ret = do_splice_to(in, &pos, pipe, len, flags);
if (unlikely(ret <= 0))
goto out_release;
@@ -1002,6 +1005,23 @@ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags);
+long splice_file_to_pipe(struct file *in,
+ struct pipe_inode_info *opipe,
+ loff_t *offset,
+ size_t len, unsigned int flags)
+{
+ long ret;
+
+ pipe_lock(opipe);
+ ret = wait_for_space(opipe, flags);
+ if (!ret)
+ ret = do_splice_to(in, offset, opipe, len, flags);
+ pipe_unlock(opipe);
+ if (ret > 0)
+ wakeup_pipe_readers(opipe);
+ return ret;
+}
+
/*
* Determine where to splice to/from.
*/
@@ -1081,20 +1101,7 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
if (out->f_flags & O_NONBLOCK)
flags |= SPLICE_F_NONBLOCK;
- pipe_lock(opipe);
- ret = wait_for_space(opipe, flags);
- if (!ret) {
- unsigned int p_space;
-
- /* Don't try to read more the pipe has space for. */
- p_space = opipe->max_usage - pipe_occupancy(opipe->head, opipe->tail);
- len = min_t(size_t, len, p_space << PAGE_SHIFT);
-
- ret = do_splice_to(in, &offset, opipe, len, flags);
- }
- pipe_unlock(opipe);
- if (ret > 0)
- wakeup_pipe_readers(opipe);
+ ret = splice_file_to_pipe(in, opipe, &offset, len, flags);
if (!off_in)
in->f_pos = offset;
else
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 45f44425d856..b9e87ebb1060 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -87,7 +87,7 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
int error, i;
struct bio *bio;
- if (page_count <= BIO_MAX_PAGES)
+ if (page_count <= BIO_MAX_VECS)
bio = bio_alloc(GFP_NOIO, page_count);
else
bio = bio_kmalloc(GFP_NOIO, page_count);
diff --git a/fs/stat.c b/fs/stat.c
index dacecdda2e79..fbc171d038aa 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -26,21 +26,29 @@
/**
* generic_fillattr - Fill in the basic attributes from the inode struct
- * @inode: Inode to use as the source
- * @stat: Where to fill in the attributes
+ * @mnt_userns: user namespace of the mount the inode was found from
+ * @inode: Inode to use as the source
+ * @stat: Where to fill in the attributes
*
* Fill in the basic attributes in the kstat structure from data that's to be
* found on the VFS inode structure. This is the default if no getattr inode
* operation is supplied.
+ *
+ * If the inode has been found through an idmapped mount the user namespace of
+ * the vfsmount must be passed through @mnt_userns. This function will then
+ * take care to map the inode according to @mnt_userns before filling in the
+ * uid and gid fields. On non-idmapped mounts or if permission checking is to be
+ * performed on the raw inode simply pass init_user_ns.
*/
-void generic_fillattr(struct inode *inode, struct kstat *stat)
+void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
+ struct kstat *stat)
{
stat->dev = inode->i_sb->s_dev;
stat->ino = inode->i_ino;
stat->mode = inode->i_mode;
stat->nlink = inode->i_nlink;
- stat->uid = inode->i_uid;
- stat->gid = inode->i_gid;
+ stat->uid = i_uid_into_mnt(mnt_userns, inode);
+ stat->gid = i_gid_into_mnt(mnt_userns, inode);
stat->rdev = inode->i_rdev;
stat->size = i_size_read(inode);
stat->atime = inode->i_atime;
@@ -67,6 +75,7 @@ EXPORT_SYMBOL(generic_fillattr);
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
+ struct user_namespace *mnt_userns;
struct inode *inode = d_backing_inode(path->dentry);
memset(stat, 0, sizeof(*stat));
@@ -83,11 +92,12 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
if (IS_DAX(inode))
stat->attributes |= STATX_ATTR_DAX;
+ mnt_userns = mnt_user_ns(path->mnt);
if (inode->i_op->getattr)
- return inode->i_op->getattr(path, stat, request_mask,
- query_flags);
+ return inode->i_op->getattr(mnt_userns, path, stat,
+ request_mask, query_flags);
- generic_fillattr(inode, stat);
+ generic_fillattr(mnt_userns, inode, stat);
return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);
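
Editorial note, not part of the diff: the generic_fillattr() documentation above spells out the contract — a filesystem that supports idmapped mounts forwards the mnt_userns it receives so stat reports the mapped uid/gid, while the unconverted filesystems in this series pass &init_user_ns. A minimal sketch of a ->getattr hook that does forward the namespace (example_getattr is a hypothetical name):

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/user_namespace.h>

static int example_getattr(struct user_namespace *mnt_userns,
			   const struct path *path, struct kstat *stat,
			   u32 request_mask, unsigned int query_flags)
{
	/* Fill in the basic attributes, mapping i_uid/i_gid through the
	 * mount's user namespace before they are copied into @stat.
	 */
	generic_fillattr(mnt_userns, d_inode(path->dentry), stat);
	return 0;
}
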
diff --git a/fs/statfs.c b/fs/statfs.c
index 68cb07788750..0ba34c135593 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -255,7 +255,10 @@ SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
memset(&tmp,0,sizeof(struct ustat));
tmp.f_tfree = sbuf.f_bfree;
- tmp.f_tinode = sbuf.f_ffree;
+ if (IS_ENABLED(CONFIG_ARCH_32BIT_USTAT_F_TINODE))
+ tmp.f_tinode = min_t(u64, sbuf.f_ffree, UINT_MAX);
+ else
+ tmp.f_tinode = sbuf.f_ffree;
return copy_to_user(ubuf, &tmp, sizeof(struct ustat)) ? -EFAULT : 0;
}
diff --git a/fs/super.c b/fs/super.c
index 2c6cdea2ab2d..8c1baca35c16 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -865,7 +865,8 @@ int reconfigure_super(struct fs_context *fc)
if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
- if (!(fc->sb_flags & SB_RDONLY) && bdev_read_only(sb->s_bdev))
+ if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
+ bdev_read_only(sb->s_bdev))
return -EACCES;
#endif
@@ -1718,12 +1719,6 @@ int freeze_super(struct super_block *sb)
}
EXPORT_SYMBOL(freeze_super);
-/**
- * thaw_super -- unlock filesystem
- * @sb: the super to thaw
- *
- * Unlocks the filesystem and marks it writeable again after freeze_super().
- */
static int thaw_super_locked(struct super_block *sb)
{
int error;
@@ -1759,6 +1754,12 @@ out:
return 0;
}
+/**
+ * thaw_super -- unlock filesystem
+ * @sb: the super to thaw
+ *
+ * Unlocks the filesystem and marks it writeable again after freeze_super().
+ */
int thaw_super(struct super_block *sb)
{
down_write(&sb->s_umount);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 96d0da65e088..9aefa7779b29 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -170,6 +170,16 @@ static int sysfs_kf_bin_mmap(struct kernfs_open_file *of,
return battr->mmap(of->file, kobj, battr, vma);
}
+static int sysfs_kf_bin_open(struct kernfs_open_file *of)
+{
+ struct bin_attribute *battr = of->kn->priv;
+
+ if (battr->mapping)
+ of->file->f_mapping = battr->mapping;
+
+ return 0;
+}
+
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr)
{
struct kernfs_node *kn = kobj->sd, *tmp;
@@ -241,6 +251,7 @@ static const struct kernfs_ops sysfs_bin_kfops_mmap = {
.read = sysfs_kf_bin_read,
.write = sysfs_kf_bin_write,
.mmap = sysfs_kf_bin_mmap,
+ .open = sysfs_kf_bin_open,
};
int sysfs_add_file_mode_ns(struct kernfs_node *parent,
diff --git a/fs/sysv/file.c b/fs/sysv/file.c
index 45fc79a18594..90e00124ea07 100644
--- a/fs/sysv/file.c
+++ b/fs/sysv/file.c
@@ -29,12 +29,13 @@ const struct file_operations sysv_file_operations = {
.splice_read = generic_file_splice_read,
};
-static int sysv_setattr(struct dentry *dentry, struct iattr *attr)
+static int sysv_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
int error;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
@@ -47,7 +48,7 @@ static int sysv_setattr(struct dentry *dentry, struct iattr *attr)
sysv_truncate(inode);
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index 6c9801986af6..50df794a3c1f 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -163,7 +163,7 @@ struct inode * sysv_new_inode(const struct inode * dir, umode_t mode)
*sbi->s_sb_fic_count = cpu_to_fs16(sbi, count);
fs16_add(sbi, sbi->s_sb_total_free_inodes, -1);
dirty_sb(sb);
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
inode->i_ino = fs16_to_cpu(sbi, ino);
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
inode->i_blocks = 0;
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index bcb67b0cabe7..8b2e99b7bc9f 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -441,11 +441,11 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size)
return blocks;
}
-int sysv_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int sysv_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct super_block *s = path->dentry->d_sb;
- generic_fillattr(d_inode(path->dentry), stat);
+ generic_fillattr(&init_user_ns, d_inode(path->dentry), stat);
stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
stat->blksize = s->s_blocksize;
return 0;
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index ea2414b385ec..b2e6abc06a2d 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -41,7 +41,8 @@ static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, un
return d_splice_alias(inode, dentry);
}
-static int sysv_mknod(struct inode * dir, struct dentry * dentry, umode_t mode, dev_t rdev)
+static int sysv_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode * inode;
int err;
@@ -60,13 +61,14 @@ static int sysv_mknod(struct inode * dir, struct dentry * dentry, umode_t mode,
return err;
}
-static int sysv_create(struct inode * dir, struct dentry * dentry, umode_t mode, bool excl)
+static int sysv_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
- return sysv_mknod(dir, dentry, mode, 0);
+ return sysv_mknod(&init_user_ns, dir, dentry, mode, 0);
}
-static int sysv_symlink(struct inode * dir, struct dentry * dentry,
- const char * symname)
+static int sysv_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
int err = -ENAMETOOLONG;
int l = strlen(symname)+1;
@@ -108,7 +110,8 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir,
return add_nondir(dentry, inode);
}
-static int sysv_mkdir(struct inode * dir, struct dentry *dentry, umode_t mode)
+static int sysv_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode * inode;
int err;
@@ -186,9 +189,9 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
* Anybody can rename anything with this: the permission checks are left to the
* higher-level routines.
*/
-static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
- struct inode * new_dir, struct dentry * new_dentry,
- unsigned int flags)
+static int sysv_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct inode * old_inode = d_inode(old_dentry);
struct inode * new_inode = d_inode(new_dentry);
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 1cff585526b1..99ddf033da4f 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -141,7 +141,8 @@ extern struct inode *sysv_iget(struct super_block *, unsigned int);
extern int sysv_write_inode(struct inode *, struct writeback_control *wbc);
extern int sysv_sync_inode(struct inode *);
extern void sysv_set_inode(struct inode *, dev_t);
-extern int sysv_getattr(const struct path *, struct kstat *, u32, unsigned int);
+extern int sysv_getattr(struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern int sysv_init_icache(void);
extern void sysv_destroy_icache(void);
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 0ee8c6dfb036..4b83cbded559 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -67,7 +67,9 @@ static char *get_dname(struct dentry *dentry)
return name;
}
-static int tracefs_syscall_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
+static int tracefs_syscall_mkdir(struct user_namespace *mnt_userns,
+ struct inode *inode, struct dentry *dentry,
+ umode_t mode)
{
char *name;
int ret;
diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
index 51a7c8c2c3f0..e564d5ff8781 100644
--- a/fs/ubifs/auth.c
+++ b/fs/ubifs/auth.c
@@ -327,7 +327,7 @@ int ubifs_init_authentication(struct ubifs_info *c)
ubifs_err(c, "hmac %s is bigger than maximum allowed hmac size (%d > %d)",
hmac_name, c->hmac_desc_len, UBIFS_HMAC_ARR_SZ);
err = -EINVAL;
- goto out_free_hash;
+ goto out_free_hmac;
}
err = crypto_shash_setkey(c->hmac_tfm, ukp->data, ukp->datalen);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 9a6b8660425a..d9d8d7794eff 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -94,7 +94,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
*/
inode->i_flags |= S_NOCMTIME;
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
inode->i_mtime = inode->i_atime = inode->i_ctime =
current_time(inode);
inode->i_mapping->nrpages = 0;
@@ -280,8 +280,8 @@ static int ubifs_prepare_create(struct inode *dir, struct dentry *dentry,
return fscrypt_setup_filename(dir, &dentry->d_name, 0, nm);
}
-static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int ubifs_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode;
struct ubifs_info *c = dir->i_sb->s_fs_info;
@@ -441,8 +441,8 @@ out_budg:
return err;
}
-static int ubifs_tmpfile(struct inode *dir, struct dentry *dentry,
- umode_t mode)
+static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
return do_tmpfile(dir, dentry, mode, NULL);
}
@@ -942,7 +942,8 @@ out_fname:
return err;
}
-static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int ubifs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct ubifs_inode *dir_ui = ubifs_inode(dir);
@@ -1013,8 +1014,8 @@ out_budg:
return err;
}
-static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
+static int ubifs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode *inode;
struct ubifs_inode *ui;
@@ -1102,8 +1103,8 @@ out_budg:
return err;
}
-static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
+static int ubifs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
struct inode *inode;
struct ubifs_inode *ui;
@@ -1542,7 +1543,8 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
return err;
}
-static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
+static int ubifs_rename(struct user_namespace *mnt_userns,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
@@ -1566,8 +1568,8 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
return do_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}
-int ubifs_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int ubifs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *stat, u32 request_mask, unsigned int flags)
{
loff_t size;
struct inode *inode = d_inode(path->dentry);
@@ -1589,7 +1591,7 @@ int ubifs_getattr(const struct path *path, struct kstat *stat,
STATX_ATTR_ENCRYPTED |
STATX_ATTR_IMMUTABLE);
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
stat->blksize = UBIFS_BLOCK_SIZE;
stat->size = ui->ui_size;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 2bc7780d2963..0e4b4be3aa26 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1257,7 +1257,8 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
return err;
}
-int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
+int ubifs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
int err;
struct inode *inode = d_inode(dentry);
@@ -1265,7 +1266,7 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
dbg_gen("ino %lu, mode %#x, ia_valid %#x",
inode->i_ino, inode->i_mode, attr->ia_valid);
- err = setattr_prepare(dentry, attr);
+ err = setattr_prepare(&init_user_ns, dentry, attr);
if (err)
return err;
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 4363d85a3fd4..2326d5122beb 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -155,7 +155,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (IS_RDONLY(inode))
return -EROFS;
- if (!inode_owner_or_capable(inode))
+ if (!inode_owner_or_capable(&init_user_ns, inode))
return -EACCES;
if (get_user(flags, (int __user *) arg))
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 03410ae0813a..2857e64d673d 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -881,7 +881,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
struct inode *xino;
struct ubifs_dent_node *xent, *pxent = NULL;
- if (ui->xattr_cnt >= ubifs_xattr_max_cnt(c)) {
+ if (ui->xattr_cnt > ubifs_xattr_max_cnt(c)) {
ubifs_err(c, "Cannot delete inode, it has too much xattrs!");
goto out_release;
}
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 79801c9a5b87..0f8a6a16421b 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -559,7 +559,9 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
}
/* authenticate_sleb_hash is split out for stack usage */
-static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash)
+static int noinline_for_stack
+authenticate_sleb_hash(struct ubifs_info *c,
+ struct shash_desc *log_hash, u8 *hash)
{
SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 138b9426c6c1..ddb2ca636c93 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -838,8 +838,10 @@ static int alloc_wbufs(struct ubifs_info *c)
c->jheads[i].wbuf.jhead = i;
c->jheads[i].grouped = 1;
c->jheads[i].log_hash = ubifs_hash_get_desc(c);
- if (IS_ERR(c->jheads[i].log_hash))
+ if (IS_ERR(c->jheads[i].log_hash)) {
+ err = PTR_ERR(c->jheads[i].log_hash);
goto out;
+ }
}
/*
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index fc2cdde3b549..7fdfdbda4b8a 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1989,13 +1989,14 @@ int ubifs_calc_dark(const struct ubifs_info *c, int spc);
/* file.c */
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync);
-int ubifs_setattr(struct dentry *dentry, struct iattr *attr);
+int ubifs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr);
int ubifs_update_time(struct inode *inode, struct timespec64 *time, int flags);
/* dir.c */
struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
umode_t mode);
-int ubifs_getattr(const struct path *path, struct kstat *stat,
+int ubifs_getattr(struct user_namespace *mnt_userns, const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
int ubifs_check_dir_empty(struct inode *dir);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index a0b9b349efe6..6b1e9830b274 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -498,7 +498,7 @@ int ubifs_purge_xattrs(struct inode *host)
struct fscrypt_name nm = {0};
int err;
- if (ubifs_inode(host)->xattr_cnt < ubifs_xattr_max_cnt(c))
+ if (ubifs_inode(host)->xattr_cnt <= ubifs_xattr_max_cnt(c))
return 0;
ubifs_warn(c, "inode %lu has too many xattrs, doing a non-atomic deletion",
@@ -681,6 +681,7 @@ static int xattr_get(const struct xattr_handler *handler,
}
static int xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/fs/udf/file.c b/fs/udf/file.c
index ad8eefad27d7..2846dcd92197 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -183,7 +183,7 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
long old_block, new_block;
int result;
- if (inode_permission(inode, MAY_READ) != 0) {
+ if (file_permission(filp, MAY_READ) != 0) {
udf_debug("no permission to access inode %lu\n", inode->i_ino);
return -EPERM;
}
@@ -253,13 +253,14 @@ const struct file_operations udf_file_operations = {
.llseek = generic_file_llseek,
};
-static int udf_setattr(struct dentry *dentry, struct iattr *attr)
+static int udf_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
int error;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
@@ -282,7 +283,7 @@ static int udf_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid & ATTR_MODE)
udf_update_extra_perms(inode, attr->ia_mode);
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 84ed23edebfd..2ecf0e87660e 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -103,7 +103,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode)
mutex_unlock(&sbi->s_alloc_mutex);
}
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
inode->i_uid = sbi->s_uid;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index bb89c3e43212..0dd2f93ac048 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -544,11 +544,14 @@ static int udf_do_extend_file(struct inode *inode,
udf_write_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
+
/*
- * We've rewritten the last extent but there may be empty
- * indirect extent after it - enter it.
+ * We've rewritten the last extent. If we are going to add
+ * more extents, we may need to enter possible following
+ * empty indirect extent.
*/
- udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
+ if (new_block_bytes || prealloc_len)
+ udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
}
/* Managed to do everything necessary? */
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index e169d8fe35b5..f146b3089f3d 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -604,8 +604,8 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
return 0;
}
-static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool excl)
+static int udf_create(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode = udf_new_inode(dir, mode);
@@ -623,7 +623,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
return udf_add_nondir(dentry, inode);
}
-static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int udf_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode = udf_new_inode(dir, mode);
@@ -642,8 +643,8 @@ static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
return 0;
}
-static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t rdev)
+static int udf_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode *inode;
@@ -658,7 +659,8 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
return udf_add_nondir(dentry, inode);
}
-static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int udf_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct udf_fileident_bh fibh;
@@ -877,8 +879,8 @@ out:
return retval;
}
-static int udf_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
+static int udf_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, const char *symname)
{
struct inode *inode = udf_new_inode(dir, S_IFLNK | 0777);
struct pathComponent *pc;
@@ -1065,9 +1067,9 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
/* Anybody can rename anything with this: the permission checks are left to the
* higher-level routines.
*/
-static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int udf_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index d0df217f4712..2f83c1204e20 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -459,6 +459,7 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
{
char *p;
int option;
+ unsigned int uv;
uopt->novrs = 0;
uopt->session = 0xFFFFFFFF;
@@ -508,17 +509,17 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
break;
case Opt_gid:
- if (match_int(args, &option))
+ if (match_uint(args, &uv))
return 0;
- uopt->gid = make_kgid(current_user_ns(), option);
+ uopt->gid = make_kgid(current_user_ns(), uv);
if (!gid_valid(uopt->gid))
return 0;
uopt->flags |= (1 << UDF_FLAG_GID_SET);
break;
case Opt_uid:
- if (match_int(args, &option))
+ if (match_uint(args, &uv))
return 0;
- uopt->uid = make_kuid(current_user_ns(), option);
+ uopt->uid = make_kuid(current_user_ns(), uv);
if (!uid_valid(uopt->uid))
return 0;
uopt->flags |= (1 << UDF_FLAG_UID_SET);
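Editor's note: the switch from match_int() to match_uint() above is a small behavioural fix. uid= and gid= carry raw 32-bit ids, and a signed int parse cannot represent ids above INT_MAX, so such values failed the option parse; with the unsigned parser the full id range reaches make_kuid()/make_kgid(), and ids with no mapping still fall out through the uid_valid()/gid_valid() checks. A condensed sketch of the resulting flow, identifiers as in the hunk above:

	unsigned int uv;

	if (match_uint(args, &uv))	/* non-numeric or negative input */
		return 0;
	uopt->uid = make_kuid(current_user_ns(), uv);
	if (!uid_valid(uopt->uid))	/* id has no mapping in this namespace */
		return 0;
	uopt->flags |= (1 << UDF_FLAG_UID_SET);

For example, mounting with uid=4294967294 should now get past option parsing where the signed parse previously failed (hedged: the exact cut-off depends on match_int()'s range checking in the running kernel).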
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index c973db239604..9b223421a3c5 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -152,14 +152,15 @@ out_unmap:
return err;
}
-static int udf_symlink_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+static int udf_symlink_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
struct inode *inode = d_backing_inode(dentry);
struct page *page;
- generic_fillattr(inode, stat);
+ generic_fillattr(&init_user_ns, inode, stat);
page = read_mapping_page(inode->i_mapping, 0, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 969fd60436d3..7e3e08c0166f 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -289,7 +289,7 @@ cg_found:
ufs_mark_sb_dirty(sb);
inode->i_ino = cg * uspi->s_ipg + bit;
- inode_init_owner(inode, dir, mode);
+ inode_init_owner(&init_user_ns, inode, dir, mode);
inode->i_blocks = 0;
inode->i_generation = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index c843ec858cf7..debc282c1bb4 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -1211,13 +1211,14 @@ out:
return err;
}
-int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+int ufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
unsigned int ia_valid = attr->ia_valid;
int error;
- error = setattr_prepare(dentry, attr);
+ error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
return error;
@@ -1227,7 +1228,7 @@ int ufs_setattr(struct dentry *dentry, struct iattr *attr)
return error;
}
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 9ef40f100415..29d5a0e0c8f0 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -69,7 +69,8 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, unsi
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ufs_create (struct inode * dir, struct dentry * dentry, umode_t mode,
+static int ufs_create (struct user_namespace * mnt_userns,
+ struct inode * dir, struct dentry * dentry, umode_t mode,
bool excl)
{
struct inode *inode;
@@ -85,7 +86,8 @@ static int ufs_create (struct inode * dir, struct dentry * dentry, umode_t mode,
return ufs_add_nondir(dentry, inode);
}
-static int ufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
+static int ufs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode *inode;
int err;
@@ -104,8 +106,8 @@ static int ufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev
return err;
}
-static int ufs_symlink (struct inode * dir, struct dentry * dentry,
- const char * symname)
+static int ufs_symlink (struct user_namespace * mnt_userns, struct inode * dir,
+ struct dentry * dentry, const char * symname)
{
struct super_block * sb = dir->i_sb;
int err;
@@ -164,7 +166,8 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
return error;
}
-static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+static int ufs_mkdir(struct user_namespace * mnt_userns, struct inode * dir,
+ struct dentry * dentry, umode_t mode)
{
struct inode * inode;
int err;
@@ -240,9 +243,9 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
return err;
}
-static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int ufs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
{
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index b49e0efdf3d7..550f7c5a3636 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -123,7 +123,8 @@ extern struct inode *ufs_iget(struct super_block *, unsigned long);
extern int ufs_write_inode (struct inode *, struct writeback_control *);
extern int ufs_sync_inode (struct inode *);
extern void ufs_evict_inode (struct inode *);
-extern int ufs_setattr(struct dentry *dentry, struct iattr *attr);
+extern int ufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr);
/* namei.c */
extern const struct file_operations ufs_dir_operations;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 894cc28142e7..0be8cdd4425a 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -979,14 +979,14 @@ static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
static const struct file_operations userfaultfd_fops;
-static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
- struct userfaultfd_ctx *new,
+static int resolve_userfault_fork(struct userfaultfd_ctx *new,
+ struct inode *inode,
struct uffd_msg *msg)
{
int fd;
- fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, new,
- O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS));
+ fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
+ O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
if (fd < 0)
return fd;
@@ -996,7 +996,7 @@ static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
}
static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
- struct uffd_msg *msg)
+ struct uffd_msg *msg, struct inode *inode)
{
ssize_t ret;
DECLARE_WAITQUEUE(wait, current);
@@ -1107,7 +1107,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
spin_unlock_irq(&ctx->fd_wqh.lock);
if (!ret && msg->event == UFFD_EVENT_FORK) {
- ret = resolve_userfault_fork(ctx, fork_nctx, msg);
+ ret = resolve_userfault_fork(fork_nctx, inode, msg);
spin_lock_irq(&ctx->event_wqh.lock);
if (!list_empty(&fork_event)) {
/*
@@ -1167,6 +1167,7 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf,
ssize_t _ret, ret = 0;
struct uffd_msg msg;
int no_wait = file->f_flags & O_NONBLOCK;
+ struct inode *inode = file_inode(file);
if (ctx->state == UFFD_STATE_WAIT_API)
return -EINVAL;
@@ -1174,7 +1175,7 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf,
for (;;) {
if (count < sizeof(msg))
return ret ? ret : -EINVAL;
- _ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
+ _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
if (_ret < 0)
return ret ? ret : _ret;
if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
@@ -1999,8 +2000,8 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
/* prevent the mm struct from being freed */
mmgrab(ctx->mm);
- fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
- O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
+ fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
+ O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
if (fd < 0) {
mmdrop(ctx->mm);
kmem_cache_free(userfaultfd_ctx_cachep, ctx);
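Editor's note: the userfaultfd hunks above switch fd creation to anon_inode_getfd_secure(), whose extra "context inode" argument lets security modules (SELinux in particular) derive a per-file label for the anonymous inode instead of sharing one global anonymous-inode label. The fork-resolution path passes the inode of the originating userfaultfd; the userfaultfd() syscall has no related inode and passes NULL. A small sketch of the call as used here, wrapped in a hypothetical helper:

/*
 * Sketch: create an anonymous fd whose inode LSMs can label relative to
 * @context_inode (NULL means "no related inode").
 */
static int example_uffd_getfd(struct userfaultfd_ctx *ctx,
			      struct inode *context_inode)
{
	return anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
				       O_RDWR | (ctx->flags & UFFD_SHARED_FCNTL_FLAGS),
				       context_inode);
}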
diff --git a/fs/utimes.c b/fs/utimes.c
index fd3cc4226224..39f356017635 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -62,7 +62,8 @@ int vfs_utimes(const struct path *path, struct timespec64 *times)
}
retry_deleg:
inode_lock(inode);
- error = notify_change(path->dentry, &newattrs, &delegated_inode);
+ error = notify_change(mnt_user_ns(path->mnt), path->dentry, &newattrs,
+ &delegated_inode);
inode_unlock(inode);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c
index 4d569f14a8d8..7aee0ec63ade 100644
--- a/fs/vboxsf/dir.c
+++ b/fs/vboxsf/dir.c
@@ -288,13 +288,15 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
return 0;
}
-static int vboxsf_dir_mkfile(struct inode *parent, struct dentry *dentry,
+static int vboxsf_dir_mkfile(struct user_namespace *mnt_userns,
+ struct inode *parent, struct dentry *dentry,
umode_t mode, bool excl)
{
return vboxsf_dir_create(parent, dentry, mode, 0);
}
-static int vboxsf_dir_mkdir(struct inode *parent, struct dentry *dentry,
+static int vboxsf_dir_mkdir(struct user_namespace *mnt_userns,
+ struct inode *parent, struct dentry *dentry,
umode_t mode)
{
return vboxsf_dir_create(parent, dentry, mode, 1);
@@ -332,7 +334,8 @@ static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
return 0;
}
-static int vboxsf_dir_rename(struct inode *old_parent,
+static int vboxsf_dir_rename(struct user_namespace *mnt_userns,
+ struct inode *old_parent,
struct dentry *old_dentry,
struct inode *new_parent,
struct dentry *new_dentry,
@@ -374,7 +377,8 @@ err_put_old_path:
return err;
}
-static int vboxsf_dir_symlink(struct inode *parent, struct dentry *dentry,
+static int vboxsf_dir_symlink(struct user_namespace *mnt_userns,
+ struct inode *parent, struct dentry *dentry,
const char *symname)
{
struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c
index 018057546067..3b847e3fba24 100644
--- a/fs/vboxsf/utils.c
+++ b/fs/vboxsf/utils.c
@@ -212,8 +212,8 @@ int vboxsf_inode_revalidate(struct dentry *dentry)
return 0;
}
-int vboxsf_getattr(const struct path *path, struct kstat *kstat,
- u32 request_mask, unsigned int flags)
+int vboxsf_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *kstat, u32 request_mask, unsigned int flags)
{
int err;
struct dentry *dentry = path->dentry;
@@ -233,11 +233,12 @@ int vboxsf_getattr(const struct path *path, struct kstat *kstat,
if (err)
return err;
- generic_fillattr(d_inode(dentry), kstat);
+ generic_fillattr(&init_user_ns, d_inode(dentry), kstat);
return 0;
}
-int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr)
+int vboxsf_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr)
{
struct vboxsf_inode *sf_i = VBOXSF_I(d_inode(dentry));
struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
diff --git a/fs/vboxsf/vfsmod.h b/fs/vboxsf/vfsmod.h
index 18f95b00fc33..760524e78c88 100644
--- a/fs/vboxsf/vfsmod.h
+++ b/fs/vboxsf/vfsmod.h
@@ -90,9 +90,11 @@ int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
struct shfl_fsobjinfo *info);
int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info);
int vboxsf_inode_revalidate(struct dentry *dentry);
-int vboxsf_getattr(const struct path *path, struct kstat *kstat,
- u32 request_mask, unsigned int query_flags);
-int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr);
+int vboxsf_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ struct kstat *kstat, u32 request_mask,
+ unsigned int query_flags);
+int vboxsf_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *iattr);
struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
struct dentry *dentry);
int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
diff --git a/fs/verity/Makefile b/fs/verity/Makefile
index 570e9136334d..435559a4fa9e 100644
--- a/fs/verity/Makefile
+++ b/fs/verity/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_FS_VERITY) += enable.o \
init.o \
measure.o \
open.o \
+ read_metadata.o \
verify.o
obj-$(CONFIG_FS_VERITY_BUILTIN_SIGNATURES) += signature.o
diff --git a/fs/verity/enable.c b/fs/verity/enable.c
index f7e997a01ad0..77e159a0346b 100644
--- a/fs/verity/enable.c
+++ b/fs/verity/enable.c
@@ -369,7 +369,7 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
* has verity enabled, and to stabilize the data being hashed.
*/
- err = inode_permission(inode, MAY_WRITE);
+ err = file_permission(filp, MAY_WRITE);
if (err)
return err;
diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h
index 6413d28664d6..a7920434bae5 100644
--- a/fs/verity/fsverity_private.h
+++ b/fs/verity/fsverity_private.h
@@ -122,12 +122,17 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
const u8 *salt, size_t salt_size);
struct fsverity_info *fsverity_create_info(const struct inode *inode,
- void *desc, size_t desc_size);
+ struct fsverity_descriptor *desc,
+ size_t desc_size);
void fsverity_set_info(struct inode *inode, struct fsverity_info *vi);
void fsverity_free_info(struct fsverity_info *vi);
+int fsverity_get_descriptor(struct inode *inode,
+ struct fsverity_descriptor **desc_ret,
+ size_t *desc_size_ret);
+
int __init fsverity_init_info_cache(void);
void __init fsverity_exit_info_cache(void);
@@ -135,15 +140,13 @@ void __init fsverity_exit_info_cache(void);
#ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
int fsverity_verify_signature(const struct fsverity_info *vi,
- const struct fsverity_descriptor *desc,
- size_t desc_size);
+ const u8 *signature, size_t sig_size);
int __init fsverity_init_signature(void);
#else /* !CONFIG_FS_VERITY_BUILTIN_SIGNATURES */
static inline int
fsverity_verify_signature(const struct fsverity_info *vi,
- const struct fsverity_descriptor *desc,
- size_t desc_size)
+ const u8 *signature, size_t sig_size)
{
return 0;
}
diff --git a/fs/verity/open.c b/fs/verity/open.c
index 228d0eca3e2e..60ff8af7219f 100644
--- a/fs/verity/open.c
+++ b/fs/verity/open.c
@@ -142,45 +142,17 @@ static int compute_file_digest(struct fsverity_hash_alg *hash_alg,
}
/*
- * Validate the given fsverity_descriptor and create a new fsverity_info from
- * it. The signature (if present) is also checked.
+ * Create a new fsverity_info from the given fsverity_descriptor (with optional
+ * appended signature), and check the signature if present. The
+ * fsverity_descriptor must have already undergone basic validation.
*/
struct fsverity_info *fsverity_create_info(const struct inode *inode,
- void *_desc, size_t desc_size)
+ struct fsverity_descriptor *desc,
+ size_t desc_size)
{
- struct fsverity_descriptor *desc = _desc;
struct fsverity_info *vi;
int err;
- if (desc_size < sizeof(*desc)) {
- fsverity_err(inode, "Unrecognized descriptor size: %zu bytes",
- desc_size);
- return ERR_PTR(-EINVAL);
- }
-
- if (desc->version != 1) {
- fsverity_err(inode, "Unrecognized descriptor version: %u",
- desc->version);
- return ERR_PTR(-EINVAL);
- }
-
- if (memchr_inv(desc->__reserved, 0, sizeof(desc->__reserved))) {
- fsverity_err(inode, "Reserved bits set in descriptor");
- return ERR_PTR(-EINVAL);
- }
-
- if (desc->salt_size > sizeof(desc->salt)) {
- fsverity_err(inode, "Invalid salt_size: %u", desc->salt_size);
- return ERR_PTR(-EINVAL);
- }
-
- if (le64_to_cpu(desc->data_size) != inode->i_size) {
- fsverity_err(inode,
- "Wrong data_size: %llu (desc) != %lld (inode)",
- le64_to_cpu(desc->data_size), inode->i_size);
- return ERR_PTR(-EINVAL);
- }
-
vi = kmem_cache_zalloc(fsverity_info_cachep, GFP_KERNEL);
if (!vi)
return ERR_PTR(-ENOMEM);
@@ -209,7 +181,8 @@ struct fsverity_info *fsverity_create_info(const struct inode *inode,
vi->tree_params.hash_alg->name,
vi->tree_params.digest_size, vi->file_digest);
- err = fsverity_verify_signature(vi, desc, desc_size);
+ err = fsverity_verify_signature(vi, desc->signature,
+ le32_to_cpu(desc->sig_size));
out:
if (err) {
fsverity_free_info(vi);
@@ -245,15 +218,57 @@ void fsverity_free_info(struct fsverity_info *vi)
kmem_cache_free(fsverity_info_cachep, vi);
}
-/* Ensure the inode has an ->i_verity_info */
-static int ensure_verity_info(struct inode *inode)
+static bool validate_fsverity_descriptor(struct inode *inode,
+ const struct fsverity_descriptor *desc,
+ size_t desc_size)
{
- struct fsverity_info *vi = fsverity_get_info(inode);
- struct fsverity_descriptor *desc;
- int res;
+ if (desc_size < sizeof(*desc)) {
+ fsverity_err(inode, "Unrecognized descriptor size: %zu bytes",
+ desc_size);
+ return false;
+ }
- if (vi)
- return 0;
+ if (desc->version != 1) {
+ fsverity_err(inode, "Unrecognized descriptor version: %u",
+ desc->version);
+ return false;
+ }
+
+ if (memchr_inv(desc->__reserved, 0, sizeof(desc->__reserved))) {
+ fsverity_err(inode, "Reserved bits set in descriptor");
+ return false;
+ }
+
+ if (desc->salt_size > sizeof(desc->salt)) {
+ fsverity_err(inode, "Invalid salt_size: %u", desc->salt_size);
+ return false;
+ }
+
+ if (le64_to_cpu(desc->data_size) != inode->i_size) {
+ fsverity_err(inode,
+ "Wrong data_size: %llu (desc) != %lld (inode)",
+ le64_to_cpu(desc->data_size), inode->i_size);
+ return false;
+ }
+
+ if (le32_to_cpu(desc->sig_size) > desc_size - sizeof(*desc)) {
+ fsverity_err(inode, "Signature overflows verity descriptor");
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Read the inode's fsverity_descriptor (with optional appended signature) from
+ * the filesystem, and do basic validation of it.
+ */
+int fsverity_get_descriptor(struct inode *inode,
+ struct fsverity_descriptor **desc_ret,
+ size_t *desc_size_ret)
+{
+ int res;
+ struct fsverity_descriptor *desc;
res = inode->i_sb->s_vop->get_verity_descriptor(inode, NULL, 0);
if (res < 0) {
@@ -272,20 +287,46 @@ static int ensure_verity_info(struct inode *inode)
res = inode->i_sb->s_vop->get_verity_descriptor(inode, desc, res);
if (res < 0) {
fsverity_err(inode, "Error %d reading verity descriptor", res);
- goto out_free_desc;
+ kfree(desc);
+ return res;
+ }
+
+ if (!validate_fsverity_descriptor(inode, desc, res)) {
+ kfree(desc);
+ return -EINVAL;
}
- vi = fsverity_create_info(inode, desc, res);
+ *desc_ret = desc;
+ *desc_size_ret = res;
+ return 0;
+}
+
+/* Ensure the inode has an ->i_verity_info */
+static int ensure_verity_info(struct inode *inode)
+{
+ struct fsverity_info *vi = fsverity_get_info(inode);
+ struct fsverity_descriptor *desc;
+ size_t desc_size;
+ int err;
+
+ if (vi)
+ return 0;
+
+ err = fsverity_get_descriptor(inode, &desc, &desc_size);
+ if (err)
+ return err;
+
+ vi = fsverity_create_info(inode, desc, desc_size);
if (IS_ERR(vi)) {
- res = PTR_ERR(vi);
+ err = PTR_ERR(vi);
goto out_free_desc;
}
fsverity_set_info(inode, vi);
- res = 0;
+ err = 0;
out_free_desc:
kfree(desc);
- return res;
+ return err;
}
/**
diff --git a/fs/verity/read_metadata.c b/fs/verity/read_metadata.c
new file mode 100644
index 000000000000..7e2d0c7bdf0d
--- /dev/null
+++ b/fs/verity/read_metadata.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Ioctl to read verity metadata
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <linux/backing-dev.h>
+#include <linux/highmem.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+static int fsverity_read_merkle_tree(struct inode *inode,
+ const struct fsverity_info *vi,
+ void __user *buf, u64 offset, int length)
+{
+ const struct fsverity_operations *vops = inode->i_sb->s_vop;
+ u64 end_offset;
+ unsigned int offs_in_page;
+ pgoff_t index, last_index;
+ int retval = 0;
+ int err = 0;
+
+ end_offset = min(offset + length, vi->tree_params.tree_size);
+ if (offset >= end_offset)
+ return 0;
+ offs_in_page = offset_in_page(offset);
+ last_index = (end_offset - 1) >> PAGE_SHIFT;
+
+ /*
+ * Iterate through each Merkle tree page in the requested range and copy
+ * the requested portion to userspace. Note that the Merkle tree block
+ * size isn't important here, as we are returning a byte stream; i.e.,
+ * we can just work with pages even if the tree block size != PAGE_SIZE.
+ */
+ for (index = offset >> PAGE_SHIFT; index <= last_index; index++) {
+ unsigned long num_ra_pages =
+ min_t(unsigned long, last_index - index + 1,
+ inode->i_sb->s_bdi->io_pages);
+ unsigned int bytes_to_copy = min_t(u64, end_offset - offset,
+ PAGE_SIZE - offs_in_page);
+ struct page *page;
+ const void *virt;
+
+ page = vops->read_merkle_tree_page(inode, index, num_ra_pages);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ fsverity_err(inode,
+ "Error %d reading Merkle tree page %lu",
+ err, index);
+ break;
+ }
+
+ virt = kmap(page);
+ if (copy_to_user(buf, virt + offs_in_page, bytes_to_copy)) {
+ kunmap(page);
+ put_page(page);
+ err = -EFAULT;
+ break;
+ }
+ kunmap(page);
+ put_page(page);
+
+ retval += bytes_to_copy;
+ buf += bytes_to_copy;
+ offset += bytes_to_copy;
+
+ if (fatal_signal_pending(current)) {
+ err = -EINTR;
+ break;
+ }
+ cond_resched();
+ offs_in_page = 0;
+ }
+ return retval ? retval : err;
+}
+
+/* Copy the requested portion of the buffer to userspace. */
+static int fsverity_read_buffer(void __user *dst, u64 offset, int length,
+ const void *src, size_t src_length)
+{
+ if (offset >= src_length)
+ return 0;
+ src += offset;
+ src_length -= offset;
+
+ length = min_t(size_t, length, src_length);
+
+ if (copy_to_user(dst, src, length))
+ return -EFAULT;
+
+ return length;
+}
+
+static int fsverity_read_descriptor(struct inode *inode,
+ void __user *buf, u64 offset, int length)
+{
+ struct fsverity_descriptor *desc;
+ size_t desc_size;
+ int res;
+
+ res = fsverity_get_descriptor(inode, &desc, &desc_size);
+ if (res)
+ return res;
+
+ /* don't include the signature */
+ desc_size = offsetof(struct fsverity_descriptor, signature);
+ desc->sig_size = 0;
+
+ res = fsverity_read_buffer(buf, offset, length, desc, desc_size);
+
+ kfree(desc);
+ return res;
+}
+
+static int fsverity_read_signature(struct inode *inode,
+ void __user *buf, u64 offset, int length)
+{
+ struct fsverity_descriptor *desc;
+ size_t desc_size;
+ int res;
+
+ res = fsverity_get_descriptor(inode, &desc, &desc_size);
+ if (res)
+ return res;
+
+ if (desc->sig_size == 0) {
+ res = -ENODATA;
+ goto out;
+ }
+
+ /*
+ * Include only the signature. Note that fsverity_get_descriptor()
+ * already verified that sig_size is in-bounds.
+ */
+ res = fsverity_read_buffer(buf, offset, length, desc->signature,
+ le32_to_cpu(desc->sig_size));
+out:
+ kfree(desc);
+ return res;
+}
+
+/**
+ * fsverity_ioctl_read_metadata() - read verity metadata from a file
+ * @filp: file to read the metadata from
+ * @uarg: user pointer to fsverity_read_metadata_arg
+ *
+ * Return: length read on success, 0 on EOF, -errno on failure
+ */
+int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg)
+{
+ struct inode *inode = file_inode(filp);
+ const struct fsverity_info *vi;
+ struct fsverity_read_metadata_arg arg;
+ int length;
+ void __user *buf;
+
+ vi = fsverity_get_info(inode);
+ if (!vi)
+ return -ENODATA; /* not a verity file */
+ /*
+ * Note that we don't have to explicitly check that the file is open for
+ * reading, since verity files can only be opened for reading.
+ */
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.__reserved)
+ return -EINVAL;
+
+ /* offset + length must not overflow. */
+ if (arg.offset + arg.length < arg.offset)
+ return -EINVAL;
+
+ /* Ensure that the return value will fit in INT_MAX. */
+ length = min_t(u64, arg.length, INT_MAX);
+
+ buf = u64_to_user_ptr(arg.buf_ptr);
+
+ switch (arg.metadata_type) {
+ case FS_VERITY_METADATA_TYPE_MERKLE_TREE:
+ return fsverity_read_merkle_tree(inode, vi, buf, arg.offset,
+ length);
+ case FS_VERITY_METADATA_TYPE_DESCRIPTOR:
+ return fsverity_read_descriptor(inode, buf, arg.offset, length);
+ case FS_VERITY_METADATA_TYPE_SIGNATURE:
+ return fsverity_read_signature(inode, buf, arg.offset, length);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(fsverity_ioctl_read_metadata);
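Editor's note: the new read_metadata.c backs an FS_IOC_READ_VERITY_METADATA ioctl that streams Merkle tree pages, the descriptor (with the signature stripped), or the built-in signature back to userspace in chunks, returning the number of bytes copied and 0 at EOF. A hedged userspace sketch of how a caller might page through the Merkle tree; it assumes FS_IOC_READ_VERITY_METADATA and struct fsverity_read_metadata_arg from <linux/fsverity.h> as introduced by this series, and the sink() callback is purely illustrative:

#include <linux/fsverity.h>
#include <sys/ioctl.h>
#include <stddef.h>
#include <stdint.h>

static int dump_merkle_tree(int fd, void (*sink)(const void *p, size_t n))
{
	char buf[4096];
	struct fsverity_read_metadata_arg arg = {
		.metadata_type = FS_VERITY_METADATA_TYPE_MERKLE_TREE,
		.buf_ptr = (uintptr_t)buf,
		.length = sizeof(buf),
	};
	int n;

	/* > 0: bytes copied; 0: end of the metadata; < 0: error (see errno) */
	while ((n = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &arg)) > 0) {
		sink(buf, n);
		arg.offset += n;
	}
	return n;
}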
diff --git a/fs/verity/signature.c b/fs/verity/signature.c
index 012468eda2a7..143a530a8008 100644
--- a/fs/verity/signature.c
+++ b/fs/verity/signature.c
@@ -29,21 +29,19 @@ static struct key *fsverity_keyring;
/**
* fsverity_verify_signature() - check a verity file's signature
* @vi: the file's fsverity_info
- * @desc: the file's fsverity_descriptor
- * @desc_size: size of @desc
+ * @signature: the file's built-in signature
+ * @sig_size: size of signature in bytes, or 0 if no signature
*
- * If the file's fs-verity descriptor includes a signature of the file digest,
- * verify it against the certificates in the fs-verity keyring.
+ * If the file includes a signature of its fs-verity file digest, verify it
+ * against the certificates in the fs-verity keyring.
*
* Return: 0 on success (signature valid or not required); -errno on failure
*/
int fsverity_verify_signature(const struct fsverity_info *vi,
- const struct fsverity_descriptor *desc,
- size_t desc_size)
+ const u8 *signature, size_t sig_size)
{
const struct inode *inode = vi->inode;
const struct fsverity_hash_alg *hash_alg = vi->tree_params.hash_alg;
- const u32 sig_size = le32_to_cpu(desc->sig_size);
struct fsverity_formatted_digest *d;
int err;
@@ -56,11 +54,6 @@ int fsverity_verify_signature(const struct fsverity_info *vi,
return 0;
}
- if (sig_size > desc_size - sizeof(*desc)) {
- fsverity_err(inode, "Signature overflows verity descriptor");
- return -EBADMSG;
- }
-
d = kzalloc(sizeof(*d) + hash_alg->digest_size, GFP_KERNEL);
if (!d)
return -ENOMEM;
@@ -70,8 +63,7 @@ int fsverity_verify_signature(const struct fsverity_info *vi,
memcpy(d->digest, vi->file_digest, hash_alg->digest_size);
err = verify_pkcs7_signature(d, sizeof(*d) + hash_alg->digest_size,
- desc->signature, sig_size,
- fsverity_keyring,
+ signature, sig_size, fsverity_keyring,
VERIFYING_UNSPECIFIED_SIGNATURE,
NULL, NULL);
kfree(d);
diff --git a/fs/xattr.c b/fs/xattr.c
index fd57153b1f61..b3444e06cded 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -83,7 +83,8 @@ xattr_resolve_name(struct inode *inode, const char **name)
* because different namespaces have very different rules.
*/
static int
-xattr_permission(struct inode *inode, const char *name, int mask)
+xattr_permission(struct user_namespace *mnt_userns, struct inode *inode,
+ const char *name, int mask)
{
/*
* We can never set or remove an extended attribute on a read-only
@@ -97,7 +98,7 @@ xattr_permission(struct inode *inode, const char *name, int mask)
* to be written back improperly if their true value is
* unknown to the vfs.
*/
- if (HAS_UNMAPPED_ID(inode))
+ if (HAS_UNMAPPED_ID(mnt_userns, inode))
return -EPERM;
}
@@ -127,11 +128,12 @@ xattr_permission(struct inode *inode, const char *name, int mask)
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
return (mask & MAY_WRITE) ? -EPERM : -ENODATA;
if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
- (mask & MAY_WRITE) && !inode_owner_or_capable(inode))
+ (mask & MAY_WRITE) &&
+ !inode_owner_or_capable(mnt_userns, inode))
return -EPERM;
}
- return inode_permission(inode, mask);
+ return inode_permission(mnt_userns, inode, mask);
}
/*
@@ -162,8 +164,9 @@ xattr_supported_namespace(struct inode *inode, const char *prefix)
EXPORT_SYMBOL(xattr_supported_namespace);
int
-__vfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+__vfs_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct inode *inode, const char *name, const void *value,
+ size_t size, int flags)
{
const struct xattr_handler *handler;
@@ -174,7 +177,8 @@ __vfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
return -EOPNOTSUPP;
if (size == 0)
value = ""; /* empty EA, do not remove */
- return handler->set(handler, dentry, inode, name, value, size, flags);
+ return handler->set(handler, mnt_userns, dentry, inode, name, value,
+ size, flags);
}
EXPORT_SYMBOL(__vfs_setxattr);
@@ -182,6 +186,7 @@ EXPORT_SYMBOL(__vfs_setxattr);
* __vfs_setxattr_noperm - perform setxattr operation without performing
* permission checks.
*
+ * @mnt_userns - user namespace of the mount the inode was found from
* @dentry - object to perform setxattr on
* @name - xattr name to set
* @value - value to set @name to
@@ -194,8 +199,9 @@ EXPORT_SYMBOL(__vfs_setxattr);
* is executed. It also assumes that the caller will make the appropriate
* permission checks.
*/
-int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+int __vfs_setxattr_noperm(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
{
struct inode *inode = dentry->d_inode;
int error = -EAGAIN;
@@ -205,7 +211,8 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
if (issec)
inode->i_flags &= ~S_NOSEC;
if (inode->i_opflags & IOP_XATTR) {
- error = __vfs_setxattr(dentry, inode, name, value, size, flags);
+ error = __vfs_setxattr(mnt_userns, dentry, inode, name, value,
+ size, flags);
if (!error) {
fsnotify_xattr(dentry);
security_inode_post_setxattr(dentry, name, value,
@@ -244,18 +251,19 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
* a delegation was broken on, NULL if none.
*/
int
-__vfs_setxattr_locked(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags,
- struct inode **delegated_inode)
+__vfs_setxattr_locked(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const char *name, const void *value, size_t size,
+ int flags, struct inode **delegated_inode)
{
struct inode *inode = dentry->d_inode;
int error;
- error = xattr_permission(inode, name, MAY_WRITE);
+ error = xattr_permission(mnt_userns, inode, name, MAY_WRITE);
if (error)
return error;
- error = security_inode_setxattr(dentry, name, value, size, flags);
+ error = security_inode_setxattr(mnt_userns, dentry, name, value, size,
+ flags);
if (error)
goto out;
@@ -263,7 +271,8 @@ __vfs_setxattr_locked(struct dentry *dentry, const char *name,
if (error)
goto out;
- error = __vfs_setxattr_noperm(dentry, name, value, size, flags);
+ error = __vfs_setxattr_noperm(mnt_userns, dentry, name, value,
+ size, flags);
out:
return error;
@@ -271,8 +280,8 @@ out:
EXPORT_SYMBOL_GPL(__vfs_setxattr_locked);
int
-vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
- size_t size, int flags)
+vfs_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const char *name, const void *value, size_t size, int flags)
{
struct inode *inode = dentry->d_inode;
struct inode *delegated_inode = NULL;
@@ -280,7 +289,7 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
int error;
if (size && strcmp(name, XATTR_NAME_CAPS) == 0) {
- error = cap_convert_nscap(dentry, &value, size);
+ error = cap_convert_nscap(mnt_userns, dentry, &value, size);
if (error < 0)
return error;
size = error;
@@ -288,8 +297,8 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
retry_deleg:
inode_lock(inode);
- error = __vfs_setxattr_locked(dentry, name, value, size, flags,
- &delegated_inode);
+ error = __vfs_setxattr_locked(mnt_userns, dentry, name, value, size,
+ flags, &delegated_inode);
inode_unlock(inode);
if (delegated_inode) {
@@ -305,18 +314,20 @@ retry_deleg:
EXPORT_SYMBOL_GPL(vfs_setxattr);
static ssize_t
-xattr_getsecurity(struct inode *inode, const char *name, void *value,
- size_t size)
+xattr_getsecurity(struct user_namespace *mnt_userns, struct inode *inode,
+ const char *name, void *value, size_t size)
{
void *buffer = NULL;
ssize_t len;
if (!value || !size) {
- len = security_inode_getsecurity(inode, name, &buffer, false);
+ len = security_inode_getsecurity(mnt_userns, inode, name,
+ &buffer, false);
goto out_noalloc;
}
- len = security_inode_getsecurity(inode, name, &buffer, true);
+ len = security_inode_getsecurity(mnt_userns, inode, name, &buffer,
+ true);
if (len < 0)
return len;
if (size < len) {
@@ -339,15 +350,16 @@ out_noalloc:
* Returns the result of alloc, if failed, or the getxattr operation.
*/
ssize_t
-vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
- size_t xattr_size, gfp_t flags)
+vfs_getxattr_alloc(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const char *name, char **xattr_value, size_t xattr_size,
+ gfp_t flags)
{
const struct xattr_handler *handler;
struct inode *inode = dentry->d_inode;
char *value = *xattr_value;
int error;
- error = xattr_permission(inode, name, MAY_READ);
+ error = xattr_permission(mnt_userns, inode, name, MAY_READ);
if (error)
return error;
@@ -388,12 +400,13 @@ __vfs_getxattr(struct dentry *dentry, struct inode *inode, const char *name,
EXPORT_SYMBOL(__vfs_getxattr);
ssize_t
-vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
+vfs_getxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const char *name, void *value, size_t size)
{
struct inode *inode = dentry->d_inode;
int error;
- error = xattr_permission(inode, name, MAY_READ);
+ error = xattr_permission(mnt_userns, inode, name, MAY_READ);
if (error)
return error;
@@ -404,7 +417,8 @@ vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
if (!strncmp(name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN)) {
const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
- int ret = xattr_getsecurity(inode, suffix, value, size);
+ int ret = xattr_getsecurity(mnt_userns, inode, suffix, value,
+ size);
/*
* Only overwrite the return value if a security module
* is actually active.
@@ -439,7 +453,8 @@ vfs_listxattr(struct dentry *dentry, char *list, size_t size)
EXPORT_SYMBOL_GPL(vfs_listxattr);
int
-__vfs_removexattr(struct dentry *dentry, const char *name)
+__vfs_removexattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const char *name)
{
struct inode *inode = d_inode(dentry);
const struct xattr_handler *handler;
@@ -449,7 +464,8 @@ __vfs_removexattr(struct dentry *dentry, const char *name)
return PTR_ERR(handler);
if (!handler->set)
return -EOPNOTSUPP;
- return handler->set(handler, dentry, inode, name, NULL, 0, XATTR_REPLACE);
+ return handler->set(handler, mnt_userns, dentry, inode, name, NULL, 0,
+ XATTR_REPLACE);
}
EXPORT_SYMBOL(__vfs_removexattr);
@@ -463,17 +479,18 @@ EXPORT_SYMBOL(__vfs_removexattr);
* a delegation was broken on, NULL if none.
*/
int
-__vfs_removexattr_locked(struct dentry *dentry, const char *name,
- struct inode **delegated_inode)
+__vfs_removexattr_locked(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
+ struct inode **delegated_inode)
{
struct inode *inode = dentry->d_inode;
int error;
- error = xattr_permission(inode, name, MAY_WRITE);
+ error = xattr_permission(mnt_userns, inode, name, MAY_WRITE);
if (error)
return error;
- error = security_inode_removexattr(dentry, name);
+ error = security_inode_removexattr(mnt_userns, dentry, name);
if (error)
goto out;
@@ -481,7 +498,7 @@ __vfs_removexattr_locked(struct dentry *dentry, const char *name,
if (error)
goto out;
- error = __vfs_removexattr(dentry, name);
+ error = __vfs_removexattr(mnt_userns, dentry, name);
if (!error) {
fsnotify_xattr(dentry);
@@ -494,7 +511,8 @@ out:
EXPORT_SYMBOL_GPL(__vfs_removexattr_locked);
int
-vfs_removexattr(struct dentry *dentry, const char *name)
+vfs_removexattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const char *name)
{
struct inode *inode = dentry->d_inode;
struct inode *delegated_inode = NULL;
@@ -502,7 +520,8 @@ vfs_removexattr(struct dentry *dentry, const char *name)
retry_deleg:
inode_lock(inode);
- error = __vfs_removexattr_locked(dentry, name, &delegated_inode);
+ error = __vfs_removexattr_locked(mnt_userns, dentry,
+ name, &delegated_inode);
inode_unlock(inode);
if (delegated_inode) {
@@ -519,8 +538,9 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
* Extended attribute SET operations
*/
static long
-setxattr(struct dentry *d, const char __user *name, const void __user *value,
- size_t size, int flags)
+setxattr(struct user_namespace *mnt_userns, struct dentry *d,
+ const char __user *name, const void __user *value, size_t size,
+ int flags)
{
int error;
void *kvalue = NULL;
@@ -547,10 +567,10 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
}
if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
(strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
- posix_acl_fix_xattr_from_user(kvalue, size);
+ posix_acl_fix_xattr_from_user(mnt_userns, kvalue, size);
}
- error = vfs_setxattr(d, kname, kvalue, size, flags);
+ error = vfs_setxattr(mnt_userns, d, kname, kvalue, size, flags);
out:
kvfree(kvalue);
@@ -563,13 +583,15 @@ static int path_setxattr(const char __user *pathname,
{
struct path path;
int error;
+
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
error = mnt_want_write(path.mnt);
if (!error) {
- error = setxattr(path.dentry, name, value, size, flags);
+ error = setxattr(mnt_user_ns(path.mnt), path.dentry, name,
+ value, size, flags);
mnt_drop_write(path.mnt);
}
path_put(&path);
@@ -605,7 +627,9 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
audit_file(f.file);
error = mnt_want_write_file(f.file);
if (!error) {
- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
+ error = setxattr(file_mnt_user_ns(f.file),
+ f.file->f_path.dentry, name,
+ value, size, flags);
mnt_drop_write_file(f.file);
}
fdput(f);
@@ -616,8 +640,8 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
* Extended attribute GET operations
*/
static ssize_t
-getxattr(struct dentry *d, const char __user *name, void __user *value,
- size_t size)
+getxattr(struct user_namespace *mnt_userns, struct dentry *d,
+ const char __user *name, void __user *value, size_t size)
{
ssize_t error;
void *kvalue = NULL;
@@ -637,11 +661,11 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
return -ENOMEM;
}
- error = vfs_getxattr(d, kname, kvalue, size);
+ error = vfs_getxattr(mnt_userns, d, kname, kvalue, size);
if (error > 0) {
if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
(strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
- posix_acl_fix_xattr_to_user(kvalue, error);
+ posix_acl_fix_xattr_to_user(mnt_userns, kvalue, error);
if (size && copy_to_user(value, kvalue, error))
error = -EFAULT;
} else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
@@ -665,7 +689,7 @@ retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
return error;
- error = getxattr(path.dentry, name, value, size);
+ error = getxattr(mnt_user_ns(path.mnt), path.dentry, name, value, size);
path_put(&path);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
@@ -695,7 +719,8 @@ SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
if (!f.file)
return error;
audit_file(f.file);
- error = getxattr(f.file->f_path.dentry, name, value, size);
+ error = getxattr(file_mnt_user_ns(f.file), f.file->f_path.dentry,
+ name, value, size);
fdput(f);
return error;
}
@@ -779,7 +804,8 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
* Extended attribute REMOVE operations
*/
static long
-removexattr(struct dentry *d, const char __user *name)
+removexattr(struct user_namespace *mnt_userns, struct dentry *d,
+ const char __user *name)
{
int error;
char kname[XATTR_NAME_MAX + 1];
@@ -790,7 +816,7 @@ removexattr(struct dentry *d, const char __user *name)
if (error < 0)
return error;
- return vfs_removexattr(d, kname);
+ return vfs_removexattr(mnt_userns, d, kname);
}
static int path_removexattr(const char __user *pathname,
@@ -804,7 +830,7 @@ retry:
return error;
error = mnt_want_write(path.mnt);
if (!error) {
- error = removexattr(path.dentry, name);
+ error = removexattr(mnt_user_ns(path.mnt), path.dentry, name);
mnt_drop_write(path.mnt);
}
path_put(&path);
@@ -837,7 +863,8 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
audit_file(f.file);
error = mnt_want_write_file(f.file);
if (!error) {
- error = removexattr(f.file->f_path.dentry, name);
+ error = removexattr(file_mnt_user_ns(f.file),
+ f.file->f_path.dentry, name);
mnt_drop_write_file(f.file);
}
fdput(f);
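Editor's note: across the xattr syscall paths above the pattern is uniform — the entry point resolves which user namespace applies from the object the caller reached, mnt_user_ns(path.mnt) for path-based calls and file_mnt_user_ns(f.file) for fd-based ones, and threads it through vfs_setxattr()/vfs_getxattr()/vfs_removexattr() down into xattr_permission() and the ->set handlers. A condensed sketch of the caller-side idiom, with error handling and locking trimmed:

	/* Path-based entry points take the namespace from the mount ... */
	error = vfs_setxattr(mnt_user_ns(path.mnt), path.dentry,
			     name, value, size, flags);

	/* ... fd-based entry points take it from the open file's mount. */
	error = vfs_removexattr(file_mnt_user_ns(f.file),
				f.file->f_path.dentry, name);

The namespace comes from the mount rather than being assumed to be the initial one because an idmapped mount can present shifted ids, so ownership and permission checks have to be made relative to the mount the caller is going through.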
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 7cb9f064ac64..0c623d3c1036 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2474,6 +2474,47 @@ xfs_defer_agfl_block(
xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
}
+#ifdef DEBUG
+/*
+ * Check if an AGF has a free extent record whose length is equal to
+ * args->minlen.
+ */
+STATIC int
+xfs_exact_minlen_extent_available(
+ struct xfs_alloc_arg *args,
+ struct xfs_buf *agbp,
+ int *stat)
+{
+ struct xfs_btree_cur *cnt_cur;
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ int error = 0;
+
+ cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
+ args->agno, XFS_BTNUM_CNT);
+ error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
+ if (error)
+ goto out;
+
+ if (*stat == 0) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+
+ error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
+ if (error)
+ goto out;
+
+ if (*stat == 1 && flen != args->minlen)
+ *stat = 0;
+
+out:
+ xfs_btree_del_cursor(cnt_cur, error);
+
+ return error;
+}
+#endif
+
/*
* Decide whether to use this allocation group for this allocation.
* If so, fix up the btree freelist's size.
@@ -2545,6 +2586,15 @@ xfs_alloc_fix_freelist(
if (!xfs_alloc_space_available(args, need, flags))
goto out_agbp_relse;
+#ifdef DEBUG
+ if (args->alloc_minlen_only) {
+ int stat;
+
+ error = xfs_exact_minlen_extent_available(args, agbp, &stat);
+ if (error || !stat)
+ goto out_agbp_relse;
+ }
+#endif
/*
* Make the freelist shorter if it's too long.
*
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 6c22b12176b8..a4427c5775c2 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -75,6 +75,9 @@ typedef struct xfs_alloc_arg {
char wasfromfl; /* set if allocation is from freelist */
struct xfs_owner_info oinfo; /* owner of blocks being allocated */
enum xfs_ag_resv_type resv; /* block reservation to use */
+#ifdef DEBUG
+ bool alloc_minlen_only; /* allocate exact minlen extent */
+#endif
} xfs_alloc_arg_t;
/*
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index fd8e6418a0d3..472b3039eabb 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -396,6 +396,7 @@ xfs_attr_set(
struct xfs_trans_res tres;
bool rsvd = (args->attr_filter & XFS_ATTR_ROOT);
int error, local;
+ int rmt_blks = 0;
unsigned int total;
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
@@ -442,34 +443,33 @@ xfs_attr_set(
tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
total = args->total;
+
+ if (!local)
+ rmt_blks = xfs_attr3_rmt_blocks(mp, args->valuelen);
} else {
XFS_STATS_INC(mp, xs_attr_remove);
tres = M_RES(mp)->tr_attrrm;
total = XFS_ATTRRM_SPACE_RES(mp);
+ rmt_blks = xfs_attr3_rmt_blocks(mp, XFS_XATTR_SIZE_MAX);
}
/*
* Root fork attributes can use reserved data blocks for this
* operation if necessary
*/
- error = xfs_trans_alloc(mp, &tres, total, 0,
- rsvd ? XFS_TRANS_RESERVE : 0, &args->trans);
+ error = xfs_trans_alloc_inode(dp, &tres, total, 0, rsvd, &args->trans);
if (error)
return error;
- xfs_ilock(dp, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(args->trans, dp, 0);
- if (args->value) {
- unsigned int quota_flags = XFS_QMOPT_RES_REGBLKS;
-
- if (rsvd)
- quota_flags |= XFS_QMOPT_FORCE_RES;
- error = xfs_trans_reserve_quota_nblks(args->trans, dp,
- args->total, 0, quota_flags);
+ if (args->value || xfs_inode_hasattr(dp)) {
+ error = xfs_iext_count_may_overflow(dp, XFS_ATTR_FORK,
+ XFS_IEXT_ATTR_MANIP_CNT(rmt_blks));
if (error)
goto out_trans_cancel;
+ }
+ if (args->value) {
error = xfs_has_attr(args);
if (error == -EEXIST && (args->attr_flags & XATTR_CREATE))
goto out_trans_cancel;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index bc446418e227..e0905ad171f0 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1079,21 +1079,13 @@ xfs_bmap_add_attrfork(
blks = XFS_ADDAFORK_SPACE_RES(mp);
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
- rsvd ? XFS_TRANS_RESERVE : 0, &tp);
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
+ rsvd, &tp);
if (error)
return error;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
- XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
- XFS_QMOPT_RES_REGBLKS);
- if (error)
- goto trans_cancel;
if (XFS_IFORK_Q(ip))
goto trans_cancel;
- xfs_trans_ijoin(tp, ip, 0);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_bmap_set_attrforkoff(ip, size, &version);
if (error)
@@ -3463,34 +3455,16 @@ xfs_bmap_btalloc_accounting(
args->len);
}
-STATIC int
-xfs_bmap_btalloc(
- struct xfs_bmalloca *ap) /* bmap alloc argument struct */
+static int
+xfs_bmap_compute_alignments(
+ struct xfs_bmalloca *ap,
+ struct xfs_alloc_arg *args)
{
- xfs_mount_t *mp; /* mount point structure */
- xfs_alloctype_t atype = 0; /* type for allocation routines */
- xfs_extlen_t align = 0; /* minimum allocation alignment */
- xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
- xfs_agnumber_t ag;
- xfs_alloc_arg_t args;
- xfs_fileoff_t orig_offset;
- xfs_extlen_t orig_length;
- xfs_extlen_t blen;
- xfs_extlen_t nextminlen = 0;
- int nullfb; /* true if ap->firstblock isn't set */
- int isaligned;
- int tryagain;
- int error;
- int stripe_align;
-
- ASSERT(ap->length);
- orig_offset = ap->offset;
- orig_length = ap->length;
-
- mp = ap->ip->i_mount;
+ struct xfs_mount *mp = args->mp;
+ xfs_extlen_t align = 0; /* minimum allocation alignment */
+ int stripe_align = 0;
/* stripe alignment for allocation is determined by mount parameters */
- stripe_align = 0;
if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
stripe_align = mp->m_swidth;
else if (mp->m_dalign)
@@ -3501,13 +3475,171 @@ xfs_bmap_btalloc(
else if (ap->datatype & XFS_ALLOC_USERDATA)
align = xfs_get_extsz_hint(ap->ip);
if (align) {
- error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
- align, 0, ap->eof, 0, ap->conv,
- &ap->offset, &ap->length);
- ASSERT(!error);
+ if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
+ ap->eof, 0, ap->conv, &ap->offset,
+ &ap->length))
+ ASSERT(0);
ASSERT(ap->length);
}
+ /* apply extent size hints if obtained earlier */
+ if (align) {
+ args->prod = align;
+ div_u64_rem(ap->offset, args->prod, &args->mod);
+ if (args->mod)
+ args->mod = args->prod - args->mod;
+ } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
+ args->prod = 1;
+ args->mod = 0;
+ } else {
+ args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
+ div_u64_rem(ap->offset, args->prod, &args->mod);
+ if (args->mod)
+ args->mod = args->prod - args->mod;
+ }
+
+ return stripe_align;
+}
+
+static void
+xfs_bmap_process_allocated_extent(
+ struct xfs_bmalloca *ap,
+ struct xfs_alloc_arg *args,
+ xfs_fileoff_t orig_offset,
+ xfs_extlen_t orig_length)
+{
+ int nullfb;
+
+ nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
+
+ /*
+ * check the allocation happened at the same or higher AG than
+ * the first block that was allocated.
+ */
+ ASSERT(nullfb ||
+ XFS_FSB_TO_AGNO(args->mp, ap->tp->t_firstblock) <=
+ XFS_FSB_TO_AGNO(args->mp, args->fsbno));
+
+ ap->blkno = args->fsbno;
+ if (nullfb)
+ ap->tp->t_firstblock = args->fsbno;
+ ap->length = args->len;
+ /*
+ * If the extent size hint is active, we tried to round the
+ * caller's allocation request offset down to extsz and the
+ * length up to another extsz boundary. If we found a free
+ * extent we mapped it in starting at this new offset. If the
+ * newly mapped space isn't long enough to cover any of the
+ * range of offsets that was originally requested, move the
+ * mapping up so that we can fill as much of the caller's
+ * original request as possible. Free space is apparently
+ * very fragmented so we're unlikely to be able to satisfy the
+ * hints anyway.
+ */
+ if (ap->length <= orig_length)
+ ap->offset = orig_offset;
+ else if (ap->offset + ap->length < orig_offset + orig_length)
+ ap->offset = orig_offset + orig_length - ap->length;
+ xfs_bmap_btalloc_accounting(ap, args);
+}
+
+#ifdef DEBUG
+static int
+xfs_bmap_exact_minlen_extent_alloc(
+ struct xfs_bmalloca *ap)
+{
+ struct xfs_mount *mp = ap->ip->i_mount;
+ struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp };
+ xfs_fileoff_t orig_offset;
+ xfs_extlen_t orig_length;
+ int error;
+
+ ASSERT(ap->length);
+
+ if (ap->minlen != 1) {
+ ap->blkno = NULLFSBLOCK;
+ ap->length = 0;
+ return 0;
+ }
+
+ orig_offset = ap->offset;
+ orig_length = ap->length;
+
+ args.alloc_minlen_only = 1;
+
+ xfs_bmap_compute_alignments(ap, &args);
+
+ if (ap->tp->t_firstblock == NULLFSBLOCK) {
+ /*
+ * Unlike the longest extent available in an AG, we don't track
+ * the length of an AG's shortest extent.
+ * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
+ * hence we can afford to start traversing from the 0th AG since
+ * we need not be concerned about a drop in performance in
+ * "debug only" code paths.
+ */
+ ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);
+ } else {
+ ap->blkno = ap->tp->t_firstblock;
+ }
+
+ args.fsbno = ap->blkno;
+ args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
+ args.type = XFS_ALLOCTYPE_FIRST_AG;
+ args.total = args.minlen = args.maxlen = ap->minlen;
+
+ args.alignment = 1;
+ args.minalignslop = 0;
+
+ args.minleft = ap->minleft;
+ args.wasdel = ap->wasdel;
+ args.resv = XFS_AG_RESV_NONE;
+ args.datatype = ap->datatype;
+
+ error = xfs_alloc_vextent(&args);
+ if (error)
+ return error;
+
+ if (args.fsbno != NULLFSBLOCK) {
+ xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
+ orig_length);
+ } else {
+ ap->blkno = NULLFSBLOCK;
+ ap->length = 0;
+ }
+
+ return 0;
+}
+#else
+
+#define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)
+
+#endif
+
+STATIC int
+xfs_bmap_btalloc(
+ struct xfs_bmalloca *ap)
+{
+ struct xfs_mount *mp = ap->ip->i_mount;
+ struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp };
+ xfs_alloctype_t atype = 0;
+ xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
+ xfs_agnumber_t ag;
+ xfs_fileoff_t orig_offset;
+ xfs_extlen_t orig_length;
+ xfs_extlen_t blen;
+ xfs_extlen_t nextminlen = 0;
+ int nullfb; /* true if ap->firstblock isn't set */
+ int isaligned;
+ int tryagain;
+ int error;
+ int stripe_align;
+
+ ASSERT(ap->length);
+ orig_offset = ap->offset;
+ orig_length = ap->length;
+
+ stripe_align = xfs_bmap_compute_alignments(ap, &args);
nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
@@ -3538,9 +3670,6 @@ xfs_bmap_btalloc(
* Normal allocation, done through xfs_alloc_vextent.
*/
tryagain = isaligned = 0;
- memset(&args, 0, sizeof(args));
- args.tp = ap->tp;
- args.mp = mp;
args.fsbno = ap->blkno;
args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
@@ -3571,21 +3700,7 @@ xfs_bmap_btalloc(
args.total = ap->total;
args.minlen = ap->minlen;
}
- /* apply extent size hints if obtained earlier */
- if (align) {
- args.prod = align;
- div_u64_rem(ap->offset, args.prod, &args.mod);
- if (args.mod)
- args.mod = args.prod - args.mod;
- } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
- args.prod = 1;
- args.mod = 0;
- } else {
- args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
- div_u64_rem(ap->offset, args.prod, &args.mod);
- if (args.mod)
- args.mod = args.prod - args.mod;
- }
+
/*
* If we are not low on available data blocks, and the underlying
* logical volume manager is a stripe, and the file offset is zero then
@@ -3687,37 +3802,10 @@ xfs_bmap_btalloc(
return error;
ap->tp->t_flags |= XFS_TRANS_LOWMODE;
}
+
if (args.fsbno != NULLFSBLOCK) {
- /*
- * check the allocation happened at the same or higher AG than
- * the first block that was allocated.
- */
- ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
- XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
- XFS_FSB_TO_AGNO(mp, args.fsbno));
-
- ap->blkno = args.fsbno;
- if (ap->tp->t_firstblock == NULLFSBLOCK)
- ap->tp->t_firstblock = args.fsbno;
- ASSERT(nullfb || fb_agno <= args.agno);
- ap->length = args.len;
- /*
- * If the extent size hint is active, we tried to round the
- * caller's allocation request offset down to extsz and the
- * length up to another extsz boundary. If we found a free
- * extent we mapped it in starting at this new offset. If the
- * newly mapped space isn't long enough to cover any of the
- * range of offsets that was originally requested, move the
- * mapping up so that we can fill as much of the caller's
- * original request as possible. Free space is apparently
- * very fragmented so we're unlikely to be able to satisfy the
- * hints anyway.
- */
- if (ap->length <= orig_length)
- ap->offset = orig_offset;
- else if (ap->offset + ap->length < orig_offset + orig_length)
- ap->offset = orig_offset + orig_length - ap->length;
- xfs_bmap_btalloc_accounting(ap, &args);
+ xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
+ orig_length);
} else {
ap->blkno = NULLFSBLOCK;
ap->length = 0;
@@ -4001,8 +4089,7 @@ xfs_bmapi_reserve_delalloc(
* blocks. This number gets adjusted later. We return if we haven't
* allocated blocks already inside this loop.
*/
- error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
- XFS_QMOPT_RES_REGBLKS);
+ error = xfs_quota_reserve_blkres(ip, alen);
if (error)
return error;
@@ -4048,8 +4135,7 @@ out_unreserve_blocks:
xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp))
- xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
- XFS_QMOPT_RES_REGBLKS);
+ xfs_quota_unreserve_blkres(ip, alen);
return error;
}
@@ -4083,6 +4169,10 @@ xfs_bmap_alloc_userdata(
return xfs_bmap_rtalloc(bma);
}
+ if (unlikely(XFS_TEST_ERROR(false, mp,
+ XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
+ return xfs_bmap_exact_minlen_extent_alloc(bma);
+
return xfs_bmap_btalloc(bma);
}
@@ -4119,10 +4209,15 @@ xfs_bmapi_allocate(
else
bma->minlen = 1;
- if (bma->flags & XFS_BMAPI_METADATA)
- error = xfs_bmap_btalloc(bma);
- else
+ if (bma->flags & XFS_BMAPI_METADATA) {
+ if (unlikely(XFS_TEST_ERROR(false, mp,
+ XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
+ error = xfs_bmap_exact_minlen_extent_alloc(bma);
+ else
+ error = xfs_bmap_btalloc(bma);
+ } else {
error = xfs_bmap_alloc_userdata(bma);
+ }
if (error || bma->blkno == NULLFSBLOCK)
return error;
@@ -4527,6 +4622,12 @@ xfs_bmapi_convert_delalloc(
return error;
xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ error = xfs_iext_count_may_overflow(ip, whichfork,
+ XFS_IEXT_ADD_NOSPLIT_CNT);
+ if (error)
+ goto out_trans_cancel;
+
xfs_trans_ijoin(tp, ip, 0);
if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
@@ -4826,9 +4927,8 @@ xfs_bmap_del_extent_delay(
* sb counters as we might have to borrow some blocks for the
* indirect block accounting.
*/
- error = xfs_trans_reserve_quota_nblks(NULL, ip,
- -((long)del->br_blockcount), 0,
- isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+ ASSERT(!isrt);
+ error = xfs_quota_unreserve_blkres(ip, del->br_blockcount);
if (error)
return error;
ip->i_delayed_blks -= del->br_blockcount;
@@ -5145,6 +5245,27 @@ xfs_bmap_del_extent_real(
/*
* Deleting the middle of the extent.
*/
+
+ /*
+ * For directories, -ENOSPC is returned since a directory entry
+ * remove operation must not fail due to low extent count
+ * availability. -ENOSPC will be handled by higher layers of XFS
+ * by letting the corresponding empty Data/Free blocks to linger
+ * until a future remove operation. Dabtree blocks would be
+ * swapped with the last block in the leaf space and then the
+ * new last block will be unmapped.
+ *
+ * The above logic also applies to the source directory entry of
+ * a rename operation.
+ */
+ error = xfs_iext_count_may_overflow(ip, whichfork, 1);
+ if (error) {
+ ASSERT(S_ISDIR(VFS_I(ip)->i_mode) &&
+ whichfork == XFS_DATA_FORK);
+ error = -ENOSPC;
+ goto done;
+ }
+
old = got;
got.br_blockcount = del->br_startoff - got.br_startoff;
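Editor's note: several xfs_bmap.c hunks add xfs_iext_count_may_overflow() checks in front of operations that can grow an inode fork's extent count (attr-fork setup, delalloc conversion, and deleting the middle of an extent), so the per-fork counter is bounded before anything is changed on disk; the directory case above deliberately converts the failure into -ENOSPC for the reasons given in its comment. A minimal sketch of the pattern, with the transaction setup elided:

	/*
	 * Sketch: bail out early if the operation could push the fork past
	 * its maximum extent count, before modifying anything on disk.
	 */
	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
					    XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error)
		goto out_trans_cancel;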
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index c4d7a9241dc3..5b6fcb9b44e2 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -353,20 +353,17 @@ xfs_btree_free_block(
*/
void
xfs_btree_del_cursor(
- xfs_btree_cur_t *cur, /* btree cursor */
- int error) /* del because of error */
+ struct xfs_btree_cur *cur, /* btree cursor */
+ int error) /* del because of error */
{
- int i; /* btree level */
+ int i; /* btree level */
/*
- * Clear the buffer pointers, and release the buffers.
- * If we're doing this in the face of an error, we
- * need to make sure to inspect all of the entries
- * in the bc_bufs array for buffers to be unlocked.
- * This is because some of the btree code works from
- * level n down to 0, and if we get an error along
- * the way we won't have initialized all the entries
- * down to 0.
+ * Clear the buffer pointers and release the buffers. If we're doing
+ * this because of an error, inspect all of the entries in the bc_bufs
+ * array for buffers to be unlocked. This is because some of the btree
+ * code works from level n down to 0, and if we get an error along the
+ * way we won't have initialized all the entries down to 0.
*/
for (i = 0; i < cur->bc_nlevels; i++) {
if (cur->bc_bufs[i])
@@ -374,17 +371,11 @@ xfs_btree_del_cursor(
else if (!error)
break;
}
- /*
- * Can't free a bmap cursor without having dealt with the
- * allocated indirect blocks' accounting.
- */
- ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP ||
- cur->bc_ino.allocated == 0);
- /*
- * Free the cursor.
- */
+
+ ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 ||
+ XFS_FORCED_SHUTDOWN(cur->bc_mp));
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
- kmem_free((void *)cur->bc_ops);
+ kmem_free(cur->bc_ops);
kmem_cache_free(xfs_btree_cur_zone, cur);
}
@@ -2814,7 +2805,7 @@ xfs_btree_split_worker(
struct xfs_btree_split_args *args = container_of(work,
struct xfs_btree_split_args, work);
unsigned long pflags;
- unsigned long new_pflags = PF_MEMALLOC_NOFS;
+ unsigned long new_pflags = 0;
/*
* we are in a transaction context here, but may also be doing work
@@ -2826,12 +2817,20 @@ xfs_btree_split_worker(
new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
current_set_flags_nested(&pflags, new_pflags);
+ xfs_trans_set_context(args->cur->bc_tp);
args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
args->key, args->curp, args->stat);
- complete(args->done);
+ xfs_trans_clear_context(args->cur->bc_tp);
current_restore_flags_nested(&pflags, new_pflags);
+
+ /*
+ * Do not access args after complete() has run here. We don't own args
+ * and the owner may run and free args before we return here.
+ */
+ complete(args->done);
+
}
/*
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index e55378640b05..d03e6098ded9 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -47,8 +47,6 @@ extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t ino,
xfs_extlen_t tot);
-extern bool xfs_dir2_sf_replace_needblock(struct xfs_inode *dp,
- xfs_ino_t inum);
extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
xfs_extlen_t tot);
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 2463b5d73447..8c4f76bba88b 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -1018,7 +1018,7 @@ xfs_dir2_sf_removename(
/*
* Check whether the sf dir replace operation needs more blocks.
*/
-bool
+static bool
xfs_dir2_sf_replace_needblock(
struct xfs_inode *dp,
xfs_ino_t inum)
diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h
index 53b305dea381..6ca9084b6934 100644
--- a/fs/xfs/libxfs/xfs_errortag.h
+++ b/fs/xfs/libxfs/xfs_errortag.h
@@ -56,7 +56,9 @@
#define XFS_ERRTAG_FORCE_SUMMARY_RECALC 33
#define XFS_ERRTAG_IUNLINK_FALLBACK 34
#define XFS_ERRTAG_BUF_IOERROR 35
-#define XFS_ERRTAG_MAX 36
+#define XFS_ERRTAG_REDUCE_MAX_IEXTENTS 36
+#define XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT 37
+#define XFS_ERRTAG_MAX 38
/*
* Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -97,5 +99,7 @@
#define XFS_RANDOM_FORCE_SUMMARY_RECALC 1
#define XFS_RANDOM_IUNLINK_FALLBACK (XFS_RANDOM_DEFAULT/10)
#define XFS_RANDOM_BUF_IOERROR XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_REDUCE_MAX_IEXTENTS 1
+#define XFS_RANDOM_BMAP_ALLOC_MINLEN_EXTENT 1
#endif /* __XFS_ERRORTAG_H_ */
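Both new tags use a random factor of 1, which per the "1 means always" convention above makes the injected failure fire on every XFS_TEST_ERROR() check once the tag is armed, keeping the reduced extent-count limit and the minlen allocation mode deterministic for tests.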
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 2a2e3cfd94f0..6fad140d4c8e 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -250,6 +250,7 @@ typedef struct xfs_fsop_resblks {
#define XFS_FSOP_GEOM_FLAGS_RMAPBT (1 << 19) /* reverse mapping btree */
#define XFS_FSOP_GEOM_FLAGS_REFLINK (1 << 20) /* files can share blocks */
#define XFS_FSOP_GEOM_FLAGS_BIGTIME (1 << 21) /* 64-bit nsec timestamps */
+#define XFS_FSOP_GEOM_FLAGS_INOBTCNT (1 << 22) /* inobt btree counter */
/*
* Minimum and maximum sizes need for growth checks.
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 7575de5cecb1..e080d7e07643 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -23,6 +23,8 @@
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_attr_leaf.h"
+#include "xfs_types.h"
+#include "xfs_errortag.h"
kmem_zone_t *xfs_ifork_zone;
@@ -728,3 +730,28 @@ xfs_ifork_verify_local_attr(
return 0;
}
+
+int
+xfs_iext_count_may_overflow(
+ struct xfs_inode *ip,
+ int whichfork,
+ int nr_to_add)
+{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ uint64_t max_exts;
+ uint64_t nr_exts;
+
+ if (whichfork == XFS_COW_FORK)
+ return 0;
+
+ max_exts = (whichfork == XFS_ATTR_FORK) ? MAXAEXTNUM : MAXEXTNUM;
+
+ if (XFS_TEST_ERROR(false, ip->i_mount, XFS_ERRTAG_REDUCE_MAX_IEXTENTS))
+ max_exts = 10;
+
+ nr_exts = ifp->if_nextents + nr_to_add;
+ if (nr_exts < ifp->if_nextents || nr_exts > max_exts)
+ return -EFBIG;
+
+ return 0;
+}
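For illustration, a minimal sketch of the caller pattern this helper enables, assuming a transaction tp has already been allocated and ip is joined and locked as in the other hunks of this series (offset_fsb, count_fsb, imap and nimaps are placeholder names, not taken from the patch):

	/* Fail with -EFBIG before any metadata is dirtied. */
	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
			XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error)
		goto out_trans_cancel;

	/* Safe to add the mapping now; it cannot overflow the extent counter. */
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 0, 0,
			&imap, &nimaps);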
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index a4953e95c4f3..9e2137cd7372 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -35,6 +35,67 @@ struct xfs_ifork {
#define XFS_IFBROOT 0x04 /* i_broot points to the bmap b-tree root */
/*
+ * Worst-case increase in the fork extent count when we're adding a single
+ * extent to a fork and there's no possibility of splitting an existing mapping.
+ */
+#define XFS_IEXT_ADD_NOSPLIT_CNT (1)
+
+/*
+ * Punching out an extent from the middle of an existing extent can cause the
+ * extent count to increase by 1.
+ * i.e. | Old extent | Hole | Old extent |
+ */
+#define XFS_IEXT_PUNCH_HOLE_CNT (1)
+
+/*
+ * Directory entry addition can cause the following:
+ * 1. Data block can be added/removed.
+ * A new extent can cause extent count to increase by 1.
+ * 2. Free disk block can be added/removed.
+ * Same behaviour as described above for Data block.
+ * 3. Dabtree blocks.
+ * XFS_DA_NODE_MAXDEPTH blocks can be added. Each of these can be a new
+ * extent. Hence the extent count can increase by XFS_DA_NODE_MAXDEPTH.
+ */
+#define XFS_IEXT_DIR_MANIP_CNT(mp) \
+ ((XFS_DA_NODE_MAXDEPTH + 1 + 1) * (mp)->m_dir_geo->fsbcount)
+
+/*
+ * Adding/removing an xattr can cause XFS_DA_NODE_MAXDEPTH extents to
+ * be added. One extra extent for dabtree in case a local attr is
+ * large enough to cause a double split. It can also cause extent
+ * count to increase in proportion to the size of a remote xattr's
+ * value.
+ */
+#define XFS_IEXT_ATTR_MANIP_CNT(rmt_blks) \
+ (XFS_DA_NODE_MAXDEPTH + max(1, rmt_blks))
+
+/*
+ * A write to a sub-interval of an existing unwritten extent causes the original
+ * extent to be split into 3 extents
+ * i.e. | Unwritten | Real | Unwritten |
+ * Hence extent count can increase by 2.
+ */
+#define XFS_IEXT_WRITE_UNWRITTEN_CNT (2)
+
+/*
+ * Moving an extent to data fork can cause a sub-interval of an existing extent
+ * to be unmapped. This will increase extent count by 1. Mapping in the new
+ * extent can increase the extent count by 1 again i.e.
+ * | Old extent | New extent | Old extent |
+ * Hence number of extents increases by 2.
+ */
+#define XFS_IEXT_REFLINK_END_COW_CNT (2)
+
+/*
+ * Removing an initial range of source/donor file's extent and adding a new
+ * extent (from donor/source file) in its place will cause extent count to
+ * increase by 1.
+ */
+#define XFS_IEXT_SWAP_RMAP_CNT (1)
+
+/*
* Fork handling.
*/
@@ -172,5 +233,7 @@ extern void xfs_ifork_init_cow(struct xfs_inode *ip);
int xfs_ifork_verify_local_data(struct xfs_inode *ip);
int xfs_ifork_verify_local_attr(struct xfs_inode *ip);
+int xfs_iext_count_may_overflow(struct xfs_inode *ip, int whichfork,
+ int nr_to_add);
#endif /* __XFS_INODE_FORK_H__ */
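As a rough worked example of the directory bound above (assuming XFS_DA_NODE_MAXDEPTH is 5 and a geometry of one filesystem block per directory block, neither of which is fixed by this hunk), XFS_IEXT_DIR_MANIP_CNT evaluates to (5 + 1 + 1) * 1 = 7: up to five new dabtree blocks plus one data block and one free block, each of which may land in its own new extent.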
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index bbda117e5d85..60e6d255e5e2 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -1138,6 +1138,8 @@ xfs_fs_geometry(
geo->flags |= XFS_FSOP_GEOM_FLAGS_REFLINK;
if (xfs_sb_version_hasbigtime(sbp))
geo->flags |= XFS_FSOP_GEOM_FLAGS_BIGTIME;
+ if (xfs_sb_version_hasinobtcounts(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_INOBTCNT;
if (xfs_sb_version_hassector(sbp))
geo->logsectsize = sbp->sb_logsectsize;
else
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 8ea6d4aa3f55..53456f3de881 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -888,7 +888,7 @@ xchk_stop_reaping(
struct xfs_scrub *sc)
{
sc->flags |= XCHK_REAPING_DISABLED;
- xfs_stop_block_reaping(sc->mp);
+ xfs_blockgc_stop(sc->mp);
}
/* Restart background reaping of resources. */
@@ -896,6 +896,6 @@ void
xchk_start_reaping(
struct xfs_scrub *sc)
{
- xfs_start_block_reaping(sc->mp);
+ xfs_blockgc_start(sc->mp);
sc->flags &= ~XCHK_REAPING_DISABLED;
}
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 779cb73b3d00..d02bef24b32b 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -238,7 +238,8 @@ xfs_acl_set_mode(
}
int
-xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+xfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type)
{
umode_t mode;
bool set_mode = false;
@@ -252,7 +253,7 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
return error;
if (type == ACL_TYPE_ACCESS) {
- error = posix_acl_update_mode(inode, &mode, &acl);
+ error = posix_acl_update_mode(mnt_userns, inode, &mode, &acl);
if (error)
return error;
set_mode = true;
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index c042c0868016..7bdb3a4ed798 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -11,7 +11,8 @@ struct posix_acl;
#ifdef CONFIG_XFS_POSIX_ACL
extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
-extern int xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int xfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
+ struct posix_acl *acl, int type);
extern int __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
void xfs_forget_acl(struct inode *inode, const char *name);
#else
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 4304c6416fbb..b4186d666157 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -62,7 +62,7 @@ xfs_setfilesize_trans_alloc(
* We hand off the transaction to the completion thread now, so
* clear the flag here.
*/
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
+ xfs_trans_clear_context(tp);
return 0;
}
@@ -125,7 +125,7 @@ xfs_setfilesize_ioend(
* thus we need to mark ourselves as being in a transaction manually.
* Similarly for freeze protection.
*/
- current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
+ xfs_trans_set_context(tp);
__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
/* we abort the update if there was an IO error */
@@ -568,6 +568,12 @@ xfs_vm_writepage(
{
struct xfs_writepage_ctx wpc = { };
+ if (WARN_ON_ONCE(current->journal_info)) {
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
+ return 0;
+ }
+
return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}
@@ -578,6 +584,13 @@ xfs_vm_writepages(
{
struct xfs_writepage_ctx wpc = { };
+ /*
+ * Writing back data in a transaction context can result in recursive
+ * transactions. This is bad, so issue a warning and get out of here.
+ */
+ if (WARN_ON_ONCE(current->journal_info))
+ return 0;
+
xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}
diff --git a/fs/xfs/xfs_bio_io.c b/fs/xfs/xfs_bio_io.c
index e2148f2d5d6b..17f36db2f792 100644
--- a/fs/xfs/xfs_bio_io.c
+++ b/fs/xfs/xfs_bio_io.c
@@ -6,7 +6,7 @@
static inline unsigned int bio_max_vecs(unsigned int count)
{
- return min_t(unsigned, howmany(count, PAGE_SIZE), BIO_MAX_PAGES);
+ return bio_max_segs(howmany(count, PAGE_SIZE));
}
int
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 93e4d8ae6e92..2344757ede63 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -471,6 +471,7 @@ xfs_bui_item_recover(
xfs_exntst_t state;
unsigned int bui_type;
int whichfork;
+ int iext_delta;
int error = 0;
if (!xfs_bui_validate(mp, buip)) {
@@ -508,6 +509,15 @@ xfs_bui_item_recover(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
+ if (bui_type == XFS_BMAP_MAP)
+ iext_delta = XFS_IEXT_ADD_NOSPLIT_CNT;
+ else
+ iext_delta = XFS_IEXT_PUNCH_HOLE_CNT;
+
+ error = xfs_iext_count_may_overflow(ip, whichfork, iext_delta);
+ if (error)
+ goto err_cancel;
+
count = bmap->me_len;
error = xfs_trans_log_finish_bmap_update(tp, budp, bui_type, ip,
whichfork, bmap->me_startoff, bmap->me_startblock,
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 7371a7f7c652..e7d68318e6a5 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -727,11 +727,9 @@ xfs_alloc_file_space(
xfs_fileoff_t startoffset_fsb;
xfs_fileoff_t endoffset_fsb;
int nimaps;
- int quota_flag;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imaps[1], *imapp;
- uint qblocks, resblks, resrtextents;
int error;
trace_xfs_alloc_file_space(ip);
@@ -761,6 +759,7 @@ xfs_alloc_file_space(
*/
while (allocatesize_fsb && !error) {
xfs_fileoff_t s, e;
+ unsigned int dblocks, rblocks, resblks;
/*
* Determine space reservations for data/realtime.
@@ -790,45 +789,31 @@ xfs_alloc_file_space(
*/
resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
if (unlikely(rt)) {
- resrtextents = qblocks = resblks;
- resrtextents /= mp->m_sb.sb_rextsize;
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
- quota_flag = XFS_QMOPT_RES_RTBLKS;
+ dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+ rblocks = resblks;
} else {
- resrtextents = 0;
- resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
- quota_flag = XFS_QMOPT_RES_REGBLKS;
+ dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
+ rblocks = 0;
}
/*
* Allocate and setup the transaction.
*/
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
- resrtextents, 0, &tp);
-
- /*
- * Check for running out of space
- */
- if (error) {
- /*
- * Free the transaction structure.
- */
- ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
- break;
- }
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
- 0, quota_flag);
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
+ dblocks, rblocks, false, &tp);
if (error)
- goto error1;
+ break;
- xfs_trans_ijoin(tp, ip, 0);
+ error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
+ XFS_IEXT_ADD_NOSPLIT_CNT);
+ if (error)
+ goto error;
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
allocatesize_fsb, alloc_type, 0, imapp,
&nimaps);
if (error)
- goto error0;
+ goto error;
/*
* Complete the transaction
@@ -851,10 +836,7 @@ xfs_alloc_file_space(
return error;
-error0: /* unlock inode, unreserve quota blocks, cancel trans */
- xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
-
-error1: /* Just cancel transaction */
+error:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
@@ -872,20 +854,16 @@ xfs_unmap_extent(
uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
int error;
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
- if (error) {
- ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
+ false, &tp);
+ if (error)
return error;
- }
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
- ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
+ error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
+ XFS_IEXT_PUNCH_HOLE_CNT);
if (error)
goto out_trans_cancel;
- xfs_trans_ijoin(tp, ip, 0);
-
error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
if (error)
goto out_trans_cancel;
@@ -1163,6 +1141,11 @@ xfs_insert_file_space(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
+ error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
+ XFS_IEXT_PUNCH_HOLE_CNT);
+ if (error)
+ goto out_trans_cancel;
+
/*
* The extent shifting code works on extent granularity. So, if stop_fsb
* is not the starting block of extent, we need to split the extent at
@@ -1384,6 +1367,22 @@ xfs_swap_extent_rmap(
irec.br_blockcount);
trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
+ if (xfs_bmap_is_real_extent(&uirec)) {
+ error = xfs_iext_count_may_overflow(ip,
+ XFS_DATA_FORK,
+ XFS_IEXT_SWAP_RMAP_CNT);
+ if (error)
+ goto out;
+ }
+
+ if (xfs_bmap_is_real_extent(&irec)) {
+ error = xfs_iext_count_may_overflow(tip,
+ XFS_DATA_FORK,
+ XFS_IEXT_SWAP_RMAP_CNT);
+ if (error)
+ goto out;
+ }
+
/* Remove the mapping from the donor file. */
xfs_bmap_unmap_extent(tp, tip, &uirec);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index f8400bbd6473..37a1d12762d8 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -43,7 +43,7 @@ static kmem_zone_t *xfs_buf_zone;
* pag_buf_lock
* lru_lock
*
- * xfs_buftarg_wait_rele
+ * xfs_buftarg_drain_rele
* lru_lock
* b_lock (trylock due to inversion)
*
@@ -88,7 +88,7 @@ xfs_buf_vmap_len(
* because the corresponding decrement is deferred to buffer release. Buffers
* can undergo I/O multiple times in a hold-release cycle and per buffer I/O
* tracking adds unnecessary overhead. This is used for synchronization purposes
- * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
+ * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
* in-flight buffers.
*
* Buffers that are never released (e.g., superblock, iclog buffers) must set
@@ -1480,7 +1480,7 @@ xfs_buf_ioapply_map(
int op)
{
int page_index;
- int total_nr_pages = bp->b_page_count;
+ unsigned int total_nr_pages = bp->b_page_count;
int nr_pages;
struct bio *bio;
sector_t sector = bp->b_maps[map].bm_bn;
@@ -1505,7 +1505,7 @@ xfs_buf_ioapply_map(
next_chunk:
atomic_inc(&bp->b_io_remaining);
- nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
+ nr_pages = bio_max_segs(total_nr_pages);
bio = bio_alloc(GFP_NOIO, nr_pages);
bio_set_dev(bio, bp->b_target->bt_bdev);
@@ -1786,7 +1786,7 @@ __xfs_buf_mark_corrupt(
* while freeing all the buffers only held by the LRU.
*/
static enum lru_status
-xfs_buftarg_wait_rele(
+xfs_buftarg_drain_rele(
struct list_head *item,
struct list_lru_one *lru,
spinlock_t *lru_lock,
@@ -1798,7 +1798,7 @@ xfs_buftarg_wait_rele(
if (atomic_read(&bp->b_hold) > 1) {
/* need to wait, so skip it this pass */
- trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
+ trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
return LRU_SKIP;
}
if (!spin_trylock(&bp->b_lock))
@@ -1815,14 +1815,13 @@ xfs_buftarg_wait_rele(
return LRU_REMOVED;
}
+/*
+ * Wait for outstanding I/O on the buftarg to complete.
+ */
void
-xfs_wait_buftarg(
+xfs_buftarg_wait(
struct xfs_buftarg *btp)
{
- LIST_HEAD(dispose);
- int loop = 0;
- bool write_fail = false;
-
/*
* First wait on the buftarg I/O count for all in-flight buffers to be
* released. This is critical as new buffers do not make the LRU until
@@ -1838,10 +1837,21 @@ xfs_wait_buftarg(
while (percpu_counter_sum(&btp->bt_io_count))
delay(100);
flush_workqueue(btp->bt_mount->m_buf_workqueue);
+}
+
+void
+xfs_buftarg_drain(
+ struct xfs_buftarg *btp)
+{
+ LIST_HEAD(dispose);
+ int loop = 0;
+ bool write_fail = false;
+
+ xfs_buftarg_wait(btp);
/* loop until there is nothing left on the lru list. */
while (list_lru_count(&btp->bt_lru)) {
- list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
+ list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
&dispose, LONG_MAX);
while (!list_empty(&dispose)) {
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 5d91a31298a4..459ca34f26f5 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -152,7 +152,7 @@ struct xfs_buf {
struct list_head b_list;
struct xfs_perag *b_pag; /* contains rbtree root */
struct xfs_mount *b_mount;
- xfs_buftarg_t *b_target; /* buffer target (device) */
+ struct xfs_buftarg *b_target; /* buffer target (device) */
void *b_addr; /* virtual address of buffer */
struct work_struct b_ioend_work;
struct completion b_iowait; /* queue for I/O waiters */
@@ -344,11 +344,12 @@ xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
/*
* Handling of buftargs.
*/
-extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
- struct block_device *, struct dax_device *);
+extern struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *,
+ struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
-extern void xfs_wait_buftarg(xfs_buftarg_t *);
-extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
+extern void xfs_buftarg_wait(struct xfs_buftarg *);
+extern void xfs_buftarg_drain(struct xfs_buftarg *);
+extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);
#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 1d95ed387d66..bd8379b98374 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -314,8 +314,14 @@ xfs_dquot_disk_alloc(
return -ESRCH;
}
- /* Create the block mapping. */
xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
+
+ error = xfs_iext_count_may_overflow(quotip, XFS_DATA_FORK,
+ XFS_IEXT_ADD_NOSPLIT_CNT);
+ if (error)
+ return error;
+
+ /* Create the block mapping. */
error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
&nmaps);
@@ -500,6 +506,42 @@ xfs_dquot_alloc(
return dqp;
}
+/* Check the ondisk dquot's id and type match what the incore dquot expects. */
+static bool
+xfs_dquot_check_type(
+ struct xfs_dquot *dqp,
+ struct xfs_disk_dquot *ddqp)
+{
+ uint8_t ddqp_type;
+ uint8_t dqp_type;
+
+ ddqp_type = ddqp->d_type & XFS_DQTYPE_REC_MASK;
+ dqp_type = xfs_dquot_type(dqp);
+
+ if (be32_to_cpu(ddqp->d_id) != dqp->q_id)
+ return false;
+
+ /*
+ * V5 filesystems always expect an exact type match. V4 filesystems
+ * expect an exact match for user dquots and for non-root group and
+ * project dquots.
+ */
+ if (xfs_sb_version_hascrc(&dqp->q_mount->m_sb) ||
+ dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
+ return ddqp_type == dqp_type;
+
+ /*
+ * V4 filesystems support either group or project quotas, but not both
+ * at the same time. The non-user quota file can be switched between
+ * group and project quota uses depending on the mount options, which
+ * means that we can encounter the other type when we try to load quota
+ * defaults. Quotacheck will soon reset the entire quota file
+ * (including the root dquot) anyway, but don't log scary corruption
+ * reports to dmesg.
+ */
+ return ddqp_type == XFS_DQTYPE_GROUP || ddqp_type == XFS_DQTYPE_PROJ;
+}
+
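For example, under the rules above an on-disk group dquot paired with an in-core project dquot fails the check on a V5 filesystem, where an exact type match is required, while on a V4 filesystem the same mismatch is accepted for the root dquot (id 0) because the non-user quota file may have been switched between group and project use and quotacheck will reinitialise it anyway.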
/* Copy the in-core quota fields in from the on-disk buffer. */
STATIC int
xfs_dquot_from_disk(
@@ -512,8 +554,7 @@ xfs_dquot_from_disk(
* Ensure that we got the type and ID we were looking for.
* Everything else was checked by the dquot buffer verifier.
*/
- if ((ddqp->d_type & XFS_DQTYPE_REC_MASK) != xfs_dquot_type(dqp) ||
- be32_to_cpu(ddqp->d_id) != dqp->q_id) {
+ if (!xfs_dquot_check_type(dqp, ddqp)) {
xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
"Metadata corruption detected at %pS, quota %u",
__this_address, dqp->q_id);
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 7f6e20899473..185b4915b7bf 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -54,6 +54,8 @@ static unsigned int xfs_errortag_random_default[] = {
XFS_RANDOM_FORCE_SUMMARY_RECALC,
XFS_RANDOM_IUNLINK_FALLBACK,
XFS_RANDOM_BUF_IOERROR,
+ XFS_RANDOM_REDUCE_MAX_IEXTENTS,
+ XFS_RANDOM_BMAP_ALLOC_MINLEN_EXTENT,
};
struct xfs_errortag_attr {
@@ -164,6 +166,8 @@ XFS_ERRORTAG_ATTR_RW(force_repair, XFS_ERRTAG_FORCE_SCRUB_REPAIR);
XFS_ERRORTAG_ATTR_RW(bad_summary, XFS_ERRTAG_FORCE_SUMMARY_RECALC);
XFS_ERRORTAG_ATTR_RW(iunlink_fallback, XFS_ERRTAG_IUNLINK_FALLBACK);
XFS_ERRORTAG_ATTR_RW(buf_ioerror, XFS_ERRTAG_BUF_IOERROR);
+XFS_ERRORTAG_ATTR_RW(reduce_max_iextents, XFS_ERRTAG_REDUCE_MAX_IEXTENTS);
+XFS_ERRORTAG_ATTR_RW(bmap_alloc_minlen_extent, XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT);
static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(noerror),
@@ -202,6 +206,8 @@ static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(bad_summary),
XFS_ERRORTAG_ATTR_LIST(iunlink_fallback),
XFS_ERRORTAG_ATTR_LIST(buf_ioerror),
+ XFS_ERRORTAG_ATTR_LIST(reduce_max_iextents),
+ XFS_ERRORTAG_ATTR_LIST(bmap_alloc_minlen_extent),
NULL,
};
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 3991e59cfd18..ef17c1f6db32 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -344,7 +344,6 @@ xfs_extent_busy_trim(
ASSERT(*len > 0);
spin_lock(&args->pag->pagb_lock);
-restart:
fbno = *bno;
flen = *len;
rbp = args->pag->pagb_tree.rb_node;
@@ -363,19 +362,6 @@ restart:
continue;
}
- /*
- * If this is a metadata allocation, try to reuse the busy
- * extent instead of trimming the allocation.
- */
- if (!(args->datatype & XFS_ALLOC_USERDATA) &&
- !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
- if (!xfs_extent_busy_update_extent(args->mp, args->pag,
- busyp, fbno, flen,
- false))
- goto restart;
- continue;
- }
-
if (bbno <= fbno) {
/* start overlap */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 5b0f93f73837..a007ca0711d9 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -29,6 +29,7 @@
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>
+#include <linux/mount.h>
static const struct vm_operations_struct xfs_file_vm_ops;
@@ -118,6 +119,54 @@ xfs_dir_fsync(
return xfs_log_force_inode(ip);
}
+static xfs_lsn_t
+xfs_fsync_lsn(
+ struct xfs_inode *ip,
+ bool datasync)
+{
+ if (!xfs_ipincount(ip))
+ return 0;
+ if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
+ return 0;
+ return ip->i_itemp->ili_last_lsn;
+}
+
+/*
+ * All metadata updates are logged, which means that we just have to flush the
+ * log up to the latest LSN that touched the inode.
+ *
+ * If we have concurrent fsync/fdatasync() calls, we need them to all block on
+ * the log force before we clear the ili_fsync_fields field. This ensures that
+ * we don't get a racing sync operation that does not wait for the metadata to
+ * hit the journal before returning. If we race with clearing ili_fsync_fields,
+ * then all that will happen is the log force will do nothing as the lsn will
+ * already be on disk. We can't race with setting ili_fsync_fields because that
+ * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
+ * shared until after the ili_fsync_fields is cleared.
+ */
+static int
+xfs_fsync_flush_log(
+ struct xfs_inode *ip,
+ bool datasync,
+ int *log_flushed)
+{
+ int error = 0;
+ xfs_lsn_t lsn;
+
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ lsn = xfs_fsync_lsn(ip, datasync);
+ if (lsn) {
+ error = xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC,
+ log_flushed);
+
+ spin_lock(&ip->i_itemp->ili_lock);
+ ip->i_itemp->ili_fsync_fields = 0;
+ spin_unlock(&ip->i_itemp->ili_lock);
+ }
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ return error;
+}
+
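As an example of the split above: an fdatasync() call against an inode whose only logged change is a timestamp update gets 0 back from xfs_fsync_lsn() and skips the log force entirely, while a full fsync() (or a datasync with non-timestamp fields dirty) forces the log up to ili_last_lsn and then clears ili_fsync_fields under ili_lock.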
STATIC int
xfs_file_fsync(
struct file *file,
@@ -125,13 +174,10 @@ xfs_file_fsync(
loff_t end,
int datasync)
{
- struct inode *inode = file->f_mapping->host;
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_inode_log_item *iip = ip->i_itemp;
+ struct xfs_inode *ip = XFS_I(file->f_mapping->host);
struct xfs_mount *mp = ip->i_mount;
int error = 0;
int log_flushed = 0;
- xfs_lsn_t lsn = 0;
trace_xfs_file_fsync(ip);
@@ -156,32 +202,13 @@ xfs_file_fsync(
xfs_blkdev_issue_flush(mp->m_ddev_targp);
/*
- * All metadata updates are logged, which means that we just have to
- * flush the log up to the latest LSN that touched the inode. If we have
- * concurrent fsync/fdatasync() calls, we need them to all block on the
- * log force before we clear the ili_fsync_fields field. This ensures
- * that we don't get a racing sync operation that does not wait for the
- * metadata to hit the journal before returning. If we race with
- * clearing the ili_fsync_fields, then all that will happen is the log
- * force will do nothing as the lsn will already be on disk. We can't
- * race with setting ili_fsync_fields because that is done under
- * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
- * until after the ili_fsync_fields is cleared.
+ * Any inode that has dirty modifications in the log is pinned. The
+ * racy check here for a pinned inode will not catch modifications
+ * that happen concurrently to the fsync call, but fsync semantics
+ * only require syncing previously completed I/O.
*/
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- if (xfs_ipincount(ip)) {
- if (!datasync ||
- (iip->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
- lsn = iip->ili_last_lsn;
- }
-
- if (lsn) {
- error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
- spin_lock(&iip->ili_lock);
- iip->ili_fsync_fields = 0;
- spin_unlock(&iip->ili_lock);
- }
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ if (xfs_ipincount(ip))
+ error = xfs_fsync_flush_log(ip, datasync, &log_flushed);
/*
* If we only have a single device, and the log force above was
@@ -197,30 +224,42 @@ xfs_file_fsync(
return error;
}
+static int
+xfs_ilock_iocb(
+ struct kiocb *iocb,
+ unsigned int lock_mode)
+{
+ struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, lock_mode))
+ return -EAGAIN;
+ } else {
+ xfs_ilock(ip, lock_mode);
+ }
+
+ return 0;
+}
+
STATIC ssize_t
-xfs_file_dio_aio_read(
+xfs_file_dio_read(
struct kiocb *iocb,
struct iov_iter *to)
{
struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
- size_t count = iov_iter_count(to);
ssize_t ret;
- trace_xfs_file_direct_read(ip, count, iocb->ki_pos);
+ trace_xfs_file_direct_read(iocb, to);
- if (!count)
+ if (!iov_iter_count(to))
return 0; /* skip atime */
file_accessed(iocb->ki_filp);
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
- return -EAGAIN;
- } else {
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
- }
- ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL,
- is_sync_kiocb(iocb));
+ ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
+ if (ret)
+ return ret;
+ ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return ret;
@@ -232,21 +271,16 @@ xfs_file_dax_read(
struct iov_iter *to)
{
struct xfs_inode *ip = XFS_I(iocb->ki_filp->f_mapping->host);
- size_t count = iov_iter_count(to);
ssize_t ret = 0;
- trace_xfs_file_dax_read(ip, count, iocb->ki_pos);
+ trace_xfs_file_dax_read(iocb, to);
- if (!count)
+ if (!iov_iter_count(to))
return 0; /* skip atime */
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
- return -EAGAIN;
- } else {
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
- }
-
+ ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
+ if (ret)
+ return ret;
ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
@@ -255,21 +289,18 @@ xfs_file_dax_read(
}
STATIC ssize_t
-xfs_file_buffered_aio_read(
+xfs_file_buffered_read(
struct kiocb *iocb,
struct iov_iter *to)
{
struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
ssize_t ret;
- trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
+ trace_xfs_file_buffered_read(iocb, to);
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
- return -EAGAIN;
- } else {
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
- }
+ ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
+ if (ret)
+ return ret;
ret = generic_file_read_iter(iocb, to);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
@@ -293,9 +324,9 @@ xfs_file_read_iter(
if (IS_DAX(inode))
ret = xfs_file_dax_read(iocb, to);
else if (iocb->ki_flags & IOCB_DIRECT)
- ret = xfs_file_dio_aio_read(iocb, to);
+ ret = xfs_file_dio_read(iocb, to);
else
- ret = xfs_file_buffered_aio_read(iocb, to);
+ ret = xfs_file_buffered_read(iocb, to);
if (ret > 0)
XFS_STATS_ADD(mp, xs_read_bytes, ret);
@@ -310,7 +341,7 @@ xfs_file_read_iter(
* if called for a direct write beyond i_size.
*/
STATIC ssize_t
-xfs_file_aio_write_checks(
+xfs_file_write_checks(
struct kiocb *iocb,
struct iov_iter *from,
int *iolock)
@@ -328,7 +359,14 @@ restart:
if (error <= 0)
return error;
- error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ error = break_layout(inode, false);
+ if (error == -EWOULDBLOCK)
+ error = -EAGAIN;
+ } else {
+ error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
+ }
+
if (error)
return error;
@@ -339,7 +377,11 @@ restart:
if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
xfs_iunlock(ip, *iolock);
*iolock = XFS_IOLOCK_EXCL;
- xfs_ilock(ip, *iolock);
+ error = xfs_ilock_iocb(iocb, *iolock);
+ if (error) {
+ *iolock = 0;
+ return error;
+ }
goto restart;
}
/*
@@ -361,6 +403,10 @@ restart:
isize = i_size_read(inode);
if (iocb->ki_pos > isize) {
spin_unlock(&ip->i_flags_lock);
+
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return -EAGAIN;
+
if (!drained_dio) {
if (*iolock == XFS_IOLOCK_SHARED) {
xfs_iunlock(ip, *iolock);
@@ -389,12 +435,6 @@ restart:
} else
spin_unlock(&ip->i_flags_lock);
- /*
- * Updating the timestamps will grab the ilock again from
- * xfs_fs_dirty_inode, so we have to call it after dropping the
- * lock above. Eventually we should look into a way to avoid
- * the pointless lock roundtrip.
- */
return file_modified(file);
}
@@ -480,122 +520,149 @@ static const struct iomap_dio_ops xfs_dio_write_ops = {
};
/*
- * xfs_file_dio_aio_write - handle direct IO writes
- *
- * Lock the inode appropriately to prepare for and issue a direct IO write.
- * By separating it from the buffered write path we remove all the tricky to
- * follow locking changes and looping.
- *
- * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
- * until we're sure the bytes at the new EOF have been zeroed and/or the cached
- * pages are flushed out.
- *
- * In most cases the direct IO writes will be done holding IOLOCK_SHARED
- * allowing them to be done in parallel with reads and other direct IO writes.
- * However, if the IO is not aligned to filesystem blocks, the direct IO layer
- * needs to do sub-block zeroing and that requires serialisation against other
- * direct IOs to the same block. In this case we need to serialise the
- * submission of the unaligned IOs so that we don't get racing block zeroing in
- * the dio layer. To avoid the problem with aio, we also need to wait for
- * outstanding IOs to complete so that unwritten extent conversion is completed
- * before we try to map the overlapping block. This is currently implemented by
- * hitting it with a big hammer (i.e. inode_dio_wait()).
- *
- * Returns with locks held indicated by @iolock and errors indicated by
- * negative return values.
+ * Handle block aligned direct I/O writes
*/
-STATIC ssize_t
-xfs_file_dio_aio_write(
+static noinline ssize_t
+xfs_file_dio_write_aligned(
+ struct xfs_inode *ip,
struct kiocb *iocb,
struct iov_iter *from)
{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- ssize_t ret = 0;
- int unaligned_io = 0;
- int iolock;
- size_t count = iov_iter_count(from);
- struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+ int iolock = XFS_IOLOCK_SHARED;
+ ssize_t ret;
- /* DIO must be aligned to device logical sector size */
- if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
- return -EINVAL;
+ ret = xfs_ilock_iocb(iocb, iolock);
+ if (ret)
+ return ret;
+ ret = xfs_file_write_checks(iocb, from, &iolock);
+ if (ret)
+ goto out_unlock;
/*
- * Don't take the exclusive iolock here unless the I/O is unaligned to
- * the file system block size. We don't need to consider the EOF
- * extension case here because xfs_file_aio_write_checks() will relock
- * the inode as necessary for EOF zeroing cases and fill out the new
- * inode size as appropriate.
+ * We don't need to hold the IOLOCK exclusively across the IO, so demote
+ * the iolock back to shared if we had to take the exclusive lock in
+ * xfs_file_write_checks() for other reasons.
*/
- if ((iocb->ki_pos & mp->m_blockmask) ||
- ((iocb->ki_pos + count) & mp->m_blockmask)) {
- unaligned_io = 1;
-
- /*
- * We can't properly handle unaligned direct I/O to reflink
- * files yet, as we can't unshare a partial block.
- */
- if (xfs_is_cow_inode(ip)) {
- trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
- return -ENOTBLK;
- }
- iolock = XFS_IOLOCK_EXCL;
- } else {
+ if (iolock == XFS_IOLOCK_EXCL) {
+ xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
iolock = XFS_IOLOCK_SHARED;
}
+ trace_xfs_file_direct_write(iocb, from);
+ ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
+ &xfs_dio_write_ops, 0);
+out_unlock:
+ if (iolock)
+ xfs_iunlock(ip, iolock);
+ return ret;
+}
- if (iocb->ki_flags & IOCB_NOWAIT) {
- /* unaligned dio always waits, bail */
- if (unaligned_io)
- return -EAGAIN;
- if (!xfs_ilock_nowait(ip, iolock))
+/*
+ * Handle block unaligned direct I/O writes
+ *
+ * In most cases direct I/O writes will be done holding IOLOCK_SHARED, allowing
+ * them to be done in parallel with reads and other direct I/O writes. However,
+ * if the I/O is not aligned to filesystem blocks, the direct I/O layer may need
+ * to do sub-block zeroing and that requires serialisation against other direct
+ * I/O to the same block. In this case we need to serialise the submission of
+ * the unaligned I/O so that we don't get racing block zeroing in the dio layer.
+ * In the case where sub-block zeroing is not required, we can do concurrent
+ * sub-block dios to the same block successfully.
+ *
+ * Optimistically submit the I/O using the shared lock first, but use the
+ * IOMAP_DIO_OVERWRITE_ONLY flag to tell the lower layers to return -EAGAIN
+ * if block allocation or partial block zeroing would be required. In that case
+ * we try again with the exclusive lock.
+ */
+static noinline ssize_t
+xfs_file_dio_write_unaligned(
+ struct xfs_inode *ip,
+ struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ size_t isize = i_size_read(VFS_I(ip));
+ size_t count = iov_iter_count(from);
+ int iolock = XFS_IOLOCK_SHARED;
+ unsigned int flags = IOMAP_DIO_OVERWRITE_ONLY;
+ ssize_t ret;
+
+ /*
+ * Extending writes need exclusivity because of the sub-block zeroing
+ * that the DIO code always does for partial tail blocks beyond EOF, so
+ * don't even bother trying the fast path in this case.
+ */
+ if (iocb->ki_pos > isize || iocb->ki_pos + count >= isize) {
+retry_exclusive:
+ if (iocb->ki_flags & IOCB_NOWAIT)
return -EAGAIN;
- } else {
- xfs_ilock(ip, iolock);
+ iolock = XFS_IOLOCK_EXCL;
+ flags = IOMAP_DIO_FORCE_WAIT;
}
- ret = xfs_file_aio_write_checks(iocb, from, &iolock);
+ ret = xfs_ilock_iocb(iocb, iolock);
if (ret)
- goto out;
- count = iov_iter_count(from);
+ return ret;
/*
- * If we are doing unaligned IO, we can't allow any other overlapping IO
- * in-flight at the same time or we risk data corruption. Wait for all
- * other IO to drain before we submit. If the IO is aligned, demote the
- * iolock if we had to take the exclusive lock in
- * xfs_file_aio_write_checks() for other reasons.
+ * We can't properly handle unaligned direct I/O to reflink files yet,
+ * as we can't unshare a partial block.
*/
- if (unaligned_io) {
- inode_dio_wait(inode);
- } else if (iolock == XFS_IOLOCK_EXCL) {
- xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
- iolock = XFS_IOLOCK_SHARED;
+ if (xfs_is_cow_inode(ip)) {
+ trace_xfs_reflink_bounce_dio_write(iocb, from);
+ ret = -ENOTBLK;
+ goto out_unlock;
}
- trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
+ ret = xfs_file_write_checks(iocb, from, &iolock);
+ if (ret)
+ goto out_unlock;
+
/*
- * If unaligned, this is the only IO in-flight. Wait on it before we
- * release the iolock to prevent subsequent overlapping IO.
+ * If we are doing exclusive unaligned I/O, this must be the only I/O
+ * in-flight. Otherwise we risk data corruption due to unwritten extent
+ * conversions from the AIO end_io handler. Wait for all other I/O to
+ * drain first.
*/
+ if (flags & IOMAP_DIO_FORCE_WAIT)
+ inode_dio_wait(VFS_I(ip));
+
+ trace_xfs_file_direct_write(iocb, from);
ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
- &xfs_dio_write_ops,
- is_sync_kiocb(iocb) || unaligned_io);
-out:
- xfs_iunlock(ip, iolock);
+ &xfs_dio_write_ops, flags);
/*
- * No fallback to buffered IO after short writes for XFS, direct I/O
- * will either complete fully or return an error.
+ * Retry unaligned I/O with exclusive blocking semantics if the DIO
+ * layer rejected it for mapping or locking reasons. If we are doing
+ * nonblocking user I/O, propagate the error.
*/
- ASSERT(ret < 0 || ret == count);
+ if (ret == -EAGAIN && !(iocb->ki_flags & IOCB_NOWAIT)) {
+ ASSERT(flags & IOMAP_DIO_OVERWRITE_ONLY);
+ xfs_iunlock(ip, iolock);
+ goto retry_exclusive;
+ }
+
+out_unlock:
+ if (iolock)
+ xfs_iunlock(ip, iolock);
return ret;
}
+static ssize_t
+xfs_file_dio_write(
+ struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+ size_t count = iov_iter_count(from);
+
+ /* direct I/O must be aligned to device logical sector size */
+ if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
+ return -EINVAL;
+ if ((iocb->ki_pos | count) & ip->i_mount->m_blockmask)
+ return xfs_file_dio_write_unaligned(ip, iocb, from);
+ return xfs_file_dio_write_aligned(ip, iocb, from);
+}
+
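As a concrete illustration of the dispatch above (assuming a 4096-byte filesystem block and a 512-byte logical sector, values not fixed by this hunk): a 4096-byte write at offset 8192 clears both masks and takes xfs_file_dio_write_aligned(); the same write at offset 8704 is sector-aligned but block-unaligned, so it goes through xfs_file_dio_write_unaligned(); and a write at offset 100 fails the sector-size check and returns -EINVAL.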
static noinline ssize_t
xfs_file_dax_write(
struct kiocb *iocb,
@@ -605,31 +672,26 @@ xfs_file_dax_write(
struct xfs_inode *ip = XFS_I(inode);
int iolock = XFS_IOLOCK_EXCL;
ssize_t ret, error = 0;
- size_t count;
loff_t pos;
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!xfs_ilock_nowait(ip, iolock))
- return -EAGAIN;
- } else {
- xfs_ilock(ip, iolock);
- }
-
- ret = xfs_file_aio_write_checks(iocb, from, &iolock);
+ ret = xfs_ilock_iocb(iocb, iolock);
+ if (ret)
+ return ret;
+ ret = xfs_file_write_checks(iocb, from, &iolock);
if (ret)
goto out;
pos = iocb->ki_pos;
- count = iov_iter_count(from);
- trace_xfs_file_dax_write(ip, count, pos);
+ trace_xfs_file_dax_write(iocb, from);
ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
i_size_write(inode, iocb->ki_pos);
error = xfs_setfilesize(ip, pos, ret);
}
out:
- xfs_iunlock(ip, iolock);
+ if (iolock)
+ xfs_iunlock(ip, iolock);
if (error)
return error;
@@ -643,7 +705,7 @@ out:
}
STATIC ssize_t
-xfs_file_buffered_aio_write(
+xfs_file_buffered_write(
struct kiocb *iocb,
struct iov_iter *from)
{
@@ -652,7 +714,7 @@ xfs_file_buffered_aio_write(
struct inode *inode = mapping->host;
struct xfs_inode *ip = XFS_I(inode);
ssize_t ret;
- int enospc = 0;
+ bool cleared_space = false;
int iolock;
if (iocb->ki_flags & IOCB_NOWAIT)
@@ -662,14 +724,14 @@ write_retry:
iolock = XFS_IOLOCK_EXCL;
xfs_ilock(ip, iolock);
- ret = xfs_file_aio_write_checks(iocb, from, &iolock);
+ ret = xfs_file_write_checks(iocb, from, &iolock);
if (ret)
goto out;
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
- trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
+ trace_xfs_file_buffered_write(iocb, from);
ret = iomap_file_buffered_write(iocb, from,
&xfs_buffered_write_iomap_ops);
if (likely(ret >= 0))
@@ -682,27 +744,23 @@ write_retry:
* metadata space. This reduces the chances that the eofblocks scan
* waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
* also behaves as a filter to prevent too many eofblocks scans from
- * running at the same time.
+ * running at the same time. Use a synchronous scan to increase the
+ * effectiveness of the scan.
*/
- if (ret == -EDQUOT && !enospc) {
+ if (ret == -EDQUOT && !cleared_space) {
xfs_iunlock(ip, iolock);
- enospc = xfs_inode_free_quota_eofblocks(ip);
- if (enospc)
- goto write_retry;
- enospc = xfs_inode_free_quota_cowblocks(ip);
- if (enospc)
- goto write_retry;
- iolock = 0;
- } else if (ret == -ENOSPC && !enospc) {
+ xfs_blockgc_free_quota(ip, XFS_EOF_FLAGS_SYNC);
+ cleared_space = true;
+ goto write_retry;
+ } else if (ret == -ENOSPC && !cleared_space) {
struct xfs_eofblocks eofb = {0};
- enospc = 1;
+ cleared_space = true;
xfs_flush_inodes(ip->i_mount);
xfs_iunlock(ip, iolock);
eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
- xfs_icache_free_eofblocks(ip->i_mount, &eofb);
- xfs_icache_free_cowblocks(ip->i_mount, &eofb);
+ xfs_blockgc_free_space(ip->i_mount, &eofb);
goto write_retry;
}
@@ -749,12 +807,12 @@ xfs_file_write_iter(
* CoW. In all other directio scenarios we do not
* allow an operation to fall back to buffered mode.
*/
- ret = xfs_file_dio_aio_write(iocb, from);
+ ret = xfs_file_dio_write(iocb, from);
if (ret != -ENOTBLK)
return ret;
}
- return xfs_file_buffered_aio_write(iocb, from);
+ return xfs_file_buffered_write(iocb, from);
}
static void
@@ -994,7 +1052,8 @@ xfs_file_fallocate(
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = new_size;
- error = xfs_vn_setattr_size(file_dentry(file), &iattr);
+ error = xfs_vn_setattr_size(file_mnt_user_ns(file),
+ file_dentry(file), &iattr);
if (error)
goto out_unlock;
}
@@ -1319,17 +1378,19 @@ xfs_filemap_pfn_mkwrite(
return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}
-static void
+static vm_fault_t
xfs_filemap_map_pages(
struct vm_fault *vmf,
pgoff_t start_pgoff,
pgoff_t end_pgoff)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
+ vm_fault_t ret;
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- filemap_map_pages(vmf, start_pgoff, end_pgoff);
+ ret = filemap_map_pages(vmf, start_pgoff, end_pgoff);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+ return ret;
}
static const struct vm_operations_struct xfs_file_vm_ops = {
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 959ce91a3755..a2a407039227 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -25,17 +25,17 @@
*/
static int
xfs_growfs_data_private(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_growfs_data_t *in) /* growfs data input struct */
+ struct xfs_mount *mp, /* mount point for filesystem */
+ struct xfs_growfs_data *in) /* growfs data input struct */
{
struct xfs_buf *bp;
int error;
xfs_agnumber_t nagcount;
xfs_agnumber_t nagimax = 0;
- xfs_rfsblock_t nb, nb_mod;
- xfs_rfsblock_t new;
+ xfs_rfsblock_t nb, nb_div, nb_mod;
+ xfs_rfsblock_t delta;
xfs_agnumber_t oagcount;
- xfs_trans_t *tp;
+ struct xfs_trans *tp;
struct aghdr_init_data id = {};
nb = in->newblocks;
@@ -50,16 +50,16 @@ xfs_growfs_data_private(
return error;
xfs_buf_relse(bp);
- new = nb; /* use new as a temporary here */
- nb_mod = do_div(new, mp->m_sb.sb_agblocks);
- nagcount = new + (nb_mod != 0);
+ nb_div = nb;
+ nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
+ nagcount = nb_div + (nb_mod != 0);
if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
nagcount--;
nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
if (nb < mp->m_sb.sb_dblocks)
return -EINVAL;
}
- new = nb - mp->m_sb.sb_dblocks;
+ delta = nb - mp->m_sb.sb_dblocks;
oagcount = mp->m_sb.sb_agcount;
/* allocate the new per-ag structures */
@@ -89,7 +89,7 @@ xfs_growfs_data_private(
INIT_LIST_HEAD(&id.buffer_list);
for (id.agno = nagcount - 1;
id.agno >= oagcount;
- id.agno--, new -= id.agsize) {
+ id.agno--, delta -= id.agsize) {
if (id.agno == nagcount - 1)
id.agsize = nb -
@@ -110,8 +110,8 @@ xfs_growfs_data_private(
xfs_trans_agblocks_delta(tp, id.nfree);
/* If there are new blocks in the old last AG, extend it. */
- if (new) {
- error = xfs_ag_extend_space(mp, tp, &id, new);
+ if (delta) {
+ error = xfs_ag_extend_space(mp, tp, &id, delta);
if (error)
goto out_trans_cancel;
}
@@ -143,7 +143,7 @@ xfs_growfs_data_private(
* If we expanded the last AG, free the per-AG reservation
* so we can reinitialize it with the new size.
*/
- if (new) {
+ if (delta) {
struct xfs_perag *pag;
pag = xfs_perag_get(mp, id.agno);
@@ -170,8 +170,8 @@ out_trans_cancel:
static int
xfs_growfs_log_private(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_growfs_log_t *in) /* growfs log input struct */
+ struct xfs_mount *mp, /* mount point for filesystem */
+ struct xfs_growfs_log *in) /* growfs log input struct */
{
xfs_extlen_t nb;
@@ -268,7 +268,7 @@ out_error:
int
xfs_growfs_log(
xfs_mount_t *mp,
- xfs_growfs_log_t *in)
+ struct xfs_growfs_log *in)
{
int error;
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index 92869f6ec8d3..2cffe51a31e8 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -6,8 +6,8 @@
#ifndef __XFS_FSOPS_H__
#define __XFS_FSOPS_H__
-extern int xfs_growfs_data(xfs_mount_t *mp, xfs_growfs_data_t *in);
-extern int xfs_growfs_log(xfs_mount_t *mp, xfs_growfs_log_t *in);
+extern int xfs_growfs_data(struct xfs_mount *mp, struct xfs_growfs_data *in);
+extern int xfs_growfs_log(struct xfs_mount *mp, struct xfs_growfs_log *in);
extern void xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
extern int xfs_reserve_blocks(xfs_mount_t *mp, uint64_t *inval,
xfs_fsop_resblks_t *outval);
diff --git a/fs/xfs/xfs_globals.c b/fs/xfs/xfs_globals.c
index fa55ab8b8d80..f62fa652c2fd 100644
--- a/fs/xfs/xfs_globals.c
+++ b/fs/xfs/xfs_globals.c
@@ -8,8 +8,8 @@
/*
* Tunable XFS parameters. xfs_params is required even when CONFIG_SYSCTL=n,
* other XFS code uses these values. Times are measured in centisecs (i.e.
- * 100ths of a second) with the exception of eofb_timer and cowb_timer, which
- * are measured in seconds.
+ * 100ths of a second) with the exception of blockgc_timer, which is measured
+ * in seconds.
*/
xfs_param_t xfs_params = {
/* MIN DFLT MAX */
@@ -28,8 +28,7 @@ xfs_param_t xfs_params = {
.rotorstep = { 1, 1, 255 },
.inherit_nodfrg = { 0, 1, 1 },
.fstrm_timer = { 1, 30*100, 3600*100},
- .eofb_timer = { 1, 300, 3600*24},
- .cowb_timer = { 1, 1800, 3600*24},
+ .blockgc_timer = { 1, 300, 3600*24},
};
struct xfs_globals xfs_globals = {
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index deb99300d171..1d7720a0c068 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -916,69 +916,6 @@ xfs_inode_walk(
}
/*
- * Background scanning to trim post-EOF preallocated space. This is queued
- * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
- */
-void
-xfs_queue_eofblocks(
- struct xfs_mount *mp)
-{
- rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
- queue_delayed_work(mp->m_eofblocks_workqueue,
- &mp->m_eofblocks_work,
- msecs_to_jiffies(xfs_eofb_secs * 1000));
- rcu_read_unlock();
-}
-
-void
-xfs_eofblocks_worker(
- struct work_struct *work)
-{
- struct xfs_mount *mp = container_of(to_delayed_work(work),
- struct xfs_mount, m_eofblocks_work);
-
- if (!sb_start_write_trylock(mp->m_super))
- return;
- xfs_icache_free_eofblocks(mp, NULL);
- sb_end_write(mp->m_super);
-
- xfs_queue_eofblocks(mp);
-}
-
-/*
- * Background scanning to trim preallocated CoW space. This is queued
- * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
- * (We'll just piggyback on the post-EOF prealloc space workqueue.)
- */
-void
-xfs_queue_cowblocks(
- struct xfs_mount *mp)
-{
- rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
- queue_delayed_work(mp->m_eofblocks_workqueue,
- &mp->m_cowblocks_work,
- msecs_to_jiffies(xfs_cowb_secs * 1000));
- rcu_read_unlock();
-}
-
-void
-xfs_cowblocks_worker(
- struct work_struct *work)
-{
- struct xfs_mount *mp = container_of(to_delayed_work(work),
- struct xfs_mount, m_cowblocks_work);
-
- if (!sb_start_write_trylock(mp->m_super))
- return;
- xfs_icache_free_cowblocks(mp, NULL);
- sb_end_write(mp->m_super);
-
- xfs_queue_cowblocks(mp);
-}
-
-/*
* Grab the inode for reclaim exclusively.
*
* We have found this inode via a lookup under RCU, so the inode may have
@@ -1346,14 +1283,17 @@ xfs_reclaim_worker(
STATIC int
xfs_inode_free_eofblocks(
struct xfs_inode *ip,
- void *args)
+ void *args,
+ unsigned int *lockflags)
{
struct xfs_eofblocks *eofb = args;
bool wait;
- int ret;
wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
+ if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
+ return 0;
+
if (!xfs_can_free_eofblocks(ip, false)) {
/* inode could be preallocated or append-only */
trace_xfs_inode_free_eofblocks_invalid(ip);
@@ -1380,130 +1320,68 @@ xfs_inode_free_eofblocks(
return -EAGAIN;
return 0;
}
+ *lockflags |= XFS_IOLOCK_EXCL;
- ret = xfs_free_eofblocks(ip);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-
- return ret;
-}
-
-int
-xfs_icache_free_eofblocks(
- struct xfs_mount *mp,
- struct xfs_eofblocks *eofb)
-{
- return xfs_inode_walk(mp, 0, xfs_inode_free_eofblocks, eofb,
- XFS_ICI_EOFBLOCKS_TAG);
+ return xfs_free_eofblocks(ip);
}
/*
- * Run eofblocks scans on the quotas applicable to the inode. For inodes with
- * multiple quotas, we don't know exactly which quota caused an allocation
- * failure. We make a best effort by including each quota under low free space
- * conditions (less than 1% free space) in the scan.
+ * Background scanning to trim preallocated space. This is queued based on the
+ * 'speculative_prealloc_lifetime' tunable (5m by default).
*/
-static int
-__xfs_inode_free_quota_eofblocks(
- struct xfs_inode *ip,
- int (*execute)(struct xfs_mount *mp,
- struct xfs_eofblocks *eofb))
-{
- int scan = 0;
- struct xfs_eofblocks eofb = {0};
- struct xfs_dquot *dq;
-
- /*
- * Run a sync scan to increase effectiveness and use the union filter to
- * cover all applicable quotas in a single scan.
- */
- eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
-
- if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
- dq = xfs_inode_dquot(ip, XFS_DQTYPE_USER);
- if (dq && xfs_dquot_lowsp(dq)) {
- eofb.eof_uid = VFS_I(ip)->i_uid;
- eofb.eof_flags |= XFS_EOF_FLAGS_UID;
- scan = 1;
- }
- }
-
- if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
- dq = xfs_inode_dquot(ip, XFS_DQTYPE_GROUP);
- if (dq && xfs_dquot_lowsp(dq)) {
- eofb.eof_gid = VFS_I(ip)->i_gid;
- eofb.eof_flags |= XFS_EOF_FLAGS_GID;
- scan = 1;
- }
- }
-
- if (scan)
- execute(ip->i_mount, &eofb);
-
- return scan;
-}
-
-int
-xfs_inode_free_quota_eofblocks(
- struct xfs_inode *ip)
-{
- return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
-}
-
-static inline unsigned long
-xfs_iflag_for_tag(
- int tag)
+static inline void
+xfs_blockgc_queue(
+ struct xfs_perag *pag)
{
- switch (tag) {
- case XFS_ICI_EOFBLOCKS_TAG:
- return XFS_IEOFBLOCKS;
- case XFS_ICI_COWBLOCKS_TAG:
- return XFS_ICOWBLOCKS;
- default:
- ASSERT(0);
- return 0;
- }
+ rcu_read_lock();
+ if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
+ queue_delayed_work(pag->pag_mount->m_blockgc_workqueue,
+ &pag->pag_blockgc_work,
+ msecs_to_jiffies(xfs_blockgc_secs * 1000));
+ rcu_read_unlock();
}
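With the renamed tunable at its default of 300 seconds ("5m by default" per the comment above, matching the blockgc_timer default in the xfs_globals hunk), the delay passed to queue_delayed_work() works out to msecs_to_jiffies(300 * 1000), so a tagged AG's background worker is rescheduled roughly every five minutes.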
static void
-__xfs_inode_set_blocks_tag(
- xfs_inode_t *ip,
- void (*execute)(struct xfs_mount *mp),
- void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
- int error, unsigned long caller_ip),
- int tag)
+xfs_blockgc_set_iflag(
+ struct xfs_inode *ip,
+ unsigned long iflag)
{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_perag *pag;
- int tagged;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_perag *pag;
+ int tagged;
+
+ ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
/*
* Don't bother locking the AG and looking up in the radix trees
* if we already know that we have the tag set.
*/
- if (ip->i_flags & xfs_iflag_for_tag(tag))
+ if (ip->i_flags & iflag)
return;
spin_lock(&ip->i_flags_lock);
- ip->i_flags |= xfs_iflag_for_tag(tag);
+ ip->i_flags |= iflag;
spin_unlock(&ip->i_flags_lock);
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
- tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
+ tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG);
radix_tree_tag_set(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
+ XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
+ XFS_ICI_BLOCKGC_TAG);
if (!tagged) {
- /* propagate the eofblocks tag up into the perag radix tree */
+ /* propagate the blockgc tag up into the perag radix tree */
spin_lock(&ip->i_mount->m_perag_lock);
radix_tree_tag_set(&ip->i_mount->m_perag_tree,
XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
- tag);
+ XFS_ICI_BLOCKGC_TAG);
spin_unlock(&ip->i_mount->m_perag_lock);
/* kick off background trimming */
- execute(ip->i_mount);
+ xfs_blockgc_queue(pag);
- set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
+ trace_xfs_perag_set_blockgc(ip->i_mount, pag->pag_agno, -1,
+ _RET_IP_);
}
spin_unlock(&pag->pag_ici_lock);
@@ -1515,38 +1393,43 @@ xfs_inode_set_eofblocks_tag(
xfs_inode_t *ip)
{
trace_xfs_inode_set_eofblocks_tag(ip);
- return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
- trace_xfs_perag_set_eofblocks,
- XFS_ICI_EOFBLOCKS_TAG);
+ return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}
static void
-__xfs_inode_clear_blocks_tag(
- xfs_inode_t *ip,
- void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
- int error, unsigned long caller_ip),
- int tag)
+xfs_blockgc_clear_iflag(
+ struct xfs_inode *ip,
+ unsigned long iflag)
{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_perag *pag;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_perag *pag;
+ bool clear_tag;
+
+ ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
spin_lock(&ip->i_flags_lock);
- ip->i_flags &= ~xfs_iflag_for_tag(tag);
+ ip->i_flags &= ~iflag;
+ clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
spin_unlock(&ip->i_flags_lock);
+ if (!clear_tag)
+ return;
+
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
spin_lock(&pag->pag_ici_lock);
radix_tree_tag_clear(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
- if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
- /* clear the eofblocks tag from the perag radix tree */
+ XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
+ XFS_ICI_BLOCKGC_TAG);
+ if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) {
+ /* clear the blockgc tag from the perag radix tree */
spin_lock(&ip->i_mount->m_perag_lock);
radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
- tag);
+ XFS_ICI_BLOCKGC_TAG);
spin_unlock(&ip->i_mount->m_perag_lock);
- clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
+ trace_xfs_perag_clear_blockgc(ip->i_mount, pag->pag_agno, -1,
+ _RET_IP_);
}
spin_unlock(&pag->pag_ici_lock);
@@ -1558,8 +1441,7 @@ xfs_inode_clear_eofblocks_tag(
xfs_inode_t *ip)
{
trace_xfs_inode_clear_eofblocks_tag(ip);
- return __xfs_inode_clear_blocks_tag(ip,
- trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
+ return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}
/*
@@ -1609,20 +1491,42 @@ xfs_prep_free_cowblocks(
STATIC int
xfs_inode_free_cowblocks(
struct xfs_inode *ip,
- void *args)
+ void *args,
+ unsigned int *lockflags)
{
struct xfs_eofblocks *eofb = args;
+ bool wait;
int ret = 0;
+ wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
+
+ if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
+ return 0;
+
if (!xfs_prep_free_cowblocks(ip))
return 0;
if (!xfs_inode_matches_eofb(ip, eofb))
return 0;
- /* Free the CoW blocks */
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
- xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+ /*
+ * If the caller is waiting, return -EAGAIN to keep the background
+ * scanner moving and revisit the inode in a subsequent pass.
+ */
+ if (!(*lockflags & XFS_IOLOCK_EXCL) &&
+ !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+ if (wait)
+ return -EAGAIN;
+ return 0;
+ }
+ *lockflags |= XFS_IOLOCK_EXCL;
+
+ if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
+ if (wait)
+ return -EAGAIN;
+ return 0;
+ }
+ *lockflags |= XFS_MMAPLOCK_EXCL;
/*
* Check again, nobody else should be able to dirty blocks or change
@@ -1630,37 +1534,15 @@ xfs_inode_free_cowblocks(
*/
if (xfs_prep_free_cowblocks(ip))
ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
-
- xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-
return ret;
}
-int
-xfs_icache_free_cowblocks(
- struct xfs_mount *mp,
- struct xfs_eofblocks *eofb)
-{
- return xfs_inode_walk(mp, 0, xfs_inode_free_cowblocks, eofb,
- XFS_ICI_COWBLOCKS_TAG);
-}
-
-int
-xfs_inode_free_quota_cowblocks(
- struct xfs_inode *ip)
-{
- return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
-}
-
void
xfs_inode_set_cowblocks_tag(
xfs_inode_t *ip)
{
trace_xfs_inode_set_cowblocks_tag(ip);
- return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
- trace_xfs_perag_set_cowblocks,
- XFS_ICI_COWBLOCKS_TAG);
+ return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}
void
@@ -1668,24 +1550,158 @@ xfs_inode_clear_cowblocks_tag(
xfs_inode_t *ip)
{
trace_xfs_inode_clear_cowblocks_tag(ip);
- return __xfs_inode_clear_blocks_tag(ip,
- trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
+ return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}
+#define for_each_perag_tag(mp, next_agno, pag, tag) \
+ for ((next_agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
+ (pag) != NULL; \
+ (next_agno) = (pag)->pag_agno + 1, \
+ xfs_perag_put(pag), \
+ (pag) = xfs_perag_get_tag((mp), (next_agno), (tag)))
+
+
/* Disable post-EOF and CoW block auto-reclamation. */
void
-xfs_stop_block_reaping(
+xfs_blockgc_stop(
struct xfs_mount *mp)
{
- cancel_delayed_work_sync(&mp->m_eofblocks_work);
- cancel_delayed_work_sync(&mp->m_cowblocks_work);
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ cancel_delayed_work_sync(&pag->pag_blockgc_work);
}
/* Enable post-EOF and CoW block auto-reclamation. */
void
-xfs_start_block_reaping(
+xfs_blockgc_start(
struct xfs_mount *mp)
{
- xfs_queue_eofblocks(mp);
- xfs_queue_cowblocks(mp);
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ xfs_blockgc_queue(pag);
+}
+
+/* Scan one incore inode for block preallocations that we can remove. */
+static int
+xfs_blockgc_scan_inode(
+ struct xfs_inode *ip,
+ void *args)
+{
+ unsigned int lockflags = 0;
+ int error;
+
+ error = xfs_inode_free_eofblocks(ip, args, &lockflags);
+ if (error)
+ goto unlock;
+
+ error = xfs_inode_free_cowblocks(ip, args, &lockflags);
+unlock:
+ if (lockflags)
+ xfs_iunlock(ip, lockflags);
+ return error;
+}
+
+/* Background worker that trims preallocated space. */
+void
+xfs_blockgc_worker(
+ struct work_struct *work)
+{
+ struct xfs_perag *pag = container_of(to_delayed_work(work),
+ struct xfs_perag, pag_blockgc_work);
+ struct xfs_mount *mp = pag->pag_mount;
+ int error;
+
+ if (!sb_start_write_trylock(mp->m_super))
+ return;
+ error = xfs_inode_walk_ag(pag, 0, xfs_blockgc_scan_inode, NULL,
+ XFS_ICI_BLOCKGC_TAG);
+ if (error)
+ xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
+ pag->pag_agno, error);
+ sb_end_write(mp->m_super);
+ xfs_blockgc_queue(pag);
+}
+
+/*
+ * Try to free space in the filesystem by purging eofblocks and cowblocks.
+ */
+int
+xfs_blockgc_free_space(
+ struct xfs_mount *mp,
+ struct xfs_eofblocks *eofb)
+{
+ trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_);
+
+ return xfs_inode_walk(mp, 0, xfs_blockgc_scan_inode, eofb,
+ XFS_ICI_BLOCKGC_TAG);
+}
+
+/*
+ * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
+ * quota caused an allocation failure, so we make a best effort by including
+ * each quota under low free space conditions (less than 1% free space) in the
+ * scan.
+ *
+ * Callers must not hold any inode's ILOCK. If requesting a synchronous scan
+ * (XFS_EOF_FLAGS_SYNC), the caller also must not hold any inode's IOLOCK or
+ * MMAPLOCK.
+ */
+int
+xfs_blockgc_free_dquots(
+ struct xfs_mount *mp,
+ struct xfs_dquot *udqp,
+ struct xfs_dquot *gdqp,
+ struct xfs_dquot *pdqp,
+ unsigned int eof_flags)
+{
+ struct xfs_eofblocks eofb = {0};
+ bool do_work = false;
+
+ if (!udqp && !gdqp && !pdqp)
+ return 0;
+
+ /*
+ * Run a scan to free blocks using the union filter to cover all
+ * applicable quotas in a single scan.
+ */
+ eofb.eof_flags = XFS_EOF_FLAGS_UNION | eof_flags;
+
+ if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
+ eofb.eof_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
+ eofb.eof_flags |= XFS_EOF_FLAGS_UID;
+ do_work = true;
+ }
+
+ if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
+ eofb.eof_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
+ eofb.eof_flags |= XFS_EOF_FLAGS_GID;
+ do_work = true;
+ }
+
+ if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
+ eofb.eof_prid = pdqp->q_id;
+ eofb.eof_flags |= XFS_EOF_FLAGS_PRID;
+ do_work = true;
+ }
+
+ if (!do_work)
+ return 0;
+
+ return xfs_blockgc_free_space(mp, &eofb);
+}
+
+/* Run cow/eofblocks scans on the quotas attached to the inode. */
+int
+xfs_blockgc_free_quota(
+ struct xfs_inode *ip,
+ unsigned int eof_flags)
+{
+ return xfs_blockgc_free_dquots(ip->i_mount,
+ xfs_inode_dquot(ip, XFS_DQTYPE_USER),
+ xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
+ xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags);
}
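
The helpers above are meant for callers that hit -EDQUOT or -ENOSPC on a write or reservation and want to retry once after trimming speculative preallocations. A minimal sketch of that caller pattern, assuming a buffered-write style retry loop; 'ret', 'cleared_space' and the 'write_retry' label are illustrative and not part of this patch, and a real caller must drop the IOLOCK/MMAPLOCK before requesting a synchronous scan:

	if (ret == -EDQUOT && !cleared_space) {
		/* quota ran out: scan only inodes charged to this inode's quotas */
		cleared_space = true;
		xfs_blockgc_free_quota(ip, XFS_EOF_FLAGS_SYNC);
		goto write_retry;
	} else if (ret == -ENOSPC && !cleared_space) {
		struct xfs_eofblocks	eofb = {0};

		/* filesystem ran out of space: flush delalloc and scan everything */
		cleared_space = true;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_blockgc_free_space(ip->i_mount, &eofb);
		goto write_retry;
	}
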
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 3a4c8b382cd0..d1fddb152420 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -23,8 +23,8 @@ struct xfs_eofblocks {
#define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup
in xfs_inode_walk */
#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */
-#define XFS_ICI_EOFBLOCKS_TAG 1 /* inode has blocks beyond EOF */
-#define XFS_ICI_COWBLOCKS_TAG 2 /* inode can have cow blocks to gc */
+/* Inode has speculative preallocations (posteof or cow) to clean. */
+#define XFS_ICI_BLOCKGC_TAG 1
/*
* Flags for xfs_iget()
@@ -54,19 +54,19 @@ long xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
+int xfs_blockgc_free_dquots(struct xfs_mount *mp, struct xfs_dquot *udqp,
+ struct xfs_dquot *gdqp, struct xfs_dquot *pdqp,
+ unsigned int eof_flags);
+int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int eof_flags);
+int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_eofblocks *eofb);
+
void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
-int xfs_icache_free_eofblocks(struct xfs_mount *, struct xfs_eofblocks *);
-int xfs_inode_free_quota_eofblocks(struct xfs_inode *ip);
-void xfs_eofblocks_worker(struct work_struct *);
-void xfs_queue_eofblocks(struct xfs_mount *);
void xfs_inode_set_cowblocks_tag(struct xfs_inode *ip);
void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
-int xfs_icache_free_cowblocks(struct xfs_mount *, struct xfs_eofblocks *);
-int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip);
-void xfs_cowblocks_worker(struct work_struct *);
-void xfs_queue_cowblocks(struct xfs_mount *);
+
+void xfs_blockgc_worker(struct work_struct *work);
int xfs_inode_walk(struct xfs_mount *mp, int iter_flags,
int (*execute)(struct xfs_inode *ip, void *args),
@@ -75,7 +75,7 @@ int xfs_inode_walk(struct xfs_mount *mp, int iter_flags,
int xfs_icache_inode_is_allocated(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_ino_t ino, bool *inuse);
-void xfs_stop_block_reaping(struct xfs_mount *mp);
-void xfs_start_block_reaping(struct xfs_mount *mp);
+void xfs_blockgc_stop(struct xfs_mount *mp);
+void xfs_blockgc_start(struct xfs_mount *mp);
#endif
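
The new prototypes collapse the per-type worker entry points into a single blockgc surface. A minimal sketch of how a quiesce/resume pair is expected to bracket the per-AG workers; only xfs_blockgc_stop() and xfs_blockgc_start() come from this patch, the wrapper names are illustrative:

	/* illustrative quiesce helper: keep blockgc quiet while the fs is idle */
	static void
	example_quiesce_blockgc(
		struct xfs_mount	*mp)
	{
		/* cancel and wait for every queued per-AG blockgc work item */
		xfs_blockgc_stop(mp);
	}

	/* illustrative resume helper: requeue work for AGs still tagged for blockgc */
	static void
	example_resume_blockgc(
		struct xfs_mount	*mp)
	{
		xfs_blockgc_start(mp);
	}
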
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b7352bc4c815..f93370bd7b1e 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -766,6 +766,7 @@ xfs_inode_inherit_flags2(
*/
static int
xfs_init_new_inode(
+ struct user_namespace *mnt_userns,
struct xfs_trans *tp,
struct xfs_inode *pip,
xfs_ino_t ino,
@@ -775,6 +776,7 @@ xfs_init_new_inode(
prid_t prid,
struct xfs_inode **ipp)
{
+ struct inode *dir = pip ? VFS_I(pip) : NULL;
struct xfs_mount *mp = tp->t_mountp;
struct xfs_inode *ip;
unsigned int flags;
@@ -804,18 +806,17 @@ xfs_init_new_inode(
ASSERT(ip != NULL);
inode = VFS_I(ip);
- inode->i_mode = mode;
set_nlink(inode, nlink);
- inode->i_uid = current_fsuid();
inode->i_rdev = rdev;
ip->i_d.di_projid = prid;
- if (pip && XFS_INHERIT_GID(pip)) {
- inode->i_gid = VFS_I(pip)->i_gid;
- if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
- inode->i_mode |= S_ISGID;
+ if (dir && !(dir->i_mode & S_ISGID) &&
+ (mp->m_flags & XFS_MOUNT_GRPID)) {
+ inode->i_uid = fsuid_into_mnt(mnt_userns);
+ inode->i_gid = dir->i_gid;
+ inode->i_mode = mode;
} else {
- inode->i_gid = current_fsgid();
+ inode_init_owner(mnt_userns, inode, dir, mode);
}
/*
@@ -824,7 +825,8 @@ xfs_init_new_inode(
* (and only if the irix_sgid_inherit compatibility variable is set).
*/
if (irix_sgid_inherit &&
- (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
+ (inode->i_mode & S_ISGID) &&
+ !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
inode->i_mode &= ~S_ISGID;
ip->i_d.di_size = 0;
@@ -901,6 +903,7 @@ xfs_init_new_inode(
*/
int
xfs_dir_ialloc(
+ struct user_namespace *mnt_userns,
struct xfs_trans **tpp,
struct xfs_inode *dp,
umode_t mode,
@@ -933,7 +936,8 @@ xfs_dir_ialloc(
return error;
ASSERT(ino != NULLFSINO);
- return xfs_init_new_inode(*tpp, dp, ino, mode, nlink, rdev, prid, ipp);
+ return xfs_init_new_inode(mnt_userns, *tpp, dp, ino, mode, nlink, rdev,
+ prid, ipp);
}
/*
@@ -973,6 +977,7 @@ xfs_bumplink(
int
xfs_create(
+ struct user_namespace *mnt_userns,
xfs_inode_t *dp,
struct xfs_name *name,
umode_t mode,
@@ -1002,9 +1007,10 @@ xfs_create(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
- error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
- XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
- &udqp, &gdqp, &pdqp);
+ error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),
+ fsgid_into_mnt(mnt_userns), prid,
+ XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
+ &udqp, &gdqp, &pdqp);
if (error)
return error;
@@ -1022,23 +1028,22 @@ xfs_create(
* the case we'll drop the one we have and get a more
* appropriate transaction later.
*/
- error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
+ error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
+ &tp);
if (error == -ENOSPC) {
/* flush outstanding delalloc blocks and retry */
xfs_flush_inodes(mp);
- error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
+ error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
+ resblks, &tp);
}
if (error)
- goto out_release_inode;
+ goto out_release_dquots;
xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
unlock_dp_on_error = true;
- /*
- * Reserve disk quota and the inode.
- */
- error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
- pdqp, resblks, 1, 0);
+ error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
+ XFS_IEXT_DIR_MANIP_CNT(mp));
if (error)
goto out_trans_cancel;
@@ -1047,7 +1052,8 @@ xfs_create(
* entry pointing to them, but a directory also the "." entry
* pointing to itself.
*/
- error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
+ error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, is_dir ? 2 : 1, rdev,
+ prid, &ip);
if (error)
goto out_trans_cancel;
@@ -1116,7 +1122,7 @@ xfs_create(
xfs_finish_inode_setup(ip);
xfs_irele(ip);
}
-
+ out_release_dquots:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
@@ -1128,6 +1134,7 @@ xfs_create(
int
xfs_create_tmpfile(
+ struct user_namespace *mnt_userns,
struct xfs_inode *dp,
umode_t mode,
struct xfs_inode **ipp)
@@ -1151,25 +1158,22 @@ xfs_create_tmpfile(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
- error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
- XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
- &udqp, &gdqp, &pdqp);
+ error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),
+ fsgid_into_mnt(mnt_userns), prid,
+ XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
+ &udqp, &gdqp, &pdqp);
if (error)
return error;
resblks = XFS_IALLOC_SPACE_RES(mp);
tres = &M_RES(mp)->tr_create_tmpfile;
- error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
- if (error)
- goto out_release_inode;
-
- error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
- pdqp, resblks, 1, 0);
+ error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
+ &tp);
if (error)
- goto out_trans_cancel;
+ goto out_release_dquots;
- error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
+ error = xfs_dir_ialloc(mnt_userns, &tp, dp, mode, 0, 0, prid, &ip);
if (error)
goto out_trans_cancel;
@@ -1210,7 +1214,7 @@ xfs_create_tmpfile(
xfs_finish_inode_setup(ip);
xfs_irele(ip);
}
-
+ out_release_dquots:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
@@ -1258,6 +1262,11 @@ xfs_link(
xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
+ error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
+ XFS_IEXT_DIR_MANIP_CNT(mp));
+ if (error)
+ goto error_return;
+
/*
* If we are using project inheritance, we only allow hard link
* creation in our tree when the project IDs are the same; else
@@ -2977,13 +2986,15 @@ out_trans_abort:
*/
static int
xfs_rename_alloc_whiteout(
+ struct user_namespace *mnt_userns,
struct xfs_inode *dp,
struct xfs_inode **wip)
{
struct xfs_inode *tmpfile;
int error;
- error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
+ error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
+ &tmpfile);
if (error)
return error;
@@ -3005,6 +3016,7 @@ xfs_rename_alloc_whiteout(
*/
int
xfs_rename(
+ struct user_namespace *mnt_userns,
struct xfs_inode *src_dp,
struct xfs_name *src_name,
struct xfs_inode *src_ip,
@@ -3017,7 +3029,7 @@ xfs_rename(
struct xfs_trans *tp;
struct xfs_inode *wip = NULL; /* whiteout inode */
struct xfs_inode *inodes[__XFS_SORT_INODES];
- struct xfs_buf *agibp;
+ int i;
int num_inodes = __XFS_SORT_INODES;
bool new_parent = (src_dp != target_dp);
bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
@@ -3036,7 +3048,7 @@ xfs_rename(
*/
if (flags & RENAME_WHITEOUT) {
ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
- error = xfs_rename_alloc_whiteout(target_dp, &wip);
+ error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
if (error)
return error;
@@ -3106,6 +3118,35 @@ xfs_rename(
/*
* Check for expected errors before we dirty the transaction
* so we can return an error without a transaction abort.
+ *
+ * Extent count overflow check:
+ *
+ * From the perspective of src_dp, a rename operation is essentially a
+ * directory entry remove operation. Hence the only place where we check
+ * for extent count overflow for src_dp is in
+ * xfs_bmap_del_extent_real(). xfs_bmap_del_extent_real() returns
+ * -ENOSPC when it detects a possible extent count overflow and in
+ * response, the higher layers of directory handling code do the
+ * following:
+ * 1. Data/Free blocks: XFS lets these blocks linger until a
+ * future remove operation removes them.
+ * 2. Dabtree blocks: XFS swaps the blocks with the last block in the
+ * Leaf space and unmaps the last block.
+ *
+ * For target_dp, there are two cases depending on whether the
+ * destination directory entry exists or not.
+ *
+ * When the destination directory entry does not exist (i.e. target_ip ==
+ * NULL), the extent count overflow check is performed only when the
+ * transaction has a non-zero sized space reservation. With a
+ * zero-sized space reservation, XFS allows a rename operation to
+ * continue only when the directory has sufficient free space in its
+ * data/leaf/free space blocks to hold the new entry.
+ *
+ * When the destination directory entry exists (i.e. target_ip != NULL), all
+ * we need to do is change the inode number associated with the already
+ * existing entry. Hence there is no need to perform an extent count
+ * overflow check.
*/
if (target_ip == NULL) {
/*
@@ -3116,6 +3157,12 @@ xfs_rename(
error = xfs_dir_canenter(tp, target_dp, target_name);
if (error)
goto out_trans_cancel;
+ } else {
+ error = xfs_iext_count_may_overflow(target_dp,
+ XFS_DATA_FORK,
+ XFS_IEXT_DIR_MANIP_CNT(mp));
+ if (error)
+ goto out_trans_cancel;
}
} else {
/*
@@ -3131,6 +3178,30 @@ xfs_rename(
}
/*
+ * Lock the AGI buffers we need to handle bumping the nlink of the
+ * whiteout inode off the unlinked list and to handle dropping the
+ * nlink of the target inode. Per locking order rules, do this in
+ * increasing AG order and before directory block allocation tries to
+ * grab AGFs because we grab AGIs before AGFs.
+ *
+ * The (vfs) caller must ensure that if src is a directory then
+ * target_ip is either null or an empty directory.
+ */
+ for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
+ if (inodes[i] == wip ||
+ (inodes[i] == target_ip &&
+ (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
+ struct xfs_buf *bp;
+ xfs_agnumber_t agno;
+
+ agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
+ error = xfs_read_agi(mp, tp, agno, &bp);
+ if (error)
+ goto out_trans_cancel;
+ }
+ }
+
+ /*
* Directory entry creation below may acquire the AGF. Remove
* the whiteout from the unlinked list first to preserve correct
* AGI/AGF locking order. This dirties the transaction so failures
@@ -3182,22 +3253,6 @@ xfs_rename(
* In case there is already an entry with the same
* name at the destination directory, remove it first.
*/
-
- /*
- * Check whether the replace operation will need to allocate
- * blocks. This happens when the shortform directory lacks
- * space and we have to convert it to a block format directory.
- * When more blocks are necessary, we must lock the AGI first
- * to preserve locking order (AGI -> AGF).
- */
- if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
- error = xfs_read_agi(mp, tp,
- XFS_INO_TO_AGNO(mp, target_ip->i_ino),
- &agibp);
- if (error)
- goto out_trans_cancel;
- }
-
error = xfs_dir_replace(tp, target_dp, target_name,
src_ip->i_ino, spaceres);
if (error)
@@ -3273,9 +3328,16 @@ xfs_rename(
if (wip) {
error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
spaceres);
- } else
+ } else {
+ /*
+ * NOTE: We don't need to check for extent count overflow here
+ * because the dir remove name code will leave the dir block in
+ * place if the extent count would overflow.
+ */
error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
spaceres);
+ }
+
if (error)
goto out_trans_cancel;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index eca333f5f715..88ee4c3930ae 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -369,15 +369,18 @@ int xfs_release(struct xfs_inode *ip);
void xfs_inactive(struct xfs_inode *ip);
int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode **ipp, struct xfs_name *ci_name);
-int xfs_create(struct xfs_inode *dp, struct xfs_name *name,
+int xfs_create(struct user_namespace *mnt_userns,
+ struct xfs_inode *dp, struct xfs_name *name,
umode_t mode, dev_t rdev, struct xfs_inode **ipp);
-int xfs_create_tmpfile(struct xfs_inode *dp, umode_t mode,
+int xfs_create_tmpfile(struct user_namespace *mnt_userns,
+ struct xfs_inode *dp, umode_t mode,
struct xfs_inode **ipp);
int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode *ip);
int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
struct xfs_name *target_name);
-int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
+int xfs_rename(struct user_namespace *mnt_userns,
+ struct xfs_inode *src_dp, struct xfs_name *src_name,
struct xfs_inode *src_ip, struct xfs_inode *target_dp,
struct xfs_name *target_name,
struct xfs_inode *target_ip, unsigned int flags);
@@ -407,9 +410,10 @@ void xfs_lock_two_inodes(struct xfs_inode *ip0, uint ip0_mode,
xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
-int xfs_dir_ialloc(struct xfs_trans **tpp, struct xfs_inode *dp, umode_t mode,
- xfs_nlink_t nlink, dev_t dev, prid_t prid,
- struct xfs_inode **ipp);
+int xfs_dir_ialloc(struct user_namespace *mnt_userns,
+ struct xfs_trans **tpp, struct xfs_inode *dp,
+ umode_t mode, xfs_nlink_t nlink, dev_t dev,
+ prid_t prid, struct xfs_inode **ipp);
static inline int
xfs_itruncate_extents(
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 3fbd98f61ea5..99dfe89a8d08 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -693,7 +693,8 @@ xfs_ioc_space(
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = bf->l_start;
- error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
+ error = xfs_vn_setattr_size(file_mnt_user_ns(filp), file_dentry(filp),
+ &iattr);
if (error)
goto out_unlock;
@@ -734,13 +735,15 @@ xfs_fsinumbers_fmt(
STATIC int
xfs_ioc_fsbulkstat(
- xfs_mount_t *mp,
+ struct file *file,
unsigned int cmd,
void __user *arg)
{
+ struct xfs_mount *mp = XFS_I(file_inode(file))->i_mount;
struct xfs_fsop_bulkreq bulkreq;
struct xfs_ibulk breq = {
.mp = mp,
+ .mnt_userns = file_mnt_user_ns(file),
.ocount = 0,
};
xfs_ino_t lastino;
@@ -908,13 +911,15 @@ xfs_bulk_ireq_teardown(
/* Handle the v5 bulkstat ioctl. */
STATIC int
xfs_ioc_bulkstat(
- struct xfs_mount *mp,
+ struct file *file,
unsigned int cmd,
struct xfs_bulkstat_req __user *arg)
{
+ struct xfs_mount *mp = XFS_I(file_inode(file))->i_mount;
struct xfs_bulk_ireq hdr;
struct xfs_ibulk breq = {
.mp = mp,
+ .mnt_userns = file_mnt_user_ns(file),
};
int error;
@@ -1275,24 +1280,24 @@ xfs_ioctl_setattr_prepare_dax(
*/
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
- struct xfs_inode *ip)
+ struct file *file,
+ struct xfs_dquot *pdqp)
{
+ struct xfs_inode *ip = XFS_I(file_inode(file));
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error = -EROFS;
if (mp->m_flags & XFS_MOUNT_RDONLY)
- goto out_unlock;
+ goto out_error;
error = -EIO;
if (XFS_FORCED_SHUTDOWN(mp))
- goto out_unlock;
+ goto out_error;
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+ error = xfs_trans_alloc_ichange(ip, NULL, NULL, pdqp,
+ capable(CAP_FOWNER), &tp);
if (error)
- goto out_unlock;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ goto out_error;
/*
* CAP_FOWNER overrides the following restrictions:
@@ -1300,7 +1305,7 @@ xfs_ioctl_setattr_get_trans(
* The user ID of the calling process must be equal to the file owner
* ID, except in cases where the CAP_FSETID capability is applicable.
*/
- if (!inode_owner_or_capable(VFS_I(ip))) {
+ if (!inode_owner_or_capable(file_mnt_user_ns(file), VFS_I(ip))) {
error = -EPERM;
goto out_cancel;
}
@@ -1312,7 +1317,7 @@ xfs_ioctl_setattr_get_trans(
out_cancel:
xfs_trans_cancel(tp);
-out_unlock:
+out_error:
return ERR_PTR(error);
}
@@ -1428,21 +1433,23 @@ xfs_ioctl_setattr_check_projid(
STATIC int
xfs_ioctl_setattr(
- xfs_inode_t *ip,
+ struct file *file,
struct fsxattr *fa)
{
+ struct user_namespace *mnt_userns = file_mnt_user_ns(file);
+ struct xfs_inode *ip = XFS_I(file_inode(file));
struct fsxattr old_fa;
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
struct xfs_dquot *pdqp = NULL;
struct xfs_dquot *olddquot = NULL;
- int code;
+ int error;
trace_xfs_ioctl_setattr(ip);
- code = xfs_ioctl_setattr_check_projid(ip, fa);
- if (code)
- return code;
+ error = xfs_ioctl_setattr_check_projid(ip, fa);
+ if (error)
+ return error;
/*
* If disk quotas is on, we make sure that the dquots do exist on disk,
@@ -1453,44 +1460,36 @@ xfs_ioctl_setattr(
* because the i_*dquot fields will get updated anyway.
*/
if (XFS_IS_QUOTA_ON(mp)) {
- code = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
+ error = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
VFS_I(ip)->i_gid, fa->fsx_projid,
XFS_QMOPT_PQUOTA, NULL, NULL, &pdqp);
- if (code)
- return code;
+ if (error)
+ return error;
}
xfs_ioctl_setattr_prepare_dax(ip, fa);
- tp = xfs_ioctl_setattr_get_trans(ip);
+ tp = xfs_ioctl_setattr_get_trans(file, pdqp);
if (IS_ERR(tp)) {
- code = PTR_ERR(tp);
+ error = PTR_ERR(tp);
goto error_free_dquots;
}
- if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
- ip->i_d.di_projid != fa->fsx_projid) {
- code = xfs_qm_vop_chown_reserve(tp, ip, NULL, NULL, pdqp,
- capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
- if (code) /* out of quota */
- goto error_trans_cancel;
- }
-
xfs_fill_fsxattr(ip, false, &old_fa);
- code = vfs_ioc_fssetxattr_check(VFS_I(ip), &old_fa, fa);
- if (code)
+ error = vfs_ioc_fssetxattr_check(VFS_I(ip), &old_fa, fa);
+ if (error)
goto error_trans_cancel;
- code = xfs_ioctl_setattr_check_extsize(ip, fa);
- if (code)
+ error = xfs_ioctl_setattr_check_extsize(ip, fa);
+ if (error)
goto error_trans_cancel;
- code = xfs_ioctl_setattr_check_cowextsize(ip, fa);
- if (code)
+ error = xfs_ioctl_setattr_check_cowextsize(ip, fa);
+ if (error)
goto error_trans_cancel;
- code = xfs_ioctl_setattr_xflags(tp, ip, fa);
- if (code)
+ error = xfs_ioctl_setattr_xflags(tp, ip, fa);
+ if (error)
goto error_trans_cancel;
/*
@@ -1502,7 +1501,7 @@ xfs_ioctl_setattr(
*/
if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
- !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
+ !capable_wrt_inode_uidgid(mnt_userns, VFS_I(ip), CAP_FSETID))
VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
/* Change the ownerships and register project quota modifications */
@@ -1530,7 +1529,7 @@ xfs_ioctl_setattr(
else
ip->i_d.di_cowextsize = 0;
- code = xfs_trans_commit(tp);
+ error = xfs_trans_commit(tp);
/*
* Release any dquot(s) the inode had kept before chown.
@@ -1538,18 +1537,17 @@ xfs_ioctl_setattr(
xfs_qm_dqrele(olddquot);
xfs_qm_dqrele(pdqp);
- return code;
+ return error;
error_trans_cancel:
xfs_trans_cancel(tp);
error_free_dquots:
xfs_qm_dqrele(pdqp);
- return code;
+ return error;
}
STATIC int
xfs_ioc_fssetxattr(
- xfs_inode_t *ip,
struct file *filp,
void __user *arg)
{
@@ -1562,7 +1560,7 @@ xfs_ioc_fssetxattr(
error = mnt_want_write_file(filp);
if (error)
return error;
- error = xfs_ioctl_setattr(ip, &fa);
+ error = xfs_ioctl_setattr(filp, &fa);
mnt_drop_write_file(filp);
return error;
}
@@ -1608,7 +1606,7 @@ xfs_ioc_setxflags(
xfs_ioctl_setattr_prepare_dax(ip, &fa);
- tp = xfs_ioctl_setattr_get_trans(ip);
+ tp = xfs_ioctl_setattr_get_trans(filp, NULL);
if (IS_ERR(tp)) {
error = PTR_ERR(tp);
goto out_drop_write;
@@ -2119,10 +2117,10 @@ xfs_file_ioctl(
case XFS_IOC_FSBULKSTAT_SINGLE:
case XFS_IOC_FSBULKSTAT:
case XFS_IOC_FSINUMBERS:
- return xfs_ioc_fsbulkstat(mp, cmd, arg);
+ return xfs_ioc_fsbulkstat(filp, cmd, arg);
case XFS_IOC_BULKSTAT:
- return xfs_ioc_bulkstat(mp, cmd, arg);
+ return xfs_ioc_bulkstat(filp, cmd, arg);
case XFS_IOC_INUMBERS:
return xfs_ioc_inumbers(mp, cmd, arg);
@@ -2144,7 +2142,7 @@ xfs_file_ioctl(
case XFS_IOC_FSGETXATTRA:
return xfs_ioc_fsgetxattr(ip, 1, arg);
case XFS_IOC_FSSETXATTR:
- return xfs_ioc_fssetxattr(ip, filp, arg);
+ return xfs_ioc_fssetxattr(filp, arg);
case XFS_IOC_GETXFLAGS:
return xfs_ioc_getxflags(ip, arg);
case XFS_IOC_SETXFLAGS:
@@ -2260,7 +2258,7 @@ xfs_file_ioctl(
}
case XFS_IOC_FSGROWFSDATA: {
- xfs_growfs_data_t in;
+ struct xfs_growfs_data in;
if (copy_from_user(&in, arg, sizeof(in)))
return -EFAULT;
@@ -2274,7 +2272,7 @@ xfs_file_ioctl(
}
case XFS_IOC_FSGROWFSLOG: {
- xfs_growfs_log_t in;
+ struct xfs_growfs_log in;
if (copy_from_user(&in, arg, sizeof(in)))
return -EFAULT;
@@ -2348,8 +2346,10 @@ xfs_file_ioctl(
if (error)
return error;
+ trace_xfs_ioc_free_eofblocks(mp, &keofb, _RET_IP_);
+
sb_start_write(mp->m_super);
- error = xfs_icache_free_eofblocks(mp, &keofb);
+ error = xfs_blockgc_free_space(mp, &keofb);
sb_end_write(mp->m_super);
return error;
}
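
The XFS_IOC_FREE_EOFBLOCKS path now feeds the same xfs_blockgc_free_space() scan as the background worker. A short userspace sketch of driving that ioctl, assuming the struct xfs_fs_eofblocks and XFS_IOC_FREE_EOFBLOCKS definitions shipped in xfs_fs.h (via xfsprogs); the mount point path is illustrative and the caller needs CAP_SYS_ADMIN:

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <xfs/xfs.h>	/* assumed to pull in the xfs_fs.h definitions */

	int main(void)
	{
		struct xfs_fs_eofblocks	eofb;
		int fd = open("/mnt/scratch", O_RDONLY);	/* any fd on the fs */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(&eofb, 0, sizeof(eofb));
		eofb.eof_version = XFS_EOFBLOCKS_VERSION;
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;	/* wait for the scan to finish */

		/* kicks xfs_blockgc_free_space() on the mounted filesystem */
		if (ioctl(fd, XFS_IOC_FREE_EOFBLOCKS, &eofb) < 0)
			perror("XFS_IOC_FREE_EOFBLOCKS");
		close(fd);
		return 0;
	}
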
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index c1771e728117..33c09ec8e6c0 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -209,14 +209,16 @@ xfs_fsbulkstat_one_fmt_compat(
/* copied from xfs_ioctl.c */
STATIC int
xfs_compat_ioc_fsbulkstat(
- xfs_mount_t *mp,
+ struct file *file,
unsigned int cmd,
struct compat_xfs_fsop_bulkreq __user *p32)
{
+ struct xfs_mount *mp = XFS_I(file_inode(file))->i_mount;
u32 addr;
struct xfs_fsop_bulkreq bulkreq;
struct xfs_ibulk breq = {
.mp = mp,
+ .mnt_userns = file_mnt_user_ns(file),
.ocount = 0,
};
xfs_ino_t lastino;
@@ -436,7 +438,6 @@ xfs_file_compat_ioctl(
{
struct inode *inode = file_inode(filp);
struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
void __user *arg = compat_ptr(p);
int error;
@@ -456,7 +457,7 @@ xfs_file_compat_ioctl(
return xfs_ioc_space(filp, &bf);
}
case XFS_IOC_FSGEOMETRY_V1_32:
- return xfs_compat_ioc_fsgeometry_v1(mp, arg);
+ return xfs_compat_ioc_fsgeometry_v1(ip->i_mount, arg);
case XFS_IOC_FSGROWFSDATA_32: {
struct xfs_growfs_data in;
@@ -465,7 +466,7 @@ xfs_file_compat_ioctl(
error = mnt_want_write_file(filp);
if (error)
return error;
- error = xfs_growfs_data(mp, &in);
+ error = xfs_growfs_data(ip->i_mount, &in);
mnt_drop_write_file(filp);
return error;
}
@@ -477,7 +478,7 @@ xfs_file_compat_ioctl(
error = mnt_want_write_file(filp);
if (error)
return error;
- error = xfs_growfs_rt(mp, &in);
+ error = xfs_growfs_rt(ip->i_mount, &in);
mnt_drop_write_file(filp);
return error;
}
@@ -507,7 +508,7 @@ xfs_file_compat_ioctl(
case XFS_IOC_FSBULKSTAT_32:
case XFS_IOC_FSBULKSTAT_SINGLE_32:
case XFS_IOC_FSINUMBERS_32:
- return xfs_compat_ioc_fsbulkstat(mp, cmd, arg);
+ return xfs_compat_ioc_fsbulkstat(filp, cmd, arg);
case XFS_IOC_FD_TO_HANDLE_32:
case XFS_IOC_PATH_TO_HANDLE_32:
case XFS_IOC_PATH_TO_FSHANDLE_32: {
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 7b9ff824e82d..e17ab7f42928 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -194,25 +194,21 @@ xfs_iomap_write_direct(
struct xfs_trans *tp;
xfs_filblks_t resaligned;
int nimaps;
- int quota_flag;
- uint qblocks, resblks;
- unsigned int resrtextents = 0;
+ unsigned int dblocks, rblocks;
+ bool force = false;
int error;
int bmapi_flags = XFS_BMAPI_PREALLOC;
- uint tflags = 0;
ASSERT(count_fsb > 0);
resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
xfs_get_extsz_hint(ip));
if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
- resrtextents = qblocks = resaligned;
- resrtextents /= mp->m_sb.sb_rextsize;
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
- quota_flag = XFS_QMOPT_RES_RTBLKS;
+ dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+ rblocks = resaligned;
} else {
- resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
- quota_flag = XFS_QMOPT_RES_REGBLKS;
+ dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
+ rblocks = 0;
}
error = xfs_qm_dqattach(ip);
@@ -235,23 +231,21 @@ xfs_iomap_write_direct(
if (IS_DAX(VFS_I(ip))) {
bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
if (imap->br_state == XFS_EXT_UNWRITTEN) {
- tflags |= XFS_TRANS_RESERVE;
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
+ force = true;
+ dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
}
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
- tflags, &tp);
+
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
+ rblocks, force, &tp);
if (error)
return error;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
+ error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
+ XFS_IEXT_ADD_NOSPLIT_CNT);
if (error)
goto out_trans_cancel;
- xfs_trans_ijoin(tp, ip, 0);
-
/*
* From this point onwards we overwrite the imap pointer that the
* caller gave to us.
@@ -260,7 +254,7 @@ xfs_iomap_write_direct(
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
imap, &nimaps);
if (error)
- goto out_res_cancel;
+ goto out_trans_cancel;
/*
* Complete the transaction
@@ -284,8 +278,6 @@ out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
-out_res_cancel:
- xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
xfs_trans_cancel(tp);
goto out_unlock;
@@ -548,16 +540,13 @@ xfs_iomap_write_unwritten(
* here as we might be asked to write out the same inode that we
* complete here and might deadlock on the iolock.
*/
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
- XFS_TRANS_RESERVE, &tp);
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
+ 0, true, &tp);
if (error)
return error;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
-
- error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
- XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES);
+ error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
+ XFS_IEXT_WRITE_UNWRITTEN_CNT);
if (error)
goto error_on_bmapi_transaction;
@@ -784,15 +773,28 @@ xfs_direct_write_iomap_begin(
goto allocate_blocks;
/*
- * NOWAIT IO needs to span the entire requested IO with a single map so
- * that we avoid partial IO failures due to the rest of the IO range not
- * covered by this map triggering an EAGAIN condition when it is
- * subsequently mapped and aborting the IO.
+ * NOWAIT and OVERWRITE I/O needs to span the entire requested I/O with
+ * a single map so that we avoid partial IO failures due to the rest of
+ * the I/O range not covered by this map triggering an EAGAIN condition
+ * when it is subsequently mapped and aborting the I/O.
*/
- if ((flags & IOMAP_NOWAIT) &&
- !imap_spans_range(&imap, offset_fsb, end_fsb)) {
+ if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
error = -EAGAIN;
- goto out_unlock;
+ if (!imap_spans_range(&imap, offset_fsb, end_fsb))
+ goto out_unlock;
+ }
+
+ /*
+ * For overwrite only I/O, we cannot convert unwritten extents without
+ * requiring sub-block zeroing. This can only be done under an
+ * exclusive IOLOCK, hence return -EAGAIN if this is not a written
+ * extent to tell the caller to try again.
+ */
+ if (flags & IOMAP_OVERWRITE_ONLY) {
+ error = -EAGAIN;
+ if (imap.br_state != XFS_EXT_NORM &&
+ ((offset | length) & mp->m_blockmask))
+ goto out_unlock;
}
xfs_iunlock(ip, lockmode);
@@ -801,7 +803,7 @@ xfs_direct_write_iomap_begin(
allocate_blocks:
error = -EAGAIN;
- if (flags & IOMAP_NOWAIT)
+ if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
goto out_unlock;
/*
@@ -842,7 +844,8 @@ out_found_cow:
return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
out_unlock:
- xfs_iunlock(ip, lockmode);
+ if (lockmode)
+ xfs_iunlock(ip, lockmode);
return error;
}
@@ -870,6 +873,9 @@ xfs_buffered_write_iomap_begin(
int allocfork = XFS_DATA_FORK;
int error = 0;
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
/* we can't use delayed allocations when using extent size hints */
if (xfs_get_extsz_hint(ip))
return xfs_direct_write_iomap_begin(inode, offset, count,
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 67c8dc9de8aa..66ebccb5a6ff 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -128,6 +128,7 @@ xfs_cleanup_inode(
STATIC int
xfs_generic_create(
+ struct user_namespace *mnt_userns,
struct inode *dir,
struct dentry *dentry,
umode_t mode,
@@ -161,9 +162,10 @@ xfs_generic_create(
goto out_free_acl;
if (!tmpfile) {
- error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
+ error = xfs_create(mnt_userns, XFS_I(dir), &name, mode, rdev,
+ &ip);
} else {
- error = xfs_create_tmpfile(XFS_I(dir), mode, &ip);
+ error = xfs_create_tmpfile(mnt_userns, XFS_I(dir), mode, &ip);
}
if (unlikely(error))
goto out_free_acl;
@@ -220,31 +222,35 @@ xfs_generic_create(
STATIC int
xfs_vn_mknod(
- struct inode *dir,
- struct dentry *dentry,
- umode_t mode,
- dev_t rdev)
+ struct user_namespace *mnt_userns,
+ struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode,
+ dev_t rdev)
{
- return xfs_generic_create(dir, dentry, mode, rdev, false);
+ return xfs_generic_create(mnt_userns, dir, dentry, mode, rdev, false);
}
STATIC int
xfs_vn_create(
- struct inode *dir,
- struct dentry *dentry,
- umode_t mode,
- bool flags)
+ struct user_namespace *mnt_userns,
+ struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode,
+ bool flags)
{
- return xfs_generic_create(dir, dentry, mode, 0, false);
+ return xfs_generic_create(mnt_userns, dir, dentry, mode, 0, false);
}
STATIC int
xfs_vn_mkdir(
- struct inode *dir,
- struct dentry *dentry,
- umode_t mode)
+ struct user_namespace *mnt_userns,
+ struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
{
- return xfs_generic_create(dir, dentry, mode | S_IFDIR, 0, false);
+ return xfs_generic_create(mnt_userns, dir, dentry, mode | S_IFDIR, 0,
+ false);
}
STATIC struct dentry *
@@ -361,9 +367,10 @@ xfs_vn_unlink(
STATIC int
xfs_vn_symlink(
- struct inode *dir,
- struct dentry *dentry,
- const char *symname)
+ struct user_namespace *mnt_userns,
+ struct inode *dir,
+ struct dentry *dentry,
+ const char *symname)
{
struct inode *inode;
struct xfs_inode *cip = NULL;
@@ -377,7 +384,7 @@ xfs_vn_symlink(
if (unlikely(error))
goto out;
- error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
+ error = xfs_symlink(mnt_userns, XFS_I(dir), &name, symname, mode, &cip);
if (unlikely(error))
goto out;
@@ -403,11 +410,12 @@ xfs_vn_symlink(
STATIC int
xfs_vn_rename(
- struct inode *odir,
- struct dentry *odentry,
- struct inode *ndir,
- struct dentry *ndentry,
- unsigned int flags)
+ struct user_namespace *mnt_userns,
+ struct inode *odir,
+ struct dentry *odentry,
+ struct inode *ndir,
+ struct dentry *ndentry,
+ unsigned int flags)
{
struct inode *new_inode = d_inode(ndentry);
int omode = 0;
@@ -431,8 +439,8 @@ xfs_vn_rename(
if (unlikely(error))
return error;
- return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
- XFS_I(ndir), &nname,
+ return xfs_rename(mnt_userns, XFS_I(odir), &oname,
+ XFS_I(d_inode(odentry)), XFS_I(ndir), &nname,
new_inode ? XFS_I(new_inode) : NULL, flags);
}
@@ -529,6 +537,7 @@ xfs_stat_blksize(
STATIC int
xfs_vn_getattr(
+ struct user_namespace *mnt_userns,
const struct path *path,
struct kstat *stat,
u32 request_mask,
@@ -547,8 +556,8 @@ xfs_vn_getattr(
stat->dev = inode->i_sb->s_dev;
stat->mode = inode->i_mode;
stat->nlink = inode->i_nlink;
- stat->uid = inode->i_uid;
- stat->gid = inode->i_gid;
+ stat->uid = i_uid_into_mnt(mnt_userns, inode);
+ stat->gid = i_gid_into_mnt(mnt_userns, inode);
stat->ino = ip->i_ino;
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
@@ -626,8 +635,9 @@ xfs_setattr_time(
static int
xfs_vn_change_ok(
- struct dentry *dentry,
- struct iattr *iattr)
+ struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ struct iattr *iattr)
{
struct xfs_mount *mp = XFS_I(d_inode(dentry))->i_mount;
@@ -637,7 +647,7 @@ xfs_vn_change_ok(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- return setattr_prepare(dentry, iattr);
+ return setattr_prepare(mnt_userns, dentry, iattr);
}
/*
@@ -648,6 +658,7 @@ xfs_vn_change_ok(
*/
static int
xfs_setattr_nonsize(
+ struct user_namespace *mnt_userns,
struct xfs_inode *ip,
struct iattr *iattr)
{
@@ -700,13 +711,11 @@ xfs_setattr_nonsize(
return error;
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+ error = xfs_trans_alloc_ichange(ip, udqp, gdqp, NULL,
+ capable(CAP_FOWNER), &tp);
if (error)
goto out_dqrele;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
-
/*
* Change file ownership. Must be the owner or privileged.
*/
@@ -723,21 +732,6 @@ xfs_setattr_nonsize(
uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
/*
- * Do a quota reservation only if uid/gid is actually
- * going to change.
- */
- if (XFS_IS_QUOTA_RUNNING(mp) &&
- ((XFS_IS_UQUOTA_ON(mp) && !uid_eq(iuid, uid)) ||
- (XFS_IS_GQUOTA_ON(mp) && !gid_eq(igid, gid)))) {
- ASSERT(tp);
- error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
- NULL, capable(CAP_FOWNER) ?
- XFS_QMOPT_FORCE_RES : 0);
- if (error) /* out of quota */
- goto out_cancel;
- }
-
- /*
* CAP_FSETID overrides the following restrictions:
*
* The set-user-ID and set-group-ID bits of a file will be
@@ -786,8 +780,6 @@ xfs_setattr_nonsize(
xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
/*
* Release any dquot(s) the inode had kept before chown.
*/
@@ -807,16 +799,13 @@ xfs_setattr_nonsize(
* Posix ACL code seems to care about this issue either.
*/
if (mask & ATTR_MODE) {
- error = posix_acl_chmod(inode, inode->i_mode);
+ error = posix_acl_chmod(mnt_userns, inode, inode->i_mode);
if (error)
return error;
}
return 0;
-out_cancel:
- xfs_trans_cancel(tp);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_dqrele:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
@@ -831,6 +820,7 @@ out_dqrele:
*/
STATIC int
xfs_setattr_size(
+ struct user_namespace *mnt_userns,
struct xfs_inode *ip,
struct iattr *iattr)
{
@@ -846,7 +836,7 @@ xfs_setattr_size(
ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
ASSERT(S_ISREG(inode->i_mode));
ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
- ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+ ATTR_MTIME_SET|ATTR_TIMES_SET)) == 0);
oldsize = inode->i_size;
newsize = iattr->ia_size;
@@ -862,7 +852,7 @@ xfs_setattr_size(
* Use the regular setattr path to update the timestamps.
*/
iattr->ia_valid &= ~ATTR_SIZE;
- return xfs_setattr_nonsize(ip, iattr);
+ return xfs_setattr_nonsize(mnt_userns, ip, iattr);
}
/*
@@ -1031,6 +1021,7 @@ out_trans_cancel:
int
xfs_vn_setattr_size(
+ struct user_namespace *mnt_userns,
struct dentry *dentry,
struct iattr *iattr)
{
@@ -1039,14 +1030,15 @@ xfs_vn_setattr_size(
trace_xfs_setattr(ip);
- error = xfs_vn_change_ok(dentry, iattr);
+ error = xfs_vn_change_ok(mnt_userns, dentry, iattr);
if (error)
return error;
- return xfs_setattr_size(ip, iattr);
+ return xfs_setattr_size(mnt_userns, ip, iattr);
}
STATIC int
xfs_vn_setattr(
+ struct user_namespace *mnt_userns,
struct dentry *dentry,
struct iattr *iattr)
{
@@ -1066,14 +1058,14 @@ xfs_vn_setattr(
return error;
}
- error = xfs_vn_setattr_size(dentry, iattr);
+ error = xfs_vn_setattr_size(mnt_userns, dentry, iattr);
xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
} else {
trace_xfs_setattr(ip);
- error = xfs_vn_change_ok(dentry, iattr);
+ error = xfs_vn_change_ok(mnt_userns, dentry, iattr);
if (!error)
- error = xfs_setattr_nonsize(ip, iattr);
+ error = xfs_setattr_nonsize(mnt_userns, ip, iattr);
}
return error;
@@ -1144,11 +1136,12 @@ xfs_vn_fiemap(
STATIC int
xfs_vn_tmpfile(
- struct inode *dir,
- struct dentry *dentry,
- umode_t mode)
+ struct user_namespace *mnt_userns,
+ struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
{
- return xfs_generic_create(dir, dentry, mode, 0, true);
+ return xfs_generic_create(mnt_userns, dir, dentry, mode, 0, true);
}
static const struct inode_operations xfs_inode_operations = {
diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h
index 99ca745c1071..278949056048 100644
--- a/fs/xfs/xfs_iops.h
+++ b/fs/xfs/xfs_iops.h
@@ -14,6 +14,7 @@ extern const struct file_operations xfs_dir_file_operations;
extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size);
extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr);
-extern int xfs_vn_setattr_size(struct dentry *dentry, struct iattr *vap);
+int xfs_vn_setattr_size(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *vap);
#endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 16ca97a7ff00..3498b97fb06d 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -54,10 +54,12 @@ struct xfs_bstat_chunk {
STATIC int
xfs_bulkstat_one_int(
struct xfs_mount *mp,
+ struct user_namespace *mnt_userns,
struct xfs_trans *tp,
xfs_ino_t ino,
struct xfs_bstat_chunk *bc)
{
+ struct user_namespace *sb_userns = mp->m_super->s_user_ns;
struct xfs_icdinode *dic; /* dinode core info pointer */
struct xfs_inode *ip; /* incore inode pointer */
struct inode *inode;
@@ -86,8 +88,8 @@ xfs_bulkstat_one_int(
*/
buf->bs_projectid = ip->i_d.di_projid;
buf->bs_ino = ino;
- buf->bs_uid = i_uid_read(inode);
- buf->bs_gid = i_gid_read(inode);
+ buf->bs_uid = from_kuid(sb_userns, i_uid_into_mnt(mnt_userns, inode));
+ buf->bs_gid = from_kgid(sb_userns, i_gid_into_mnt(mnt_userns, inode));
buf->bs_size = dic->di_size;
buf->bs_nlink = inode->i_nlink;
@@ -166,6 +168,12 @@ xfs_bulkstat_one(
};
int error;
+ if (breq->mnt_userns != &init_user_ns) {
+ xfs_warn_ratelimited(breq->mp,
+ "bulkstat not supported inside of idmapped mounts.");
+ return -EINVAL;
+ }
+
ASSERT(breq->icount == 1);
bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
@@ -173,7 +181,8 @@ xfs_bulkstat_one(
if (!bc.buf)
return -ENOMEM;
- error = xfs_bulkstat_one_int(breq->mp, NULL, breq->startino, &bc);
+ error = xfs_bulkstat_one_int(breq->mp, breq->mnt_userns, NULL,
+ breq->startino, &bc);
kmem_free(bc.buf);
@@ -194,9 +203,10 @@ xfs_bulkstat_iwalk(
xfs_ino_t ino,
void *data)
{
+ struct xfs_bstat_chunk *bc = data;
int error;
- error = xfs_bulkstat_one_int(mp, tp, ino, data);
+ error = xfs_bulkstat_one_int(mp, bc->breq->mnt_userns, tp, ino, data);
/* bulkstat just skips over missing inodes */
if (error == -ENOENT || error == -EINVAL)
return 0;
@@ -239,6 +249,11 @@ xfs_bulkstat(
};
int error;
+ if (breq->mnt_userns != &init_user_ns) {
+ xfs_warn_ratelimited(breq->mp,
+ "bulkstat not supported inside of idmapped mounts.");
+ return -EINVAL;
+ }
if (xfs_bulkstat_already_done(breq->mp, breq->startino))
return 0;
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 96a1e2a9be3f..7078d10c9b12 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -8,6 +8,7 @@
/* In-memory representation of a userspace request for batch inode data. */
struct xfs_ibulk {
struct xfs_mount *mp;
+ struct user_namespace *mnt_userns;
void __user *ubuffer; /* user output buffer */
xfs_ino_t startino; /* start with this inode */
unsigned int icount; /* number of elements in ubuffer */
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index eae3aff9bc97..c4a340f1f1e1 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -618,15 +618,12 @@ xfs_iwalk_threaded(
{
struct xfs_pwork_ctl pctl;
xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, startino);
- unsigned int nr_threads;
int error;
ASSERT(agno < mp->m_sb.sb_agcount);
ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));
- nr_threads = xfs_pwork_guess_datadev_parallelism(mp);
- error = xfs_pwork_init(mp, &pctl, xfs_iwalk_ag_work, "xfs_iwalk",
- nr_threads);
+ error = xfs_pwork_init(mp, &pctl, xfs_iwalk_ag_work, "xfs_iwalk");
if (error)
return error;
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 5b7a1e201559..af6be9b9ccdf 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -98,8 +98,7 @@ typedef __u32 xfs_nlink_t;
#define xfs_rotorstep xfs_params.rotorstep.val
#define xfs_inherit_nodefrag xfs_params.inherit_nodfrg.val
#define xfs_fstrm_centisecs xfs_params.fstrm_timer.val
-#define xfs_eofb_secs xfs_params.eofb_timer.val
-#define xfs_cowb_secs xfs_params.cowb_timer.val
+#define xfs_blockgc_secs xfs_params.blockgc_timer.val
#define current_cpu() (raw_smp_processor_id())
#define current_set_flags_nested(sp, f) \
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index fa2d05e65ff1..06041834daa3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -91,6 +91,9 @@ STATIC int
xlog_iclogs_empty(
struct xlog *log);
+static int
+xfs_log_cover(struct xfs_mount *);
+
static void
xlog_grant_sub_space(
struct xlog *log,
@@ -347,6 +350,25 @@ xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
tic->t_res_num++;
}
+bool
+xfs_log_writable(
+ struct xfs_mount *mp)
+{
+ /*
+ * Never write to the log on norecovery mounts, if the block device is
+ * read-only, or if the filesystem is shutdown. Read-only mounts still
+ * allow internal writes for log recovery and unmount purposes, so don't
+ * restrict that case here.
+ */
+ if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+ return false;
+ if (xfs_readonly_buftarg(mp->m_log->l_targ))
+ return false;
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return false;
+ return true;
+}
+
/*
* Replenish the byte reservation required by moving the grant write head.
*/
@@ -741,7 +763,7 @@ xfs_log_mount_finish(
xfs_log_force(mp, XFS_LOG_SYNC);
xfs_ail_push_all_sync(mp->m_ail);
}
- xfs_wait_buftarg(mp->m_ddev_targp);
+ xfs_buftarg_drain(mp->m_ddev_targp);
if (readonly)
mp->m_flags |= XFS_MOUNT_RDONLY;
@@ -886,15 +908,8 @@ xfs_log_unmount_write(
{
struct xlog *log = mp->m_log;
- /*
- * Don't write out unmount record on norecovery mounts or ro devices.
- * Or, if we are doing a forced umount (typically because of IO errors).
- */
- if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
- xfs_readonly_buftarg(log->l_targ)) {
- ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
+ if (!xfs_log_writable(mp))
return;
- }
xfs_log_force(mp, XFS_LOG_SYNC);
@@ -924,10 +939,9 @@ xfs_log_unmount_write(
* To do this, we first need to shut down the background log work so it is not
* trying to cover the log as we clean up. We then need to unpin all objects in
* the log so we can then flush them out. Once they have completed their IO and
- * run the callbacks removing themselves from the AIL, we can write the unmount
- * record.
+ * run the callbacks removing themselves from the AIL, we can cover the log.
*/
-void
+int
xfs_log_quiesce(
struct xfs_mount *mp)
{
@@ -936,16 +950,24 @@ xfs_log_quiesce(
/*
* The superblock buffer is uncached and while xfs_ail_push_all_sync()
- * will push it, xfs_wait_buftarg() will not wait for it. Further,
+ * will push it, xfs_buftarg_wait() will not wait for it. Further,
* xfs_buf_iowait() cannot be used because it was pushed with the
* XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
* the IO to complete.
*/
xfs_ail_push_all_sync(mp->m_ail);
- xfs_wait_buftarg(mp->m_ddev_targp);
+ xfs_buftarg_wait(mp->m_ddev_targp);
xfs_buf_lock(mp->m_sb_bp);
xfs_buf_unlock(mp->m_sb_bp);
+ return xfs_log_cover(mp);
+}
+
+void
+xfs_log_clean(
+ struct xfs_mount *mp)
+{
+ xfs_log_quiesce(mp);
xfs_log_unmount_write(mp);
}
@@ -960,7 +982,9 @@ void
xfs_log_unmount(
struct xfs_mount *mp)
{
- xfs_log_quiesce(mp);
+ xfs_log_clean(mp);
+
+ xfs_buftarg_drain(mp->m_ddev_targp);
xfs_trans_ail_destroy(mp);
@@ -1037,17 +1061,15 @@ xfs_log_space_wake(
* there's no point in running a dummy transaction at this point because we
* can't start trying to idle the log until both the CIL and AIL are empty.
*/
-static int
-xfs_log_need_covered(xfs_mount_t *mp)
+static bool
+xfs_log_need_covered(
+ struct xfs_mount *mp)
{
- struct xlog *log = mp->m_log;
- int needed = 0;
-
- if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
- return 0;
+ struct xlog *log = mp->m_log;
+ bool needed = false;
if (!xlog_cil_empty(log))
- return 0;
+ return false;
spin_lock(&log->l_icloglock);
switch (log->l_covered_state) {
@@ -1062,14 +1084,14 @@ xfs_log_need_covered(xfs_mount_t *mp)
if (!xlog_iclogs_empty(log))
break;
- needed = 1;
+ needed = true;
if (log->l_covered_state == XLOG_STATE_COVER_NEED)
log->l_covered_state = XLOG_STATE_COVER_DONE;
else
log->l_covered_state = XLOG_STATE_COVER_DONE2;
break;
default:
- needed = 1;
+ needed = true;
break;
}
spin_unlock(&log->l_icloglock);
@@ -1077,6 +1099,60 @@ xfs_log_need_covered(xfs_mount_t *mp)
}
/*
+ * Explicitly cover the log. This is similar to background log covering but
+ * intended for use in quiesce codepaths. The caller is responsible for ensuring
+ * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
+ * must all be empty.
+ */
+static int
+xfs_log_cover(
+ struct xfs_mount *mp)
+{
+ int error = 0;
+ bool need_covered;
+
+ ASSERT((xlog_cil_empty(mp->m_log) && xlog_iclogs_empty(mp->m_log) &&
+ !xfs_ail_min_lsn(mp->m_log->l_ailp)) ||
+ XFS_FORCED_SHUTDOWN(mp));
+
+ if (!xfs_log_writable(mp))
+ return 0;
+
+ /*
+ * xfs_log_need_covered() is not idempotent because it progresses the
+ * state machine if the log requires covering. Therefore, we must call
+ * this function once and use the result until we've issued an sb sync.
+ * Do so first to make that abundantly clear.
+ *
+ * Fall into the covering sequence if the log needs covering or the
+ * mount has lazy superblock accounting to sync to disk. The sb sync
+ * used for covering accumulates the in-core counters, so covering
+ * handles this for us.
+ */
+ need_covered = xfs_log_need_covered(mp);
+ if (!need_covered && !xfs_sb_version_haslazysbcount(&mp->m_sb))
+ return 0;
+
+ /*
+ * To cover the log, commit the superblock twice (at most) in
+ * independent checkpoints. The first serves as a reference for the
+ * tail pointer. The sync transaction and AIL push empties the AIL and
+ * updates the in-core tail to the LSN of the first checkpoint. The
+ * second commit updates the on-disk tail with the in-core LSN,
+ * covering the log. Push the AIL one more time to leave it empty, as
+ * we found it.
+ */
+ do {
+ error = xfs_sync_sb(mp, true);
+ if (error)
+ break;
+ xfs_ail_push_all_sync(mp->m_ail);
+ } while (xfs_log_need_covered(mp));
+
+ return error;
+}
+
+/*
* We may be holding the log iclog lock upon entering this routine.
*/
xfs_lsn_t
@@ -1259,7 +1335,7 @@ xfs_log_worker(
struct xfs_mount *mp = log->l_mp;
/* dgc: errors ignored - not fatal and nowhere to report them */
- if (xfs_log_need_covered(mp)) {
+ if (xfs_fs_writable(mp, SB_FREEZE_WRITE) && xfs_log_need_covered(mp)) {
/*
* Dump a transaction into the log that contains no real change.
* This is needed to stamp the current tail LSN into the log
@@ -1416,8 +1492,9 @@ xlog_alloc_log(
log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */
log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
- WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI, 0,
- mp->m_super->s_id);
+ XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM |
+ WQ_HIGHPRI),
+ 0, mp->m_super->s_id);
if (!log->l_ioend_workqueue)
goto out_free_iclog;
@@ -2538,12 +2615,15 @@ xlog_covered_state(
int iclogs_changed)
{
/*
- * We usually go to NEED. But we go to NEED2 if the changed indicates we
- * are done writing the dummy record. If we are done with the second
- * dummy recored (DONE2), then we go to IDLE.
+ * We go to NEED for any non-covering writes. We go to NEED2 if we just
+ * wrote the first covering record (DONE). We go to IDLE if we just
+ * wrote the second covering record (DONE2) and remain in IDLE until a
+ * non-covering write occurs.
*/
switch (prev_state) {
case XLOG_STATE_COVER_IDLE:
+ if (iclogs_changed == 1)
+ return XLOG_STATE_COVER_IDLE;
case XLOG_STATE_COVER_NEED:
case XLOG_STATE_COVER_NEED2:
break;
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 58c3fcbec94a..044e02cb8921 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -127,6 +127,7 @@ int xfs_log_reserve(struct xfs_mount *mp,
int xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void xfs_log_unmount(struct xfs_mount *mp);
int xfs_log_force_umount(struct xfs_mount *mp, int logerror);
+bool xfs_log_writable(struct xfs_mount *mp);
struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void xfs_log_ticket_put(struct xlog_ticket *ticket);
@@ -137,7 +138,8 @@ void xlog_cil_process_committed(struct list_head *list);
bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
void xfs_log_work_queue(struct xfs_mount *mp);
-void xfs_log_quiesce(struct xfs_mount *mp);
+int xfs_log_quiesce(struct xfs_mount *mp);
+void xfs_log_clean(struct xfs_mount *mp);
bool xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
bool xfs_log_in_recovery(struct xfs_mount *);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 7110507a2b6b..1c97b155a8ee 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -126,6 +126,7 @@ __xfs_free_perag(
{
struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
+ ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
ASSERT(atomic_read(&pag->pag_ref) == 0);
kmem_free(pag);
}
@@ -146,6 +147,7 @@ xfs_free_perag(
spin_unlock(&mp->m_perag_lock);
ASSERT(pag);
ASSERT(atomic_read(&pag->pag_ref) == 0);
+ cancel_delayed_work_sync(&pag->pag_blockgc_work);
xfs_iunlink_destroy(pag);
xfs_buf_hash_destroy(pag);
call_rcu(&pag->rcu_head, __xfs_free_perag);
@@ -201,6 +203,7 @@ xfs_initialize_perag(
pag->pag_agno = index;
pag->pag_mount = mp;
spin_lock_init(&pag->pag_ici_lock);
+ INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
error = xfs_buf_hash_init(pag);
@@ -632,6 +635,47 @@ xfs_check_summary_counts(
}
/*
+ * Flush and reclaim dirty inodes in preparation for unmount. Inodes and
+ * internal inode structures can be sitting in the CIL and AIL at this point,
+ * so we need to unpin them, write them back and/or reclaim them before unmount
+ * can proceed.
+ *
+ * An inode cluster that has been freed can have its buffer still pinned in
+ * memory because the transaction is still sitting in an iclog. The stale inodes
+ * on that buffer will be pinned to the buffer until the transaction hits the
+ * disk and the callbacks run. Pushing the AIL will skip the stale inodes and
+ * may never see the pinned buffer, so nothing will push out the iclog and
+ * unpin the buffer.
+ *
+ * Hence we need to force the log to unpin everything first. However, log
+ * forces don't wait for the discards they issue to complete, so we have to
+ * explicitly wait for them to complete here as well.
+ *
+ * Then we can tell the world we are unmounting so that error handling knows
+ * that the filesystem is going away and we should error out anything that we
+ * have been retrying in the background. This will prevent never-ending
+ * retries in AIL pushing from hanging the unmount.
+ *
+ * Finally, we can push the AIL to clean all the remaining dirty objects, then
+ * reclaim the remaining inodes that are still in memory at this point in time.
+ */
+static void
+xfs_unmount_flush_inodes(
+ struct xfs_mount *mp)
+{
+ xfs_log_force(mp, XFS_LOG_SYNC);
+ xfs_extent_busy_wait_all(mp);
+ flush_workqueue(xfs_discard_wq);
+
+ mp->m_flags |= XFS_MOUNT_UNMOUNTING;
+
+ xfs_ail_push_all_sync(mp->m_ail);
+ cancel_delayed_work_sync(&mp->m_reclaim_work);
+ xfs_reclaim_inodes(mp);
+ xfs_health_unmount(mp);
+}
+
+/*
* This function does the following on an initial mount of a file system:
* - reads the superblock from disk and init the mount struct
* - if we're a 32-bit kernel, do a size check on the superblock
@@ -946,7 +990,7 @@ xfs_mountfs(
*/
if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
XFS_MOUNT_RDONLY) {
- xfs_quiesce_attr(mp);
+ xfs_log_clean(mp);
}
/*
@@ -1005,7 +1049,7 @@ xfs_mountfs(
/* Clean out dquots that might be in memory after quotacheck. */
xfs_qm_unmount(mp);
/*
- * Cancel all delayed reclaim work and reclaim the inodes directly.
+ * Flush all inode reclamation work and flush the log.
* We have to do this /after/ rtunmount and qm_unmount because those
* two will have scheduled delayed reclaim for the rt/quota inodes.
*
@@ -1015,16 +1059,13 @@ xfs_mountfs(
* qm_unmount_quotas and therefore rely on qm_unmount to release the
* quota inodes.
*/
- cancel_delayed_work_sync(&mp->m_reclaim_work);
- xfs_reclaim_inodes(mp);
- xfs_health_unmount(mp);
+ xfs_unmount_flush_inodes(mp);
out_log_dealloc:
- mp->m_flags |= XFS_MOUNT_UNMOUNTING;
xfs_log_mount_cancel(mp);
out_fail_wait:
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
- xfs_wait_buftarg(mp->m_logdev_targp);
- xfs_wait_buftarg(mp->m_ddev_targp);
+ xfs_buftarg_drain(mp->m_logdev_targp);
+ xfs_buftarg_drain(mp->m_ddev_targp);
out_free_perag:
xfs_free_perag(mp);
out_free_dir:
@@ -1054,53 +1095,13 @@ xfs_unmountfs(
uint64_t resblks;
int error;
- xfs_stop_block_reaping(mp);
+ xfs_blockgc_stop(mp);
xfs_fs_unreserve_ag_blocks(mp);
xfs_qm_unmount_quotas(mp);
xfs_rtunmount_inodes(mp);
xfs_irele(mp->m_rootip);
- /*
- * We can potentially deadlock here if we have an inode cluster
- * that has been freed has its buffer still pinned in memory because
- * the transaction is still sitting in a iclog. The stale inodes
- * on that buffer will be pinned to the buffer until the
- * transaction hits the disk and the callbacks run. Pushing the AIL will
- * skip the stale inodes and may never see the pinned buffer, so
- * nothing will push out the iclog and unpin the buffer. Hence we
- * need to force the log here to ensure all items are flushed into the
- * AIL before we go any further.
- */
- xfs_log_force(mp, XFS_LOG_SYNC);
-
- /*
- * Wait for all busy extents to be freed, including completion of
- * any discard operation.
- */
- xfs_extent_busy_wait_all(mp);
- flush_workqueue(xfs_discard_wq);
-
- /*
- * We now need to tell the world we are unmounting. This will allow
- * us to detect that the filesystem is going away and we should error
- * out anything that we have been retrying in the background. This will
- * prevent neverending retries in AIL pushing from hanging the unmount.
- */
- mp->m_flags |= XFS_MOUNT_UNMOUNTING;
-
- /*
- * Flush all pending changes from the AIL.
- */
- xfs_ail_push_all_sync(mp->m_ail);
-
- /*
- * Reclaim all inodes. At this point there should be no dirty inodes and
- * none should be pinned or locked. Stop background inode reclaim here
- * if it is still running.
- */
- cancel_delayed_work_sync(&mp->m_reclaim_work);
- xfs_reclaim_inodes(mp);
- xfs_health_unmount(mp);
+ xfs_unmount_flush_inodes(mp);
xfs_qm_unmount(mp);
@@ -1124,12 +1125,6 @@ xfs_unmountfs(
xfs_warn(mp, "Unable to free reserved block pool. "
"Freespace may not be correct on next mount.");
- error = xfs_log_sbcount(mp);
- if (error)
- xfs_warn(mp, "Unable to update superblock counters. "
- "Freespace may not be correct on next mount.");
-
-
xfs_log_unmount(mp);
xfs_da_unmount(mp);
xfs_uuid_unmount(mp);
@@ -1165,32 +1160,6 @@ xfs_fs_writable(
}
/*
- * xfs_log_sbcount
- *
- * Sync the superblock counters to disk.
- *
- * Note this code can be called during the process of freezing, so we use the
- * transaction allocator that does not block when the transaction subsystem is
- * in its frozen state.
- */
-int
-xfs_log_sbcount(xfs_mount_t *mp)
-{
- /* allow this to proceed during the freeze sequence... */
- if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
- return 0;
-
- /*
- * we don't need to do this if we are updating the superblock
- * counters on every modification.
- */
- if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
- return 0;
-
- return xfs_sync_sb(mp, true);
-}
-
-/*
* Deltas for the block count can vary from 1 to very large, but lock contention
* only occurs on frequent small block count updates such as in the delayed
* allocation path for buffered writes (page a time updates). Hence we set
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index dfa429b77ee2..659ad95fe3e0 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -93,7 +93,7 @@ typedef struct xfs_mount {
struct workqueue_struct *m_unwritten_workqueue;
struct workqueue_struct *m_cil_workqueue;
struct workqueue_struct *m_reclaim_workqueue;
- struct workqueue_struct *m_eofblocks_workqueue;
+ struct workqueue_struct *m_blockgc_workqueue;
struct workqueue_struct *m_sync_workqueue;
int m_bsize; /* fs logical block size */
@@ -177,10 +177,6 @@ typedef struct xfs_mount {
uint64_t m_resblks_avail;/* available reserved blocks */
uint64_t m_resblks_save; /* reserved blks @ remount,ro */
struct delayed_work m_reclaim_work; /* background inode reclaim */
- struct delayed_work m_eofblocks_work; /* background eof blocks
- trimming */
- struct delayed_work m_cowblocks_work; /* background cow blocks
- trimming */
struct xfs_kobj m_kobj;
struct xfs_kobj m_error_kobj;
struct xfs_kobj m_error_meta_kobj;
@@ -369,6 +365,9 @@ typedef struct xfs_perag {
/* Blocks reserved for the reverse mapping btree. */
struct xfs_ag_resv pag_rmapbt_resv;
+ /* background prealloc block trimming */
+ struct delayed_work pag_blockgc_work;
+
/* reference count */
uint8_t pagf_refcount_level;
@@ -399,7 +398,6 @@ int xfs_buf_hash_init(xfs_perag_t *pag);
void xfs_buf_hash_destroy(xfs_perag_t *pag);
extern void xfs_uuid_table_free(void);
-extern int xfs_log_sbcount(xfs_mount_t *);
extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int xfs_mountfs(xfs_mount_t *mp);
extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index a06661dac5be..34c3b16f834f 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -294,7 +294,7 @@ int
xfs_mru_cache_init(void)
{
xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 1);
+ XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 1);
if (!xfs_mru_reap_wq)
return -ENOMEM;
return 0;
diff --git a/fs/xfs/xfs_pwork.c b/fs/xfs/xfs_pwork.c
index b03333f1c84a..c283b801cc5d 100644
--- a/fs/xfs/xfs_pwork.c
+++ b/fs/xfs/xfs_pwork.c
@@ -61,16 +61,18 @@ xfs_pwork_init(
struct xfs_mount *mp,
struct xfs_pwork_ctl *pctl,
xfs_pwork_work_fn work_fn,
- const char *tag,
- unsigned int nr_threads)
+ const char *tag)
{
+ unsigned int nr_threads = 0;
+
#ifdef DEBUG
if (xfs_globals.pwork_threads >= 0)
nr_threads = xfs_globals.pwork_threads;
#endif
trace_xfs_pwork_init(mp, nr_threads, current->pid);
- pctl->wq = alloc_workqueue("%s-%d", WQ_FREEZABLE, nr_threads, tag,
+ pctl->wq = alloc_workqueue("%s-%d",
+ WQ_UNBOUND | WQ_SYSFS | WQ_FREEZABLE, nr_threads, tag,
current->pid);
if (!pctl->wq)
return -ENOMEM;
@@ -117,20 +119,3 @@ xfs_pwork_poll(
atomic_read(&pctl->nr_work) == 0, HZ) == 0)
touch_softlockup_watchdog();
}
-
-/*
- * Return the amount of parallelism that the data device can handle, or 0 for
- * no limit.
- */
-unsigned int
-xfs_pwork_guess_datadev_parallelism(
- struct xfs_mount *mp)
-{
- struct xfs_buftarg *btp = mp->m_ddev_targp;
-
- /*
- * For now we'll go with the most conservative setting possible,
- * which is two threads for an SSD and 1 thread everywhere else.
- */
- return blk_queue_nonrot(btp->bt_bdev->bd_disk->queue) ? 2 : 1;
-}
diff --git a/fs/xfs/xfs_pwork.h b/fs/xfs/xfs_pwork.h
index 8133124cf3bb..c0ef81fc85dd 100644
--- a/fs/xfs/xfs_pwork.h
+++ b/fs/xfs/xfs_pwork.h
@@ -51,11 +51,9 @@ xfs_pwork_want_abort(
}
int xfs_pwork_init(struct xfs_mount *mp, struct xfs_pwork_ctl *pctl,
- xfs_pwork_work_fn work_fn, const char *tag,
- unsigned int nr_threads);
+ xfs_pwork_work_fn work_fn, const char *tag);
void xfs_pwork_queue(struct xfs_pwork_ctl *pctl, struct xfs_pwork *pwork);
int xfs_pwork_destroy(struct xfs_pwork_ctl *pctl);
void xfs_pwork_poll(struct xfs_pwork_ctl *pctl);
-unsigned int xfs_pwork_guess_datadev_parallelism(struct xfs_mount *mp);
#endif /* __XFS_PWORK_H__ */
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index c134eb4aeaa8..bfa4164990b1 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -787,7 +787,8 @@ xfs_qm_qino_alloc(
return error;
if (need_alloc) {
- error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ipp);
+ error = xfs_dir_ialloc(&init_user_ns, &tp, NULL, S_IFREG, 1, 0,
+ 0, ipp);
if (error) {
xfs_trans_cancel(tp);
return error;
@@ -1786,105 +1787,35 @@ xfs_qm_vop_chown(
xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
/*
- * Take an extra reference, because the inode is going to keep
- * this dquot pointer even after the trans_commit.
+ * Back when we made quota reservations for the chown, we reserved the
+ * ondisk blocks + delalloc blocks with the new dquot. Now that we've
+ * switched the dquots, decrease the new dquot's block reservation
+ * (having already bumped up the real counter) so that we don't have
+ * any reservation to give back when we commit.
*/
- *IO_olddq = xfs_qm_dqhold(newdq);
+ xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
+ -ip->i_delayed_blks);
- return prevdq;
-}
-
-/*
- * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
- */
-int
-xfs_qm_vop_chown_reserve(
- struct xfs_trans *tp,
- struct xfs_inode *ip,
- struct xfs_dquot *udqp,
- struct xfs_dquot *gdqp,
- struct xfs_dquot *pdqp,
- uint flags)
-{
- struct xfs_mount *mp = ip->i_mount;
- uint64_t delblks;
- unsigned int blkflags;
- struct xfs_dquot *udq_unres = NULL;
- struct xfs_dquot *gdq_unres = NULL;
- struct xfs_dquot *pdq_unres = NULL;
- struct xfs_dquot *udq_delblks = NULL;
- struct xfs_dquot *gdq_delblks = NULL;
- struct xfs_dquot *pdq_delblks = NULL;
- int error;
-
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
-
- delblks = ip->i_delayed_blks;
- blkflags = XFS_IS_REALTIME_INODE(ip) ?
- XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
-
- if (XFS_IS_UQUOTA_ON(mp) && udqp &&
- i_uid_read(VFS_I(ip)) != udqp->q_id) {
- udq_delblks = udqp;
- /*
- * If there are delayed allocation blocks, then we have to
- * unreserve those from the old dquot, and add them to the
- * new dquot.
- */
- if (delblks) {
- ASSERT(ip->i_udquot);
- udq_unres = ip->i_udquot;
- }
- }
- if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
- i_gid_read(VFS_I(ip)) != gdqp->q_id) {
- gdq_delblks = gdqp;
- if (delblks) {
- ASSERT(ip->i_gdquot);
- gdq_unres = ip->i_gdquot;
- }
- }
-
- if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
- ip->i_d.di_projid != pdqp->q_id) {
- pdq_delblks = pdqp;
- if (delblks) {
- ASSERT(ip->i_pdquot);
- pdq_unres = ip->i_pdquot;
- }
- }
-
- error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
- udq_delblks, gdq_delblks, pdq_delblks,
- ip->i_d.di_nblocks, 1, flags | blkflags);
- if (error)
- return error;
+ /*
+ * Give the incore reservation for delalloc blocks back to the old
+ * dquot. We don't normally handle delalloc quota reservations
+ * transactionally, so just lock the dquot and subtract from the
+ * reservation. Dirty the transaction because it's too late to turn
+ * back now.
+ */
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ xfs_dqlock(prevdq);
+ ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
+ prevdq->q_blk.reserved -= ip->i_delayed_blks;
+ xfs_dqunlock(prevdq);
/*
- * Do the delayed blks reservations/unreservations now. Since, these
- * are done without the help of a transaction, if a reservation fails
- * its previous reservations won't be automatically undone by trans
- * code. So, we have to do it manually here.
+ * Take an extra reference, because the inode is going to keep
+ * this dquot pointer even after the trans_commit.
*/
- if (delblks) {
- /*
- * Do the reservations first. Unreservation can't fail.
- */
- ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
- ASSERT(udq_unres || gdq_unres || pdq_unres);
- error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
- udq_delblks, gdq_delblks, pdq_delblks,
- (xfs_qcnt_t)delblks, 0, flags | blkflags);
- if (error)
- return error;
- xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
- udq_unres, gdq_unres, pdq_unres,
- -((xfs_qcnt_t)delblks), 0, blkflags);
- }
+ *IO_olddq = xfs_qm_dqhold(newdq);
- return 0;
+ return prevdq;
}
int
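
To make the reservation juggling above concrete, here is a worked example with made-up numbers (the helper names are the ones used in this hunk): suppose the inode has di_nblocks = 100 on-disk blocks and i_delayed_blks = 20 delalloc blocks. The chown transaction reserved 100 + 20 = 120 blocks against the new dquot. Switching the dquots moves the 100 real blocks to the new dquot's counters, which consumes 100 of that reservation when the transaction commits; the XFS_TRANS_DQ_RES_BLKS adjustment of -20 trims the transaction's reservation record so nothing is handed back at commit, leaving the new dquot holding 20 reserved blocks for the still-outstanding delalloc extent. The matching 20 reserved blocks are then dropped directly from the old dquot under its lock, since that reservation now lives with the new dquot.
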
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 5a62398940d0..d00d01302545 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -81,11 +81,14 @@ extern void xfs_trans_mod_dquot_byino(struct xfs_trans *, struct xfs_inode *,
uint, int64_t);
extern void xfs_trans_apply_dquot_deltas(struct xfs_trans *);
extern void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *);
-extern int xfs_trans_reserve_quota_nblks(struct xfs_trans *,
- struct xfs_inode *, int64_t, long, uint);
+int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp, struct xfs_inode *ip,
+ int64_t dblocks, int64_t rblocks, bool force);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
struct xfs_mount *, struct xfs_dquot *,
struct xfs_dquot *, struct xfs_dquot *, int64_t, long, uint);
+int xfs_trans_reserve_quota_icreate(struct xfs_trans *tp,
+ struct xfs_dquot *udqp, struct xfs_dquot *gdqp,
+ struct xfs_dquot *pdqp, int64_t dblocks);
extern int xfs_qm_vop_dqalloc(struct xfs_inode *, kuid_t, kgid_t,
prid_t, uint, struct xfs_dquot **, struct xfs_dquot **,
@@ -95,9 +98,6 @@ extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **);
extern struct xfs_dquot *xfs_qm_vop_chown(struct xfs_trans *,
struct xfs_inode *, struct xfs_dquot **, struct xfs_dquot *);
-extern int xfs_qm_vop_chown_reserve(struct xfs_trans *, struct xfs_inode *,
- struct xfs_dquot *, struct xfs_dquot *,
- struct xfs_dquot *, uint);
extern int xfs_qm_dqattach(struct xfs_inode *);
extern int xfs_qm_dqattach_locked(struct xfs_inode *ip, bool doalloc);
extern void xfs_qm_dqdetach(struct xfs_inode *);
@@ -108,6 +108,11 @@ extern void xfs_qm_mount_quotas(struct xfs_mount *);
extern void xfs_qm_unmount(struct xfs_mount *);
extern void xfs_qm_unmount_quotas(struct xfs_mount *);
+static inline int
+xfs_quota_reserve_blkres(struct xfs_inode *ip, int64_t blocks)
+{
+ return xfs_trans_reserve_quota_nblks(NULL, ip, blocks, 0, false);
+}
#else
static inline int
xfs_qm_vop_dqalloc(struct xfs_inode *ip, kuid_t kuid, kgid_t kgid,
@@ -121,11 +126,12 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, kuid_t kuid, kgid_t kgid,
}
#define xfs_trans_dup_dqinfo(tp, tp2)
#define xfs_trans_free_dqinfo(tp)
-#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
+#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta) do { } while (0)
#define xfs_trans_apply_dquot_deltas(tp)
#define xfs_trans_unreserve_and_mod_dquots(tp)
static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
- struct xfs_inode *ip, int64_t nblks, long ninos, uint flags)
+ struct xfs_inode *ip, int64_t dblocks, int64_t rblocks,
+ bool force)
{
return 0;
}
@@ -136,26 +142,39 @@ static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
{
return 0;
}
+
+static inline int
+xfs_quota_reserve_blkres(struct xfs_inode *ip, int64_t blocks)
+{
+ return 0;
+}
+
+static inline int
+xfs_trans_reserve_quota_icreate(struct xfs_trans *tp, struct xfs_dquot *udqp,
+ struct xfs_dquot *gdqp, struct xfs_dquot *pdqp, int64_t dblocks)
+{
+ return 0;
+}
+
#define xfs_qm_vop_create_dqattach(tp, ip, u, g, p)
#define xfs_qm_vop_rename_dqattach(it) (0)
#define xfs_qm_vop_chown(tp, ip, old, new) (NULL)
-#define xfs_qm_vop_chown_reserve(tp, ip, u, g, p, fl) (0)
#define xfs_qm_dqattach(ip) (0)
#define xfs_qm_dqattach_locked(ip, fl) (0)
#define xfs_qm_dqdetach(ip)
-#define xfs_qm_dqrele(d)
-#define xfs_qm_statvfs(ip, s)
+#define xfs_qm_dqrele(d) do { (d) = (d); } while(0)
+#define xfs_qm_statvfs(ip, s) do { } while(0)
#define xfs_qm_newmount(mp, a, b) (0)
#define xfs_qm_mount_quotas(mp)
#define xfs_qm_unmount(mp)
#define xfs_qm_unmount_quotas(mp)
#endif /* CONFIG_XFS_QUOTA */
-#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
- xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), -(ninos), flags)
-#define xfs_trans_reserve_quota(tp, mp, ud, gd, pd, nb, ni, f) \
- xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, pd, nb, ni, \
- f | XFS_QMOPT_RES_REGBLKS)
+static inline int
+xfs_quota_unreserve_blkres(struct xfs_inode *ip, int64_t blocks)
+{
+ return xfs_quota_reserve_blkres(ip, -blocks);
+}
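
These two wrappers cover delalloc-style reservations made outside any transaction; for instance, the xfs_reflink.c hunk later in this diff releases a cancelled COW reservation with a call of this shape (condensed sketch of that hunk):

        error = xfs_quota_unreserve_blkres(ip, del.br_blockcount);
        if (error)
                break;
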
extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 6fa05fb78189..725c7d8e4438 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -376,16 +376,14 @@ xfs_reflink_allocate_cow(
resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
xfs_iunlock(ip, *lockmode);
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
- *lockmode = XFS_ILOCK_EXCL;
- xfs_ilock(ip, *lockmode);
+ *lockmode = 0;
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
+ false, &tp);
if (error)
return error;
- error = xfs_qm_dqattach_locked(ip, false);
- if (error)
- goto out_trans_cancel;
+ *lockmode = XFS_ILOCK_EXCL;
/*
* Check for an overlapping extent again now that we dropped the ilock.
@@ -398,20 +396,13 @@ xfs_reflink_allocate_cow(
goto convert;
}
- error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
- XFS_QMOPT_RES_REGBLKS);
- if (error)
- goto out_trans_cancel;
-
- xfs_trans_ijoin(tp, ip, 0);
-
/* Allocate the entire reservation as unwritten blocks. */
nimaps = 1;
error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0, cmap,
&nimaps);
if (error)
- goto out_unreserve;
+ goto out_trans_cancel;
xfs_inode_set_cowblocks_tag(ip);
error = xfs_trans_commit(tp);
@@ -436,9 +427,6 @@ convert:
trace_xfs_reflink_convert_cow(ip, cmap);
return xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
-out_unreserve:
- xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
- XFS_QMOPT_RES_REGBLKS);
out_trans_cancel:
xfs_trans_cancel(tp);
return error;
@@ -508,9 +496,8 @@ xfs_reflink_cancel_cow_blocks(
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
/* Remove the quota reservation */
- error = xfs_trans_reserve_quota_nblks(NULL, ip,
- -(long)del.br_blockcount, 0,
- XFS_QMOPT_RES_REGBLKS);
+ error = xfs_quota_unreserve_blkres(ip,
+ del.br_blockcount);
if (error)
break;
} else {
@@ -628,6 +615,11 @@ xfs_reflink_end_cow_extent(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
+ error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
+ XFS_IEXT_REFLINK_END_COW_CNT);
+ if (error)
+ goto out_cancel;
+
/*
* In case of racing, overlapping AIO writes no COW extents might be
* left by the time I/O completes for the loser of the race. In that
@@ -997,22 +989,47 @@ xfs_reflink_remap_extent(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
xfs_off_t newlen;
- int64_t qres, qdelta;
+ int64_t qdelta = 0;
unsigned int resblks;
+ bool quota_reserved = true;
bool smap_real;
bool dmap_written = xfs_bmap_is_written_extent(dmap);
+ int iext_delta = 0;
int nimaps;
int error;
- /* Start a rolling transaction to switch the mappings */
+ /*
+ * Start a rolling transaction to switch the mappings.
+ *
+ * Adding a written extent to the extent map can cause a bmbt split,
+ * and removing a mapped extent from the extent can cause a bmbt split.
+ * The two operations cannot both cause a split since they operate on
+ * the same index in the bmap btree, so we only need a reservation for
+ * one bmbt split if either thing is happening. However, we haven't
+ * locked the inode yet, so we reserve assuming this is the case.
+ *
+ * The first allocation call tries to reserve enough space to handle
+ * mapping dmap into a sparse part of the file plus the bmbt split. We
+ * haven't locked the inode or read the existing mapping yet, so we do
+ * not know for sure that we need the space. This should succeed most
+ * of the time.
+ *
+ * If the first attempt fails, try again but reserving only enough
+ * space to handle a bmbt split. This is the hard minimum requirement,
+ * and we revisit quota reservations later when we know more about what
+ * we're remapping.
+ */
resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
+ resblks + dmap->br_blockcount, 0, false, &tp);
+ if (error == -EDQUOT || error == -ENOSPC) {
+ quota_reserved = false;
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
+ resblks, 0, false, &tp);
+ }
if (error)
goto out;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
-
/*
* Read what's currently mapped in the destination file into smap.
* If smap isn't a hole, we will have to remove it before we can add
@@ -1060,15 +1077,9 @@ xfs_reflink_remap_extent(
}
/*
- * Compute quota reservation if we think the quota block counter for
+ * Increase quota reservation if we think the quota block counter for
* this file could increase.
*
- * Adding a written extent to the extent map can cause a bmbt split,
- * and removing a mapped extent from the extent can cause a bmbt split.
- * The two operations cannot both cause a split since they operate on
- * the same index in the bmap btree, so we only need a reservation for
- * one bmbt split if either thing is happening.
- *
* If we are mapping a written extent into the file, we need to have
* enough quota block count reservation to handle the blocks in that
* extent. We log only the delta to the quota block counts, so if the
@@ -1081,19 +1092,29 @@ xfs_reflink_remap_extent(
* count. This is suboptimal, but the VFS flushed the dest range
* before we started. That should have removed all the delalloc
* reservations, but we code defensively.
+ *
+ * xfs_trans_alloc_inode above already tried to grab an even larger
+ * quota reservation, and kicked off a blockgc scan if it couldn't.
+ * If we can't get a potentially smaller quota reservation now, we're
+ * done.
*/
- qres = qdelta = 0;
- if (smap_real || dmap_written)
- qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
- if (!smap_real && dmap_written)
- qres += dmap->br_blockcount;
- if (qres > 0) {
- error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0,
- XFS_QMOPT_RES_REGBLKS);
+ if (!quota_reserved && !smap_real && dmap_written) {
+ error = xfs_trans_reserve_quota_nblks(tp, ip,
+ dmap->br_blockcount, 0, false);
if (error)
goto out_cancel;
}
+ if (smap_real)
+ ++iext_delta;
+
+ if (dmap_written)
+ ++iext_delta;
+
+ error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK, iext_delta);
+ if (error)
+ goto out_cancel;
+
if (smap_real) {
/*
* If the extent we're unmapping is backed by storage (written
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index b4999fb01ff7..161b0e8992ba 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -804,6 +804,11 @@ xfs_growfs_rt_alloc(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
+ XFS_IEXT_ADD_NOSPLIT_CNT);
+ if (error)
+ goto out_trans_cancel;
+
/*
* Allocate blocks to the bitmap file.
*/
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 813be879a5e5..e5e0713bebcd 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -35,6 +35,7 @@
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
+#include "xfs_pwork.h"
#include <linux/magic.h>
#include <linux/fs_context.h>
@@ -342,7 +343,7 @@ void
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
{
- blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
+ blkdev_issue_flush(buftarg->bt_bdev);
}
STATIC void
@@ -495,40 +496,44 @@ xfs_init_mount_workqueues(
struct xfs_mount *mp)
{
mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
+ XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
+ 1, mp->m_super->s_id);
if (!mp->m_buf_workqueue)
goto out;
mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
+ XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
+ 0, mp->m_super->s_id);
if (!mp->m_unwritten_workqueue)
goto out_destroy_buf;
mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
- WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
+ XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
0, mp->m_super->s_id);
if (!mp->m_cil_workqueue)
goto out_destroy_unwritten;
mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
+ XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
+ 0, mp->m_super->s_id);
if (!mp->m_reclaim_workqueue)
goto out_destroy_cil;
- mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
- if (!mp->m_eofblocks_workqueue)
+ mp->m_blockgc_workqueue = alloc_workqueue("xfs-blockgc/%s",
+ WQ_SYSFS | WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ 0, mp->m_super->s_id);
+ if (!mp->m_blockgc_workqueue)
goto out_destroy_reclaim;
- mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
- mp->m_super->s_id);
+ mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
+ XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
if (!mp->m_sync_workqueue)
goto out_destroy_eofb;
return 0;
out_destroy_eofb:
- destroy_workqueue(mp->m_eofblocks_workqueue);
+ destroy_workqueue(mp->m_blockgc_workqueue);
out_destroy_reclaim:
destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
@@ -546,7 +551,7 @@ xfs_destroy_mount_workqueues(
struct xfs_mount *mp)
{
destroy_workqueue(mp->m_sync_workqueue);
- destroy_workqueue(mp->m_eofblocks_workqueue);
+ destroy_workqueue(mp->m_blockgc_workqueue);
destroy_workqueue(mp->m_reclaim_workqueue);
destroy_workqueue(mp->m_cil_workqueue);
destroy_workqueue(mp->m_unwritten_workqueue);
@@ -868,39 +873,6 @@ xfs_restore_resvblks(struct xfs_mount *mp)
}
/*
- * Trigger writeback of all the dirty metadata in the file system.
- *
- * This ensures that the metadata is written to their location on disk rather
- * than just existing in transactions in the log. This means after a quiesce
- * there is no log replay required to write the inodes to disk - this is the
- * primary difference between a sync and a quiesce.
- *
- * We cancel log work early here to ensure all transactions the log worker may
- * run have finished before we clean up and log the superblock and write an
- * unmount record. The unfreeze process is responsible for restarting the log
- * worker correctly.
- */
-void
-xfs_quiesce_attr(
- struct xfs_mount *mp)
-{
- int error = 0;
-
- cancel_delayed_work_sync(&mp->m_log->l_work);
-
- /* force the log to unpin objects from the now complete transactions */
- xfs_log_force(mp, XFS_LOG_SYNC);
-
-
- /* Push the superblock and write an unmount record */
- error = xfs_log_sbcount(mp);
- if (error)
- xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
- "Frozen image may not be consistent.");
- xfs_log_quiesce(mp);
-}
-
-/*
* Second stage of a freeze. The data is already frozen so we only
* need to take care of the metadata. Once that's done sync the superblock
* to the log to dirty it in case of a crash while frozen. This ensures that we
@@ -920,10 +892,9 @@ xfs_fs_freeze(
* set a GFP_NOFS context here to avoid recursion deadlocks.
*/
flags = memalloc_nofs_save();
- xfs_stop_block_reaping(mp);
+ xfs_blockgc_stop(mp);
xfs_save_resvblks(mp);
- xfs_quiesce_attr(mp);
- ret = xfs_sync_sb(mp, true);
+ ret = xfs_log_quiesce(mp);
memalloc_nofs_restore(flags);
return ret;
}
@@ -936,7 +907,7 @@ xfs_fs_unfreeze(
xfs_restore_resvblks(mp);
xfs_log_work_queue(mp);
- xfs_start_block_reaping(mp);
+ xfs_blockgc_start(mp);
return 0;
}
@@ -1720,7 +1691,7 @@ xfs_remount_rw(
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
return error;
}
- xfs_start_block_reaping(mp);
+ xfs_blockgc_start(mp);
/* Create the per-AG metadata reservation pool .*/
error = xfs_fs_reserve_ag_blocks(mp);
@@ -1740,10 +1711,10 @@ xfs_remount_ro(
* Cancel background eofb scanning so it cannot race with the final
* log force+buftarg wait and deadlock the remount.
*/
- xfs_stop_block_reaping(mp);
+ xfs_blockgc_stop(mp);
/* Get rid of any leftover CoW reservations... */
- error = xfs_icache_free_cowblocks(mp, NULL);
+ error = xfs_blockgc_free_space(mp, NULL);
if (error) {
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
return error;
@@ -1765,7 +1736,7 @@ xfs_remount_ro(
*/
xfs_save_resvblks(mp);
- xfs_quiesce_attr(mp);
+ xfs_log_clean(mp);
mp->m_flags |= XFS_MOUNT_RDONLY;
return 0;
@@ -1872,8 +1843,6 @@ static int xfs_init_fs_context(
mutex_init(&mp->m_growlock);
INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
- INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
- INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
mp->m_kobj.kobject.kset = xfs_kset;
/*
* We don't create the finobt per-ag space reservation until after log
@@ -1912,7 +1881,7 @@ static struct file_system_type xfs_fs_type = {
.init_fs_context = xfs_init_fs_context,
.parameters = xfs_fs_parameters,
.kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");
@@ -2119,11 +2088,12 @@ xfs_init_workqueues(void)
* max_active value for this workqueue.
*/
xfs_alloc_wq = alloc_workqueue("xfsalloc",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
+ XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
if (!xfs_alloc_wq)
return -ENOMEM;
- xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
+ xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
+ 0);
if (!xfs_discard_wq)
goto out_free_alloc_wq;
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index b552cf6d3379..1ca484b8357f 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -75,6 +75,12 @@ extern void xfs_qm_exit(void);
XFS_ASSERT_FATAL_STRING \
XFS_DBG_STRING /* DBG must be last */
+#ifdef DEBUG
+# define XFS_WQFLAGS(wqflags) (WQ_SYSFS | (wqflags))
+#else
+# define XFS_WQFLAGS(wqflags) (wqflags)
+#endif
+
struct xfs_inode;
struct xfs_mount;
struct xfs_buftarg;
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 1f43fd7f3209..7f368b10ded1 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -134,6 +134,7 @@ xfs_readlink(
int
xfs_symlink(
+ struct user_namespace *mnt_userns,
struct xfs_inode *dp,
struct xfs_name *link_name,
const char *target_path,
@@ -181,7 +182,8 @@ xfs_symlink(
/*
* Make sure that we have allocated dquot(s) on disk.
*/
- error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
+ error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),
+ fsgid_into_mnt(mnt_userns), prid,
XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
&udqp, &gdqp, &pdqp);
if (error)
@@ -197,9 +199,10 @@ xfs_symlink(
fs_blocks = xfs_symlink_blocks(mp, pathlen);
resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp);
+ error = xfs_trans_alloc_icreate(mp, &M_RES(mp)->tr_symlink, udqp, gdqp,
+ pdqp, resblks, &tp);
if (error)
- goto out_release_inode;
+ goto out_release_dquots;
xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
unlock_dp_on_error = true;
@@ -212,19 +215,16 @@ xfs_symlink(
goto out_trans_cancel;
}
- /*
- * Reserve disk quota : blocks and inode.
- */
- error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
- pdqp, resblks, 1, 0);
+ error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
+ XFS_IEXT_DIR_MANIP_CNT(mp));
if (error)
goto out_trans_cancel;
/*
* Allocate an inode for the symlink.
*/
- error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
- prid, &ip);
+ error = xfs_dir_ialloc(mnt_userns, &tp, dp, S_IFLNK | (mode & ~S_IFMT),
+ 1, 0, prid, &ip);
if (error)
goto out_trans_cancel;
@@ -300,6 +300,7 @@ xfs_symlink(
}
ASSERT(pathlen == 0);
}
+ i_size_write(VFS_I(ip), ip->i_d.di_size);
/*
* Create the directory entry for the symlink.
@@ -342,7 +343,7 @@ out_release_inode:
xfs_finish_inode_setup(ip);
xfs_irele(ip);
}
-
+out_release_dquots:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
xfs_qm_dqrele(pdqp);
diff --git a/fs/xfs/xfs_symlink.h b/fs/xfs/xfs_symlink.h
index b1fa091427e6..2586b7e393f3 100644
--- a/fs/xfs/xfs_symlink.h
+++ b/fs/xfs/xfs_symlink.h
@@ -7,8 +7,9 @@
/* Kernel only symlink definitions */
-int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
- const char *target_path, umode_t mode, struct xfs_inode **ipp);
+int xfs_symlink(struct user_namespace *mnt_userns, struct xfs_inode *dp,
+ struct xfs_name *link_name, const char *target_path,
+ umode_t mode, struct xfs_inode **ipp);
int xfs_readlink_bmap_ilocked(struct xfs_inode *ip, char *link);
int xfs_readlink(struct xfs_inode *ip, char *link);
int xfs_inactive_symlink(struct xfs_inode *ip);
diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c
index fac9de7ee6d0..546a6cd96729 100644
--- a/fs/xfs/xfs_sysctl.c
+++ b/fs/xfs/xfs_sysctl.c
@@ -51,7 +51,7 @@ xfs_panic_mask_proc_handler(
#endif /* CONFIG_PROC_FS */
STATIC int
-xfs_deprecate_irix_sgid_inherit_proc_handler(
+xfs_deprecated_dointvec_minmax(
struct ctl_table *ctl,
int write,
void *buffer,
@@ -59,24 +59,8 @@ xfs_deprecate_irix_sgid_inherit_proc_handler(
loff_t *ppos)
{
if (write) {
- printk_once(KERN_WARNING
- "XFS: " "%s sysctl option is deprecated.\n",
- ctl->procname);
- }
- return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
-}
-
-STATIC int
-xfs_deprecate_irix_symlink_mode_proc_handler(
- struct ctl_table *ctl,
- int write,
- void *buffer,
- size_t *lenp,
- loff_t *ppos)
-{
- if (write) {
- printk_once(KERN_WARNING
- "XFS: " "%s sysctl option is deprecated.\n",
+ printk_ratelimited(KERN_WARNING
+ "XFS: %s sysctl option is deprecated.\n",
ctl->procname);
}
return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
@@ -88,7 +72,7 @@ static struct ctl_table xfs_table[] = {
.data = &xfs_params.sgid_inherit.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = xfs_deprecate_irix_sgid_inherit_proc_handler,
+ .proc_handler = xfs_deprecated_dointvec_minmax,
.extra1 = &xfs_params.sgid_inherit.min,
.extra2 = &xfs_params.sgid_inherit.max
},
@@ -97,7 +81,7 @@ static struct ctl_table xfs_table[] = {
.data = &xfs_params.symlink_mode.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = xfs_deprecate_irix_symlink_mode_proc_handler,
+ .proc_handler = xfs_deprecated_dointvec_minmax,
.extra1 = &xfs_params.symlink_mode.min,
.extra2 = &xfs_params.symlink_mode.max
},
@@ -194,21 +178,21 @@ static struct ctl_table xfs_table[] = {
},
{
.procname = "speculative_prealloc_lifetime",
- .data = &xfs_params.eofb_timer.val,
+ .data = &xfs_params.blockgc_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &xfs_params.eofb_timer.min,
- .extra2 = &xfs_params.eofb_timer.max,
+ .extra1 = &xfs_params.blockgc_timer.min,
+ .extra2 = &xfs_params.blockgc_timer.max,
},
{
.procname = "speculative_cow_prealloc_lifetime",
- .data = &xfs_params.cowb_timer.val,
+ .data = &xfs_params.blockgc_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &xfs_params.cowb_timer.min,
- .extra2 = &xfs_params.cowb_timer.max,
+ .proc_handler = xfs_deprecated_dointvec_minmax,
+ .extra1 = &xfs_params.blockgc_timer.min,
+ .extra2 = &xfs_params.blockgc_timer.max,
},
/* please keep this the last entry */
#ifdef CONFIG_PROC_FS
diff --git a/fs/xfs/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h
index 8abf4640f1d5..7692e76ead33 100644
--- a/fs/xfs/xfs_sysctl.h
+++ b/fs/xfs/xfs_sysctl.h
@@ -35,8 +35,7 @@ typedef struct xfs_param {
xfs_sysctl_val_t rotorstep; /* inode32 AG rotoring control knob */
xfs_sysctl_val_t inherit_nodfrg;/* Inherit the "nodefrag" inode flag. */
xfs_sysctl_val_t fstrm_timer; /* Filestream dir-AG assoc'n timeout. */
- xfs_sysctl_val_t eofb_timer; /* Interval between eofb scan wakeups */
- xfs_sysctl_val_t cowb_timer; /* Interval between cowb scan wakeups */
+ xfs_sysctl_val_t blockgc_timer; /* Interval between blockgc scans */
} xfs_param_t;
/*
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 120398a37c2a..9b8d703dc9fd 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -29,6 +29,7 @@
#include "xfs_filestream.h"
#include "xfs_fsmap.h"
#include "xfs_btree_staging.h"
+#include "xfs_icache.h"
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 5a263ae3d4f0..e74bbb648f83 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -37,6 +37,7 @@ struct xfs_trans_res;
struct xfs_inobt_rec_incore;
union xfs_btree_ptr;
struct xfs_dqtrx;
+struct xfs_eofblocks;
#define XFS_ATTR_FILTER_FLAGS \
{ XFS_ATTR_ROOT, "ROOT" }, \
@@ -154,10 +155,8 @@ DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_put);
DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
-DEFINE_PERAG_REF_EVENT(xfs_perag_set_eofblocks);
-DEFINE_PERAG_REF_EVENT(xfs_perag_clear_eofblocks);
-DEFINE_PERAG_REF_EVENT(xfs_perag_set_cowblocks);
-DEFINE_PERAG_REF_EVENT(xfs_perag_clear_cowblocks);
+DEFINE_PERAG_REF_EVENT(xfs_perag_set_blockgc);
+DEFINE_PERAG_REF_EVENT(xfs_perag_clear_blockgc);
DECLARE_EVENT_CLASS(xfs_ag_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),
@@ -358,7 +357,7 @@ DEFINE_BUF_EVENT(xfs_buf_get_uncached);
DEFINE_BUF_EVENT(xfs_buf_item_relse);
DEFINE_BUF_EVENT(xfs_buf_iodone_async);
DEFINE_BUF_EVENT(xfs_buf_error_relse);
-DEFINE_BUF_EVENT(xfs_buf_wait_buftarg);
+DEFINE_BUF_EVENT(xfs_buf_drain_buftarg);
DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
/* not really buffer traces, but the buf provides useful information */
@@ -1287,8 +1286,8 @@ TRACE_EVENT(xfs_log_assign_tail_lsn,
)
DECLARE_EVENT_CLASS(xfs_file_class,
- TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset),
- TP_ARGS(ip, count, offset),
+ TP_PROTO(struct kiocb *iocb, struct iov_iter *iter),
+ TP_ARGS(iocb, iter),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
@@ -1297,11 +1296,11 @@ DECLARE_EVENT_CLASS(xfs_file_class,
__field(size_t, count)
),
TP_fast_assign(
- __entry->dev = VFS_I(ip)->i_sb->s_dev;
- __entry->ino = ip->i_ino;
- __entry->size = ip->i_d.di_size;
- __entry->offset = offset;
- __entry->count = count;
+ __entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
+ __entry->ino = XFS_I(file_inode(iocb->ki_filp))->i_ino;
+ __entry->size = XFS_I(file_inode(iocb->ki_filp))->i_d.di_size;
+ __entry->offset = iocb->ki_pos;
+ __entry->count = iov_iter_count(iter);
),
TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx count 0x%zx",
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -1313,14 +1312,16 @@ DECLARE_EVENT_CLASS(xfs_file_class,
#define DEFINE_RW_EVENT(name) \
DEFINE_EVENT(xfs_file_class, name, \
- TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset), \
- TP_ARGS(ip, count, offset))
+ TP_PROTO(struct kiocb *iocb, struct iov_iter *iter), \
+ TP_ARGS(iocb, iter))
DEFINE_RW_EVENT(xfs_file_buffered_read);
DEFINE_RW_EVENT(xfs_file_direct_read);
DEFINE_RW_EVENT(xfs_file_dax_read);
DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_dax_write);
+DEFINE_RW_EVENT(xfs_reflink_bounce_dio_write);
+
DECLARE_EVENT_CLASS(xfs_imap_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
@@ -3294,8 +3295,6 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow);
-DEFINE_SIMPLE_IO_EVENT(xfs_reflink_bounce_dio_write);
-
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap);
@@ -3888,6 +3887,47 @@ DEFINE_EVENT(xfs_timestamp_range_class, name, \
DEFINE_TIMESTAMP_RANGE_EVENT(xfs_inode_timestamp_range);
DEFINE_TIMESTAMP_RANGE_EVENT(xfs_quota_expiry_range);
+DECLARE_EVENT_CLASS(xfs_eofblocks_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_eofblocks *eofb,
+ unsigned long caller_ip),
+ TP_ARGS(mp, eofb, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(__u32, flags)
+ __field(uint32_t, uid)
+ __field(uint32_t, gid)
+ __field(prid_t, prid)
+ __field(__u64, min_file_size)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->flags = eofb ? eofb->eof_flags : 0;
+ __entry->uid = eofb ? from_kuid(mp->m_super->s_user_ns,
+ eofb->eof_uid) : 0;
+ __entry->gid = eofb ? from_kgid(mp->m_super->s_user_ns,
+ eofb->eof_gid) : 0;
+ __entry->prid = eofb ? eofb->eof_prid : 0;
+ __entry->min_file_size = eofb ? eofb->eof_min_file_size : 0;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu caller %pS",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->flags,
+ __entry->uid,
+ __entry->gid,
+ __entry->prid,
+ __entry->min_file_size,
+ (char *)__entry->caller_ip)
+);
+#define DEFINE_EOFBLOCKS_EVENT(name) \
+DEFINE_EVENT(xfs_eofblocks_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_eofblocks *eofb, \
+ unsigned long caller_ip), \
+ TP_ARGS(mp, eofb, caller_ip))
+DEFINE_EOFBLOCKS_EVENT(xfs_ioc_free_eofblocks);
+DEFINE_EOFBLOCKS_EVENT(xfs_blockgc_free_space);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index e72730f85af1..b22a09e9daee 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -20,6 +20,10 @@
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
+#include "xfs_inode.h"
+#include "xfs_dquot_item.h"
+#include "xfs_dquot.h"
+#include "xfs_icache.h"
kmem_zone_t *xfs_trans_zone;
@@ -68,6 +72,7 @@ xfs_trans_free(
xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
trace_xfs_trans_free(tp, _RET_IP_);
+ xfs_trans_clear_context(tp);
if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
sb_end_intwrite(tp->t_mountp->m_super);
xfs_trans_free_dqinfo(tp);
@@ -119,7 +124,8 @@ xfs_trans_dup(
ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
tp->t_rtx_res = tp->t_rtx_res_used;
- ntp->t_pflags = tp->t_pflags;
+
+ xfs_trans_switch_context(tp, ntp);
/* move deferred ops over to the new tp */
xfs_defer_move(ntp, tp);
@@ -153,9 +159,6 @@ xfs_trans_reserve(
int error = 0;
bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
- /* Mark this thread as being in a transaction */
- current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
-
/*
* Attempt to reserve the needed disk blocks by decrementing
* the number needed from the number available. This will
@@ -163,10 +166,8 @@ xfs_trans_reserve(
*/
if (blocks > 0) {
error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
- if (error != 0) {
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
+ if (error != 0)
return -ENOSPC;
- }
tp->t_blk_res += blocks;
}
@@ -240,9 +241,6 @@ undo_blocks:
xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
tp->t_blk_res = 0;
}
-
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
-
return error;
}
@@ -256,6 +254,7 @@ xfs_trans_alloc(
struct xfs_trans **tpp)
{
struct xfs_trans *tp;
+ bool want_retry = true;
int error;
/*
@@ -263,9 +262,11 @@ xfs_trans_alloc(
* GFP_NOFS allocation context so that we avoid lockdep false positives
* by doing GFP_KERNEL allocations inside sb_start_intwrite().
*/
+retry:
tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);
if (!(flags & XFS_TRANS_NO_WRITECOUNT))
sb_start_intwrite(mp->m_super);
+ xfs_trans_set_context(tp);
/*
* Zero-reservation ("empty") transactions can't modify anything, so
@@ -285,6 +286,22 @@ xfs_trans_alloc(
tp->t_firstblock = NULLFSBLOCK;
error = xfs_trans_reserve(tp, resp, blocks, rtextents);
+ if (error == -ENOSPC && want_retry) {
+ xfs_trans_cancel(tp);
+
+ /*
+ * We weren't able to reserve enough space for the transaction.
+ * Flush the other speculative space allocations to free space.
+ * Do not perform a synchronous scan because callers can hold
+ * other locks.
+ */
+ error = xfs_blockgc_free_space(mp, NULL);
+ if (error)
+ return error;
+
+ want_retry = false;
+ goto retry;
+ }
if (error) {
xfs_trans_cancel(tp);
return error;
@@ -878,7 +895,6 @@ __xfs_trans_commit(
xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
xfs_trans_free(tp);
/*
@@ -910,7 +926,6 @@ out_unreserve:
xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
tp->t_ticket = NULL;
}
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
xfs_trans_free_items(tp, !!error);
xfs_trans_free(tp);
@@ -970,9 +985,6 @@ xfs_trans_cancel(
tp->t_ticket = NULL;
}
- /* mark this thread as no longer being in a transaction */
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
-
xfs_trans_free_items(tp, dirty);
xfs_trans_free(tp);
}
@@ -1024,3 +1036,183 @@ xfs_trans_roll(
tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
+
+/*
+ * Allocate a transaction, lock and join the inode to it, and reserve quota.
+ *
+ * The caller must ensure that the on-disk dquots attached to this inode have
+ * already been allocated and initialized. The caller is responsible for
+ * releasing ILOCK_EXCL if a new transaction is returned.
+ */
+int
+xfs_trans_alloc_inode(
+ struct xfs_inode *ip,
+ struct xfs_trans_res *resv,
+ unsigned int dblocks,
+ unsigned int rblocks,
+ bool force,
+ struct xfs_trans **tpp)
+{
+ struct xfs_trans *tp;
+ struct xfs_mount *mp = ip->i_mount;
+ bool retried = false;
+ int error;
+
+retry:
+ error = xfs_trans_alloc(mp, resv, dblocks,
+ rblocks / mp->m_sb.sb_rextsize,
+ force ? XFS_TRANS_RESERVE : 0, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
+ error = xfs_qm_dqattach_locked(ip, false);
+ if (error) {
+ /* Caller should have allocated the dquots! */
+ ASSERT(error != -ENOENT);
+ goto out_cancel;
+ }
+
+ error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
+ if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
+ xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_blockgc_free_quota(ip, 0);
+ retried = true;
+ goto retry;
+ }
+ if (error)
+ goto out_cancel;
+
+ *tpp = tp;
+ return 0;
+
+out_cancel:
+ xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+}
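
The reflink COW allocation hunk earlier in this diff already shows the intended calling convention; a minimal caller sketch (resblks, mp and ip come from the caller's context, and error handling is abbreviated):

        struct xfs_trans        *tp;
        int                     error;

        /* allocates the transaction, takes ILOCK_EXCL and joins the inode */
        error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
                        false, &tp);
        if (error)
                return error;

        /* ... make the metadata changes under the transaction ... */

        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
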
+
+/*
+ * Allocate a transaction in preparation for inode creation by reserving quota
+ * against the given dquots. Callers are not required to hold any inode locks.
+ */
+int
+xfs_trans_alloc_icreate(
+ struct xfs_mount *mp,
+ struct xfs_trans_res *resv,
+ struct xfs_dquot *udqp,
+ struct xfs_dquot *gdqp,
+ struct xfs_dquot *pdqp,
+ unsigned int dblocks,
+ struct xfs_trans **tpp)
+{
+ struct xfs_trans *tp;
+ bool retried = false;
+ int error;
+
+retry:
+ error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
+ if (error)
+ return error;
+
+ error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
+ if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
+ xfs_trans_cancel(tp);
+ xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
+ retried = true;
+ goto retry;
+ }
+ if (error) {
+ xfs_trans_cancel(tp);
+ return error;
+ }
+
+ *tpp = tp;
+ return 0;
+}
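
The xfs_symlink() conversion earlier in this diff is the clearest user; condensed, the expected sequence is (udqp/gdqp/pdqp come from xfs_qm_vop_dqalloc() and resblks from the usual space reservation macro):

        error = xfs_trans_alloc_icreate(mp, &M_RES(mp)->tr_symlink, udqp, gdqp,
                        pdqp, resblks, &tp);
        if (error)
                goto out_release_dquots;

        /* ... allocate the new inode and wire up the directory entry ... */
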
+
+/*
+ * Allocate a transaction, lock and join the inode to it, and reserve quota
+ * in preparation for inode attribute changes that include uid, gid, or prid
+ * changes.
+ *
+ * The caller must ensure that the on-disk dquots attached to this inode have
+ * already been allocated and initialized. The ILOCK will be dropped when the
+ * transaction is committed or cancelled.
+ */
+int
+xfs_trans_alloc_ichange(
+ struct xfs_inode *ip,
+ struct xfs_dquot *new_udqp,
+ struct xfs_dquot *new_gdqp,
+ struct xfs_dquot *new_pdqp,
+ bool force,
+ struct xfs_trans **tpp)
+{
+ struct xfs_trans *tp;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dquot *udqp;
+ struct xfs_dquot *gdqp;
+ struct xfs_dquot *pdqp;
+ bool retried = false;
+ int error;
+
+retry:
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ error = xfs_qm_dqattach_locked(ip, false);
+ if (error) {
+ /* Caller should have allocated the dquots! */
+ ASSERT(error != -ENOENT);
+ goto out_cancel;
+ }
+
+ /*
+ * For each quota type, skip quota reservations if the inode's dquots
+ * now match the ones that came from the caller, or the caller didn't
+ * pass one in. The inode's dquots can change if we drop the ILOCK to
+ * perform a blockgc scan, so we must preserve the caller's arguments.
+ */
+ udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
+ gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
+ pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
+ if (udqp || gdqp || pdqp) {
+ unsigned int qflags = XFS_QMOPT_RES_REGBLKS;
+
+ if (force)
+ qflags |= XFS_QMOPT_FORCE_RES;
+
+ /*
+ * Reserve enough quota to handle blocks on disk and reserved
+ * for a delayed allocation. We'll actually transfer the
+ * delalloc reservation between dquots at chown time, even
+ * though that part is only semi-transactional.
+ */
+ error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
+ pdqp, ip->i_d.di_nblocks + ip->i_delayed_blks,
+ 1, qflags);
+ if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
+ xfs_trans_cancel(tp);
+ xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
+ retried = true;
+ goto retry;
+ }
+ if (error)
+ goto out_cancel;
+ }
+
+ *tpp = tp;
+ return 0;
+
+out_cancel:
+ xfs_trans_cancel(tp);
+ return error;
+}
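
None of the hunks shown here convert a caller of this helper, so the following is a purely hypothetical sketch of how a setattr-style chown path might use it (the surrounding names and flags are illustrative, not part of this series):

        /* on-disk dquots for the new owner must already exist */
        error = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid,
                        XFS_QMOPT_QUOTALL, &udqp, &gdqp, NULL);
        if (error)
                return error;

        error = xfs_trans_alloc_ichange(ip, udqp, gdqp, NULL,
                        capable(CAP_FOWNER), &tp);
        if (error)
                goto out_dqrele;

        /* ... switch i_uid/i_gid, call xfs_qm_vop_chown(), commit ... */
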
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 084658946cc8..9dd745cf77c9 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -268,4 +268,47 @@ xfs_trans_item_relog(
return lip->li_ops->iop_relog(lip, tp);
}
+struct xfs_dquot;
+
+int xfs_trans_alloc_inode(struct xfs_inode *ip, struct xfs_trans_res *resv,
+ unsigned int dblocks, unsigned int rblocks, bool force,
+ struct xfs_trans **tpp);
+int xfs_trans_alloc_icreate(struct xfs_mount *mp, struct xfs_trans_res *resv,
+ struct xfs_dquot *udqp, struct xfs_dquot *gdqp,
+ struct xfs_dquot *pdqp, unsigned int dblocks,
+ struct xfs_trans **tpp);
+int xfs_trans_alloc_ichange(struct xfs_inode *ip, struct xfs_dquot *udqp,
+ struct xfs_dquot *gdqp, struct xfs_dquot *pdqp, bool force,
+ struct xfs_trans **tpp);
+
+static inline void
+xfs_trans_set_context(
+ struct xfs_trans *tp)
+{
+ ASSERT(current->journal_info == NULL);
+ tp->t_pflags = memalloc_nofs_save();
+ current->journal_info = tp;
+}
+
+static inline void
+xfs_trans_clear_context(
+ struct xfs_trans *tp)
+{
+ if (current->journal_info == tp) {
+ memalloc_nofs_restore(tp->t_pflags);
+ current->journal_info = NULL;
+ }
+}
+
+static inline void
+xfs_trans_switch_context(
+ struct xfs_trans *old_tp,
+ struct xfs_trans *new_tp)
+{
+ ASSERT(current->journal_info == old_tp);
+ new_tp->t_pflags = old_tp->t_pflags;
+ old_tp->t_pflags = 0;
+ current->journal_info = new_tp;
+}
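
Taken together with the xfs_trans.c hunks above, these helpers pair up as follows: xfs_trans_alloc() calls xfs_trans_set_context() right after sb_start_intwrite(), xfs_trans_dup() hands the saved NOFS state to the duplicate via xfs_trans_switch_context(), and xfs_trans_free() drops it with xfs_trans_clear_context(). Storing the transaction in current->journal_info also gives XFS a cheap per-task marker that the task is inside a transaction, which is what the ASSERTs above check.
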
+
#endif /* __XFS_TRANS_H__ */
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 28b8ac701919..48e09ea30ee5 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -16,6 +16,7 @@
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
+#include "xfs_error.h"
STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
@@ -691,9 +692,11 @@ xfs_trans_dqresv(
nblks);
xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos);
}
- ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
- ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
- ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
+
+ if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
+ XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
+ XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
+ goto error_corrupt;
xfs_dqunlock(dqp);
return 0;
@@ -703,6 +706,10 @@ error_return:
if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
return -ENOSPC;
return -EDQUOT;
+error_corrupt:
+ xfs_dqunlock(dqp);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return -EFSCORRUPTED;
}
@@ -780,28 +787,60 @@ int
xfs_trans_reserve_quota_nblks(
struct xfs_trans *tp,
struct xfs_inode *ip,
- int64_t nblks,
- long ninos,
- uint flags)
+ int64_t dblocks,
+ int64_t rblocks,
+ bool force)
{
struct xfs_mount *mp = ip->i_mount;
+ unsigned int qflags = 0;
+ int error;
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;
ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
-
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT((flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_RTBLKS ||
- (flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_BLKS);
- /*
- * Reserve nblks against these dquots, with trans as the mediator.
- */
- return xfs_trans_reserve_quota_bydquots(tp, mp,
- ip->i_udquot, ip->i_gdquot,
- ip->i_pdquot,
- nblks, ninos, flags);
+ if (force)
+ qflags |= XFS_QMOPT_FORCE_RES;
+
+ /* Reserve data device quota against the inode's dquots. */
+ error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
+ ip->i_gdquot, ip->i_pdquot, dblocks, 0,
+ XFS_QMOPT_RES_REGBLKS | qflags);
+ if (error)
+ return error;
+
+ /* Do the same but for realtime blocks. */
+ error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
+ ip->i_gdquot, ip->i_pdquot, rblocks, 0,
+ XFS_QMOPT_RES_RTBLKS | qflags);
+ if (error) {
+ xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
+ ip->i_gdquot, ip->i_pdquot, -dblocks, 0,
+ XFS_QMOPT_RES_REGBLKS);
+ return error;
+ }
+
+ return 0;
+}
+
+/* Change the quota reservations for an inode creation activity. */
+int
+xfs_trans_reserve_quota_icreate(
+ struct xfs_trans *tp,
+ struct xfs_dquot *udqp,
+ struct xfs_dquot *gdqp,
+ struct xfs_dquot *pdqp,
+ int64_t dblocks)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+
+ if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
+ return 0;
+
+ return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
+ dblocks, 1, XFS_QMOPT_RES_REGBLKS);
}
/*
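The reworked xfs_trans_reserve_quota_nblks() above takes explicit data and realtime block counts plus a force flag instead of raw XFS_QMOPT flags, and unwinds the data-device reservation itself if the realtime reservation fails. A hedged sketch of a caller (hypothetical name and block counts; the inode must hold XFS_ILOCK_EXCL, per the ASSERT above):

static int
example_reserve_quota(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	int64_t			dblocks = 16;	/* data device blocks */
	int64_t			rblocks = 0;	/* no realtime blocks here */

	/*
	 * force == false: return -EDQUOT/-ENOSPC rather than pushing the
	 * reservation past the quota limits.
	 */
	return xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, false);
}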
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index bca48b308c02..12be32f66dc1 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -38,9 +38,10 @@ xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
}
static int
-xfs_xattr_set(const struct xattr_handler *handler, struct dentry *unused,
- struct inode *inode, const char *name, const void *value,
- size_t size, int flags)
+xfs_xattr_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns, struct dentry *unused,
+ struct inode *inode, const char *name, const void *value,
+ size_t size, int flags)
{
struct xfs_da_args args = {
.dp = XFS_I(inode),
diff --git a/fs/zonefs/Makefile b/fs/zonefs/Makefile
index 75a380aa1ae1..33c1a4f1132e 100644
--- a/fs/zonefs/Makefile
+++ b/fs/zonefs/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+ccflags-y += -I$(src)
+
obj-$(CONFIG_ZONEFS_FS) += zonefs.o
zonefs-y := super.o
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index bec47f2d074b..049e36c69ed7 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -24,6 +24,9 @@
#include "zonefs.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
static inline int zonefs_zone_mgmt(struct inode *inode,
enum req_opf op)
{
@@ -32,6 +35,7 @@ static inline int zonefs_zone_mgmt(struct inode *inode,
lockdep_assert_held(&zi->i_truncate_mutex);
+ trace_zonefs_zone_mgmt(inode, op);
ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
if (ret) {
@@ -100,6 +104,8 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->bdev = inode->i_sb->s_bdev;
iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+ trace_zonefs_iomap_begin(inode, iomap);
+
return 0;
}
@@ -159,6 +165,21 @@ static int zonefs_writepages(struct address_space *mapping,
return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
}
+static int zonefs_swap_activate(struct swap_info_struct *sis,
+ struct file *swap_file, sector_t *span)
+{
+ struct inode *inode = file_inode(swap_file);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+ if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+ zonefs_err(inode->i_sb,
+ "swap file: not a conventional zone file\n");
+ return -EINVAL;
+ }
+
+ return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
+}
+
static const struct address_space_operations zonefs_file_aops = {
.readpage = zonefs_readpage,
.readahead = zonefs_readahead,
@@ -171,6 +192,7 @@ static const struct address_space_operations zonefs_file_aops = {
.is_partially_uptodate = iomap_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
.direct_IO = noop_direct_IO,
+ .swap_activate = zonefs_swap_activate,
};
static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
@@ -250,6 +272,9 @@ static loff_t zonefs_check_zone_condition(struct inode *inode,
}
inode->i_mode &= ~0222;
return i_size_read(inode);
+ case BLK_ZONE_COND_FULL:
+ /* The write pointer of full zones is invalid. */
+ return zi->i_max_size;
default:
if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
return zi->i_max_size;
@@ -480,7 +505,8 @@ unlock:
return ret;
}
-static int zonefs_inode_setattr(struct dentry *dentry, struct iattr *iattr)
+static int zonefs_inode_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = d_inode(dentry);
int ret;
@@ -488,7 +514,7 @@ static int zonefs_inode_setattr(struct dentry *dentry, struct iattr *iattr)
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
- ret = setattr_prepare(dentry, iattr);
+ ret = setattr_prepare(&init_user_ns, dentry, iattr);
if (ret)
return ret;
@@ -516,7 +542,7 @@ static int zonefs_inode_setattr(struct dentry *dentry, struct iattr *iattr)
return ret;
}
- setattr_copy(inode, iattr);
+ setattr_copy(&init_user_ns, inode, iattr);
return 0;
}
@@ -541,7 +567,7 @@ static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
ret = file_write_and_wait_range(file, start, end);
if (!ret)
- ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev);
if (ret)
zonefs_io_error(inode, true);
@@ -674,11 +700,11 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
iov_iter_truncate(from, max);
- nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
+ nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
if (!nr_pages)
return 0;
- bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set);
+ bio = bio_alloc(GFP_NOFS, nr_pages);
if (!bio)
return -ENOMEM;
@@ -703,6 +729,7 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
ret = submit_bio_wait(bio);
zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+ trace_zonefs_file_dio_append(inode, size, ret);
out_release:
bio_release_pages(bio, false);
@@ -717,6 +744,68 @@ out_release:
}
/*
+ * Do not exceed the LFS limits nor the file zone size. If pos is under the
+ * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
+ */
+static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
+ loff_t count)
+{
+ struct inode *inode = file_inode(file);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ loff_t limit = rlimit(RLIMIT_FSIZE);
+ loff_t max_size = zi->i_max_size;
+
+ if (limit != RLIM_INFINITY) {
+ if (pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
+ }
+ count = min(count, limit - pos);
+ }
+
+ if (!(file->f_flags & O_LARGEFILE))
+ max_size = min_t(loff_t, MAX_NON_LFS, max_size);
+
+ if (unlikely(pos >= max_size))
+ return -EFBIG;
+
+ return min(count, max_size - pos);
+}
+
+static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ loff_t count;
+
+ if (IS_SWAPFILE(inode))
+ return -ETXTBSY;
+
+ if (!iov_iter_count(from))
+ return 0;
+
+ if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+ return -EINVAL;
+
+ if (iocb->ki_flags & IOCB_APPEND) {
+ if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+ return -EINVAL;
+ mutex_lock(&zi->i_truncate_mutex);
+ iocb->ki_pos = zi->i_wpoffset;
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+ count = zonefs_write_check_limits(file, iocb->ki_pos,
+ iov_iter_count(from));
+ if (count < 0)
+ return count;
+
+ iov_iter_truncate(from, count);
+ return iov_iter_count(from);
+}
+
+/*
* Handle direct writes. For sequential zone files, this is the only possible
* write path. For these files, check that the user is issuing writes
* sequentially from the end of the file. This code assumes that the block layer
@@ -733,8 +822,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
struct super_block *sb = inode->i_sb;
bool sync = is_sync_kiocb(iocb);
bool append = false;
- size_t count;
- ssize_t ret;
+ ssize_t ret, count;
/*
* For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
@@ -752,12 +840,11 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
inode_lock(inode);
}
- ret = generic_write_checks(iocb, from);
- if (ret <= 0)
+ count = zonefs_write_checks(iocb, from);
+ if (count <= 0) {
+ ret = count;
goto inode_unlock;
-
- iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
- count = iov_iter_count(from);
+ }
if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
ret = -EINVAL;
@@ -780,7 +867,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
ret = zonefs_file_dio_append(iocb, from);
else
ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
- &zonefs_write_dio_ops, sync);
+ &zonefs_write_dio_ops, 0);
if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
(ret > 0 || ret == -EIOCBQUEUED)) {
if (ret > 0)
@@ -817,12 +904,10 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
inode_lock(inode);
}
- ret = generic_write_checks(iocb, from);
+ ret = zonefs_write_checks(iocb, from);
if (ret <= 0)
goto inode_unlock;
- iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
-
ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
if (ret > 0)
iocb->ki_pos += ret;
@@ -917,7 +1002,7 @@ static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
}
file_accessed(iocb->ki_filp);
ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
- &zonefs_read_dio_ops, is_sync_kiocb(iocb));
+ &zonefs_read_dio_ops, 0);
} else {
ret = generic_file_read_iter(iocb, to);
if (ret == -EIO)
@@ -955,9 +1040,7 @@ static int zonefs_open_zone(struct inode *inode)
mutex_lock(&zi->i_truncate_mutex);
- zi->i_wr_refcnt++;
- if (zi->i_wr_refcnt == 1) {
-
+ if (!zi->i_wr_refcnt) {
if (atomic_inc_return(&sbi->s_open_zones) > sbi->s_max_open_zones) {
atomic_dec(&sbi->s_open_zones);
ret = -EBUSY;
@@ -967,7 +1050,6 @@ static int zonefs_open_zone(struct inode *inode)
if (i_size_read(inode) < zi->i_max_size) {
ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
if (ret) {
- zi->i_wr_refcnt--;
atomic_dec(&sbi->s_open_zones);
goto unlock;
}
@@ -975,6 +1057,8 @@ static int zonefs_open_zone(struct inode *inode)
}
}
+ zi->i_wr_refcnt++;
+
unlock:
mutex_unlock(&zi->i_truncate_mutex);
@@ -1223,7 +1307,7 @@ static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
struct super_block *sb = parent->i_sb;
inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk) + type + 1;
- inode_init_owner(inode, parent, S_IFDIR | 0555);
+ inode_init_owner(&init_user_ns, inode, parent, S_IFDIR | 0555);
inode->i_op = &zonefs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
set_nlink(inode, 2);
@@ -1581,12 +1665,11 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_time_gran = 1;
/*
- * The block size is set to the device physical sector size to ensure
- * that write operations on 512e devices (512B logical block and 4KB
- * physical block) are always aligned to the device physical blocks,
- * as mandated by the ZBC/ZAC specifications.
+ * The block size is set to the device zone write granularity to ensure
+ * that write operations are always aligned according to the device
+ * interface constraints.
*/
- sb_set_blocksize(sb, bdev_physical_block_size(sb->s_bdev));
+ sb_set_blocksize(sb, bdev_zone_write_granularity(sb->s_bdev));
sbi->s_zone_sectors_shift = ilog2(bdev_zone_sectors(sb->s_bdev));
sbi->s_uid = GLOBAL_ROOT_UID;
sbi->s_gid = GLOBAL_ROOT_GID;
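To make the clamping in zonefs_write_check_limits() above concrete, an illustrative case (the numbers are hypothetical): with RLIMIT_FSIZE set to 1 MiB, a 512 KiB write at pos = 768 KiB is shortened to 256 KiB, while the same write at pos = 1 MiB sends SIGXFSZ and fails with -EFBIG; independently of the rlimit, a file opened without O_LARGEFILE has max_size capped at MAX_NON_LFS, and the zone size (zi->i_max_size) bounds every write.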
diff --git a/fs/zonefs/trace.h b/fs/zonefs/trace.h
new file mode 100644
index 000000000000..f369d7d50303
--- /dev/null
+++ b/fs/zonefs/trace.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zonefs filesystem driver tracepoints.
+ *
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM zonefs
+
+#if !defined(_TRACE_ZONEFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ZONEFS_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include <linux/blkdev.h>
+
+#include "zonefs.h"
+
+#define show_dev(dev) MAJOR(dev), MINOR(dev)
+
+TRACE_EVENT(zonefs_zone_mgmt,
+ TP_PROTO(struct inode *inode, enum req_opf op),
+ TP_ARGS(inode, op),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(int, op)
+ __field(sector_t, sector)
+ __field(sector_t, nr_sectors)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->op = op;
+ __entry->sector = ZONEFS_I(inode)->i_zsector;
+ __entry->nr_sectors =
+ ZONEFS_I(inode)->i_zone_size >> SECTOR_SHIFT;
+ ),
+ TP_printk("bdev=(%d,%d), ino=%lu op=%s, sector=%llu, nr_sectors=%llu",
+ show_dev(__entry->dev), (unsigned long)__entry->ino,
+ blk_op_str(__entry->op), __entry->sector,
+ __entry->nr_sectors
+ )
+);
+
+TRACE_EVENT(zonefs_file_dio_append,
+ TP_PROTO(struct inode *inode, ssize_t size, ssize_t ret),
+ TP_ARGS(inode, size, ret),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(sector_t, sector)
+ __field(ssize_t, size)
+ __field(loff_t, wpoffset)
+ __field(ssize_t, ret)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->sector = ZONEFS_I(inode)->i_zsector;
+ __entry->size = size;
+ __entry->wpoffset = ZONEFS_I(inode)->i_wpoffset;
+ __entry->ret = ret;
+ ),
+ TP_printk("bdev=(%d, %d), ino=%lu, sector=%llu, size=%zu, wpoffset=%llu, ret=%zu",
+ show_dev(__entry->dev), (unsigned long)__entry->ino,
+ __entry->sector, __entry->size, __entry->wpoffset,
+ __entry->ret
+ )
+);
+
+TRACE_EVENT(zonefs_iomap_begin,
+ TP_PROTO(struct inode *inode, struct iomap *iomap),
+ TP_ARGS(inode, iomap),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(u64, addr)
+ __field(loff_t, offset)
+ __field(u64, length)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->addr = iomap->addr;
+ __entry->offset = iomap->offset;
+ __entry->length = iomap->length;
+ ),
+ TP_printk("bdev=(%d,%d), ino=%lu, addr=%llu, offset=%llu, length=%llu",
+ show_dev(__entry->dev), (unsigned long)__entry->ino,
+ __entry->addr, __entry->offset, __entry->length
+ )
+);
+
+#endif /* _TRACE_ZONEFS_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
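On a kernel built with tracepoints enabled, the three events defined here (zonefs_zone_mgmt, zonefs_file_dio_append and zonefs_iomap_begin) should appear under /sys/kernel/tracing/events/zonefs/ once zonefs is loaded, and can be turned on through the standard tracefs enable files; nothing zonefs-specific is required beyond the CREATE_TRACE_POINTS inclusion added to super.c above.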