Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig | 2
-rw-r--r--  fs/Makefile | 4
-rw-r--r--  fs/adfs/adfs.h | 32
-rw-r--r--  fs/adfs/dir.c | 314
-rw-r--r--  fs/adfs/dir_f.c | 302
-rw-r--r--  fs/adfs/dir_f.h | 52
-rw-r--r--  fs/adfs/dir_fplus.c | 346
-rw-r--r--  fs/adfs/dir_fplus.h | 6
-rw-r--r--  fs/adfs/inode.c | 64
-rw-r--r--  fs/adfs/map.c | 247
-rw-r--r--  fs/adfs/super.c | 267
-rw-r--r--  fs/afs/cell.c | 11
-rw-r--r--  fs/afs/super.c | 32
-rw-r--r--  fs/aio.c | 20
-rw-r--r--  fs/attr.c | 23
-rw-r--r--  fs/binfmt_elf.c | 144
-rw-r--r--  fs/btrfs/compression.c | 2
-rw-r--r--  fs/btrfs/dev-replace.c | 5
-rw-r--r--  fs/btrfs/ioctl.c | 3
-rw-r--r--  fs/btrfs/scrub.c | 33
-rw-r--r--  fs/btrfs/volumes.h | 2
-rw-r--r--  fs/btrfs/zlib.c | 135
-rw-r--r--  fs/buffer.c | 2
-rw-r--r--  fs/cachefiles/rdwr.c | 27
-rw-r--r--  fs/ceph/Makefile | 2
-rw-r--r--  fs/ceph/acl.c | 4
-rw-r--r--  fs/ceph/cache.c | 4
-rw-r--r--  fs/ceph/caps.c | 3
-rw-r--r--  fs/ceph/debugfs.c | 2
-rw-r--r--  fs/ceph/dir.c | 4
-rw-r--r--  fs/ceph/file.c | 28
-rw-r--r--  fs/ceph/inode.c | 47
-rw-r--r--  fs/ceph/mds_client.c | 183
-rw-r--r--  fs/ceph/mds_client.h | 39
-rw-r--r--  fs/ceph/mdsmap.c | 91
-rw-r--r--  fs/ceph/super.c | 111
-rw-r--r--  fs/ceph/super.h | 4
-rw-r--r--  fs/ceph/util.c | 100
-rw-r--r--  fs/ceph/xattr.c | 7
-rw-r--r--  fs/cifs/cifs_debug.c | 108
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 97
-rw-r--r--  fs/cifs/cifs_ioctl.h | 6
-rw-r--r--  fs/cifs/cifsacl.c | 38
-rw-r--r--  fs/cifs/cifsfs.c | 8
-rw-r--r--  fs/cifs/cifsfs.h | 5
-rw-r--r--  fs/cifs/cifsglob.h | 9
-rw-r--r--  fs/cifs/cifsproto.h | 12
-rw-r--r--  fs/cifs/cifssmb.c | 6
-rw-r--r--  fs/cifs/connect.c | 10
-rw-r--r--  fs/cifs/dfs_cache.c | 1125
-rw-r--r--  fs/cifs/dfs_cache.h | 2
-rw-r--r--  fs/cifs/dir.c | 5
-rw-r--r--  fs/cifs/file.c | 29
-rw-r--r--  fs/cifs/inode.c | 27
-rw-r--r--  fs/cifs/ioctl.c | 18
-rw-r--r--  fs/cifs/link.c | 18
-rw-r--r--  fs/cifs/readdir.c | 3
-rw-r--r--  fs/cifs/sess.c | 2
-rw-r--r--  fs/cifs/smb1ops.c | 19
-rw-r--r--  fs/cifs/smb2inode.c | 9
-rw-r--r--  fs/cifs/smb2misc.c | 2
-rw-r--r--  fs/cifs/smb2ops.c | 352
-rw-r--r--  fs/cifs/smb2pdu.c | 220
-rw-r--r--  fs/cifs/smb2pdu.h | 18
-rw-r--r--  fs/cifs/smb2proto.h | 7
-rw-r--r--  fs/cifs/smb2transport.c | 7
-rw-r--r--  fs/cifs/trace.h | 27
-rw-r--r--  fs/cifs/transport.c | 3
-rw-r--r--  fs/cifs/xattr.c | 128
-rw-r--r--  fs/compat_ioctl.c | 261
-rw-r--r--  fs/configfs/inode.c | 9
-rw-r--r--  fs/coredump.c | 4
-rw-r--r--  fs/cramfs/inode.c | 14
-rw-r--r--  fs/crypto/Kconfig | 22
-rw-r--r--  fs/crypto/bio.c | 114
-rw-r--r--  fs/crypto/crypto.c | 57
-rw-r--r--  fs/crypto/fname.c | 316
-rw-r--r--  fs/crypto/fscrypt_private.h | 58
-rw-r--r--  fs/crypto/hkdf.c | 2
-rw-r--r--  fs/crypto/hooks.c | 47
-rw-r--r--  fs/crypto/keyring.c | 147
-rw-r--r--  fs/crypto/keysetup.c | 102
-rw-r--r--  fs/crypto/keysetup_v1.c | 19
-rw-r--r--  fs/crypto/policy.c | 170
-rw-r--r--  fs/dax.c | 11
-rw-r--r--  fs/debugfs/file.c | 38
-rw-r--r--  fs/debugfs/inode.c | 130
-rw-r--r--  fs/dlm/lowcomms.c | 6
-rw-r--r--  fs/ecryptfs/crypto.c | 2
-rw-r--r--  fs/ecryptfs/keystore.c | 4
-rw-r--r--  fs/ecryptfs/mmap.c | 16
-rw-r--r--  fs/erofs/decompressor.c | 22
-rw-r--r--  fs/erofs/internal.h | 4
-rw-r--r--  fs/erofs/utils.c | 15
-rw-r--r--  fs/erofs/xattr.h | 17
-rw-r--r--  fs/erofs/zdata.c | 123
-rw-r--r--  fs/eventfd.c | 15
-rw-r--r--  fs/eventpoll.c | 87
-rw-r--r--  fs/exec.c | 6
-rw-r--r--  fs/ext2/inode.c | 5
-rw-r--r--  fs/ext2/super.c | 7
-rw-r--r--  fs/ext4/Kconfig | 9
-rw-r--r--  fs/ext4/Makefile | 3
-rw-r--r--  fs/ext4/balloc.c | 5
-rw-r--r--  fs/ext4/block_validity.c | 1
-rw-r--r--  fs/ext4/dir.c | 26
-rw-r--r--  fs/ext4/ext4.h | 86
-rw-r--r--  fs/ext4/ext4_extents.h | 5
-rw-r--r--  fs/ext4/ext4_jbd2.c | 25
-rw-r--r--  fs/ext4/ext4_jbd2.h | 22
-rw-r--r--  fs/ext4/extents.c | 205
-rw-r--r--  fs/ext4/extents_status.h | 6
-rw-r--r--  fs/ext4/file.c | 203
-rw-r--r--  fs/ext4/ialloc.c | 6
-rw-r--r--  fs/ext4/indirect.c | 26
-rw-r--r--  fs/ext4/inline.c | 4
-rw-r--r--  fs/ext4/inode-test.c | 4
-rw-r--r--  fs/ext4/inode.c | 67
-rw-r--r--  fs/ext4/ioctl.c | 2
-rw-r--r--  fs/ext4/mballoc.c | 4
-rw-r--r--  fs/ext4/mmp.c | 18
-rw-r--r--  fs/ext4/namei.c | 27
-rw-r--r--  fs/ext4/page-io.c | 19
-rw-r--r--  fs/ext4/readpage.c | 42
-rw-r--r--  fs/ext4/resize.c | 10
-rw-r--r--  fs/ext4/super.c | 150
-rw-r--r--  fs/ext4/sysfs.c | 88
-rw-r--r--  fs/ext4/verity.c | 47
-rw-r--r--  fs/ext4/xattr.c | 6
-rw-r--r--  fs/f2fs/Kconfig | 28
-rw-r--r--  fs/f2fs/Makefile | 1
-rw-r--r--  fs/f2fs/checkpoint.c | 6
-rw-r--r--  fs/f2fs/compress.c | 1176
-rw-r--r--  fs/f2fs/data.c | 752
-rw-r--r--  fs/f2fs/debug.c | 88
-rw-r--r--  fs/f2fs/dir.c | 27
-rw-r--r--  fs/f2fs/f2fs.h | 329
-rw-r--r--  fs/f2fs/file.c | 269
-rw-r--r--  fs/f2fs/gc.c | 18
-rw-r--r--  fs/f2fs/inline.c | 44
-rw-r--r--  fs/f2fs/inode.c | 41
-rw-r--r--  fs/f2fs/namei.c | 120
-rw-r--r--  fs/f2fs/node.c | 2
-rw-r--r--  fs/f2fs/recovery.c | 20
-rw-r--r--  fs/f2fs/segment.c | 271
-rw-r--r--  fs/f2fs/segment.h | 19
-rw-r--r--  fs/f2fs/super.c | 182
-rw-r--r--  fs/f2fs/sysfs.c | 158
-rw-r--r--  fs/f2fs/verity.c | 47
-rw-r--r--  fs/fat/inode.c | 3
-rw-r--r--  fs/fat/misc.c | 10
-rw-r--r--  fs/file.c | 28
-rw-r--r--  fs/filesystems.c | 3
-rw-r--r--  fs/fs-writeback.c | 2
-rw-r--r--  fs/fs_context.c | 79
-rw-r--r--  fs/fs_parser.c | 447
-rw-r--r--  fs/fscache/internal.h | 2
-rw-r--r--  fs/fscache/object-list.c | 11
-rw-r--r--  fs/fscache/proc.c | 2
-rw-r--r--  fs/fsopen.c | 26
-rw-r--r--  fs/fuse/cuse.c | 4
-rw-r--r--  fs/fuse/dir.c | 2
-rw-r--r--  fs/fuse/file.c | 21
-rw-r--r--  fs/fuse/inode.c | 39
-rw-r--r--  fs/fuse/readdir.c | 2
-rw-r--r--  fs/gfs2/aops.c | 4
-rw-r--r--  fs/gfs2/dir.c | 3
-rw-r--r--  fs/gfs2/file.c | 72
-rw-r--r--  fs/gfs2/glock.c | 2
-rw-r--r--  fs/gfs2/glops.c | 2
-rw-r--r--  fs/gfs2/incore.h | 6
-rw-r--r--  fs/gfs2/inode.c | 10
-rw-r--r--  fs/gfs2/log.c | 21
-rw-r--r--  fs/gfs2/log.h | 4
-rw-r--r--  fs/gfs2/lops.c | 72
-rw-r--r--  fs/gfs2/ops_fstype.c | 105
-rw-r--r--  fs/gfs2/rgrp.c | 10
-rw-r--r--  fs/gfs2/trans.c | 9
-rw-r--r--  fs/hfs/hfs_fs.h | 28
-rw-r--r--  fs/hfs/inode.c | 4
-rw-r--r--  fs/hfsplus/hfsplus_fs.h | 28
-rw-r--r--  fs/hfsplus/inode.c | 12
-rw-r--r--  fs/hostfs/hostfs.h | 22
-rw-r--r--  fs/hostfs/hostfs_kern.c | 15
-rw-r--r--  fs/hugetlbfs/inode.c | 13
-rw-r--r--  fs/inode.c | 68
-rw-r--r--  fs/internal.h | 14
-rw-r--r--  fs/io-wq.c | 193
-rw-r--r--  fs/io-wq.h | 17
-rw-r--r--  fs/io_uring.c | 2623
-rw-r--r--  fs/ioctl.c | 175
-rw-r--r--  fs/iomap/buffered-io.c | 18
-rw-r--r--  fs/jbd2/checkpoint.c | 2
-rw-r--r--  fs/jbd2/commit.c | 50
-rw-r--r--  fs/jbd2/journal.c | 153
-rw-r--r--  fs/jbd2/transaction.c | 14
-rw-r--r--  fs/jffs2/super.c | 26
-rw-r--r--  fs/jfs/jfs_debug.c | 14
-rw-r--r--  fs/jfs/jfs_dmap.c | 1
-rw-r--r--  fs/kernfs/dir.c | 2
-rw-r--r--  fs/kernfs/inode.c | 6
-rw-r--r--  fs/libfs.c | 70
-rw-r--r--  fs/lockd/procfs.c | 12
-rw-r--r--  fs/namei.c | 216
-rw-r--r--  fs/namespace.c | 49
-rw-r--r--  fs/nfs/Kconfig | 11
-rw-r--r--  fs/nfs/Makefile | 2
-rw-r--r--  fs/nfs/callback_xdr.c | 11
-rw-r--r--  fs/nfs/client.c | 84
-rw-r--r--  fs/nfs/delegation.c | 126
-rw-r--r--  fs/nfs/delegation.h | 2
-rw-r--r--  fs/nfs/dir.c | 209
-rw-r--r--  fs/nfs/direct.c | 7
-rw-r--r--  fs/nfs/dns_resolve.c | 2
-rw-r--r--  fs/nfs/file.c | 49
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 34
-rw-r--r--  fs/nfs/fs_context.c | 1440
-rw-r--r--  fs/nfs/fscache-index.c | 6
-rw-r--r--  fs/nfs/fscache.c | 20
-rw-r--r--  fs/nfs/fscache.h | 8
-rw-r--r--  fs/nfs/getroot.c | 73
-rw-r--r--  fs/nfs/inode.c | 11
-rw-r--r--  fs/nfs/internal.h | 143
-rw-r--r--  fs/nfs/mount_clnt.c | 2
-rw-r--r--  fs/nfs/namespace.c | 146
-rw-r--r--  fs/nfs/nfs2xdr.c | 12
-rw-r--r--  fs/nfs/nfs3_fs.h | 2
-rw-r--r--  fs/nfs/nfs3client.c | 6
-rw-r--r--  fs/nfs/nfs3proc.c | 28
-rw-r--r--  fs/nfs/nfs3xdr.c | 5
-rw-r--r--  fs/nfs/nfs42proc.c | 40
-rw-r--r--  fs/nfs/nfs4_fs.h | 19
-rw-r--r--  fs/nfs/nfs4client.c | 99
-rw-r--r--  fs/nfs/nfs4file.c | 2
-rw-r--r--  fs/nfs/nfs4namespace.c | 298
-rw-r--r--  fs/nfs/nfs4proc.c | 124
-rw-r--r--  fs/nfs/nfs4renewd.c | 5
-rw-r--r--  fs/nfs/nfs4state.c | 7
-rw-r--r--  fs/nfs/nfs4super.c | 257
-rw-r--r--  fs/nfs/nfs4trace.c | 4
-rw-r--r--  fs/nfs/nfs4trace.h | 237
-rw-r--r--  fs/nfs/nfs4xdr.c | 17
-rw-r--r--  fs/nfs/nfstrace.h | 279
-rw-r--r--  fs/nfs/pnfs.c | 4
-rw-r--r--  fs/nfs/pnfs.h | 8
-rw-r--r--  fs/nfs/pnfs_nfs.c | 7
-rw-r--r--  fs/nfs/proc.c | 24
-rw-r--r--  fs/nfs/read.c | 7
-rw-r--r--  fs/nfs/super.c | 1972
-rw-r--r--  fs/nfs/write.c | 32
-rw-r--r--  fs/nfsd/Kconfig | 10
-rw-r--r--  fs/nfsd/filecache.c | 313
-rw-r--r--  fs/nfsd/filecache.h | 7
-rw-r--r--  fs/nfsd/netns.h | 6
-rw-r--r--  fs/nfsd/nfs3proc.c | 5
-rw-r--r--  fs/nfsd/nfs3xdr.c | 36
-rw-r--r--  fs/nfsd/nfs4callback.c | 11
-rw-r--r--  fs/nfsd/nfs4layouts.c | 2
-rw-r--r--  fs/nfsd/nfs4proc.c | 462
-rw-r--r--  fs/nfsd/nfs4recover.c | 8
-rw-r--r--  fs/nfsd/nfs4state.c | 262
-rw-r--r--  fs/nfsd/nfs4xdr.c | 161
-rw-r--r--  fs/nfsd/nfsctl.c | 19
-rw-r--r--  fs/nfsd/nfsd.h | 34
-rw-r--r--  fs/nfsd/nfsfh.h | 9
-rw-r--r--  fs/nfsd/nfsproc.c | 8
-rw-r--r--  fs/nfsd/nfssvc.c | 21
-rw-r--r--  fs/nfsd/state.h | 44
-rw-r--r--  fs/nfsd/stats.c | 12
-rw-r--r--  fs/nfsd/trace.h | 22
-rw-r--r--  fs/nfsd/vfs.c | 109
-rw-r--r--  fs/nfsd/vfs.h | 18
-rw-r--r--  fs/nfsd/xdr3.h | 4
-rw-r--r--  fs/nfsd/xdr4.h | 39
-rw-r--r--  fs/nsfs.c | 29
-rw-r--r--  fs/ntfs/inode.c | 18
-rw-r--r--  fs/ocfs2/cluster/quorum.c | 2
-rw-r--r--  fs/ocfs2/dlm/Makefile | 2
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h | 4
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 8
-rw-r--r--  fs/ocfs2/dlmfs/Makefile | 2
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c | 4
-rw-r--r--  fs/ocfs2/dlmfs/userdlm.c | 6
-rw-r--r--  fs/ocfs2/dlmglue.c | 2
-rw-r--r--  fs/ocfs2/file.c | 14
-rw-r--r--  fs/ocfs2/journal.h | 8
-rw-r--r--  fs/ocfs2/namei.c | 3
-rw-r--r--  fs/ocfs2/suballoc.c | 2
-rw-r--r--  fs/open.c | 146
-rw-r--r--  fs/orangefs/orangefs-debugfs.c | 1
-rw-r--r--  fs/overlayfs/copy_up.c | 43
-rw-r--r--  fs/overlayfs/dir.c | 10
-rw-r--r--  fs/overlayfs/export.c | 28
-rw-r--r--  fs/overlayfs/file.c | 162
-rw-r--r--  fs/overlayfs/inode.c | 66
-rw-r--r--  fs/overlayfs/namei.c | 38
-rw-r--r--  fs/overlayfs/overlayfs.h | 24
-rw-r--r--  fs/overlayfs/ovl_entry.h | 23
-rw-r--r--  fs/overlayfs/readdir.c | 22
-rw-r--r--  fs/overlayfs/super.c | 233
-rw-r--r--  fs/overlayfs/util.c | 28
-rw-r--r--  fs/pipe.c | 67
-rw-r--r--  fs/proc/Kconfig | 4
-rw-r--r--  fs/proc/Makefile | 1
-rw-r--r--  fs/proc/base.c | 104
-rw-r--r--  fs/proc/bootconfig.c | 89
-rw-r--r--  fs/proc/cpuinfo.c | 12
-rw-r--r--  fs/proc/generic.c | 38
-rw-r--r--  fs/proc/inode.c | 76
-rw-r--r--  fs/proc/internal.h | 5
-rw-r--r--  fs/proc/kcore.c | 13
-rw-r--r--  fs/proc/kmsg.c | 14
-rw-r--r--  fs/proc/namespaces.c | 24
-rw-r--r--  fs/proc/page.c | 54
-rw-r--r--  fs/proc/proc_net.c | 32
-rw-r--r--  fs/proc/proc_sysctl.c | 2
-rw-r--r--  fs/proc/root.c | 15
-rw-r--r--  fs/proc/stat.c | 12
-rw-r--r--  fs/proc/task_mmu.c | 4
-rw-r--r--  fs/proc/uptime.c | 3
-rw-r--r--  fs/proc/vmcore.c | 10
-rw-r--r--  fs/quota/quota_v2.c | 2
-rw-r--r--  fs/quota/quotaio_v1.h | 6
-rw-r--r--  fs/ramfs/inode.c | 11
-rw-r--r--  fs/read_write.c | 66
-rw-r--r--  fs/readdir.c | 79
-rw-r--r--  fs/reiserfs/journal.c | 2
-rw-r--r--  fs/reiserfs/procfs.c | 1
-rw-r--r--  fs/reiserfs/stree.c | 9
-rw-r--r--  fs/reiserfs/super.c | 4
-rw-r--r--  fs/reiserfs/xattr.c | 8
-rw-r--r--  fs/splice.c | 8
-rw-r--r--  fs/stack.c | 6
-rw-r--r--  fs/stat.c | 34
-rw-r--r--  fs/sysfs/group.c | 2
-rw-r--r--  fs/timerfd.c | 3
-rw-r--r--  fs/tracefs/inode.c | 114
-rw-r--r--  fs/ubifs/Kconfig | 1
-rw-r--r--  fs/ubifs/dir.c | 16
-rw-r--r--  fs/ubifs/file.c | 26
-rw-r--r--  fs/ubifs/ioctl.c | 14
-rw-r--r--  fs/ubifs/journal.c | 10
-rw-r--r--  fs/ubifs/key.h | 1
-rw-r--r--  fs/ubifs/orphan.c | 4
-rw-r--r--  fs/ubifs/sb.c | 13
-rw-r--r--  fs/ubifs/super.c | 2
-rw-r--r--  fs/ubifs/ubifs.h | 7
-rw-r--r--  fs/udf/ecma_167.h | 46
-rw-r--r--  fs/udf/inode.c | 6
-rw-r--r--  fs/udf/osta_udf.h | 100
-rw-r--r--  fs/udf/super.c | 40
-rw-r--r--  fs/udf/truncate.c | 2
-rw-r--r--  fs/unicode/Makefile | 2
-rw-r--r--  fs/utimes.c | 4
-rw-r--r--  fs/vboxsf/Kconfig | 10
-rw-r--r--  fs/vboxsf/Makefile | 5
-rw-r--r--  fs/vboxsf/dir.c | 427
-rw-r--r--  fs/vboxsf/file.c | 379
-rw-r--r--  fs/vboxsf/shfl_hostintf.h | 901
-rw-r--r--  fs/vboxsf/super.c | 491
-rw-r--r--  fs/vboxsf/utils.c | 551
-rw-r--r--  fs/vboxsf/vboxsf_wrappers.c | 371
-rw-r--r--  fs/vboxsf/vfsmod.h | 137
-rw-r--r--  fs/verity/enable.c | 67
-rw-r--r--  fs/verity/fsverity_private.h | 17
-rw-r--r--  fs/verity/hash_algs.c | 98
-rw-r--r--  fs/verity/open.c | 5
-rw-r--r--  fs/verity/verify.c | 47
-rw-r--r--  fs/xfs/libxfs/xfs_ag.c | 21
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c | 51
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 14
-rw-r--r--  fs/xfs/libxfs/xfs_attr.h | 15
-rw-r--r--  fs/xfs/libxfs/xfs_attr_leaf.c | 4
-rw-r--r--  fs/xfs/libxfs/xfs_attr_leaf.h | 9
-rw-r--r--  fs/xfs/libxfs/xfs_attr_remote.c | 83
-rw-r--r--  fs/xfs/libxfs/xfs_attr_remote.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 25
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c | 47
-rw-r--r--  fs/xfs/libxfs/xfs_btree.h | 21
-rw-r--r--  fs/xfs/libxfs/xfs_da_btree.c | 8
-rw-r--r--  fs/xfs/libxfs/xfs_da_btree.h | 4
-rw-r--r--  fs/xfs/libxfs/xfs_da_format.h | 4
-rw-r--r--  fs/xfs/libxfs/xfs_format.h | 7
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c | 12
-rw-r--r--  fs/xfs/libxfs/xfs_log_format.h | 19
-rw-r--r--  fs/xfs/libxfs/xfs_refcount.c | 6
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c | 17
-rw-r--r--  fs/xfs/scrub/agheader_repair.c | 4
-rw-r--r--  fs/xfs/scrub/fscounters.c | 3
-rw-r--r--  fs/xfs/scrub/repair.c | 10
-rw-r--r--  fs/xfs/scrub/repair.h | 1
-rw-r--r--  fs/xfs/scrub/trace.h | 6
-rw-r--r--  fs/xfs/xfs_acl.c | 11
-rw-r--r--  fs/xfs/xfs_aops.c | 2
-rw-r--r--  fs/xfs/xfs_attr_inactive.c | 166
-rw-r--r--  fs/xfs/xfs_buf.c | 161
-rw-r--r--  fs/xfs/xfs_buf.h | 33
-rw-r--r--  fs/xfs/xfs_buf_item.c | 47
-rw-r--r--  fs/xfs/xfs_buf_item.h | 1
-rw-r--r--  fs/xfs/xfs_discard.c | 2
-rw-r--r--  fs/xfs/xfs_dquot.c | 14
-rw-r--r--  fs/xfs/xfs_file.c | 7
-rw-r--r--  fs/xfs/xfs_filestream.c | 11
-rw-r--r--  fs/xfs/xfs_inode.c | 37
-rw-r--r--  fs/xfs/xfs_ioctl.c | 20
-rw-r--r--  fs/xfs/xfs_ioctl32.c | 9
-rw-r--r--  fs/xfs/xfs_ioctl32.h | 2
-rw-r--r--  fs/xfs/xfs_iomap.c | 2
-rw-r--r--  fs/xfs/xfs_iops.c | 6
-rw-r--r--  fs/xfs/xfs_log_recover.c | 36
-rw-r--r--  fs/xfs/xfs_ondisk.h | 1
-rw-r--r--  fs/xfs/xfs_qm.h | 6
-rw-r--r--  fs/xfs/xfs_quotaops.c | 6
-rw-r--r--  fs/xfs/xfs_reflink.c | 11
-rw-r--r--  fs/xfs/xfs_reflink.h | 2
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 8
-rw-r--r--  fs/xfs/xfs_super.c | 59
-rw-r--r--  fs/xfs/xfs_symlink.c | 37
-rw-r--r--  fs/xfs/xfs_trace.h | 4
-rw-r--r--  fs/xfs/xfs_trans.h | 14
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 61
-rw-r--r--  fs/xfs/xfs_trans_dquot.c | 8
-rw-r--r--  fs/xfs/xfs_xattr.c | 14
-rw-r--r--  fs/zonefs/Kconfig | 9
-rw-r--r--  fs/zonefs/Makefile | 4
-rw-r--r--  fs/zonefs/super.c | 1439
-rw-r--r--  fs/zonefs/zonefs.h | 189
435 files changed, 22837 insertions(+), 9662 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index 7b623e9fc1b0..708ba336e689 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -40,6 +40,7 @@ source "fs/ocfs2/Kconfig"
source "fs/btrfs/Kconfig"
source "fs/nilfs2/Kconfig"
source "fs/f2fs/Kconfig"
+source "fs/zonefs/Kconfig"
config FS_DAX
bool "Direct Access (DAX) support"
@@ -264,6 +265,7 @@ source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/erofs/Kconfig"
+source "fs/vboxsf/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/Makefile b/fs/Makefile
index 1148c555c4d3..505e51166973 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -37,7 +37,7 @@ obj-$(CONFIG_FS_DAX) += dax.o
obj-$(CONFIG_FS_ENCRYPTION) += crypto/
obj-$(CONFIG_FS_VERITY) += verity/
obj-$(CONFIG_FILE_LOCKING) += locks.o
-obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
+obj-$(CONFIG_COMPAT) += compat.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o
obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o
@@ -133,3 +133,5 @@ obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
obj-$(CONFIG_EROFS_FS) += erofs/
+obj-$(CONFIG_VBOXSF_FS) += vboxsf/
+obj-$(CONFIG_ZONEFS_FS) += zonefs/
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index b7e844d2f321..699c4fa8b78b 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -26,14 +26,13 @@ static inline u16 adfs_filetype(u32 loadaddr)
#define ADFS_NDA_PUBLIC_READ (1 << 5)
#define ADFS_NDA_PUBLIC_WRITE (1 << 6)
-#include "dir_f.h"
-
/*
* adfs file system inode data in memory
*/
struct adfs_inode_info {
loff_t mmu_private;
__u32 parent_id; /* parent indirect disc address */
+ __u32 indaddr; /* object indirect disc address */
__u32 loadaddr; /* RISC OS load address */
__u32 execaddr; /* RISC OS exec address */
unsigned int attr; /* RISC OS permissions */
@@ -93,15 +92,19 @@ struct adfs_dir {
int nr_buffers;
struct buffer_head *bh[4];
-
- /* big directories need allocated buffers */
- struct buffer_head **bh_fplus;
+ struct buffer_head **bhs;
unsigned int pos;
__u32 parent_id;
- struct adfs_dirheader dirhead;
- union adfs_dirtail dirtail;
+ union {
+ struct adfs_dirheader *dirhead;
+ struct adfs_bigdirheader *bighead;
+ };
+ union {
+ struct adfs_newdirtail *newtail;
+ struct adfs_bigdirtail *bigtail;
+ };
};
/*
@@ -122,13 +125,13 @@ struct object_info {
struct adfs_dir_ops {
int (*read)(struct super_block *sb, unsigned int indaddr,
unsigned int size, struct adfs_dir *dir);
+ int (*iterate)(struct adfs_dir *dir, struct dir_context *ctx);
int (*setpos)(struct adfs_dir *dir, unsigned int fpos);
int (*getnext)(struct adfs_dir *dir, struct object_info *obj);
int (*update)(struct adfs_dir *dir, struct object_info *obj);
int (*create)(struct adfs_dir *dir, struct object_info *obj);
int (*remove)(struct adfs_dir *dir, struct object_info *obj);
- int (*sync)(struct adfs_dir *dir);
- void (*free)(struct adfs_dir *dir);
+ int (*commit)(struct adfs_dir *dir);
};
struct adfs_discmap {
@@ -145,7 +148,9 @@ int adfs_notify_change(struct dentry *dentry, struct iattr *attr);
/* map.c */
int adfs_map_lookup(struct super_block *sb, u32 frag_id, unsigned int offset);
-extern unsigned int adfs_map_free(struct super_block *sb);
+void adfs_map_statfs(struct super_block *sb, struct kstatfs *buf);
+struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_discrecord *dr);
+void adfs_free_map(struct super_block *sb);
/* Misc */
__printf(3, 4)
@@ -167,6 +172,13 @@ extern const struct dentry_operations adfs_dentry_operations;
extern const struct adfs_dir_ops adfs_f_dir_ops;
extern const struct adfs_dir_ops adfs_fplus_dir_ops;
+int adfs_dir_copyfrom(void *dst, struct adfs_dir *dir, unsigned int offset,
+ size_t len);
+int adfs_dir_copyto(struct adfs_dir *dir, unsigned int offset, const void *src,
+ size_t len);
+void adfs_dir_relse(struct adfs_dir *dir);
+int adfs_dir_read_buffers(struct super_block *sb, u32 indaddr,
+ unsigned int size, struct adfs_dir *dir);
void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj);
extern int adfs_dir_update(struct super_block *sb, struct object_info *obj,
int wait);
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index a54c53244992..77fbd196008f 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -6,12 +6,196 @@
*
* Common directory handling for ADFS
*/
+#include <linux/slab.h>
#include "adfs.h"
/*
* For future. This should probably be per-directory.
*/
-static DEFINE_RWLOCK(adfs_dir_lock);
+static DECLARE_RWSEM(adfs_dir_rwsem);
+
+int adfs_dir_copyfrom(void *dst, struct adfs_dir *dir, unsigned int offset,
+ size_t len)
+{
+ struct super_block *sb = dir->sb;
+ unsigned int index, remain;
+
+ index = offset >> sb->s_blocksize_bits;
+ offset &= sb->s_blocksize - 1;
+ remain = sb->s_blocksize - offset;
+ if (index + (remain < len) >= dir->nr_buffers)
+ return -EINVAL;
+
+ if (remain < len) {
+ memcpy(dst, dir->bhs[index]->b_data + offset, remain);
+ dst += remain;
+ len -= remain;
+ index += 1;
+ offset = 0;
+ }
+
+ memcpy(dst, dir->bhs[index]->b_data + offset, len);
+
+ return 0;
+}
+
+int adfs_dir_copyto(struct adfs_dir *dir, unsigned int offset, const void *src,
+ size_t len)
+{
+ struct super_block *sb = dir->sb;
+ unsigned int index, remain;
+
+ index = offset >> sb->s_blocksize_bits;
+ offset &= sb->s_blocksize - 1;
+ remain = sb->s_blocksize - offset;
+ if (index + (remain < len) >= dir->nr_buffers)
+ return -EINVAL;
+
+ if (remain < len) {
+ memcpy(dir->bhs[index]->b_data + offset, src, remain);
+ src += remain;
+ len -= remain;
+ index += 1;
+ offset = 0;
+ }
+
+ memcpy(dir->bhs[index]->b_data + offset, src, len);
+
+ return 0;
+}
+
+static void __adfs_dir_cleanup(struct adfs_dir *dir)
+{
+ dir->nr_buffers = 0;
+
+ if (dir->bhs != dir->bh)
+ kfree(dir->bhs);
+ dir->bhs = NULL;
+ dir->sb = NULL;
+}
+
+void adfs_dir_relse(struct adfs_dir *dir)
+{
+ unsigned int i;
+
+ for (i = 0; i < dir->nr_buffers; i++)
+ brelse(dir->bhs[i]);
+
+ __adfs_dir_cleanup(dir);
+}
+
+static void adfs_dir_forget(struct adfs_dir *dir)
+{
+ unsigned int i;
+
+ for (i = 0; i < dir->nr_buffers; i++)
+ bforget(dir->bhs[i]);
+
+ __adfs_dir_cleanup(dir);
+}
+
+int adfs_dir_read_buffers(struct super_block *sb, u32 indaddr,
+ unsigned int size, struct adfs_dir *dir)
+{
+ struct buffer_head **bhs;
+ unsigned int i, num;
+ int block;
+
+ num = ALIGN(size, sb->s_blocksize) >> sb->s_blocksize_bits;
+ if (num > ARRAY_SIZE(dir->bh)) {
+ /* We only allow one extension */
+ if (dir->bhs != dir->bh)
+ return -EINVAL;
+
+ bhs = kcalloc(num, sizeof(*bhs), GFP_KERNEL);
+ if (!bhs)
+ return -ENOMEM;
+
+ if (dir->nr_buffers)
+ memcpy(bhs, dir->bhs, dir->nr_buffers * sizeof(*bhs));
+
+ dir->bhs = bhs;
+ }
+
+ for (i = dir->nr_buffers; i < num; i++) {
+ block = __adfs_block_map(sb, indaddr, i);
+ if (!block) {
+ adfs_error(sb, "dir %06x has a hole at offset %u",
+ indaddr, i);
+ goto error;
+ }
+
+ dir->bhs[i] = sb_bread(sb, block);
+ if (!dir->bhs[i]) {
+ adfs_error(sb,
+ "dir %06x failed read at offset %u, mapped block 0x%08x",
+ indaddr, i, block);
+ goto error;
+ }
+
+ dir->nr_buffers++;
+ }
+ return 0;
+
+error:
+ adfs_dir_relse(dir);
+
+ return -EIO;
+}
+
+static int adfs_dir_read(struct super_block *sb, u32 indaddr,
+ unsigned int size, struct adfs_dir *dir)
+{
+ dir->sb = sb;
+ dir->bhs = dir->bh;
+ dir->nr_buffers = 0;
+
+ return ADFS_SB(sb)->s_dir->read(sb, indaddr, size, dir);
+}
+
+static int adfs_dir_read_inode(struct super_block *sb, struct inode *inode,
+ struct adfs_dir *dir)
+{
+ int ret;
+
+ ret = adfs_dir_read(sb, ADFS_I(inode)->indaddr, inode->i_size, dir);
+ if (ret)
+ return ret;
+
+ if (ADFS_I(inode)->parent_id != dir->parent_id) {
+ adfs_error(sb,
+ "parent directory id changed under me! (%06x but got %06x)\n",
+ ADFS_I(inode)->parent_id, dir->parent_id);
+ adfs_dir_relse(dir);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void adfs_dir_mark_dirty(struct adfs_dir *dir)
+{
+ unsigned int i;
+
+ /* Mark the buffers dirty */
+ for (i = 0; i < dir->nr_buffers; i++)
+ mark_buffer_dirty(dir->bhs[i]);
+}
+
+static int adfs_dir_sync(struct adfs_dir *dir)
+{
+ int err = 0;
+ int i;
+
+ for (i = dir->nr_buffers - 1; i >= 0; i--) {
+ struct buffer_head *bh = dir->bhs[i];
+ sync_dirty_buffer(bh);
+ if (buffer_req(bh) && !buffer_uptodate(bh))
+ err = -EIO;
+ }
+
+ return err;
+}
void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj)
{
@@ -51,87 +235,90 @@ void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj)
}
}
-static int
-adfs_readdir(struct file *file, struct dir_context *ctx)
+static int adfs_iterate(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
- struct object_info obj;
struct adfs_dir dir;
- int ret = 0;
-
- if (ctx->pos >> 32)
- return 0;
+ int ret;
- ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
+ down_read(&adfs_dir_rwsem);
+ ret = adfs_dir_read_inode(sb, inode, &dir);
if (ret)
- return ret;
+ goto unlock;
if (ctx->pos == 0) {
if (!dir_emit_dot(file, ctx))
- goto free_out;
+ goto unlock_relse;
ctx->pos = 1;
}
if (ctx->pos == 1) {
if (!dir_emit(ctx, "..", 2, dir.parent_id, DT_DIR))
- goto free_out;
+ goto unlock_relse;
ctx->pos = 2;
}
- read_lock(&adfs_dir_lock);
+ ret = ops->iterate(&dir, ctx);
- ret = ops->setpos(&dir, ctx->pos - 2);
- if (ret)
- goto unlock_out;
- while (ops->getnext(&dir, &obj) == 0) {
- if (!dir_emit(ctx, obj.name, obj.name_len,
- obj.indaddr, DT_UNKNOWN))
- break;
- ctx->pos++;
- }
-
-unlock_out:
- read_unlock(&adfs_dir_lock);
+unlock_relse:
+ up_read(&adfs_dir_rwsem);
+ adfs_dir_relse(&dir);
+ return ret;
-free_out:
- ops->free(&dir);
+unlock:
+ up_read(&adfs_dir_rwsem);
return ret;
}
int
adfs_dir_update(struct super_block *sb, struct object_info *obj, int wait)
{
- int ret = -EINVAL;
-#ifdef CONFIG_ADFS_FS_RW
const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
struct adfs_dir dir;
+ int ret;
- printk(KERN_INFO "adfs_dir_update: object %06x in dir %06x\n",
- obj->indaddr, obj->parent_id);
+ if (!IS_ENABLED(CONFIG_ADFS_FS_RW))
+ return -EINVAL;
- if (!ops->update) {
- ret = -EINVAL;
- goto out;
- }
+ if (!ops->update)
+ return -EINVAL;
- ret = ops->read(sb, obj->parent_id, 0, &dir);
+ down_write(&adfs_dir_rwsem);
+ ret = adfs_dir_read(sb, obj->parent_id, 0, &dir);
if (ret)
- goto out;
+ goto unlock;
- write_lock(&adfs_dir_lock);
ret = ops->update(&dir, obj);
- write_unlock(&adfs_dir_lock);
+ if (ret)
+ goto forget;
- if (wait) {
- int err = ops->sync(&dir);
- if (!ret)
- ret = err;
- }
+ ret = ops->commit(&dir);
+ if (ret)
+ goto forget;
+ up_write(&adfs_dir_rwsem);
+
+ adfs_dir_mark_dirty(&dir);
+
+ if (wait)
+ ret = adfs_dir_sync(&dir);
+
+ adfs_dir_relse(&dir);
+ return ret;
+
+ /*
+ * If the update failed because the entry wasn't found, we can
+ * just release the buffers. If it was any other error, forget
+ * the dirtied buffers so they aren't written back to the media.
+ */
+forget:
+ if (ret == -ENOENT)
+ adfs_dir_relse(&dir);
+ else
+ adfs_dir_forget(&dir);
+unlock:
+ up_write(&adfs_dir_rwsem);
- ops->free(&dir);
-out:
-#endif
return ret;
}
@@ -167,25 +354,14 @@ static int adfs_dir_lookup_byname(struct inode *inode, const struct qstr *qstr,
u32 name_len;
int ret;
- ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
+ down_read(&adfs_dir_rwsem);
+ ret = adfs_dir_read_inode(sb, inode, &dir);
if (ret)
- goto out;
-
- if (ADFS_I(inode)->parent_id != dir.parent_id) {
- adfs_error(sb,
- "parent directory changed under me! (%06x but got %06x)\n",
- ADFS_I(inode)->parent_id, dir.parent_id);
- ret = -EIO;
- goto free_out;
- }
-
- obj->parent_id = inode->i_ino;
-
- read_lock(&adfs_dir_lock);
+ goto unlock;
ret = ops->setpos(&dir, 0);
if (ret)
- goto unlock_out;
+ goto unlock_relse;
ret = -ENOENT;
name = qstr->name;
@@ -196,20 +372,22 @@ static int adfs_dir_lookup_byname(struct inode *inode, const struct qstr *qstr,
break;
}
}
+ obj->parent_id = ADFS_I(inode)->indaddr;
-unlock_out:
- read_unlock(&adfs_dir_lock);
+unlock_relse:
+ up_read(&adfs_dir_rwsem);
+ adfs_dir_relse(&dir);
+ return ret;
-free_out:
- ops->free(&dir);
-out:
+unlock:
+ up_read(&adfs_dir_rwsem);
return ret;
}
const struct file_operations adfs_dir_operations = {
.read = generic_read_dir,
.llseek = generic_file_llseek,
- .iterate = adfs_readdir,
+ .iterate_shared = adfs_iterate,
.fsync = generic_file_fsync,
};
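
For reference, a minimal standalone sketch (not kernel code) of the split-copy logic that adfs_dir_copyfrom() and adfs_dir_copyto() implement above: a directory record may straddle a block boundary, so at most two memcpy() calls are needed, and the "index + (remain < len)" bounds test works because the comparison evaluates to 0 or 1, giving the index of the last block the copy will touch.

#include <stdio.h>
#include <string.h>

#define BLKSZ 256				/* stand-in for sb->s_blocksize */

static int copyfrom(void *dst, char blocks[][BLKSZ], unsigned int nblocks,
		    unsigned int offset, size_t len)
{
	unsigned int index = offset / BLKSZ;
	unsigned int remain;

	offset %= BLKSZ;
	remain = BLKSZ - offset;
	if (index + (remain < len) >= nblocks)	/* last block touched is out of range */
		return -1;

	if (remain < len) {			/* record straddles a block boundary */
		memcpy(dst, blocks[index] + offset, remain);
		dst = (char *)dst + remain;
		len -= remain;
		index++;
		offset = 0;
	}
	memcpy(dst, blocks[index] + offset, len);
	return 0;
}

int main(void)
{
	char blocks[2][BLKSZ], entry[26];

	memset(blocks[0], 'A', BLKSZ);
	memset(blocks[1], 'B', BLKSZ);

	/* a 26-byte entry starting 10 bytes before the block boundary */
	if (copyfrom(entry, blocks, 2, BLKSZ - 10, sizeof(entry)) == 0)
		printf("%.26s\n", entry);	/* ten 'A's then sixteen 'B's */
	return 0;
}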
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index c1a950c7400a..30d526fecc3f 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -9,8 +9,6 @@
#include "adfs.h"
#include "dir_f.h"
-static void adfs_f_free(struct adfs_dir *dir);
-
/*
* Read an (unaligned) value of length 1..4 bytes
*/
@@ -60,7 +58,7 @@ static inline void adfs_writeval(unsigned char *p, int len, unsigned int val)
#define bufoff(_bh,_idx) \
({ int _buf = _idx >> blocksize_bits; \
int _off = _idx - (_buf << blocksize_bits);\
- (u8 *)(_bh[_buf]->b_data + _off); \
+ (void *)(_bh[_buf]->b_data + _off); \
})
/*
@@ -123,65 +121,49 @@ adfs_dir_checkbyte(const struct adfs_dir *dir)
return (dircheck ^ (dircheck >> 8) ^ (dircheck >> 16) ^ (dircheck >> 24)) & 0xff;
}
-/* Read and check that a directory is valid */
-static int adfs_dir_read(struct super_block *sb, u32 indaddr,
- unsigned int size, struct adfs_dir *dir)
+static int adfs_f_validate(struct adfs_dir *dir)
{
- const unsigned int blocksize_bits = sb->s_blocksize_bits;
- int blk = 0;
-
- /*
- * Directories which are not a multiple of 2048 bytes
- * are considered bad v2 [3.6]
- */
- if (size & 2047)
- goto bad_dir;
-
- size >>= blocksize_bits;
-
- dir->nr_buffers = 0;
- dir->sb = sb;
-
- for (blk = 0; blk < size; blk++) {
- int phys;
+ struct adfs_dirheader *head = dir->dirhead;
+ struct adfs_newdirtail *tail = dir->newtail;
+
+ if (head->startmasseq != tail->endmasseq ||
+ tail->dirlastmask || tail->reserved[0] || tail->reserved[1] ||
+ (memcmp(&head->startname, "Nick", 4) &&
+ memcmp(&head->startname, "Hugo", 4)) ||
+ memcmp(&head->startname, &tail->endname, 4) ||
+ adfs_dir_checkbyte(dir) != tail->dircheckbyte)
+ return -EIO;
- phys = __adfs_block_map(sb, indaddr, blk);
- if (!phys) {
- adfs_error(sb, "dir %06x has a hole at offset %d",
- indaddr, blk);
- goto release_buffers;
- }
+ return 0;
+}
- dir->bh[blk] = sb_bread(sb, phys);
- if (!dir->bh[blk])
- goto release_buffers;
- }
+/* Read and check that a directory is valid */
+static int adfs_f_read(struct super_block *sb, u32 indaddr, unsigned int size,
+ struct adfs_dir *dir)
+{
+ const unsigned int blocksize_bits = sb->s_blocksize_bits;
+ int ret;
- memcpy(&dir->dirhead, bufoff(dir->bh, 0), sizeof(dir->dirhead));
- memcpy(&dir->dirtail, bufoff(dir->bh, 2007), sizeof(dir->dirtail));
+ if (size && size != ADFS_NEWDIR_SIZE)
+ return -EIO;
- if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq ||
- memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4))
- goto bad_dir;
+ ret = adfs_dir_read_buffers(sb, indaddr, ADFS_NEWDIR_SIZE, dir);
+ if (ret)
+ return ret;
- if (memcmp(&dir->dirhead.startname, "Nick", 4) &&
- memcmp(&dir->dirhead.startname, "Hugo", 4))
- goto bad_dir;
+ dir->dirhead = bufoff(dir->bh, 0);
+ dir->newtail = bufoff(dir->bh, 2007);
- if (adfs_dir_checkbyte(dir) != dir->dirtail.new.dircheckbyte)
+ if (adfs_f_validate(dir))
goto bad_dir;
- dir->nr_buffers = blk;
+ dir->parent_id = adfs_readval(dir->newtail->dirparent, 3);
return 0;
bad_dir:
adfs_error(sb, "dir %06x is corrupted", indaddr);
-release_buffers:
- for (blk -= 1; blk >= 0; blk -= 1)
- brelse(dir->bh[blk]);
-
- dir->sb = NULL;
+ adfs_dir_relse(dir);
return -EIO;
}
@@ -232,24 +214,12 @@ adfs_obj2dir(struct adfs_direntry *de, struct object_info *obj)
static int
__adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj)
{
- struct super_block *sb = dir->sb;
struct adfs_direntry de;
- int thissize, buffer, offset;
-
- buffer = pos >> sb->s_blocksize_bits;
-
- if (buffer > dir->nr_buffers)
- return -EINVAL;
-
- offset = pos & (sb->s_blocksize - 1);
- thissize = sb->s_blocksize - offset;
- if (thissize > 26)
- thissize = 26;
+ int ret;
- memcpy(&de, dir->bh[buffer]->b_data + offset, thissize);
- if (thissize != 26)
- memcpy(((char *)&de) + thissize, dir->bh[buffer + 1]->b_data,
- 26 - thissize);
+ ret = adfs_dir_copyfrom(&de, dir, pos, 26);
+ if (ret)
+ return ret;
if (!de.dirobname[0])
return -ENOENT;
@@ -260,89 +230,6 @@ __adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj)
}
static int
-__adfs_dir_put(struct adfs_dir *dir, int pos, struct object_info *obj)
-{
- struct super_block *sb = dir->sb;
- struct adfs_direntry de;
- int thissize, buffer, offset;
-
- buffer = pos >> sb->s_blocksize_bits;
-
- if (buffer > dir->nr_buffers)
- return -EINVAL;
-
- offset = pos & (sb->s_blocksize - 1);
- thissize = sb->s_blocksize - offset;
- if (thissize > 26)
- thissize = 26;
-
- /*
- * Get the entry in total
- */
- memcpy(&de, dir->bh[buffer]->b_data + offset, thissize);
- if (thissize != 26)
- memcpy(((char *)&de) + thissize, dir->bh[buffer + 1]->b_data,
- 26 - thissize);
-
- /*
- * update it
- */
- adfs_obj2dir(&de, obj);
-
- /*
- * Put the new entry back
- */
- memcpy(dir->bh[buffer]->b_data + offset, &de, thissize);
- if (thissize != 26)
- memcpy(dir->bh[buffer + 1]->b_data, ((char *)&de) + thissize,
- 26 - thissize);
-
- return 0;
-}
-
-/*
- * the caller is responsible for holding the necessary
- * locks.
- */
-static int adfs_dir_find_entry(struct adfs_dir *dir, u32 indaddr)
-{
- int pos, ret;
-
- ret = -ENOENT;
-
- for (pos = 5; pos < ADFS_NUM_DIR_ENTRIES * 26 + 5; pos += 26) {
- struct object_info obj;
-
- if (!__adfs_dir_get(dir, pos, &obj))
- break;
-
- if (obj.indaddr == indaddr) {
- ret = pos;
- break;
- }
- }
-
- return ret;
-}
-
-static int adfs_f_read(struct super_block *sb, u32 indaddr, unsigned int size,
- struct adfs_dir *dir)
-{
- int ret;
-
- if (size != ADFS_NEWDIR_SIZE)
- return -EIO;
-
- ret = adfs_dir_read(sb, indaddr, size, dir);
- if (ret)
- adfs_error(sb, "unable to read directory");
- else
- dir->parent_id = adfs_readval(dir->dirtail.new.dirparent, 3);
-
- return ret;
-}
-
-static int
adfs_f_setpos(struct adfs_dir *dir, unsigned int fpos)
{
if (fpos >= ADFS_NUM_DIR_ENTRIES)
@@ -364,99 +251,74 @@ adfs_f_getnext(struct adfs_dir *dir, struct object_info *obj)
return ret;
}
-static int
-adfs_f_update(struct adfs_dir *dir, struct object_info *obj)
+static int adfs_f_iterate(struct adfs_dir *dir, struct dir_context *ctx)
{
- struct super_block *sb = dir->sb;
- int ret, i;
+ struct object_info obj;
+ int pos = 5 + (ctx->pos - 2) * 26;
- ret = adfs_dir_find_entry(dir, obj->indaddr);
- if (ret < 0) {
- adfs_error(dir->sb, "unable to locate entry to update");
- goto out;
+ while (ctx->pos < 2 + ADFS_NUM_DIR_ENTRIES) {
+ if (__adfs_dir_get(dir, pos, &obj))
+ break;
+ if (!dir_emit(ctx, obj.name, obj.name_len,
+ obj.indaddr, DT_UNKNOWN))
+ break;
+ pos += 26;
+ ctx->pos++;
}
+ return 0;
+}
- __adfs_dir_put(dir, ret, obj);
-
- /*
- * Increment directory sequence number
- */
- dir->bh[0]->b_data[0] += 1;
- dir->bh[dir->nr_buffers - 1]->b_data[sb->s_blocksize - 6] += 1;
-
- ret = adfs_dir_checkbyte(dir);
- /*
- * Update directory check byte
- */
- dir->bh[dir->nr_buffers - 1]->b_data[sb->s_blocksize - 1] = ret;
-
-#if 1
- {
- const unsigned int blocksize_bits = sb->s_blocksize_bits;
-
- memcpy(&dir->dirhead, bufoff(dir->bh, 0), sizeof(dir->dirhead));
- memcpy(&dir->dirtail, bufoff(dir->bh, 2007), sizeof(dir->dirtail));
+static int adfs_f_update(struct adfs_dir *dir, struct object_info *obj)
+{
+ struct adfs_direntry de;
+ int offset, ret;
- if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq ||
- memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4))
- goto bad_dir;
+ offset = 5 - (int)sizeof(de);
- if (memcmp(&dir->dirhead.startname, "Nick", 4) &&
- memcmp(&dir->dirhead.startname, "Hugo", 4))
- goto bad_dir;
+ do {
+ offset += sizeof(de);
+ ret = adfs_dir_copyfrom(&de, dir, offset, sizeof(de));
+ if (ret) {
+ adfs_error(dir->sb, "error reading directory entry");
+ return -ENOENT;
+ }
+ if (!de.dirobname[0]) {
+ adfs_error(dir->sb, "unable to locate entry to update");
+ return -ENOENT;
+ }
+ } while (adfs_readval(de.dirinddiscadd, 3) != obj->indaddr);
- if (adfs_dir_checkbyte(dir) != dir->dirtail.new.dircheckbyte)
- goto bad_dir;
- }
-#endif
- for (i = dir->nr_buffers - 1; i >= 0; i--)
- mark_buffer_dirty(dir->bh[i]);
+ /* Update the directory entry with the new object state */
+ adfs_obj2dir(&de, obj);
- ret = 0;
-out:
- return ret;
-#if 1
-bad_dir:
- adfs_error(dir->sb, "whoops! I broke a directory!");
- return -EIO;
-#endif
+ /* Write the directory entry back to the directory */
+ return adfs_dir_copyto(dir, offset, &de, 26);
}
-static int
-adfs_f_sync(struct adfs_dir *dir)
+static int adfs_f_commit(struct adfs_dir *dir)
{
- int err = 0;
- int i;
-
- for (i = dir->nr_buffers - 1; i >= 0; i--) {
- struct buffer_head *bh = dir->bh[i];
- sync_dirty_buffer(bh);
- if (buffer_req(bh) && !buffer_uptodate(bh))
- err = -EIO;
- }
+ int ret;
- return err;
-}
+ /* Increment directory sequence number */
+ dir->dirhead->startmasseq += 1;
+ dir->newtail->endmasseq += 1;
-static void
-adfs_f_free(struct adfs_dir *dir)
-{
- int i;
+ /* Update directory check byte */
+ dir->newtail->dircheckbyte = adfs_dir_checkbyte(dir);
- for (i = dir->nr_buffers - 1; i >= 0; i--) {
- brelse(dir->bh[i]);
- dir->bh[i] = NULL;
- }
+ /* Make sure the directory still validates correctly */
+ ret = adfs_f_validate(dir);
+ if (ret)
+ adfs_msg(dir->sb, KERN_ERR, "error: update broke directory");
- dir->nr_buffers = 0;
- dir->sb = NULL;
+ return ret;
}
const struct adfs_dir_ops adfs_f_dir_ops = {
.read = adfs_f_read,
+ .iterate = adfs_f_iterate,
.setpos = adfs_f_setpos,
.getnext = adfs_f_getnext,
.update = adfs_f_update,
- .sync = adfs_f_sync,
- .free = adfs_f_free
+ .commit = adfs_f_commit,
};
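
The fixed offsets used by adfs_f_iterate() and adfs_f_update() above come straight from the new-format ('F') directory geometry: a 5-byte header, then 77 entries of 26 bytes each starting at byte 5, then a 41-byte tail at byte 2007. A quick sanity check of that arithmetic (a sketch; the sizes assume the packed layouts in dir_f.h below):

#include <assert.h>

#define ADFS_NEWDIR_SIZE	2048
#define ADFS_NUM_DIR_ENTRIES	77

int main(void)
{
	int header = 5, entry = 26, tail = 41;

	/* entry n lives at byte 5 + n * 26, as in adfs_f_iterate() */
	assert(header + ADFS_NUM_DIR_ENTRIES * entry == 2007);
	/* ... which is exactly where adfs_f_read() maps dir->newtail */
	assert(2007 + tail == ADFS_NEWDIR_SIZE);
	return 0;
}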
diff --git a/fs/adfs/dir_f.h b/fs/adfs/dir_f.h
index 5aec332b90f5..a5393e6cf9f4 100644
--- a/fs/adfs/dir_f.h
+++ b/fs/adfs/dir_f.h
@@ -13,9 +13,9 @@
* Directory header
*/
struct adfs_dirheader {
- unsigned char startmasseq;
- unsigned char startname[4];
-};
+ __u8 startmasseq;
+ __u8 startname[4];
+} __attribute__((packed));
#define ADFS_NEWDIR_SIZE 2048
#define ADFS_NUM_DIR_ENTRIES 77
@@ -31,32 +31,36 @@ struct adfs_direntry {
__u8 dirlen[4];
__u8 dirinddiscadd[3];
__u8 newdiratts;
-};
+} __attribute__((packed));
/*
* Directory tail
*/
+struct adfs_olddirtail {
+ __u8 dirlastmask;
+ char dirname[10];
+ __u8 dirparent[3];
+ char dirtitle[19];
+ __u8 reserved[14];
+ __u8 endmasseq;
+ __u8 endname[4];
+ __u8 dircheckbyte;
+} __attribute__((packed));
+
+struct adfs_newdirtail {
+ __u8 dirlastmask;
+ __u8 reserved[2];
+ __u8 dirparent[3];
+ char dirtitle[19];
+ char dirname[10];
+ __u8 endmasseq;
+ __u8 endname[4];
+ __u8 dircheckbyte;
+} __attribute__((packed));
+
union adfs_dirtail {
- struct {
- unsigned char dirlastmask;
- char dirname[10];
- unsigned char dirparent[3];
- char dirtitle[19];
- unsigned char reserved[14];
- unsigned char endmasseq;
- unsigned char endname[4];
- unsigned char dircheckbyte;
- } old;
- struct {
- unsigned char dirlastmask;
- unsigned char reserved[2];
- unsigned char dirparent[3];
- char dirtitle[19];
- char dirname[10];
- unsigned char endmasseq;
- unsigned char endname[4];
- unsigned char dircheckbyte;
- } new;
+ struct adfs_olddirtail old;
+ struct adfs_newdirtail new;
};
#endif
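
With the patch overlaying struct adfs_dirheader and struct adfs_newdirtail directly on the buffer data (via bufoff() in dir_f.c) instead of memcpy'ing them into aligned copies, the new packed attributes are what guarantee the in-memory layout matches the media. A hedged compile-time check of the sizes this implies, assuming the full field lists of this header (only parts of which appear in the hunks above):

_Static_assert(sizeof(struct adfs_dirheader) == 5, "5-byte header");
_Static_assert(sizeof(struct adfs_direntry) == 26, "26-byte entries");
_Static_assert(sizeof(struct adfs_newdirtail) == 41, "tail fills bytes 2007..2047");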
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index d56924c11b17..4a15924014da 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -4,123 +4,163 @@
*
* Copyright (C) 1997-1999 Russell King
*/
-#include <linux/slab.h>
#include "adfs.h"
#include "dir_fplus.h"
-static int
-adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir)
+/* Return the byte offset to directory entry pos */
+static unsigned int adfs_fplus_offset(const struct adfs_bigdirheader *h,
+ unsigned int pos)
{
- struct adfs_bigdirheader *h;
- struct adfs_bigdirtail *t;
- unsigned long block;
- unsigned int blk, size;
- int i, ret = -EIO;
+ return offsetof(struct adfs_bigdirheader, bigdirname) +
+ ALIGN(le32_to_cpu(h->bigdirnamelen), 4) +
+ pos * sizeof(struct adfs_bigdirentry);
+}
- dir->nr_buffers = 0;
+static int adfs_fplus_validate_header(const struct adfs_bigdirheader *h)
+{
+ unsigned int size = le32_to_cpu(h->bigdirsize);
+ unsigned int len;
- /* start off using fixed bh set - only alloc for big dirs */
- dir->bh_fplus = &dir->bh[0];
+ if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
+ h->bigdirversion[2] != 0 ||
+ h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME) ||
+ !size || size & 2047 || size > SZ_4M)
+ return -EIO;
- block = __adfs_block_map(sb, id, 0);
- if (!block) {
- adfs_error(sb, "dir object %X has a hole at offset 0", id);
- goto out;
- }
+ size -= sizeof(struct adfs_bigdirtail) +
+ offsetof(struct adfs_bigdirheader, bigdirname);
- dir->bh_fplus[0] = sb_bread(sb, block);
- if (!dir->bh_fplus[0])
- goto out;
- dir->nr_buffers += 1;
+ /* Check that bigdirnamelen fits within the directory */
+ len = ALIGN(le32_to_cpu(h->bigdirnamelen), 4);
+ if (len > size)
+ return -EIO;
- h = (struct adfs_bigdirheader *)dir->bh_fplus[0]->b_data;
- size = le32_to_cpu(h->bigdirsize);
- if (size != sz) {
- adfs_msg(sb, KERN_WARNING,
- "directory header size %X does not match directory size %X",
- size, sz);
+ size -= len;
+
+ /* Check that bigdirnamesize fits within the directory */
+ len = le32_to_cpu(h->bigdirnamesize);
+ if (len > size)
+ return -EIO;
+
+ size -= len;
+
+ /*
+ * Avoid division: we know that the absolute maximum number of entries
+ * cannot be large enough to overflow the multiplication below.
+ */
+ len = le32_to_cpu(h->bigdirentries);
+ if (len > SZ_4M / sizeof(struct adfs_bigdirentry) ||
+ len * sizeof(struct adfs_bigdirentry) > size)
+ return -EIO;
+
+ return 0;
+}
+
+static int adfs_fplus_validate_tail(const struct adfs_bigdirheader *h,
+ const struct adfs_bigdirtail *t)
+{
+ if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
+ t->bigdirendmasseq != h->startmasseq ||
+ t->reserved[0] != 0 || t->reserved[1] != 0)
+ return -EIO;
+
+ return 0;
+}
+
+static u8 adfs_fplus_checkbyte(struct adfs_dir *dir)
+{
+ struct adfs_bigdirheader *h = dir->bighead;
+ struct adfs_bigdirtail *t = dir->bigtail;
+ unsigned int end, bs, bi, i;
+ __le32 *bp;
+ u32 dircheck;
+
+ end = adfs_fplus_offset(h, le32_to_cpu(h->bigdirentries)) +
+ le32_to_cpu(h->bigdirnamesize);
+
+ /* Accumulate the contents of the header, entries and names */
+ for (dircheck = 0, bi = 0; end; bi++) {
+ bp = (void *)dir->bhs[bi]->b_data;
+ bs = dir->bhs[bi]->b_size;
+ if (bs > end)
+ bs = end;
+
+ for (i = 0; i < bs; i += sizeof(u32))
+ dircheck = ror32(dircheck, 13) ^ le32_to_cpup(bp++);
+
+ end -= bs;
}
- if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
- h->bigdirversion[2] != 0 || size & 2047 ||
- h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME)) {
- adfs_error(sb, "dir %06x has malformed header", id);
+ /* Accumulate the contents of the tail except for the check byte */
+ dircheck = ror32(dircheck, 13) ^ le32_to_cpu(t->bigdirendname);
+ dircheck = ror32(dircheck, 13) ^ t->bigdirendmasseq;
+ dircheck = ror32(dircheck, 13) ^ t->reserved[0];
+ dircheck = ror32(dircheck, 13) ^ t->reserved[1];
+
+ return dircheck ^ dircheck >> 8 ^ dircheck >> 16 ^ dircheck >> 24;
+}
+
+static int adfs_fplus_read(struct super_block *sb, u32 indaddr,
+ unsigned int size, struct adfs_dir *dir)
+{
+ struct adfs_bigdirheader *h;
+ struct adfs_bigdirtail *t;
+ unsigned int dirsize;
+ int ret;
+
+ /* Read first buffer */
+ ret = adfs_dir_read_buffers(sb, indaddr, sb->s_blocksize, dir);
+ if (ret)
+ return ret;
+
+ dir->bighead = h = (void *)dir->bhs[0]->b_data;
+ ret = adfs_fplus_validate_header(h);
+ if (ret) {
+ adfs_error(sb, "dir %06x has malformed header", indaddr);
goto out;
}
- size >>= sb->s_blocksize_bits;
- if (size > ARRAY_SIZE(dir->bh)) {
- /* this directory is too big for fixed bh set, must allocate */
- struct buffer_head **bh_fplus =
- kcalloc(size, sizeof(struct buffer_head *),
- GFP_KERNEL);
- if (!bh_fplus) {
- adfs_msg(sb, KERN_ERR,
- "not enough memory for dir object %X (%d blocks)",
- id, size);
- ret = -ENOMEM;
- goto out;
- }
- dir->bh_fplus = bh_fplus;
- /* copy over the pointer to the block that we've already read */
- dir->bh_fplus[0] = dir->bh[0];
+ dirsize = le32_to_cpu(h->bigdirsize);
+ if (size && dirsize != size) {
+ adfs_msg(sb, KERN_WARNING,
+ "dir %06x header size %X does not match directory size %X",
+ indaddr, dirsize, size);
}
- for (blk = 1; blk < size; blk++) {
- block = __adfs_block_map(sb, id, blk);
- if (!block) {
- adfs_error(sb, "dir object %X has a hole at offset %d", id, blk);
- goto out;
- }
+ /* Read remaining buffers */
+ ret = adfs_dir_read_buffers(sb, indaddr, dirsize, dir);
+ if (ret)
+ return ret;
- dir->bh_fplus[blk] = sb_bread(sb, block);
- if (!dir->bh_fplus[blk]) {
- adfs_error(sb, "dir object %x failed read for offset %d, mapped block %lX",
- id, blk, block);
- goto out;
- }
+ dir->bigtail = t = (struct adfs_bigdirtail *)
+ (dir->bhs[dir->nr_buffers - 1]->b_data + (sb->s_blocksize - 8));
- dir->nr_buffers += 1;
+ ret = adfs_fplus_validate_tail(h, t);
+ if (ret) {
+ adfs_error(sb, "dir %06x has malformed tail", indaddr);
+ goto out;
}
- t = (struct adfs_bigdirtail *)
- (dir->bh_fplus[size - 1]->b_data + (sb->s_blocksize - 8));
-
- if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
- t->bigdirendmasseq != h->startmasseq ||
- t->reserved[0] != 0 || t->reserved[1] != 0) {
- adfs_error(sb, "dir %06x has malformed tail", id);
+ if (adfs_fplus_checkbyte(dir) != t->bigdircheckbyte) {
+ adfs_error(sb, "dir %06x checkbyte mismatch\n", indaddr);
goto out;
}
dir->parent_id = le32_to_cpu(h->bigdirparent);
- dir->sb = sb;
return 0;
out:
- if (dir->bh_fplus) {
- for (i = 0; i < dir->nr_buffers; i++)
- brelse(dir->bh_fplus[i]);
-
- if (&dir->bh[0] != dir->bh_fplus)
- kfree(dir->bh_fplus);
+ adfs_dir_relse(dir);
- dir->bh_fplus = NULL;
- }
-
- dir->nr_buffers = 0;
- dir->sb = NULL;
return ret;
}
static int
adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos)
{
- struct adfs_bigdirheader *h =
- (struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
int ret = -ENOENT;
- if (fpos <= le32_to_cpu(h->bigdirentries)) {
+ if (fpos <= le32_to_cpu(dir->bighead->bigdirentries)) {
dir->pos = fpos;
ret = 0;
}
@@ -128,51 +168,23 @@ adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos)
return ret;
}
-static void
-dir_memcpy(struct adfs_dir *dir, unsigned int offset, void *to, int len)
-{
- struct super_block *sb = dir->sb;
- unsigned int buffer, partial, remainder;
-
- buffer = offset >> sb->s_blocksize_bits;
- offset &= sb->s_blocksize - 1;
-
- partial = sb->s_blocksize - offset;
-
- if (partial >= len)
- memcpy(to, dir->bh_fplus[buffer]->b_data + offset, len);
- else {
- char *c = (char *)to;
-
- remainder = len - partial;
-
- memcpy(c,
- dir->bh_fplus[buffer]->b_data + offset,
- partial);
-
- memcpy(c + partial,
- dir->bh_fplus[buffer + 1]->b_data,
- remainder);
- }
-}
-
static int
adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
{
- struct adfs_bigdirheader *h =
- (struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
+ struct adfs_bigdirheader *h = dir->bighead;
struct adfs_bigdirentry bde;
unsigned int offset;
- int ret = -ENOENT;
+ int ret;
if (dir->pos >= le32_to_cpu(h->bigdirentries))
- goto out;
+ return -ENOENT;
- offset = offsetof(struct adfs_bigdirheader, bigdirname);
- offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3);
- offset += dir->pos * sizeof(struct adfs_bigdirentry);
+ offset = adfs_fplus_offset(h, dir->pos);
- dir_memcpy(dir, offset, &bde, sizeof(struct adfs_bigdirentry));
+ ret = adfs_dir_copyfrom(&bde, dir, offset,
+ sizeof(struct adfs_bigdirentry));
+ if (ret)
+ return ret;
obj->loadaddr = le32_to_cpu(bde.bigdirload);
obj->execaddr = le32_to_cpu(bde.bigdirexec);
@@ -181,59 +193,95 @@ adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
obj->attr = le32_to_cpu(bde.bigdirattr);
obj->name_len = le32_to_cpu(bde.bigdirobnamelen);
- offset = offsetof(struct adfs_bigdirheader, bigdirname);
- offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3);
- offset += le32_to_cpu(h->bigdirentries) * sizeof(struct adfs_bigdirentry);
+ offset = adfs_fplus_offset(h, le32_to_cpu(h->bigdirentries));
offset += le32_to_cpu(bde.bigdirobnameptr);
- dir_memcpy(dir, offset, obj->name, obj->name_len);
+ ret = adfs_dir_copyfrom(obj->name, dir, offset, obj->name_len);
+ if (ret)
+ return ret;
+
adfs_object_fixup(dir, obj);
dir->pos += 1;
- ret = 0;
-out:
- return ret;
+
+ return 0;
}
-static int
-adfs_fplus_sync(struct adfs_dir *dir)
+static int adfs_fplus_iterate(struct adfs_dir *dir, struct dir_context *ctx)
{
- int err = 0;
- int i;
-
- for (i = dir->nr_buffers - 1; i >= 0; i--) {
- struct buffer_head *bh = dir->bh_fplus[i];
- sync_dirty_buffer(bh);
- if (buffer_req(bh) && !buffer_uptodate(bh))
- err = -EIO;
+ struct object_info obj;
+
+ if ((ctx->pos - 2) >> 32)
+ return 0;
+
+ if (adfs_fplus_setpos(dir, ctx->pos - 2))
+ return 0;
+
+ while (!adfs_fplus_getnext(dir, &obj)) {
+ if (!dir_emit(ctx, obj.name, obj.name_len,
+ obj.indaddr, DT_UNKNOWN))
+ break;
+ ctx->pos++;
}
- return err;
+ return 0;
}
-static void
-adfs_fplus_free(struct adfs_dir *dir)
+static int adfs_fplus_update(struct adfs_dir *dir, struct object_info *obj)
{
- int i;
+ struct adfs_bigdirheader *h = dir->bighead;
+ struct adfs_bigdirentry bde;
+ int offset, end, ret;
- if (dir->bh_fplus) {
- for (i = 0; i < dir->nr_buffers; i++)
- brelse(dir->bh_fplus[i]);
+ offset = adfs_fplus_offset(h, 0) - sizeof(bde);
+ end = adfs_fplus_offset(h, le32_to_cpu(h->bigdirentries));
- if (&dir->bh[0] != dir->bh_fplus)
- kfree(dir->bh_fplus);
+ do {
+ offset += sizeof(bde);
+ if (offset >= end) {
+ adfs_error(dir->sb, "unable to locate entry to update");
+ return -ENOENT;
+ }
+ ret = adfs_dir_copyfrom(&bde, dir, offset, sizeof(bde));
+ if (ret) {
+ adfs_error(dir->sb, "error reading directory entry");
+ return -ENOENT;
+ }
+ } while (le32_to_cpu(bde.bigdirindaddr) != obj->indaddr);
- dir->bh_fplus = NULL;
- }
+ bde.bigdirload = cpu_to_le32(obj->loadaddr);
+ bde.bigdirexec = cpu_to_le32(obj->execaddr);
+ bde.bigdirlen = cpu_to_le32(obj->size);
+ bde.bigdirindaddr = cpu_to_le32(obj->indaddr);
+ bde.bigdirattr = cpu_to_le32(obj->attr);
+
+ return adfs_dir_copyto(dir, offset, &bde, sizeof(bde));
+}
+
+static int adfs_fplus_commit(struct adfs_dir *dir)
+{
+ int ret;
- dir->nr_buffers = 0;
- dir->sb = NULL;
+ /* Increment directory sequence number */
+ dir->bighead->startmasseq += 1;
+ dir->bigtail->bigdirendmasseq += 1;
+
+ /* Update directory check byte */
+ dir->bigtail->bigdircheckbyte = adfs_fplus_checkbyte(dir);
+
+ /* Make sure the directory still validates correctly */
+ ret = adfs_fplus_validate_header(dir->bighead);
+ if (ret == 0)
+ ret = adfs_fplus_validate_tail(dir->bighead, dir->bigtail);
+
+ return ret;
}
const struct adfs_dir_ops adfs_fplus_dir_ops = {
.read = adfs_fplus_read,
+ .iterate = adfs_fplus_iterate,
.setpos = adfs_fplus_setpos,
.getnext = adfs_fplus_getnext,
- .sync = adfs_fplus_sync,
- .free = adfs_fplus_free
+ .update = adfs_fplus_update,
+ .commit = adfs_fplus_commit,
};
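
A worked example of the adfs_fplus_offset() helper introduced above, which replaces two open-coded copies of the same sum (a sketch; the 28-byte figures assume the complete structure layouts in dir_fplus.h, of which only fragments are shown below):

#include <stdio.h>

#define HDR_FIXED	28	/* assumed offsetof(struct adfs_bigdirheader, bigdirname) */
#define ENTRY_SIZE	28	/* assumed sizeof(struct adfs_bigdirentry): 7 x __le32 */
#define ALIGN4(x)	(((x) + 3) & ~3u)

int main(void)
{
	unsigned int namelen = 10;	/* bigdirnamelen read from the header */

	/* entry 0 follows the fixed header plus the padded directory name */
	printf("entry 0 at byte %u\n", HDR_FIXED + ALIGN4(namelen));		/* 40 */
	/* entry 5 is five fixed-size records further on */
	printf("entry 5 at byte %u\n",
	       HDR_FIXED + ALIGN4(namelen) + 5 * ENTRY_SIZE);			/* 180 */
	/* object names start after the whole entry table, which is why
	 * adfs_fplus_getnext() adds bigdirobnameptr to
	 * adfs_fplus_offset(h, bigdirentries) */
	return 0;
}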
diff --git a/fs/adfs/dir_fplus.h b/fs/adfs/dir_fplus.h
index 4ec0931e36ad..d729b1591e5e 100644
--- a/fs/adfs/dir_fplus.h
+++ b/fs/adfs/dir_fplus.h
@@ -22,7 +22,7 @@ struct adfs_bigdirheader {
__le32 bigdirnamesize;
__le32 bigdirparent;
char bigdirname[1];
-};
+} __attribute__((packed, aligned(4)));
struct adfs_bigdirentry {
__le32 bigdirload;
@@ -32,11 +32,11 @@ struct adfs_bigdirentry {
__le32 bigdirattr;
__le32 bigdirobnamelen;
__le32 bigdirobnameptr;
-};
+} __attribute__((packed, aligned(4)));
struct adfs_bigdirtail {
__le32 bigdirendname;
__u8 bigdirendmasseq;
__u8 reserved[2];
__u8 bigdircheckbyte;
-};
+} __attribute__((packed, aligned(4)));
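
As with dir_f.h, packing these on-disc structures pins down their sizes; a hedged compile-time check tying them back to the code above:

_Static_assert(sizeof(struct adfs_bigdirentry) == 28, "seven __le32 fields");
/* the 8-byte tail is why adfs_fplus_read() locates it at s_blocksize - 8 */
_Static_assert(sizeof(struct adfs_bigdirtail) == 8, "4 + 1 + 2 + 1 bytes");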
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 124de75413a5..32620f4a7623 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -20,7 +20,8 @@ adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh,
if (block >= inode->i_blocks)
goto abort_toobig;
- block = __adfs_block_map(inode->i_sb, inode->i_ino, block);
+ block = __adfs_block_map(inode->i_sb, ADFS_I(inode)->indaddr,
+ block);
if (block)
map_bh(bh, inode->i_sb, block);
return 0;
@@ -126,29 +127,29 @@ adfs_atts2mode(struct super_block *sb, struct inode *inode)
* Convert Linux permission to ADFS attribute. We try to do the reverse
* of atts2mode, but there is not a 1:1 translation.
*/
-static int
-adfs_mode2atts(struct super_block *sb, struct inode *inode)
+static int adfs_mode2atts(struct super_block *sb, struct inode *inode,
+ umode_t ia_mode)
{
+ struct adfs_sb_info *asb = ADFS_SB(sb);
umode_t mode;
int attr;
- struct adfs_sb_info *asb = ADFS_SB(sb);
/* FIXME: should we be able to alter a link? */
if (S_ISLNK(inode->i_mode))
return ADFS_I(inode)->attr;
+ /* Directories do not have read/write permissions on the media */
if (S_ISDIR(inode->i_mode))
- attr = ADFS_NDA_DIRECTORY;
- else
- attr = 0;
+ return ADFS_NDA_DIRECTORY;
- mode = inode->i_mode & asb->s_owner_mask;
+ attr = 0;
+ mode = ia_mode & asb->s_owner_mask;
if (mode & S_IRUGO)
attr |= ADFS_NDA_OWNER_READ;
if (mode & S_IWUGO)
attr |= ADFS_NDA_OWNER_WRITE;
- mode = inode->i_mode & asb->s_other_mask;
+ mode = ia_mode & asb->s_other_mask;
mode &= ~asb->s_owner_mask;
if (mode & S_IRUGO)
attr |= ADFS_NDA_PUBLIC_READ;
@@ -158,6 +159,8 @@ adfs_mode2atts(struct super_block *sb, struct inode *inode)
return attr;
}
+static const s64 nsec_unix_epoch_diff_risc_os_epoch = 2208988800000000000LL;
+
/*
* Convert an ADFS time to Unix time. ADFS has a 40-bit centi-second time
* referenced to 1 Jan 1900 (til 2248) so we need to discard 2208988800 seconds
@@ -170,8 +173,6 @@ adfs_adfs2unix_time(struct timespec64 *tv, struct inode *inode)
/* 01 Jan 1970 00:00:00 (Unix epoch) as nanoseconds since
* 01 Jan 1900 00:00:00 (RISC OS epoch)
*/
- static const s64 nsec_unix_epoch_diff_risc_os_epoch =
- 2208988800000000000LL;
s64 nsec;
if (!adfs_inode_is_stamped(inode))
@@ -204,24 +205,23 @@ adfs_adfs2unix_time(struct timespec64 *tv, struct inode *inode)
return;
}
-/*
- * Convert an Unix time to ADFS time. We only do this if the entry has a
- * time/date stamp already.
- */
-static void
-adfs_unix2adfs_time(struct inode *inode, unsigned int secs)
+/* Convert a Unix time to ADFS time for an entry that is already stamped. */
+static void adfs_unix2adfs_time(struct inode *inode,
+ const struct timespec64 *ts)
{
- unsigned int high, low;
+ s64 cs, nsec = timespec64_to_ns(ts);
- if (adfs_inode_is_stamped(inode)) {
- /* convert 32-bit seconds to 40-bit centi-seconds */
- low = (secs & 255) * 100;
- high = (secs / 256) * 100 + (low >> 8) + 0x336e996a;
+ /* convert from Unix to RISC OS epoch */
+ nsec += nsec_unix_epoch_diff_risc_os_epoch;
- ADFS_I(inode)->loadaddr = (high >> 24) |
- (ADFS_I(inode)->loadaddr & ~0xff);
- ADFS_I(inode)->execaddr = (low & 255) | (high << 8);
- }
+ /* convert from nanoseconds to centiseconds */
+ cs = div_s64(nsec, 10000000);
+
+ cs = clamp_t(s64, cs, 0, 0xffffffffff);
+
+ ADFS_I(inode)->loadaddr &= ~0xff;
+ ADFS_I(inode)->loadaddr |= (cs >> 32) & 0xff;
+ ADFS_I(inode)->execaddr = cs;
}
/*
@@ -260,6 +260,7 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
* for cross-directory renames.
*/
ADFS_I(inode)->parent_id = obj->parent_id;
+ ADFS_I(inode)->indaddr = obj->indaddr;
ADFS_I(inode)->loadaddr = obj->loadaddr;
ADFS_I(inode)->execaddr = obj->execaddr;
ADFS_I(inode)->attr = obj->attr;
@@ -315,10 +316,11 @@ adfs_notify_change(struct dentry *dentry, struct iattr *attr)
if (ia_valid & ATTR_SIZE)
truncate_setsize(inode, attr->ia_size);
- if (ia_valid & ATTR_MTIME) {
- inode->i_mtime = attr->ia_mtime;
- adfs_unix2adfs_time(inode, attr->ia_mtime.tv_sec);
+ if (ia_valid & ATTR_MTIME && adfs_inode_is_stamped(inode)) {
+ adfs_unix2adfs_time(inode, &attr->ia_mtime);
+ adfs_adfs2unix_time(&inode->i_mtime, inode);
}
+
/*
* FIXME: should we make these == to i_mtime since we don't
* have the ability to represent them in our filesystem?
@@ -328,7 +330,7 @@ adfs_notify_change(struct dentry *dentry, struct iattr *attr)
if (ia_valid & ATTR_CTIME)
inode->i_ctime = attr->ia_ctime;
if (ia_valid & ATTR_MODE) {
- ADFS_I(inode)->attr = adfs_mode2atts(sb, inode);
+ ADFS_I(inode)->attr = adfs_mode2atts(sb, inode, attr->ia_mode);
inode->i_mode = adfs_atts2mode(sb, inode);
}
@@ -353,7 +355,7 @@ int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
struct object_info obj;
int ret;
- obj.indaddr = inode->i_ino;
+ obj.indaddr = ADFS_I(inode)->indaddr;
obj.name_len = 0;
obj.parent_id = ADFS_I(inode)->parent_id;
obj.loadaddr = ADFS_I(inode)->loadaddr;
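
A standalone check (not kernel code) that the new nanosecond epoch constant used by adfs_unix2adfs_time() agrees with the magic number 0x336e996a that the removed code added into the high 32 bits of the 40-bit centisecond stamp:

#include <stdio.h>

int main(void)
{
	long long epoch_s = 2208988800LL;		/* 1900-01-01 to 1970-01-01, in seconds */
	long long epoch_ns = 2208988800000000000LL;	/* the new constant, in nanoseconds */
	long long epoch_cs = epoch_ns / 10000000;	/* nanoseconds -> centiseconds */

	printf("%d\n", epoch_cs == epoch_s * 100);	/* 1: same offset, different units */
	printf("%#llx\n", epoch_cs >> 8);		/* 0x336e996a: the old high-word constant */
	return 0;
}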
diff --git a/fs/adfs/map.c b/fs/adfs/map.c
index f44d12cef5be..a81de80c45c1 100644
--- a/fs/adfs/map.c
+++ b/fs/adfs/map.c
@@ -4,6 +4,8 @@
*
* Copyright (C) 1997-2002 Russell King
*/
+#include <linux/slab.h>
+#include <linux/statfs.h>
#include <asm/unaligned.h>
#include "adfs.h"
@@ -66,54 +68,41 @@ static DEFINE_RWLOCK(adfs_map_lock);
static int lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen,
const u32 frag_id, unsigned int *offset)
{
- const unsigned int mapsize = dm->dm_endbit;
+ const unsigned int endbit = dm->dm_endbit;
const u32 idmask = (1 << idlen) - 1;
- unsigned char *map = dm->dm_bh->b_data + 4;
+ unsigned char *map = dm->dm_bh->b_data;
unsigned int start = dm->dm_startbit;
- unsigned int mapptr;
+ unsigned int freelink, fragend;
u32 frag;
+ frag = GET_FRAG_ID(map, 8, idmask & 0x7fff);
+ freelink = frag ? 8 + frag : 0;
+
do {
frag = GET_FRAG_ID(map, start, idmask);
- mapptr = start + idlen;
-
- /*
- * find end of fragment
- */
- {
- __le32 *_map = (__le32 *)map;
- u32 v = le32_to_cpu(_map[mapptr >> 5]) >> (mapptr & 31);
- while (v == 0) {
- mapptr = (mapptr & ~31) + 32;
- if (mapptr >= mapsize)
- goto error;
- v = le32_to_cpu(_map[mapptr >> 5]);
- }
-
- mapptr += 1 + ffz(~v);
+
+ fragend = find_next_bit_le(map, endbit, start + idlen);
+ if (fragend >= endbit)
+ goto error;
+
+ if (start == freelink) {
+ freelink += frag & 0x7fff;
+ } else if (frag == frag_id) {
+ unsigned int length = fragend + 1 - start;
+
+ if (*offset < length)
+ return start + *offset;
+ *offset -= length;
}
- if (frag == frag_id)
- goto found;
-again:
- start = mapptr;
- } while (mapptr < mapsize);
+ start = fragend + 1;
+ } while (start < endbit);
return -1;
error:
printk(KERN_ERR "adfs: oversized fragment 0x%x at 0x%x-0x%x\n",
- frag, start, mapptr);
+ frag, start, fragend);
return -1;
-
-found:
- {
- int length = mapptr - start;
- if (*offset >= length) {
- *offset -= length;
- goto again;
- }
- }
- return start + *offset;
}
/*
@@ -125,12 +114,12 @@ found:
static unsigned int
scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm)
{
- const unsigned int mapsize = dm->dm_endbit + 32;
+ const unsigned int endbit = dm->dm_endbit;
const unsigned int idlen = asb->s_idlen;
const unsigned int frag_idlen = idlen <= 15 ? idlen : 15;
const u32 idmask = (1 << frag_idlen) - 1;
unsigned char *map = dm->dm_bh->b_data;
- unsigned int start = 8, mapptr;
+ unsigned int start = 8, fragend;
u32 frag;
unsigned long total = 0;
@@ -149,29 +138,13 @@ scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm)
do {
start += frag;
- /*
- * get fragment id
- */
frag = GET_FRAG_ID(map, start, idmask);
- mapptr = start + idlen;
-
- /*
- * find end of fragment
- */
- {
- __le32 *_map = (__le32 *)map;
- u32 v = le32_to_cpu(_map[mapptr >> 5]) >> (mapptr & 31);
- while (v == 0) {
- mapptr = (mapptr & ~31) + 32;
- if (mapptr >= mapsize)
- goto error;
- v = le32_to_cpu(_map[mapptr >> 5]);
- }
-
- mapptr += 1 + ffz(~v);
- }
- total += mapptr - start;
+ fragend = find_next_bit_le(map, endbit, start + idlen);
+ if (fragend >= endbit)
+ goto error;
+
+ total += fragend + 1 - start;
} while (frag >= idlen + 1);
if (frag != 0)
@@ -220,10 +193,10 @@ found:
* total_free = E(free_in_zone_n)
* nzones
*/
-unsigned int
-adfs_map_free(struct super_block *sb)
+void adfs_map_statfs(struct super_block *sb, struct kstatfs *buf)
{
struct adfs_sb_info *asb = ADFS_SB(sb);
+ struct adfs_discrecord *dr = adfs_map_discrecord(asb->s_map);
struct adfs_discmap *dm;
unsigned int total = 0;
unsigned int zone;
@@ -235,7 +208,10 @@ adfs_map_free(struct super_block *sb)
total += scan_free_map(asb, dm++);
} while (--zone > 0);
- return signed_asl(total, asb->s_map2blk);
+ buf->f_blocks = adfs_disc_size(dr) >> sb->s_blocksize_bits;
+ buf->f_files = asb->s_ids_per_zone * asb->s_map_size;
+ buf->f_bavail =
+ buf->f_bfree = signed_asl(total, asb->s_map2blk);
}
int adfs_map_lookup(struct super_block *sb, u32 frag_id, unsigned int offset)
@@ -280,3 +256,152 @@ bad_fragment:
frag_id, zone, asb->s_map_size);
return 0;
}
+
+static unsigned char adfs_calczonecheck(struct super_block *sb, unsigned char *map)
+{
+ unsigned int v0, v1, v2, v3;
+ int i;
+
+ v0 = v1 = v2 = v3 = 0;
+ for (i = sb->s_blocksize - 4; i; i -= 4) {
+ v0 += map[i] + (v3 >> 8);
+ v3 &= 0xff;
+ v1 += map[i + 1] + (v0 >> 8);
+ v0 &= 0xff;
+ v2 += map[i + 2] + (v1 >> 8);
+ v1 &= 0xff;
+ v3 += map[i + 3] + (v2 >> 8);
+ v2 &= 0xff;
+ }
+ v0 += v3 >> 8;
+ v1 += map[1] + (v0 >> 8);
+ v2 += map[2] + (v1 >> 8);
+ v3 += map[3] + (v2 >> 8);
+
+ return v0 ^ v1 ^ v2 ^ v3;
+}
+
+static int adfs_checkmap(struct super_block *sb, struct adfs_discmap *dm)
+{
+ unsigned char crosscheck = 0, zonecheck = 1;
+ int i;
+
+ for (i = 0; i < ADFS_SB(sb)->s_map_size; i++) {
+ unsigned char *map;
+
+ map = dm[i].dm_bh->b_data;
+
+ if (adfs_calczonecheck(sb, map) != map[0]) {
+ adfs_error(sb, "zone %d fails zonecheck", i);
+ zonecheck = 0;
+ }
+ crosscheck ^= map[3];
+ }
+ if (crosscheck != 0xff)
+ adfs_error(sb, "crosscheck != 0xff");
+ return crosscheck == 0xff && zonecheck;
+}
+
+/*
+ * Lay out the map - the first zone contains a copy of the disc record,
+ * and the last zone must be limited to the size of the filesystem.
+ */
+static void adfs_map_layout(struct adfs_discmap *dm, unsigned int nzones,
+ struct adfs_discrecord *dr)
+{
+ unsigned int zone, zone_size;
+ u64 size;
+
+ zone_size = (8 << dr->log2secsize) - le16_to_cpu(dr->zone_spare);
+
+ dm[0].dm_bh = NULL;
+ dm[0].dm_startblk = 0;
+ dm[0].dm_startbit = 32 + ADFS_DR_SIZE_BITS;
+ dm[0].dm_endbit = 32 + zone_size;
+
+ for (zone = 1; zone < nzones; zone++) {
+ dm[zone].dm_bh = NULL;
+ dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
+ dm[zone].dm_startbit = 32;
+ dm[zone].dm_endbit = 32 + zone_size;
+ }
+
+ size = adfs_disc_size(dr) >> dr->log2bpmb;
+ size -= (nzones - 1) * zone_size - ADFS_DR_SIZE_BITS;
+ dm[nzones - 1].dm_endbit = 32 + size;
+}
+
+static int adfs_map_read(struct adfs_discmap *dm, struct super_block *sb,
+ unsigned int map_addr, unsigned int nzones)
+{
+ unsigned int zone;
+
+ for (zone = 0; zone < nzones; zone++) {
+ dm[zone].dm_bh = sb_bread(sb, map_addr + zone);
+ if (!dm[zone].dm_bh)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void adfs_map_relse(struct adfs_discmap *dm, unsigned int nzones)
+{
+ unsigned int zone;
+
+ for (zone = 0; zone < nzones; zone++)
+ brelse(dm[zone].dm_bh);
+}
+
+struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_discrecord *dr)
+{
+ struct adfs_sb_info *asb = ADFS_SB(sb);
+ struct adfs_discmap *dm;
+ unsigned int map_addr, zone_size, nzones;
+ int ret;
+
+ nzones = dr->nzones | dr->nzones_high << 8;
+ zone_size = (8 << dr->log2secsize) - le16_to_cpu(dr->zone_spare);
+
+ asb->s_idlen = dr->idlen;
+ asb->s_map_size = nzones;
+ asb->s_map2blk = dr->log2bpmb - dr->log2secsize;
+ asb->s_log2sharesize = dr->log2sharesize;
+ asb->s_ids_per_zone = zone_size / (asb->s_idlen + 1);
+
+ map_addr = (nzones >> 1) * zone_size -
+ ((nzones > 1) ? ADFS_DR_SIZE_BITS : 0);
+ map_addr = signed_asl(map_addr, asb->s_map2blk);
+
+ dm = kmalloc_array(nzones, sizeof(*dm), GFP_KERNEL);
+ if (dm == NULL) {
+ adfs_error(sb, "not enough memory");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ adfs_map_layout(dm, nzones, dr);
+
+ ret = adfs_map_read(dm, sb, map_addr, nzones);
+ if (ret) {
+ adfs_error(sb, "unable to read map");
+ goto error_free;
+ }
+
+ if (adfs_checkmap(sb, dm))
+ return dm;
+
+ adfs_error(sb, "map corrupted");
+
+error_free:
+ adfs_map_relse(dm, nzones);
+ kfree(dm);
+ return ERR_PTR(-EIO);
+}
+
+void adfs_free_map(struct super_block *sb)
+{
+ struct adfs_sb_info *asb = ADFS_SB(sb);
+
+ adfs_map_relse(asb->s_map, asb->s_map_size);
+ kfree(asb->s_map);
+}
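
For readers following the lookup_zone() rewrite above: find_next_bit_le()
returns the position of the next set bit, which in the ADFS map is the
"stop" bit terminating a fragment, so the open-coded word-at-a-time ffz()
scan goes away. A minimal userspace model of that search (illustrative
only; the real kernel helper operates on long-sized little-endian words):

    #include <stdio.h>

    static unsigned int next_set_bit(const unsigned char *map,
                                     unsigned int size, unsigned int start)
    {
        unsigned int bit;

        for (bit = start; bit < size; bit++)
            if (map[bit / 8] & (1u << (bit % 8)))
                return bit;
        return size;    /* not found: mirrors "fragend >= endbit" */
    }

    int main(void)
    {
        unsigned char map[4] = { 0x00, 0x00, 0x20, 0x00 }; /* bit 21 set */
        unsigned int fragend = next_set_bit(map, 32, 15);

        printf("fragment ends at bit %u\n", fragend);    /* prints 21 */
        return 0;
    }
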
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 65b04ebb51c3..a3cc8ecb50da 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -88,59 +88,11 @@ static int adfs_checkdiscrecord(struct adfs_discrecord *dr)
return 0;
}
-static unsigned char adfs_calczonecheck(struct super_block *sb, unsigned char *map)
-{
- unsigned int v0, v1, v2, v3;
- int i;
-
- v0 = v1 = v2 = v3 = 0;
- for (i = sb->s_blocksize - 4; i; i -= 4) {
- v0 += map[i] + (v3 >> 8);
- v3 &= 0xff;
- v1 += map[i + 1] + (v0 >> 8);
- v0 &= 0xff;
- v2 += map[i + 2] + (v1 >> 8);
- v1 &= 0xff;
- v3 += map[i + 3] + (v2 >> 8);
- v2 &= 0xff;
- }
- v0 += v3 >> 8;
- v1 += map[1] + (v0 >> 8);
- v2 += map[2] + (v1 >> 8);
- v3 += map[3] + (v2 >> 8);
-
- return v0 ^ v1 ^ v2 ^ v3;
-}
-
-static int adfs_checkmap(struct super_block *sb, struct adfs_discmap *dm)
-{
- unsigned char crosscheck = 0, zonecheck = 1;
- int i;
-
- for (i = 0; i < ADFS_SB(sb)->s_map_size; i++) {
- unsigned char *map;
-
- map = dm[i].dm_bh->b_data;
-
- if (adfs_calczonecheck(sb, map) != map[0]) {
- adfs_error(sb, "zone %d fails zonecheck", i);
- zonecheck = 0;
- }
- crosscheck ^= map[3];
- }
- if (crosscheck != 0xff)
- adfs_error(sb, "crosscheck != 0xff");
- return crosscheck == 0xff && zonecheck;
-}
-
static void adfs_put_super(struct super_block *sb)
{
- int i;
struct adfs_sb_info *asb = ADFS_SB(sb);
- for (i = 0; i < asb->s_map_size; i++)
- brelse(asb->s_map[i].dm_bh);
- kfree(asb->s_map);
+ adfs_free_map(sb);
kfree_rcu(asb, rcu);
}
@@ -249,16 +201,13 @@ static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct adfs_sb_info *sbi = ADFS_SB(sb);
- struct adfs_discrecord *dr = adfs_map_discrecord(sbi->s_map);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+ adfs_map_statfs(sb, buf);
+
buf->f_type = ADFS_SUPER_MAGIC;
buf->f_namelen = sbi->s_namelen;
buf->f_bsize = sb->s_blocksize;
- buf->f_blocks = adfs_disc_size(dr) >> sb->s_blocksize_bits;
- buf->f_files = sbi->s_ids_per_zone * sbi->s_map_size;
- buf->f_bavail =
- buf->f_bfree = adfs_map_free(sb);
buf->f_ffree = (long)(buf->f_bfree * buf->f_files) / (long)buf->f_blocks;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
@@ -282,6 +231,12 @@ static void adfs_free_inode(struct inode *inode)
kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
}
+static int adfs_drop_inode(struct inode *inode)
+{
+ /* always drop inodes if we are read-only */
+ return !IS_ENABLED(CONFIG_ADFS_FS_RW) || IS_RDONLY(inode);
+}
+
static void init_once(void *foo)
{
struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
@@ -314,7 +269,7 @@ static void destroy_inodecache(void)
static const struct super_operations adfs_sops = {
.alloc_inode = adfs_alloc_inode,
.free_inode = adfs_free_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = adfs_drop_inode,
.write_inode = adfs_write_inode,
.put_super = adfs_put_super,
.statfs = adfs_statfs,
@@ -322,66 +277,94 @@ static const struct super_operations adfs_sops = {
.show_options = adfs_show_options,
};
-static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_discrecord *dr)
+static int adfs_probe(struct super_block *sb, unsigned int offset, int silent,
+ int (*validate)(struct super_block *sb,
+ struct buffer_head *bh,
+ struct adfs_discrecord **bhp))
{
- struct adfs_discmap *dm;
- unsigned int map_addr, zone_size, nzones;
- int i, zone;
struct adfs_sb_info *asb = ADFS_SB(sb);
+ struct adfs_discrecord *dr;
+ struct buffer_head *bh;
+ unsigned int blocksize = BLOCK_SIZE;
+ int ret, try;
+
+ for (try = 0; try < 2; try++) {
+ /* try to set the requested block size */
+ if (sb->s_blocksize != blocksize &&
+ !sb_set_blocksize(sb, blocksize)) {
+ if (!silent)
+ adfs_msg(sb, KERN_ERR,
+ "error: unsupported blocksize");
+ return -EINVAL;
+ }
- nzones = asb->s_map_size;
- zone_size = (8 << dr->log2secsize) - le16_to_cpu(dr->zone_spare);
- map_addr = (nzones >> 1) * zone_size -
- ((nzones > 1) ? ADFS_DR_SIZE_BITS : 0);
- map_addr = signed_asl(map_addr, asb->s_map2blk);
+ /* read the buffer */
+ bh = sb_bread(sb, offset >> sb->s_blocksize_bits);
+ if (!bh) {
+ adfs_msg(sb, KERN_ERR,
+ "error: unable to read block %u, try %d",
+ offset >> sb->s_blocksize_bits, try);
+ return -EIO;
+ }
+
+ /* validate it */
+ ret = validate(sb, bh, &dr);
+ if (ret) {
+ brelse(bh);
+ return ret;
+ }
- asb->s_ids_per_zone = zone_size / (asb->s_idlen + 1);
+ /* does the block size match the filesystem block size? */
+ blocksize = 1 << dr->log2secsize;
+ if (sb->s_blocksize == blocksize) {
+ asb->s_map = adfs_read_map(sb, dr);
+ brelse(bh);
+ return PTR_ERR_OR_ZERO(asb->s_map);
+ }
- dm = kmalloc_array(nzones, sizeof(*dm), GFP_KERNEL);
- if (dm == NULL) {
- adfs_error(sb, "not enough memory");
- return ERR_PTR(-ENOMEM);
+ brelse(bh);
}
- for (zone = 0; zone < nzones; zone++, map_addr++) {
- dm[zone].dm_startbit = 0;
- dm[zone].dm_endbit = zone_size;
- dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
- dm[zone].dm_bh = sb_bread(sb, map_addr);
+ return -EIO;
+}
- if (!dm[zone].dm_bh) {
- adfs_error(sb, "unable to read map");
- goto error_free;
- }
- }
+static int adfs_validate_bblk(struct super_block *sb, struct buffer_head *bh,
+ struct adfs_discrecord **drp)
+{
+ struct adfs_discrecord *dr;
+ unsigned char *b_data;
- /* adjust the limits for the first and last map zones */
- i = zone - 1;
- dm[0].dm_startblk = 0;
- dm[0].dm_startbit = ADFS_DR_SIZE_BITS;
- dm[i].dm_endbit = (adfs_disc_size(dr) >> dr->log2bpmb) +
- (ADFS_DR_SIZE_BITS - i * zone_size);
+ b_data = bh->b_data + (ADFS_DISCRECORD % sb->s_blocksize);
+ if (adfs_checkbblk(b_data))
+ return -EILSEQ;
- if (adfs_checkmap(sb, dm))
- return dm;
+ /* Do some sanity checks on the ADFS disc record */
+ dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
+ if (adfs_checkdiscrecord(dr))
+ return -EILSEQ;
+
+ *drp = dr;
+ return 0;
+}
- adfs_error(sb, "map corrupted");
+static int adfs_validate_dr0(struct super_block *sb, struct buffer_head *bh,
+ struct adfs_discrecord **drp)
+{
+ struct adfs_discrecord *dr;
-error_free:
- while (--zone >= 0)
- brelse(dm[zone].dm_bh);
+ /* Do some sanity checks on the ADFS disc record */
+ dr = (struct adfs_discrecord *)(bh->b_data + 4);
+ if (adfs_checkdiscrecord(dr) || dr->nzones_high || dr->nzones != 1)
+ return -EILSEQ;
- kfree(dm);
- return ERR_PTR(-EIO);
+ *drp = dr;
+ return 0;
}
static int adfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct adfs_discrecord *dr;
- struct buffer_head *bh;
struct object_info root_obj;
- unsigned char *b_data;
- unsigned int blocksize;
struct adfs_sb_info *asb;
struct inode *root;
int ret = -EINVAL;
@@ -391,7 +374,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
asb = kzalloc(sizeof(*asb), GFP_KERNEL);
if (!asb)
return -ENOMEM;
+
sb->s_fs_info = asb;
+ sb->s_magic = ADFS_SUPER_MAGIC;
+ sb->s_time_gran = 10000000;
/* set default options */
asb->s_uid = GLOBAL_ROOT_UID;
@@ -403,78 +389,21 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
if (parse_options(sb, asb, data))
goto error;
- sb_set_blocksize(sb, BLOCK_SIZE);
- if (!(bh = sb_bread(sb, ADFS_DISCRECORD / BLOCK_SIZE))) {
- adfs_msg(sb, KERN_ERR, "error: unable to read superblock");
- ret = -EIO;
- goto error;
- }
-
- b_data = bh->b_data + (ADFS_DISCRECORD % BLOCK_SIZE);
-
- if (adfs_checkbblk(b_data)) {
- ret = -EINVAL;
- goto error_badfs;
- }
-
- dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
-
- /*
- * Do some sanity checks on the ADFS disc record
- */
- if (adfs_checkdiscrecord(dr)) {
- ret = -EINVAL;
- goto error_badfs;
- }
-
- blocksize = 1 << dr->log2secsize;
- brelse(bh);
-
- if (sb_set_blocksize(sb, blocksize)) {
- bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
- if (!bh) {
- adfs_msg(sb, KERN_ERR,
- "error: couldn't read superblock on 2nd try.");
- ret = -EIO;
- goto error;
- }
- b_data = bh->b_data + (ADFS_DISCRECORD % sb->s_blocksize);
- if (adfs_checkbblk(b_data)) {
- adfs_msg(sb, KERN_ERR,
- "error: disc record mismatch, very weird!");
- ret = -EINVAL;
- goto error_free_bh;
- }
- dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
- } else {
+ /* Try to probe the filesystem boot block */
+ ret = adfs_probe(sb, ADFS_DISCRECORD, 1, adfs_validate_bblk);
+ if (ret == -EILSEQ)
+ ret = adfs_probe(sb, 0, silent, adfs_validate_dr0);
+ if (ret == -EILSEQ) {
if (!silent)
adfs_msg(sb, KERN_ERR,
- "error: unsupported blocksize");
+ "error: can't find an ADFS filesystem on dev %s.",
+ sb->s_id);
ret = -EINVAL;
- goto error;
}
+ if (ret)
+ goto error;
- /*
- * blocksize on this device should now be set to the ADFS log2secsize
- */
-
- sb->s_magic = ADFS_SUPER_MAGIC;
- asb->s_idlen = dr->idlen;
- asb->s_map_size = dr->nzones | (dr->nzones_high << 8);
- asb->s_map2blk = dr->log2bpmb - dr->log2secsize;
- asb->s_log2sharesize = dr->log2sharesize;
-
- asb->s_map = adfs_read_map(sb, dr);
- if (IS_ERR(asb->s_map)) {
- ret = PTR_ERR(asb->s_map);
- goto error_free_bh;
- }
-
- brelse(bh);
-
- /*
- * set up enough so that we can read an inode
- */
+ /* set up enough so that we can read an inode */
sb->s_op = &adfs_sops;
dr = adfs_map_discrecord(asb->s_map);
@@ -511,23 +440,13 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
root = adfs_iget(sb, &root_obj);
sb->s_root = d_make_root(root);
if (!sb->s_root) {
- int i;
- for (i = 0; i < asb->s_map_size; i++)
- brelse(asb->s_map[i].dm_bh);
- kfree(asb->s_map);
+ adfs_free_map(sb);
adfs_error(sb, "get root inode failed\n");
ret = -EIO;
goto error;
}
return 0;
-error_badfs:
- if (!silent)
- adfs_msg(sb, KERN_ERR,
- "error: can't find an ADFS filesystem on dev %s.",
- sb->s_id);
-error_free_bh:
- brelse(bh);
error:
sb->s_fs_info = NULL;
kfree(asb);
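
The adfs_probe() helper above retries the boot-block read at most twice:
once at the default BLOCK_SIZE, and once more at the sector size the disc
record itself reports. A rough userspace sketch of that control flow (the
names, the stub validate() and the hard-coded record are all hypothetical):

    #include <stdio.h>

    struct disc_record { unsigned int log2secsize; };

    static int validate(const void *blk, struct disc_record *dr)
    {
        dr->log2secsize = 10;    /* pretend: 1024-byte sectors */
        return 0;
    }

    int main(void)
    {
        unsigned int blocksize = 512;    /* BLOCK_SIZE, first pass */
        struct disc_record dr;
        int try;

        for (try = 0; try < 2; try++) {
            char blk[4096] = { 0 };    /* stand-in for sb_bread() */

            if (validate(blk, &dr))
                return 1;
            if (blocksize == (1u << dr.log2secsize)) {
                printf("probed at %u bytes on try %d\n",
                       blocksize, try);
                return 0;
            }
            blocksize = 1u << dr.log2secsize;    /* retry at native size */
        }
        return 1;
    }

The kernel version threads validate() through as a function pointer so the
same loop serves both the boot-block and the sector-0 disc-record layouts.
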
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index fd5133e26a38..78ba5f932287 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -134,8 +134,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
_leave(" = -ENAMETOOLONG");
return ERR_PTR(-ENAMETOOLONG);
}
- if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
+
+ /* Prohibit cell names that contain unprintable characters, '/' or
+ * '@', or that begin with a dot. This also precludes "@cell".
+ */
+ if (name[0] == '.')
return ERR_PTR(-EINVAL);
+ for (i = 0; i < namelen; i++) {
+ char ch = name[i];
+ if (!isprint(ch) || ch == '/' || ch == '@')
+ return ERR_PTR(-EINVAL);
+ }
_enter("%*.*s,%s", namelen, namelen, name, addresses);
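
The new check rejects cell names that begin with a dot or contain
unprintable characters, '/' or '@' (which also covers the old "@cell"
special case). A runnable userspace restatement of the same predicate:

    #include <ctype.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool cell_name_valid(const char *name, size_t len)
    {
        size_t i;

        if (len == 0 || name[0] == '.')
            return false;
        for (i = 0; i < len; i++) {
            unsigned char ch = name[i];

            if (!isprint(ch) || ch == '/' || ch == '@')
                return false;
        }
        return true;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               cell_name_valid("example.org", 11),    /* 1 */
               cell_name_valid("@cell", 5),           /* 0 */
               cell_name_valid(".hidden", 7));        /* 0 */
        return 0;
    }
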
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 7f8a9b3137bf..dda7a9a66848 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -38,13 +38,13 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int afs_show_devname(struct seq_file *m, struct dentry *root);
static int afs_show_options(struct seq_file *m, struct dentry *root);
static int afs_init_fs_context(struct fs_context *fc);
-static const struct fs_parameter_description afs_fs_parameters;
+static const struct fs_parameter_spec afs_fs_parameters[];
struct file_system_type afs_fs_type = {
.owner = THIS_MODULE,
.name = "afs",
.init_fs_context = afs_init_fs_context,
- .parameters = &afs_fs_parameters,
+ .parameters = afs_fs_parameters,
.kill_sb = afs_kill_super,
.fs_flags = FS_RENAME_DOES_D_MOVE,
};
@@ -73,28 +73,22 @@ enum afs_param {
Opt_source,
};
-static const struct fs_parameter_spec afs_param_specs[] = {
- fsparam_flag ("autocell", Opt_autocell),
- fsparam_flag ("dyn", Opt_dyn),
- fsparam_enum ("flock", Opt_flock),
- fsparam_string("source", Opt_source),
+static const struct constant_table afs_param_flock[] = {
+ {"local", afs_flock_mode_local },
+ {"openafs", afs_flock_mode_openafs },
+ {"strict", afs_flock_mode_strict },
+ {"write", afs_flock_mode_write },
{}
};
-static const struct fs_parameter_enum afs_param_enums[] = {
- { Opt_flock, "local", afs_flock_mode_local },
- { Opt_flock, "openafs", afs_flock_mode_openafs },
- { Opt_flock, "strict", afs_flock_mode_strict },
- { Opt_flock, "write", afs_flock_mode_write },
+static const struct fs_parameter_spec afs_fs_parameters[] = {
+ fsparam_flag ("autocell", Opt_autocell),
+ fsparam_flag ("dyn", Opt_dyn),
+ fsparam_enum ("flock", Opt_flock, afs_param_flock),
+ fsparam_string("source", Opt_source),
{}
};
-static const struct fs_parameter_description afs_fs_parameters = {
- .name = "kAFS",
- .specs = afs_param_specs,
- .enums = afs_param_enums,
-};
-
/*
* initialise the filesystem
*/
@@ -323,7 +317,7 @@ static int afs_parse_param(struct fs_context *fc, struct fs_parameter *param)
struct afs_fs_context *ctx = fc->fs_private;
int opt;
- opt = fs_parse(fc, &afs_fs_parameters, param, &result);
+ opt = fs_parse(fc, afs_fs_parameters, param, &result);
if (opt < 0)
return opt;
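
Context for the afs conversion above: with the v5.6 mount API the separate
fs_parameter_description/fs_parameter_enum tables collapse into one flat
fs_parameter_spec array whose enum entries reference their constant_table
directly, and fs_parse() takes that array. A sketch with hypothetical
"example_" names:

    #include <linux/fs_context.h>
    #include <linux/fs_parser.h>

    enum { Opt_mode, Opt_source, Opt_verbose };

    static const struct constant_table example_modes[] = {
        { "fast", 0 },
        { "safe", 1 },
        {}
    };

    static const struct fs_parameter_spec example_params[] = {
        fsparam_flag  ("verbose", Opt_verbose),
        fsparam_enum  ("mode",    Opt_mode, example_modes),
        fsparam_string("source",  Opt_source),
        {}
    };

    static int example_parse_param(struct fs_context *fc,
                                   struct fs_parameter *param)
    {
        struct fs_parse_result result;
        int opt = fs_parse(fc, example_params, param, &result);

        if (opt < 0)
            return opt;
        /* for Opt_mode, result.uint_32 holds the constant_table value */
        return 0;
    }
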
diff --git a/fs/aio.c b/fs/aio.c
index a9fbad2ce5e6..5f3d3d814928 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1610,6 +1610,14 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
return 0;
}
+static void aio_poll_put_work(struct work_struct *work)
+{
+ struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+ struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+
+ iocb_put(iocb);
+}
+
static void aio_poll_complete_work(struct work_struct *work)
{
struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1674,6 +1682,8 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
list_del_init(&req->wait.entry);
if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+ struct kioctx *ctx = iocb->ki_ctx;
+
/*
* Try to complete the iocb inline if we can. Use
* irqsave/irqrestore because not all filesystems (e.g. fuse)
@@ -1683,8 +1693,14 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
list_del(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
req->done = true;
- spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
- iocb_put(iocb);
+ if (iocb->ki_eventfd && eventfd_signal_count()) {
+ iocb = NULL;
+ INIT_WORK(&req->work, aio_poll_put_work);
+ schedule_work(&req->work);
+ }
+ spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+ if (iocb)
+ iocb_put(iocb);
} else {
schedule_work(&req->work);
}
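
The aio change above defers the final iocb_put() to a workqueue when
completing inline could recurse into eventfd_signal(). The underlying
idiom, reduced to a kernel-style sketch with purely illustrative names:

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct foo {
        struct work_struct work;
        struct kref ref;
    };

    static void foo_release(struct kref *ref)
    {
        kfree(container_of(ref, struct foo, ref));
    }

    static void foo_put_work(struct work_struct *work)
    {
        struct foo *f = container_of(work, struct foo, work);

        kref_put(&f->ref, foo_release);    /* runs in workqueue context */
    }

    static void foo_put_async(struct foo *f)
    {
        INIT_WORK(&f->work, foo_put_work);
        schedule_work(&f->work);
    }
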
diff --git a/fs/attr.c b/fs/attr.c
index df28035aa23e..b4bbdbd4c8ca 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -183,18 +183,12 @@ void setattr_copy(struct inode *inode, const struct iattr *attr)
inode->i_uid = attr->ia_uid;
if (ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if (ia_valid & ATTR_ATIME) {
- inode->i_atime = timestamp_truncate(attr->ia_atime,
- inode);
- }
- if (ia_valid & ATTR_MTIME) {
- inode->i_mtime = timestamp_truncate(attr->ia_mtime,
- inode);
- }
- if (ia_valid & ATTR_CTIME) {
- inode->i_ctime = timestamp_truncate(attr->ia_ctime,
- inode);
- }
+ if (ia_valid & ATTR_ATIME)
+ inode->i_atime = attr->ia_atime;
+ if (ia_valid & ATTR_MTIME)
+ inode->i_mtime = attr->ia_mtime;
+ if (ia_valid & ATTR_CTIME)
+ inode->i_ctime = attr->ia_ctime;
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
@@ -268,8 +262,13 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
attr->ia_ctime = now;
if (!(ia_valid & ATTR_ATIME_SET))
attr->ia_atime = now;
+ else
+ attr->ia_atime = timestamp_truncate(attr->ia_atime, inode);
if (!(ia_valid & ATTR_MTIME_SET))
attr->ia_mtime = now;
+ else
+ attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode);
+
if (ia_valid & ATTR_KILL_PRIV) {
error = security_inode_need_killpriv(dentry);
if (error < 0)
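
The attr.c change moves timestamp truncation out of setattr_copy() and
into notify_change(), so explicitly-set times are clamped to the
filesystem's granularity once, up front. A userspace model of what
timestamp_truncate() does with s_time_gran (in nanoseconds; the adfs hunk
earlier sets a 10ms granularity of 10000000):

    #include <stdio.h>

    struct ts { long long sec; long nsec; };

    static struct ts truncate_ts(struct ts t, long gran_ns)
    {
        if (gran_ns == 1)
            return t;            /* 1ns granularity: keep everything */
        if (gran_ns == 1000000000L)
            t.nsec = 0;          /* whole seconds only */
        else
            t.nsec -= t.nsec % gran_ns;
        return t;
    }

    int main(void)
    {
        struct ts t = truncate_ts((struct ts){ 1577836800, 123456789 },
                                  10000000L);

        printf("%lld.%09ld\n", t.sec, t.nsec);    /* ...120000000 */
        return 0;
    }
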
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index ecd8d2698515..f4713ea76e82 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -97,7 +97,7 @@ static struct linux_binfmt elf_format = {
.min_coredump = ELF_EXEC_PAGESIZE,
};
-#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
+#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
static int set_brk(unsigned long start, unsigned long end, int prot)
{
@@ -161,9 +161,11 @@ static int padzero(unsigned long elf_bss)
#endif
static int
-create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
- unsigned long load_addr, unsigned long interp_load_addr)
+create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
+ unsigned long load_addr, unsigned long interp_load_addr,
+ unsigned long e_entry)
{
+ struct mm_struct *mm = current->mm;
unsigned long p = bprm->p;
int argc = bprm->argc;
int envc = bprm->envc;
@@ -176,7 +178,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
unsigned char k_rand_bytes[16];
int items;
elf_addr_t *elf_info;
- int ei_index = 0;
+ int ei_index;
const struct cred *cred = current_cred();
struct vm_area_struct *vma;
@@ -226,12 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
return -EFAULT;
/* Create the ELF interpreter info */
- elf_info = (elf_addr_t *)current->mm->saved_auxv;
+ elf_info = (elf_addr_t *)mm->saved_auxv;
/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
do { \
- elf_info[ei_index++] = id; \
- elf_info[ei_index++] = val; \
+ *elf_info++ = id; \
+ *elf_info++ = val; \
} while (0)
#ifdef ARCH_DLINFO
@@ -251,7 +253,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
NEW_AUX_ENT(AT_BASE, interp_load_addr);
NEW_AUX_ENT(AT_FLAGS, 0);
- NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
+ NEW_AUX_ENT(AT_ENTRY, e_entry);
NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
@@ -275,12 +277,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
}
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
- memset(&elf_info[ei_index], 0,
- sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
+ memset(elf_info, 0, (char *)mm->saved_auxv +
+ sizeof(mm->saved_auxv) - (char *)elf_info);
/* And advance past the AT_NULL entry. */
- ei_index += 2;
+ elf_info += 2;
+ ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
sp = STACK_ADD(p, ei_index);
items = (argc + 1) + (envc + 1) + 1;
@@ -299,7 +302,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
* Grow the stack manually; some architectures have a limit on how
* far ahead a user-space access may be in order to grow the stack.
*/
- vma = find_extend_vma(current->mm, bprm->p);
+ vma = find_extend_vma(mm, bprm->p);
if (!vma)
return -EFAULT;
@@ -308,7 +311,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
return -EFAULT;
/* Populate list of argv pointers back to argv strings. */
- p = current->mm->arg_end = current->mm->arg_start;
+ p = mm->arg_end = mm->arg_start;
while (argc-- > 0) {
size_t len;
if (__put_user((elf_addr_t)p, sp++))
@@ -320,10 +323,10 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
}
if (__put_user(0, sp++))
return -EFAULT;
- current->mm->arg_end = p;
+ mm->arg_end = p;
/* Populate list of envp pointers back to envp strings. */
- current->mm->env_end = current->mm->env_start = p;
+ mm->env_end = mm->env_start = p;
while (envc-- > 0) {
size_t len;
if (__put_user((elf_addr_t)p, sp++))
@@ -335,10 +338,10 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
}
if (__put_user(0, sp++))
return -EFAULT;
- current->mm->env_end = p;
+ mm->env_end = p;
/* Put the elf_info on the stack in the right place. */
- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
+ if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
return -EFAULT;
return 0;
}
@@ -689,15 +692,17 @@ static int load_elf_binary(struct linux_binprm *bprm)
int bss_prot = 0;
int retval, i;
unsigned long elf_entry;
+ unsigned long e_entry;
unsigned long interp_load_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc __maybe_unused = 0;
int executable_stack = EXSTACK_DEFAULT;
+ struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
struct {
- struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
} *loc;
struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
+ struct mm_struct *mm;
struct pt_regs *regs;
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
@@ -705,30 +710,27 @@ static int load_elf_binary(struct linux_binprm *bprm)
retval = -ENOMEM;
goto out_ret;
}
-
- /* Get the exec-header */
- loc->elf_ex = *((struct elfhdr *)bprm->buf);
retval = -ENOEXEC;
/* First of all, some simple consistency checks */
- if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
+ if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
goto out;
- if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
+ if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
goto out;
- if (!elf_check_arch(&loc->elf_ex))
+ if (!elf_check_arch(elf_ex))
goto out;
- if (elf_check_fdpic(&loc->elf_ex))
+ if (elf_check_fdpic(elf_ex))
goto out;
if (!bprm->file->f_op->mmap)
goto out;
- elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
+ elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
if (!elf_phdata)
goto out;
elf_ppnt = elf_phdata;
- for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
char *elf_interpreter;
if (elf_ppnt->p_type != PT_INTERP)
@@ -782,7 +784,7 @@ out_free_interp:
}
elf_ppnt = elf_phdata;
- for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
+ for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
switch (elf_ppnt->p_type) {
case PT_GNU_STACK:
if (elf_ppnt->p_flags & PF_X)
@@ -792,7 +794,7 @@ out_free_interp:
break;
case PT_LOPROC ... PT_HIPROC:
- retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
+ retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
bprm->file, false,
&arch_state);
if (retval)
@@ -836,7 +838,7 @@ out_free_interp:
* still possible to return an error to the code that invoked
* the exec syscall.
*/
- retval = arch_check_elf(&loc->elf_ex,
+ retval = arch_check_elf(elf_ex,
!!interpreter, &loc->interp_elf_ex,
&arch_state);
if (retval)
@@ -849,8 +851,8 @@ out_free_interp:
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
may depend on the personality. */
- SET_PERSONALITY2(loc->elf_ex, &arch_state);
- if (elf_read_implies_exec(loc->elf_ex, executable_stack))
+ SET_PERSONALITY2(*elf_ex, &arch_state);
+ if (elf_read_implies_exec(*elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
@@ -877,7 +879,7 @@ out_free_interp:
/* Now we do a little grungy work by mmapping the ELF image into
the correct location in memory. */
for(i = 0, elf_ppnt = elf_phdata;
- i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ i < elf_ex->e_phnum; i++, elf_ppnt++) {
int elf_prot, elf_flags;
unsigned long k, vaddr;
unsigned long total_size = 0;
@@ -921,9 +923,9 @@ out_free_interp:
* If we are loading ET_EXEC or we have already performed
* the ET_DYN load_addr calculations, proceed normally.
*/
- if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
+ if (elf_ex->e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
- } else if (loc->elf_ex.e_type == ET_DYN) {
+ } else if (elf_ex->e_type == ET_DYN) {
/*
* This logic is run once for the first LOAD Program
* Header for ET_DYN binaries to calculate the
@@ -972,7 +974,7 @@ out_free_interp:
load_bias = ELF_PAGESTART(load_bias - vaddr);
total_size = total_mapping_size(elf_phdata,
- loc->elf_ex.e_phnum);
+ elf_ex->e_phnum);
if (!total_size) {
retval = -EINVAL;
goto out_free_dentry;
@@ -990,7 +992,7 @@ out_free_interp:
if (!load_addr_set) {
load_addr_set = 1;
load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
- if (loc->elf_ex.e_type == ET_DYN) {
+ if (elf_ex->e_type == ET_DYN) {
load_bias += error -
ELF_PAGESTART(load_bias + vaddr);
load_addr += load_bias;
@@ -998,7 +1000,7 @@ out_free_interp:
}
}
k = elf_ppnt->p_vaddr;
- if (k < start_code)
+ if ((elf_ppnt->p_flags & PF_X) && k < start_code)
start_code = k;
if (start_data < k)
start_data = k;
@@ -1031,7 +1033,7 @@ out_free_interp:
}
}
- loc->elf_ex.e_entry += load_bias;
+ e_entry = elf_ex->e_entry + load_bias;
elf_bss += load_bias;
elf_brk += load_bias;
start_code += load_bias;
@@ -1074,7 +1076,7 @@ out_free_interp:
allow_write_access(interpreter);
fput(interpreter);
} else {
- elf_entry = loc->elf_ex.e_entry;
+ elf_entry = e_entry;
if (BAD_ADDR(elf_entry)) {
retval = -EINVAL;
goto out_free_dentry;
@@ -1092,15 +1094,17 @@ out_free_interp:
goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
- retval = create_elf_tables(bprm, &loc->elf_ex,
- load_addr, interp_load_addr);
+ retval = create_elf_tables(bprm, elf_ex,
+ load_addr, interp_load_addr, e_entry);
if (retval < 0)
goto out;
- current->mm->end_code = end_code;
- current->mm->start_code = start_code;
- current->mm->start_data = start_data;
- current->mm->end_data = end_data;
- current->mm->start_stack = bprm->p;
+
+ mm = current->mm;
+ mm->end_code = end_code;
+ mm->start_code = start_code;
+ mm->start_data = start_data;
+ mm->end_data = end_data;
+ mm->start_stack = bprm->p;
if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
/*
@@ -1111,12 +1115,11 @@ out_free_interp:
* growing down), and into the unused ELF_ET_DYN_BASE region.
*/
if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
- loc->elf_ex.e_type == ET_DYN && !interpreter)
- current->mm->brk = current->mm->start_brk =
- ELF_ET_DYN_BASE;
+ elf_ex->e_type == ET_DYN && !interpreter) {
+ mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
+ }
- current->mm->brk = current->mm->start_brk =
- arch_randomize_brk(current->mm);
+ mm->brk = mm->start_brk = arch_randomize_brk(mm);
#ifdef compat_brk_randomized
current->brk_randomized = 1;
#endif
@@ -1574,6 +1577,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
*/
static int fill_files_note(struct memelfnote *note)
{
+ struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned count, size, names_ofs, remaining, n;
user_long_t *data;
@@ -1581,7 +1585,7 @@ static int fill_files_note(struct memelfnote *note)
char *name_base, *name_curpos;
/* *Estimated* file count and total data size needed */
- count = current->mm->map_count;
+ count = mm->map_count;
if (count > UINT_MAX / 64)
return -EINVAL;
size = count * 64;
@@ -1591,6 +1595,10 @@ static int fill_files_note(struct memelfnote *note)
if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
return -EINVAL;
size = round_up(size, PAGE_SIZE);
+ /*
+ * "size" can be 0 here legitimately.
+ * Let the allocation fail with ENOMEM and omit the NT_FILE
+ * section, which would be empty anyway.
+ */
data = kvmalloc(size, GFP_KERNEL);
if (ZERO_OR_NULL_PTR(data))
return -ENOMEM;
@@ -1599,7 +1607,7 @@ static int fill_files_note(struct memelfnote *note)
name_base = name_curpos = ((char *)data) + names_ofs;
remaining = size - names_ofs;
count = 0;
- for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+ for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
struct file *file;
const char *filename;
@@ -1633,10 +1641,10 @@ static int fill_files_note(struct memelfnote *note)
data[0] = count;
data[1] = PAGE_SIZE;
/*
- * Count usually is less than current->mm->map_count,
+ * Count usually is less than mm->map_count,
* we need to move filenames down.
*/
- n = current->mm->map_count - count;
+ n = mm->map_count - count;
if (n != 0) {
unsigned shift_bytes = n * 3 * sizeof(data[0]);
memmove(name_base - shift_bytes, name_base,
@@ -2182,7 +2190,7 @@ static int elf_core_dump(struct coredump_params *cprm)
int segs, i;
size_t vma_data_size = 0;
struct vm_area_struct *vma, *gate_vma;
- struct elfhdr *elf = NULL;
+ struct elfhdr elf;
loff_t offset = 0, dataoff;
struct elf_note_info info = { };
struct elf_phdr *phdr4note = NULL;
@@ -2203,10 +2211,6 @@ static int elf_core_dump(struct coredump_params *cprm)
* exists while dumping the mm->vm_next areas to the core file.
*/
- /* alloc memory for large data structures: too large to be on stack */
- elf = kmalloc(sizeof(*elf), GFP_KERNEL);
- if (!elf)
- goto out;
/*
* The number of segs is recorded in the ELF header as a 16bit value.
* Please check the DEFAULT_MAX_MAP_COUNT definition when you modify this.
@@ -2230,7 +2234,7 @@ static int elf_core_dump(struct coredump_params *cprm)
* Collect all the non-memory information about the process for the
* notes. This also sets up the file header.
*/
- if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
+ if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
goto cleanup;
has_dumped = 1;
@@ -2238,7 +2242,7 @@ static int elf_core_dump(struct coredump_params *cprm)
fs = get_fs();
set_fs(KERNEL_DS);
- offset += sizeof(*elf); /* Elf header */
+ offset += sizeof(elf); /* Elf header */
offset += segs * sizeof(struct elf_phdr); /* Program headers */
/* Write notes phdr entry */
@@ -2257,11 +2261,13 @@ static int elf_core_dump(struct coredump_params *cprm)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
- if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
- goto end_coredump;
+ /*
+ * A process with zero VMAs will get ZERO_SIZE_PTR here.
+ * Let the coredump continue so that at least the register state is dumped.
+ */
vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(vma_filesz))
+ if (!vma_filesz)
goto end_coredump;
for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
@@ -2281,12 +2287,12 @@ static int elf_core_dump(struct coredump_params *cprm)
shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
if (!shdr4extnum)
goto end_coredump;
- fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
+ fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
}
offset = dataoff;
- if (!dump_emit(cprm, elf, sizeof(*elf)))
+ if (!dump_emit(cprm, &elf, sizeof(elf)))
goto end_coredump;
if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
@@ -2370,8 +2376,6 @@ cleanup:
kfree(shdr4extnum);
kvfree(vma_filesz);
kfree(phdr4note);
- kfree(elf);
-out:
return has_dumped;
}
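
The create_elf_tables() rework above replaces indexed stores with a moving
pointer and zeroes the tail of saved_auxv by address arithmetic. Reduced
to a runnable userspace demo (the patch zeroes before stepping past
AT_NULL; the order differs here but the pointer arithmetic is the same):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned long auxv[16], *p = auxv;

        *p++ = 6;  *p++ = 4096;    /* e.g. AT_PAGESZ, PAGE_SIZE */
        *p++ = 0;  *p++ = 0;       /* AT_NULL terminator */

        /* zero whatever remains, as the memset in the patch does */
        memset(p, 0, (char *)auxv + sizeof(auxv) - (char *)p);

        printf("%td entries used\n", p - auxv);    /* prints 4 */
        return 0;
    }
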
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index de95ad27722f..9ab610cc9114 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1290,7 +1290,7 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
/* copy bytes from the working buffer into the pages */
while (working_bytes > 0) {
bytes = min_t(unsigned long, bvec.bv_len,
- PAGE_SIZE - buf_offset);
+ PAGE_SIZE - (buf_offset % PAGE_SIZE));
bytes = min(bytes, working_bytes);
kaddr = kmap_atomic(bvec.bv_page);
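
Why the compression.c one-liner matters: once the zlib workspace buffer
can span several pages (see the zlib.c changes below), buf_offset may
exceed PAGE_SIZE, and the unsigned subtraction would wrap to a huge
length. A tiny demo of the failure mode:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long buf_offset = 5000;    /* past the first page */

        printf("bad:  %lu\n", PAGE_SIZE - buf_offset);    /* wrapped */
        printf("good: %lu\n",
               PAGE_SIZE - (buf_offset % PAGE_SIZE));     /* 3192 */
        return 0;
    }
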
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 8600beb9c086..2ca2a09d0e23 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -500,11 +500,8 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
&dev_replace->scrub_progress, 0, 1);
ret = btrfs_dev_replace_finishing(fs_info, ret);
- if (ret == -EINPROGRESS) {
+ if (ret == -EINPROGRESS)
ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;
- } else if (ret != -ECANCELED) {
- WARN_ON(ret);
- }
return ret;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 1b1b6ff855aa..4f4b13830b25 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3243,6 +3243,7 @@ static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
struct inode *dst, u64 dst_loff)
{
+ const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
int ret;
/*
@@ -3250,7 +3251,7 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
* source range to serialize with relocation.
*/
btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
- ret = btrfs_clone(src, dst, loff, len, len, dst_loff, 1);
+ ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
return ret;
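
The dedupe fix above clones the tail range rounded up to the filesystem
block size. ALIGN() is the usual power-of-two round-up; the kernel macro
is type-preserving, but the classic form below behaves the same:

    #include <stdio.h>

    #define ALIGN(x, a)    (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long long bs = 4096;

        printf("%llu\n", ALIGN(5000ULL, bs));    /* 8192 */
        printf("%llu\n", ALIGN(8192ULL, bs));    /* 8192 */
        return 0;
    }
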
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 22cf69e6e5bc..61b37c56a7fb 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3578,17 +3578,27 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* This can easily boost the amount of SYSTEM chunks if cleaner
* thread can't be triggered fast enough, and use up all space
* of btrfs_super_block::sys_chunk_array
+ *
+ * While for dev replace, we need to try our best to mark block
+ * group RO, to prevent race between:
+ * - Write duplication
+ * Contains latest data
+ * - Scrub copy
+ * Contains data from commit tree
+ *
+ * If target block group is not marked RO, nocow writes can
+ * be overwritten by scrub copy, causing data corruption.
+ * So for dev-replace, it's not allowed to continue if a block
+ * group is not RO.
*/
- ret = btrfs_inc_block_group_ro(cache, false);
- scrub_pause_off(fs_info);
-
+ ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
if (ret == 0) {
ro_set = 1;
- } else if (ret == -ENOSPC) {
+ } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
/*
* btrfs_inc_block_group_ro return -ENOSPC when it
* failed in creating new chunk for metadata.
- * It is not a problem for scrub/replace, because
+ * It is not a problem for scrub, because
* metadata are always cowed, and our scrub paused
* commit_transactions.
*/
@@ -3597,9 +3607,22 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
btrfs_warn(fs_info,
"failed setting block group ro: %d", ret);
btrfs_put_block_group(cache);
+ scrub_pause_off(fs_info);
break;
}
+ /*
+ * Now the target block is marked RO, wait for nocow writes to
+ * finish before dev-replace.
+ * COW is fine, as COW never overwrites extents in commit tree.
+ */
+ if (sctx->is_dev_replace) {
+ btrfs_wait_nocow_writers(cache);
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
+ cache->length);
+ }
+
+ scrub_pause_off(fs_info);
down_write(&dev_replace->rwsem);
dev_replace->cursor_right = found_key.offset + length;
dev_replace->cursor_left = found_key.offset;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 309cda477589..f01552a0785e 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -170,7 +170,7 @@ btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \
write_seqcount_end(&dev->data_seqcount); \
preempt_enable(); \
}
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
#define BTRFS_DEVICE_GETSET_FUNCS(name) \
static inline u64 \
btrfs_device_get_##name(const struct btrfs_device *dev) \
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index a6c90a003c12..05615a1099db 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -20,9 +20,13 @@
#include <linux/refcount.h>
#include "compression.h"
+/* workspace buffer size for s390 zlib hardware support */
+#define ZLIB_DFLTCC_BUF_SIZE (4 * PAGE_SIZE)
+
struct workspace {
z_stream strm;
char *buf;
+ unsigned int buf_size;
struct list_head list;
int level;
};
@@ -61,7 +65,21 @@ struct list_head *zlib_alloc_workspace(unsigned int level)
zlib_inflate_workspacesize());
workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
workspace->level = level;
- workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ workspace->buf = NULL;
+ /*
+ * In case of s390 zlib hardware support, allocate a larger workspace
+ * buffer. If the allocation fails, fall back to a single page buffer.
+ */
+ if (zlib_deflate_dfltcc_enabled()) {
+ workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
+ __GFP_NOMEMALLOC | __GFP_NORETRY |
+ __GFP_NOWARN | GFP_NOIO);
+ workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
+ }
+ if (!workspace->buf) {
+ workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ workspace->buf_size = PAGE_SIZE;
+ }
if (!workspace->strm.workspace || !workspace->buf)
goto fail;
@@ -85,6 +103,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
struct page *in_page = NULL;
struct page *out_page = NULL;
unsigned long bytes_left;
+ unsigned int in_buf_pages;
unsigned long len = *total_out;
unsigned long nr_dest_pages = *out_pages;
const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
@@ -102,9 +121,6 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
workspace->strm.total_in = 0;
workspace->strm.total_out = 0;
- in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- data_in = kmap(in_page);
-
out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (out_page == NULL) {
ret = -ENOMEM;
@@ -114,12 +130,51 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
pages[0] = out_page;
nr_pages = 1;
- workspace->strm.next_in = data_in;
+ workspace->strm.next_in = workspace->buf;
+ workspace->strm.avail_in = 0;
workspace->strm.next_out = cpage_out;
workspace->strm.avail_out = PAGE_SIZE;
- workspace->strm.avail_in = min(len, PAGE_SIZE);
while (workspace->strm.total_in < len) {
+ /*
+ * Get the next input pages and copy their contents into
+ * the workspace buffer, if required.
+ */
+ if (workspace->strm.avail_in == 0) {
+ bytes_left = len - workspace->strm.total_in;
+ in_buf_pages = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
+ workspace->buf_size / PAGE_SIZE);
+ if (in_buf_pages > 1) {
+ int i;
+
+ for (i = 0; i < in_buf_pages; i++) {
+ if (in_page) {
+ kunmap(in_page);
+ put_page(in_page);
+ }
+ in_page = find_get_page(mapping,
+ start >> PAGE_SHIFT);
+ data_in = kmap(in_page);
+ memcpy(workspace->buf + i * PAGE_SIZE,
+ data_in, PAGE_SIZE);
+ start += PAGE_SIZE;
+ }
+ workspace->strm.next_in = workspace->buf;
+ } else {
+ if (in_page) {
+ kunmap(in_page);
+ put_page(in_page);
+ }
+ in_page = find_get_page(mapping,
+ start >> PAGE_SHIFT);
+ data_in = kmap(in_page);
+ start += PAGE_SIZE;
+ workspace->strm.next_in = data_in;
+ }
+ workspace->strm.avail_in = min(bytes_left,
+ (unsigned long) workspace->buf_size);
+ }
+
ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
if (ret != Z_OK) {
pr_debug("BTRFS: deflate in loop returned %d\n",
@@ -161,33 +216,43 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
/* we're all done */
if (workspace->strm.total_in >= len)
break;
-
- /* we've read in a full page, get a new one */
- if (workspace->strm.avail_in == 0) {
- if (workspace->strm.total_out > max_out)
- break;
-
- bytes_left = len - workspace->strm.total_in;
- kunmap(in_page);
- put_page(in_page);
-
- start += PAGE_SIZE;
- in_page = find_get_page(mapping,
- start >> PAGE_SHIFT);
- data_in = kmap(in_page);
- workspace->strm.avail_in = min(bytes_left,
- PAGE_SIZE);
- workspace->strm.next_in = data_in;
- }
+ if (workspace->strm.total_out > max_out)
+ break;
}
workspace->strm.avail_in = 0;
- ret = zlib_deflate(&workspace->strm, Z_FINISH);
- zlib_deflateEnd(&workspace->strm);
-
- if (ret != Z_STREAM_END) {
- ret = -EIO;
- goto out;
+ /*
+ * Call deflate with the Z_FINISH flush parameter, providing more
+ * output space but no more input data, until it returns Z_STREAM_END.
+ */
+ while (ret != Z_STREAM_END) {
+ ret = zlib_deflate(&workspace->strm, Z_FINISH);
+ if (ret == Z_STREAM_END)
+ break;
+ if (ret != Z_OK && ret != Z_BUF_ERROR) {
+ zlib_deflateEnd(&workspace->strm);
+ ret = -EIO;
+ goto out;
+ } else if (workspace->strm.avail_out == 0) {
+ /* get another page for the stream end */
+ kunmap(out_page);
+ if (nr_pages == nr_dest_pages) {
+ out_page = NULL;
+ ret = -E2BIG;
+ goto out;
+ }
+ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (out_page == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ cpage_out = kmap(out_page);
+ pages[nr_pages] = out_page;
+ nr_pages++;
+ workspace->strm.avail_out = PAGE_SIZE;
+ workspace->strm.next_out = cpage_out;
+ }
}
+ zlib_deflateEnd(&workspace->strm);
if (workspace->strm.total_out >= workspace->strm.total_in) {
ret = -E2BIG;
@@ -231,7 +296,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
workspace->strm.total_out = 0;
workspace->strm.next_out = workspace->buf;
- workspace->strm.avail_out = PAGE_SIZE;
+ workspace->strm.avail_out = workspace->buf_size;
/* If it's deflate, and it's got no preset dictionary, then
we can tell zlib to skip the adler32 check. */
@@ -270,7 +335,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
}
workspace->strm.next_out = workspace->buf;
- workspace->strm.avail_out = PAGE_SIZE;
+ workspace->strm.avail_out = workspace->buf_size;
if (workspace->strm.avail_in == 0) {
unsigned long tmp;
@@ -320,7 +385,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
workspace->strm.total_in = 0;
workspace->strm.next_out = workspace->buf;
- workspace->strm.avail_out = PAGE_SIZE;
+ workspace->strm.avail_out = workspace->buf_size;
workspace->strm.total_out = 0;
/* If it's deflate, and it's got no preset dictionary, then
we can tell zlib to skip the adler32 check. */
@@ -364,7 +429,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
buf_offset = 0;
bytes = min(PAGE_SIZE - pg_offset,
- PAGE_SIZE - buf_offset);
+ PAGE_SIZE - (buf_offset % PAGE_SIZE));
bytes = min(bytes, bytes_left);
kaddr = kmap_atomic(dest_page);
@@ -375,7 +440,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
bytes_left -= bytes;
next:
workspace->strm.next_out = workspace->buf;
- workspace->strm.avail_out = PAGE_SIZE;
+ workspace->strm.avail_out = workspace->buf_size;
}
if (ret != Z_STREAM_END && bytes_left != 0)
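
The new Z_FINISH loop in zlib_compress_pages() follows the standard zlib
convention: once the input is exhausted, keep calling deflate(Z_FINISH)
with fresh output space until it returns Z_STREAM_END. A self-contained
userspace illustration (build with -lz; output bytes are discarded, only
the stream bookkeeping is shown):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
        unsigned char in[4096], out[64];
        z_stream s;
        int ret;

        memset(in, 'a', sizeof(in));
        memset(&s, 0, sizeof(s));
        if (deflateInit(&s, Z_DEFAULT_COMPRESSION) != Z_OK)
            return 1;

        s.next_in = in;
        s.avail_in = sizeof(in);
        do {
            s.next_out = out;            /* a fresh "page" of output */
            s.avail_out = sizeof(out);
            ret = deflate(&s, Z_FINISH); /* Z_OK means not done yet */
        } while (ret != Z_STREAM_END);

        printf("compressed to %lu bytes\n", s.total_out);
        return deflateEnd(&s) == Z_OK ? 0 : 1;
    }
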
diff --git a/fs/buffer.c b/fs/buffer.c
index 18a87ec8a465..b8d28370cfd7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1433,7 +1433,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
void invalidate_bh_lrus(void)
{
- on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
+ on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 44a3ce1e4ce4..1dc97f2d6201 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -396,7 +396,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
struct cachefiles_object *object;
struct cachefiles_cache *cache;
struct inode *inode;
- sector_t block0, block;
+ sector_t block;
unsigned shift;
int ret;
@@ -412,7 +412,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
inode = d_backing_inode(object->backer);
ASSERT(S_ISREG(inode->i_mode));
- ASSERT(inode->i_mapping->a_ops->bmap);
ASSERT(inode->i_mapping->a_ops->readpages);
/* calculate the shift required to use bmap */
@@ -428,12 +427,14 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
* enough for this as it doesn't indicate errors, but it's all we've
* got for the moment
*/
- block0 = page->index;
- block0 <<= shift;
+ block = page->index;
+ block <<= shift;
+
+ ret = bmap(inode, &block);
+ ASSERT(!ret);
- block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
_debug("%llx -> %llx",
- (unsigned long long) block0,
+ (unsigned long long) (page->index << shift),
(unsigned long long) block);
if (block) {
@@ -711,7 +712,6 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
inode = d_backing_inode(object->backer);
ASSERT(S_ISREG(inode->i_mode));
- ASSERT(inode->i_mapping->a_ops->bmap);
ASSERT(inode->i_mapping->a_ops->readpages);
/* calculate the shift required to use bmap */
@@ -728,7 +728,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
ret = space ? -ENODATA : -ENOBUFS;
list_for_each_entry_safe(page, _n, pages, lru) {
- sector_t block0, block;
+ sector_t block;
/* we assume the absence or presence of the first block is a
* good enough indication for the page as a whole
@@ -736,13 +736,14 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
* good enough for this as it doesn't indicate errors, but
* it's all we've got for the moment
*/
- block0 = page->index;
- block0 <<= shift;
+ block = page->index;
+ block <<= shift;
+
+ ret = bmap(inode, &block);
+ ASSERT(!ret);
- block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
- block0);
_debug("%llx -> %llx",
- (unsigned long long) block0,
+ (unsigned long long) (page->index << shift),
(unsigned long long) block);
if (block) {
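
For reference, the bmap() interface the cachefiles patch now uses passes
the logical block by reference and returns 0 or -EINVAL, instead of
returning a raw sector. A kernel-style sketch of a caller (the helper
name is hypothetical):

    #include <linux/fs.h>

    static int example_map_block(struct inode *inode, pgoff_t index,
                                 unsigned int shift, sector_t *out)
    {
        sector_t block = (sector_t)index << shift;
        int ret = bmap(inode, &block);    /* 0 or -EINVAL */

        if (ret)
            return ret;    /* inode has no usable ->bmap */
        *out = block;      /* 0 here means a hole / not backed */
        return 0;
    }
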
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index c1da294418d1..0a0823d378db 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_CEPH_FS) += ceph.o
ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
export.o caps.o snap.o xattr.o quota.o io.o \
mds_client.o mdsmap.o strings.o ceph_frag.o \
- debugfs.o
+ debugfs.o util.o
ceph-$(CONFIG_CEPH_FSCACHE) += cache.o
ceph-$(CONFIG_CEPH_FS_POSIX_ACL) += acl.o
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index aa55f412a6e3..26be6520d3fb 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -222,8 +222,8 @@ int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
err = ceph_pagelist_reserve(pagelist, len + val_size2 + 8);
if (err)
goto out_err;
- err = ceph_pagelist_encode_string(pagelist,
- XATTR_NAME_POSIX_ACL_DEFAULT, len);
+ ceph_pagelist_encode_string(pagelist,
+ XATTR_NAME_POSIX_ACL_DEFAULT, len);
err = posix_acl_to_xattr(&init_user_ns, default_acl,
tmp_buf, val_size2);
if (err < 0)
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 73f24f307a4a..270b769607a2 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -67,7 +67,7 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
continue;
- errorf(fc, "ceph: fscache cookie already registered for fsid %pU, use fsc=<uniquifier> option",
+ errorfc(fc, "fscache cookie already registered for fsid %pU, use fsc=<uniquifier> option",
fsid);
err = -EBUSY;
goto out_unlock;
@@ -96,7 +96,7 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
list_add_tail(&ent->list, &ceph_fscache_list);
} else {
kfree(ent);
- errorf(fc, "ceph: unable to register fscache cookie for fsid %pU",
+ errorfc(fc, "unable to register fscache cookie for fsid %pU",
fsid);
/* all other fs ignore this error */
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 9d09bb53c1ab..28ae0c134700 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -908,7 +908,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
ci_node);
if (!__cap_is_valid(cap))
continue;
- __touch_cap(cap);
+ if (cap->issued & mask)
+ __touch_cap(cap);
}
}
return 1;
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index c281f32b54f7..fb7cabd98e7b 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -33,7 +33,7 @@ static int mdsmap_show(struct seq_file *s, void *p)
seq_printf(s, "max_mds %d\n", mdsmap->m_max_mds);
seq_printf(s, "session_timeout %d\n", mdsmap->m_session_timeout);
seq_printf(s, "session_autoclose %d\n", mdsmap->m_session_autoclose);
- for (i = 0; i < mdsmap->m_num_mds; i++) {
+ for (i = 0; i < mdsmap->possible_max_rank; i++) {
struct ceph_entity_addr *addr = &mdsmap->m_info[i].addr;
int state = mdsmap->m_info[i].state;
seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 2e4764fd1872..d0cd0aba5843 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1186,7 +1186,7 @@ void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
struct dentry *dn = di->dentry;
struct ceph_mds_client *mdsc;
- dout("dentry_dir_lease_touch %p %p '%pd' (offset %lld)\n",
+ dout("dentry_dir_lease_touch %p %p '%pd' (offset 0x%llx)\n",
di, dn, dn, di->offset);
if (!list_empty(&di->lease_list)) {
@@ -1567,7 +1567,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
inode = d_inode(dentry);
}
- dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
+ dout("d_revalidate %p '%pd' inode %p offset 0x%llx\n", dentry,
dentry, inode, ceph_dentry(dentry)->offset);
/* always trust cached snapped dentries, snapdir dentry */
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 11929d2bb594..7e0190b1f821 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1418,6 +1418,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct ceph_cap_flush *prealloc_cf;
ssize_t count, written = 0;
int err, want, got;
+ bool direct_lock = false;
loff_t pos;
loff_t limit = max(i_size_read(inode), fsc->max_file_size);
@@ -1428,8 +1429,11 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!prealloc_cf)
return -ENOMEM;
+ if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
+ direct_lock = true;
+
retry_snap:
- if (iocb->ki_flags & IOCB_DIRECT)
+ if (direct_lock)
ceph_start_io_direct(inode);
else
ceph_start_io_write(inode);
@@ -1519,14 +1523,15 @@ retry_snap:
/* we might need to revert back to that point */
data = *from;
- if (iocb->ki_flags & IOCB_DIRECT) {
+ if (iocb->ki_flags & IOCB_DIRECT)
written = ceph_direct_read_write(iocb, &data, snapc,
&prealloc_cf);
- ceph_end_io_direct(inode);
- } else {
+ else
written = ceph_sync_write(iocb, &data, pos, snapc);
+ if (direct_lock)
+ ceph_end_io_direct(inode);
+ else
ceph_end_io_write(inode);
- }
if (written > 0)
iov_iter_advance(from, written);
ceph_put_snap_context(snapc);
@@ -1577,7 +1582,7 @@ retry_snap:
goto out_unlocked;
out:
- if (iocb->ki_flags & IOCB_DIRECT)
+ if (direct_lock)
ceph_end_io_direct(inode);
else
ceph_end_io_write(inode);
@@ -1974,6 +1979,9 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
return -EOPNOTSUPP;
+ if (!src_fsc->have_copy_from2)
+ return -EOPNOTSUPP;
+
/*
* Striped file layouts require that we copy partial objects, but the
* OSD copy-from operation only supports full-object copies. Limit
@@ -2101,8 +2109,14 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
&dst_oid, &dst_oloc,
CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
- CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
+ dst_ci->i_truncate_seq, dst_ci->i_truncate_size,
+ CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
if (err) {
+ if (err == -EOPNOTSUPP) {
+ src_fsc->have_copy_from2 = false;
+ pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
+ }
dout("ceph_osdc_copy_from returned %d\n", err);
if (!ret)
ret = err;
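
The copy-from2 handling above is a probe-once fallback: after the first
-EOPNOTSUPP the client latches have_copy_from2 = false so later copies go
straight to the generic path. The pattern in isolation, as a runnable
userspace stub (new_copy_op() stands in for the OSD request):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool have_new_op = true;

    static long new_copy_op(long len)
    {
        return -EOPNOTSUPP;    /* stub: pretend the server is old */
    }

    static long do_copy(long len)
    {
        long ret;

        if (!have_new_op)
            return -EOPNOTSUPP;    /* caller takes the generic path */

        ret = new_copy_op(len);
        if (ret == -EOPNOTSUPP)
            have_new_op = false;   /* remember, don't re-probe */
        return ret;
    }

    int main(void)
    {
        do_copy(4096);
        printf("offload available: %d\n", have_new_op);    /* 0 */
        return 0;
    }
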
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index c07407586ce8..d01710a16a4a 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -55,11 +55,9 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
+ if (inode->i_state & I_NEW)
dout("get_inode created new inode %p %llx.%llx ino %llx\n",
inode, ceph_vinop(inode), (u64)inode->i_ino);
- unlock_new_inode(inode);
- }
dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
vino.snap, inode);
@@ -88,6 +86,10 @@ struct inode *ceph_get_snapdir(struct inode *parent)
inode->i_fop = &ceph_snapdir_fops;
ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
ci->i_rbytes = 0;
+
+ if (inode->i_state & I_NEW)
+ unlock_new_inode(inode);
+
return inode;
}
@@ -728,8 +730,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
static int fill_inode(struct inode *inode, struct page *locked_page,
struct ceph_mds_reply_info_in *iinfo,
struct ceph_mds_reply_dirfrag *dirinfo,
- struct ceph_mds_session *session,
- unsigned long ttl_from, int cap_fmode,
+ struct ceph_mds_session *session, int cap_fmode,
struct ceph_cap_reservation *caps_reservation)
{
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
@@ -754,8 +755,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
info_caps = le32_to_cpu(info->cap.caps);
/* prealloc new cap struct */
- if (info_caps && ceph_snap(inode) == CEPH_NOSNAP)
+ if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
new_cap = ceph_get_cap(mdsc, caps_reservation);
+ if (!new_cap)
+ return -ENOMEM;
+ }
/*
* prealloc xattr data, if it looks like we'll need it. only
@@ -1237,7 +1241,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
if (dir) {
err = fill_inode(dir, NULL,
&rinfo->diri, rinfo->dirfrag,
- session, req->r_request_started, -1,
+ session, -1,
&req->r_caps_reservation);
if (err < 0)
goto done;
@@ -1302,18 +1306,22 @@ retry_lookup:
err = PTR_ERR(in);
goto done;
}
- req->r_target_inode = in;
err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
- session, req->r_request_started,
+ session,
(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
- rinfo->head->result == 0) ? req->r_fmode : -1,
+ rinfo->head->result == 0) ? req->r_fmode : -1,
&req->r_caps_reservation);
if (err < 0) {
pr_err("fill_inode badness %p %llx.%llx\n",
in, ceph_vinop(in));
+ if (in->i_state & I_NEW)
+ discard_new_inode(in);
goto done;
}
+ req->r_target_inode = in;
+ if (in->i_state & I_NEW)
+ unlock_new_inode(in);
}
/*
@@ -1493,12 +1501,18 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
continue;
}
rc = fill_inode(in, NULL, &rde->inode, NULL, session,
- req->r_request_started, -1,
- &req->r_caps_reservation);
+ -1, &req->r_caps_reservation);
if (rc < 0) {
pr_err("fill_inode badness on %p got %d\n", in, rc);
err = rc;
+ if (in->i_state & I_NEW) {
+ ihold(in);
+ discard_new_inode(in);
+ }
+ } else if (in->i_state & I_NEW) {
+ unlock_new_inode(in);
}
+
/* avoid calling iput_final() in mds dispatch threads */
ceph_async_iput(in);
}
@@ -1694,19 +1708,24 @@ retry_lookup:
}
ret = fill_inode(in, NULL, &rde->inode, NULL, session,
- req->r_request_started, -1,
- &req->r_caps_reservation);
+ -1, &req->r_caps_reservation);
if (ret < 0) {
pr_err("fill_inode badness on %p\n", in);
if (d_really_is_negative(dn)) {
/* avoid calling iput_final() in mds
* dispatch threads */
+ if (in->i_state & I_NEW) {
+ ihold(in);
+ discard_new_inode(in);
+ }
ceph_async_iput(in);
}
d_drop(dn);
err = ret;
goto next_item;
}
+ if (in->i_state & I_NEW)
+ unlock_new_inode(in);
if (d_really_is_negative(dn)) {
if (ceph_security_xattr_deadlock(in)) {
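
All of the inode.c hunks above follow the same iget5_locked() discipline: a newly allocated inode comes back locked with I_NEW set, and the filler must either publish it with unlock_new_inode() or, on error, drop it with discard_new_inode(). A kernel-style sketch of that contract, assuming hypothetical test_cb/set_cb/fill_it helpers (a fragment, not standalone code):

static int test_cb(struct inode *inode, void *data);	/* hypothetical */
static int set_cb(struct inode *inode, void *data);	/* hypothetical */
static int fill_it(struct inode *inode);		/* hypothetical filler */

static struct inode *get_filled_inode(struct super_block *sb,
				      unsigned long hashval, void *key)
{
	struct inode *inode = iget5_locked(sb, hashval, test_cb, set_cb, key);
	int err;

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;			/* found live in the icache */

	err = fill_it(inode);
	if (err) {
		discard_new_inode(inode);	/* unlocks, unhashes, iputs */
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);		/* clears I_NEW, wakes waiters */
	return inode;
}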
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 374db1bd57d1..bbbbddf71326 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -9,6 +9,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <linux/bits.h>
#include "super.h"
#include "mds_client.h"
@@ -530,6 +531,7 @@ const char *ceph_session_state_name(int s)
case CEPH_MDS_SESSION_OPEN: return "open";
case CEPH_MDS_SESSION_HUNG: return "hung";
case CEPH_MDS_SESSION_CLOSING: return "closing";
+ case CEPH_MDS_SESSION_CLOSED: return "closed";
case CEPH_MDS_SESSION_RESTARTING: return "restarting";
case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
case CEPH_MDS_SESSION_REJECTED: return "rejected";
@@ -537,7 +539,7 @@ const char *ceph_session_state_name(int s)
}
}
-static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
+struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
{
if (refcount_inc_not_zero(&s->s_ref)) {
dout("mdsc get_session %p %d -> %d\n", s,
@@ -568,7 +570,7 @@ struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
{
if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
return NULL;
- return get_session(mdsc->sessions[mds]);
+ return ceph_get_mds_session(mdsc->sessions[mds]);
}
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
@@ -597,7 +599,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
{
struct ceph_mds_session *s;
- if (mds >= mdsc->mdsmap->m_num_mds)
+ if (mds >= mdsc->mdsmap->possible_max_rank)
return ERR_PTR(-EINVAL);
s = kzalloc(sizeof(*s), GFP_NOFS);
@@ -674,7 +676,6 @@ static void __unregister_session(struct ceph_mds_client *mdsc,
dout("__unregister_session mds%d %p\n", s->s_mds, s);
BUG_ON(mdsc->sessions[s->s_mds] != s);
mdsc->sessions[s->s_mds] = NULL;
- s->s_state = 0;
ceph_con_close(&s->s_con);
ceph_put_mds_session(s);
atomic_dec(&mdsc->num_sessions);
@@ -708,8 +709,10 @@ void ceph_mdsc_release_request(struct kref *kref)
/* avoid calling iput_final() in mds dispatch threads */
ceph_async_iput(req->r_inode);
}
- if (req->r_parent)
+ if (req->r_parent) {
ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+ ceph_async_iput(req->r_parent);
+ }
ceph_async_iput(req->r_target_inode);
if (req->r_dentry)
dput(req->r_dentry);
@@ -876,7 +879,8 @@ static struct inode *get_nonsnap_parent(struct dentry *dentry)
* Called under mdsc->mutex.
*/
static int __choose_mds(struct ceph_mds_client *mdsc,
- struct ceph_mds_request *req)
+ struct ceph_mds_request *req,
+ bool *random)
{
struct inode *inode;
struct ceph_inode_info *ci;
@@ -886,6 +890,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
u32 hash = req->r_direct_hash;
bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
+ if (random)
+ *random = false;
+
/*
* is there a specific mds we should try? ignore hint if we have
* no session and the mds is not up (active or recovering).
@@ -893,7 +900,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
if (req->r_resend_mds >= 0 &&
(__have_session(mdsc, req->r_resend_mds) ||
ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
- dout("choose_mds using resend_mds mds%d\n",
+ dout("%s using resend_mds mds%d\n", __func__,
req->r_resend_mds);
return req->r_resend_mds;
}
@@ -911,7 +918,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
rcu_read_lock();
inode = get_nonsnap_parent(req->r_dentry);
rcu_read_unlock();
- dout("__choose_mds using snapdir's parent %p\n", inode);
+ dout("%s using snapdir's parent %p\n", __func__, inode);
}
} else if (req->r_dentry) {
/* ignore race with rename; old or new d_parent is okay */
@@ -931,7 +938,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
/* direct snapped/virtual snapdir requests
* based on parent dir inode */
inode = get_nonsnap_parent(parent);
- dout("__choose_mds using nonsnap parent %p\n", inode);
+ dout("%s using nonsnap parent %p\n", __func__, inode);
} else {
/* dentry target */
inode = d_inode(req->r_dentry);
@@ -947,8 +954,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
rcu_read_unlock();
}
- dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
- (int)hash, mode);
+ dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
+ hash, mode);
if (!inode)
goto random;
ci = ceph_inode(inode);
@@ -966,30 +973,33 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
get_random_bytes(&r, 1);
r %= frag.ndist;
mds = frag.dist[r];
- dout("choose_mds %p %llx.%llx "
- "frag %u mds%d (%d/%d)\n",
- inode, ceph_vinop(inode),
- frag.frag, mds,
- (int)r, frag.ndist);
+ dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
+ __func__, inode, ceph_vinop(inode),
+ frag.frag, mds, (int)r, frag.ndist);
if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
- CEPH_MDS_STATE_ACTIVE)
+ CEPH_MDS_STATE_ACTIVE &&
+ !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
goto out;
}
/* since this file/dir wasn't known to be
* replicated, then we want to look for the
* authoritative mds. */
- mode = USE_AUTH_MDS;
if (frag.mds >= 0) {
/* choose auth mds */
mds = frag.mds;
- dout("choose_mds %p %llx.%llx "
- "frag %u mds%d (auth)\n",
- inode, ceph_vinop(inode), frag.frag, mds);
+ dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
+ __func__, inode, ceph_vinop(inode),
+ frag.frag, mds);
if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
- CEPH_MDS_STATE_ACTIVE)
- goto out;
+ CEPH_MDS_STATE_ACTIVE) {
+ if (mode == USE_ANY_MDS &&
+ !ceph_mdsmap_is_laggy(mdsc->mdsmap,
+ mds))
+ goto out;
+ }
}
+ mode = USE_AUTH_MDS;
}
}
@@ -1005,7 +1015,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
goto random;
}
mds = cap->session->s_mds;
- dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
+ dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
inode, ceph_vinop(inode), mds,
cap == ci->i_auth_cap ? "auth " : "", cap);
spin_unlock(&ci->i_ceph_lock);
@@ -1016,8 +1026,11 @@ out:
return mds;
random:
+ if (random)
+ *random = true;
+
mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
- dout("choose_mds chose random mds%d\n", mds);
+ dout("%s chose random mds%d\n", __func__, mds);
return mds;
}
@@ -1043,20 +1056,21 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
return msg;
}
+static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
+#define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
static void encode_supported_features(void **p, void *end)
{
- static const unsigned char bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
- static const size_t count = ARRAY_SIZE(bits);
+ static const size_t count = ARRAY_SIZE(feature_bits);
if (count > 0) {
size_t i;
- size_t size = ((size_t)bits[count - 1] + 64) / 64 * 8;
+ size_t size = FEATURE_BYTES(count);
BUG_ON(*p + 4 + size > end);
ceph_encode_32(p, size);
memset(*p, 0, size);
for (i = 0; i < count; i++)
- ((unsigned char*)(*p))[i / 8] |= 1 << (bits[i] % 8);
+ ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
*p += size;
} else {
BUG_ON(*p + 4 > end);
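
FEATURE_BYTES() derives the encoded bitmap size from the last (highest) entry of feature_bits: one bit per feature, rounded up to whole 64-bit words, expressed in bytes. A standalone check of that arithmetic (the feature list below only mirrors the shape of CEPHFS_FEATURES_CLIENT_SUPPORTED):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Same shape as CEPHFS_FEATURES_CLIENT_SUPPORTED: last entry is the max. */
static const unsigned char feature_bits[] = { 0, 1, 2, 3, 4, 5, 6, 7,
					      8, 9, 11, 12 };
#define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[(c) - 1] + 1, 64) * 8)

int main(void)
{
	size_t count = sizeof(feature_bits) / sizeof(feature_bits[0]);

	/* highest bit is 12 -> one 64-bit word -> 8 bytes on the wire */
	printf("bitmap size: %zu bytes\n", FEATURE_BYTES(count));
	return 0;
}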
@@ -1077,6 +1091,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
int metadata_key_count = 0;
struct ceph_options *opt = mdsc->fsc->client->options;
struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
+ size_t size, count;
void *p, *end;
const char* metadata[][2] = {
@@ -1094,8 +1109,13 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
strlen(metadata[i][1]);
metadata_key_count++;
}
+
/* supported feature */
- extra_bytes += 4 + 8;
+ size = 0;
+ count = ARRAY_SIZE(feature_bits);
+ if (count > 0)
+ size = FEATURE_BYTES(count);
+ extra_bytes += 4 + size;
/* Allocate the message */
msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
@@ -1115,7 +1135,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
* Serialize client metadata into waiting buffer space, using
* the format that userspace expects for map<string, string>
*
- * ClientSession messages with metadata are v2
+ * ClientSession messages with metadata are v3
*/
msg->hdr.version = cpu_to_le16(3);
msg->hdr.compat_version = cpu_to_le16(1);
@@ -1217,7 +1237,7 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
struct ceph_mds_session *ts;
int i, mds = session->s_mds;
- if (mds >= mdsc->mdsmap->m_num_mds)
+ if (mds >= mdsc->mdsmap->possible_max_rank)
return;
mi = &mdsc->mdsmap->m_info[mds];
@@ -1965,7 +1985,7 @@ void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
if (mdsc->stopping)
return;
- get_session(session);
+ ceph_get_mds_session(session);
if (queue_work(mdsc->fsc->cap_wq,
&session->s_cap_release_work)) {
dout("cap release work queued\n");
@@ -2070,7 +2090,6 @@ struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
- struct timespec64 ts;
if (!req)
return ERR_PTR(-ENOMEM);
@@ -2089,8 +2108,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
init_completion(&req->r_safe_completion);
INIT_LIST_HEAD(&req->r_unsafe_item);
- ktime_get_coarse_real_ts64(&ts);
- req->r_stamp = timespec64_trunc(ts, mdsc->fsc->sb->s_time_gran);
+ ktime_get_coarse_real_ts64(&req->r_stamp);
req->r_op = op;
req->r_direct_mode = mode;
@@ -2516,6 +2534,26 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
}
/*
+ * called under mdsc->mutex
+ */
+static int __send_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ struct ceph_mds_request *req,
+ bool drop_cap_releases)
+{
+ int err;
+
+ err = __prepare_send_request(mdsc, req, session->s_mds,
+ drop_cap_releases);
+ if (!err) {
+ ceph_msg_get(req->r_request);
+ ceph_con_send(&session->s_con, req->r_request);
+ }
+
+ return err;
+}
+
+/*
* send request, or put it on the appropriate wait list.
*/
static void __do_request(struct ceph_mds_client *mdsc,
@@ -2524,6 +2562,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session = NULL;
int mds = -1;
int err = 0;
+ bool random;
if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
@@ -2556,15 +2595,14 @@ static void __do_request(struct ceph_mds_client *mdsc,
if (!(mdsc->fsc->mount_options->flags &
CEPH_MOUNT_OPT_MOUNTWAIT) &&
!ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
- err = -ENOENT;
- pr_info("probably no mds server is up\n");
+ err = -EHOSTUNREACH;
goto finish;
}
}
put_request_session(req);
- mds = __choose_mds(mdsc, req);
+ mds = __choose_mds(mdsc, req, &random);
if (mds < 0 ||
ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
dout("do_request no mds or not active, waiting for map\n");
@@ -2581,7 +2619,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
goto finish;
}
}
- req->r_session = get_session(session);
+ req->r_session = ceph_get_mds_session(session);
dout("do_request mds%d session %p state %s\n", mds, session,
ceph_session_state_name(session->s_state));
@@ -2592,8 +2630,12 @@ static void __do_request(struct ceph_mds_client *mdsc,
goto out_session;
}
if (session->s_state == CEPH_MDS_SESSION_NEW ||
- session->s_state == CEPH_MDS_SESSION_CLOSING)
+ session->s_state == CEPH_MDS_SESSION_CLOSING) {
__open_session(mdsc, session);
+ /* retry the same mds later */
+ if (random)
+ req->r_resend_mds = mds;
+ }
list_add(&req->r_wait, &session->s_waiting);
goto out_session;
}
@@ -2604,11 +2646,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
if (req->r_request_started == 0) /* note request start time */
req->r_request_started = jiffies;
- err = __prepare_send_request(mdsc, req, mds, false);
- if (!err) {
- ceph_msg_get(req->r_request);
- ceph_con_send(&session->s_con, req->r_request);
- }
+ err = __send_request(mdsc, session, req, false);
out_session:
ceph_put_mds_session(session);
@@ -2676,8 +2714,10 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
if (req->r_inode)
ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
- if (req->r_parent)
+ if (req->r_parent) {
ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+ ihold(req->r_parent);
+ }
if (req->r_old_dentry_dir)
ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
CEPH_CAP_PIN);
@@ -2859,7 +2899,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
mutex_unlock(&mdsc->mutex);
goto out;
} else {
- int mds = __choose_mds(mdsc, req);
+ int mds = __choose_mds(mdsc, req, NULL);
if (mds >= 0 && mds != req->r_session->s_mds) {
dout("but auth changed, so resending\n");
__do_request(mdsc, req);
@@ -2875,6 +2915,10 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
__unregister_request(mdsc, req);
+ /* last request during umount? */
+ if (mdsc->stopping && !__get_oldest_req(mdsc))
+ complete_all(&mdsc->safe_umount_waiters);
+
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
/*
* We already handled the unsafe response, now do the
@@ -2885,9 +2929,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
*/
dout("got safe reply %llu, mds%d\n", tid, mds);
- /* last unsafe request during umount? */
- if (mdsc->stopping && !__get_oldest_req(mdsc))
- complete_all(&mdsc->safe_umount_waiters);
mutex_unlock(&mdsc->mutex);
goto out;
}
@@ -3102,7 +3143,7 @@ static void handle_session(struct ceph_mds_session *session,
mutex_lock(&mdsc->mutex);
if (op == CEPH_SESSION_CLOSE) {
- get_session(session);
+ ceph_get_mds_session(session);
__unregister_session(mdsc, session);
}
/* FIXME: this ttl calculation is generous */
@@ -3140,6 +3181,7 @@ static void handle_session(struct ceph_mds_session *session,
case CEPH_SESSION_CLOSE:
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
pr_info("mds%d reconnect denied\n", session->s_mds);
+ session->s_state = CEPH_MDS_SESSION_CLOSED;
cleanup_session_requests(mdsc, session);
remove_session_caps(session);
wake = 2; /* for good measure */
@@ -3207,7 +3249,6 @@ bad:
return;
}
-
/*
* called under session->mutex.
*/
@@ -3216,18 +3257,12 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
{
struct ceph_mds_request *req, *nreq;
struct rb_node *p;
- int err;
dout("replay_unsafe_requests mds%d\n", session->s_mds);
mutex_lock(&mdsc->mutex);
- list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
- err = __prepare_send_request(mdsc, req, session->s_mds, true);
- if (!err) {
- ceph_msg_get(req->r_request);
- ceph_con_send(&session->s_con, req->r_request);
- }
- }
+ list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
+ __send_request(mdsc, session, req, true);
/*
* also re-send old requests when MDS enters reconnect stage, so that MDS
@@ -3242,14 +3277,8 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
if (req->r_attempts == 0)
continue; /* only old requests */
if (req->r_session &&
- req->r_session->s_mds == session->s_mds) {
- err = __prepare_send_request(mdsc, req,
- session->s_mds, true);
- if (!err) {
- ceph_msg_get(req->r_request);
- ceph_con_send(&session->s_con, req->r_request);
- }
- }
+ req->r_session->s_mds == session->s_mds)
+ __send_request(mdsc, session, req, true);
}
mutex_unlock(&mdsc->mutex);
}
@@ -3760,7 +3789,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
dout("check_new_map new %u old %u\n",
newmap->m_epoch, oldmap->m_epoch);
- for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) {
+ for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
if (!mdsc->sessions[i])
continue;
s = mdsc->sessions[i];
@@ -3774,9 +3803,9 @@ static void check_new_map(struct ceph_mds_client *mdsc,
ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
ceph_session_state_name(s->s_state));
- if (i >= newmap->m_num_mds) {
+ if (i >= newmap->possible_max_rank) {
/* force close session for stopped mds */
- get_session(s);
+ ceph_get_mds_session(s);
__unregister_session(mdsc, s);
__wake_requests(mdsc, &s->s_waiting);
mutex_unlock(&mdsc->mutex);
@@ -3831,7 +3860,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
}
}
- for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) {
+ for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
s = mdsc->sessions[i];
if (!s)
continue;
@@ -4377,7 +4406,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
mutex_lock(&mdsc->mutex);
for (i = 0; i < mdsc->max_sessions; i++) {
if (mdsc->sessions[i]) {
- session = get_session(mdsc->sessions[i]);
+ session = ceph_get_mds_session(mdsc->sessions[i]);
__unregister_session(mdsc, session);
mutex_unlock(&mdsc->mutex);
mutex_lock(&session->s_mutex);
@@ -4605,11 +4634,8 @@ static struct ceph_connection *con_get(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
- if (get_session(s)) {
- dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref));
+ if (ceph_get_mds_session(s))
return con;
- }
- dout("mdsc con_get %p FAIL\n", s);
return NULL;
}
@@ -4617,7 +4643,6 @@ static void con_put(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
- dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1);
ceph_put_mds_session(s);
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 14c7e8c49970..27a7446e10d3 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -17,22 +17,31 @@
#include <linux/ceph/auth.h>
/* The first 8 bits are reserved for old ceph releases */
-#define CEPHFS_FEATURE_MIMIC 8
-#define CEPHFS_FEATURE_REPLY_ENCODING 9
-#define CEPHFS_FEATURE_RECLAIM_CLIENT 10
-#define CEPHFS_FEATURE_LAZY_CAP_WANTED 11
-#define CEPHFS_FEATURE_MULTI_RECONNECT 12
+enum ceph_feature_type {
+ CEPHFS_FEATURE_MIMIC = 8,
+ CEPHFS_FEATURE_REPLY_ENCODING,
+ CEPHFS_FEATURE_RECLAIM_CLIENT,
+ CEPHFS_FEATURE_LAZY_CAP_WANTED,
+ CEPHFS_FEATURE_MULTI_RECONNECT,
+
+ CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_MULTI_RECONNECT,
+};
-#define CEPHFS_FEATURES_CLIENT_SUPPORTED { \
+/*
+ * The last element of this array must always be the
+ * highest feature bit value.
+ */
+#define CEPHFS_FEATURES_CLIENT_SUPPORTED { \
0, 1, 2, 3, 4, 5, 6, 7, \
CEPHFS_FEATURE_MIMIC, \
CEPHFS_FEATURE_REPLY_ENCODING, \
CEPHFS_FEATURE_LAZY_CAP_WANTED, \
CEPHFS_FEATURE_MULTI_RECONNECT, \
+ \
+ CEPHFS_FEATURE_MAX, \
}
#define CEPHFS_FEATURES_CLIENT_REQUIRED {}
-
/*
* Some lock dependencies:
*
@@ -151,7 +160,8 @@ enum {
CEPH_MDS_SESSION_RESTARTING = 5,
CEPH_MDS_SESSION_RECONNECTING = 6,
CEPH_MDS_SESSION_CLOSING = 7,
- CEPH_MDS_SESSION_REJECTED = 8,
+ CEPH_MDS_SESSION_CLOSED = 8,
+ CEPH_MDS_SESSION_REJECTED = 9,
};
struct ceph_mds_session {
@@ -174,6 +184,7 @@ struct ceph_mds_session {
/* protected by s_cap_lock */
spinlock_t s_cap_lock;
+ refcount_t s_ref;
struct list_head s_caps; /* all caps issued by this session */
struct ceph_cap *s_cap_iterator;
int s_nr_caps;
@@ -188,7 +199,6 @@ struct ceph_mds_session {
unsigned long s_renew_requested; /* last time we sent a renew req */
u64 s_renew_seq;
- refcount_t s_ref;
struct list_head s_waiting; /* waiting requests */
struct list_head s_unsafe; /* unsafe requests */
};
@@ -224,6 +234,7 @@ struct ceph_mds_request {
struct rb_node r_node;
struct ceph_mds_client *r_mdsc;
+ struct kref r_kref;
int r_op; /* mds op code */
/* operation on what? */
@@ -294,7 +305,6 @@ struct ceph_mds_request {
int r_resend_mds; /* mds to resend to next, if any*/
u32 r_sent_on_mseq; /* cap mseq request was sent at*/
- struct kref r_kref;
struct list_head r_wait;
struct completion r_completion;
struct completion r_safe_completion;
@@ -451,15 +461,10 @@ extern const char *ceph_mds_op_name(int op);
extern struct ceph_mds_session *
__ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
-static inline struct ceph_mds_session *
-ceph_get_mds_session(struct ceph_mds_session *s)
-{
- refcount_inc(&s->s_ref);
- return s;
-}
-
extern const char *ceph_session_state_name(int s);
+extern struct ceph_mds_session *
+ceph_get_mds_session(struct ceph_mds_session *s);
extern void ceph_put_mds_session(struct ceph_mds_session *s);
extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 471bac335fae..889627817e52 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -13,30 +13,25 @@
#include "super.h"
+#define CEPH_MDS_IS_READY(i, ignore_laggy) \
+ (m->m_info[i].state > 0 && ignore_laggy ? true : !m->m_info[i].laggy)
-/*
- * choose a random mds that is "up" (i.e. has a state > 0), or -1.
- */
-int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
+static int __mdsmap_get_random_mds(struct ceph_mdsmap *m, bool ignore_laggy)
{
int n = 0;
int i, j;
- /* special case for one mds */
- if (1 == m->m_num_mds && m->m_info[0].state > 0)
- return 0;
-
/* count */
- for (i = 0; i < m->m_num_mds; i++)
- if (m->m_info[i].state > 0)
+ for (i = 0; i < m->possible_max_rank; i++)
+ if (CEPH_MDS_IS_READY(i, ignore_laggy))
n++;
if (n == 0)
return -1;
/* pick */
n = prandom_u32() % n;
- for (j = 0, i = 0; i < m->m_num_mds; i++) {
- if (m->m_info[i].state > 0)
+ for (j = 0, i = 0; i < m->possible_max_rank; i++) {
+ if (CEPH_MDS_IS_READY(i, ignore_laggy))
j++;
if (j > n)
break;
@@ -45,6 +40,20 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
return i;
}
+/*
+ * choose a random mds that is "up" (i.e. has a state > 0), or -1.
+ */
+int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
+{
+ int mds;
+
+ mds = __mdsmap_get_random_mds(m, false);
+ if (mds == m->possible_max_rank || mds == -1)
+ mds = __mdsmap_get_random_mds(m, true);
+
+ return mds == m->possible_max_rank ? -1 : mds;
+}
+
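
The new helper pair first draws only from up, non-laggy ranks and falls back to including laggy ones when no such rank exists. A userspace sketch of the same two-pass selection, with the laggy test written out explicitly:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mds_info { int state; bool laggy; };

static bool is_ready(const struct mds_info *m, bool ignore_laggy)
{
	return m->state > 0 && (ignore_laggy || !m->laggy);
}

static int get_random_mds(const struct mds_info *map, int n, bool ignore_laggy)
{
	int i, count = 0, target;

	for (i = 0; i < n; i++)		/* count the candidates */
		if (is_ready(&map[i], ignore_laggy))
			count++;
	if (!count)
		return -1;
	target = rand() % count;	/* pick one of them */
	for (i = 0; i < n; i++)
		if (is_ready(&map[i], ignore_laggy) && target-- == 0)
			return i;
	return -1;
}

int main(void)
{
	struct mds_info map[] = { { 1, true }, { 1, false }, { 0, false } };
	int mds = get_random_mds(map, 3, false);	/* pass 1: skip laggy */

	if (mds == -1)
		mds = get_random_mds(map, 3, true);	/* pass 2: allow laggy */
	printf("chose mds%d\n", mds);
	return 0;
}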
#define __decode_and_drop_type(p, end, type, bad) \
do { \
if (*p + sizeof(type) > end) \
@@ -138,14 +147,29 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
m->m_session_autoclose = ceph_decode_32(p);
m->m_max_file_size = ceph_decode_64(p);
m->m_max_mds = ceph_decode_32(p);
- m->m_num_mds = m->m_max_mds;
- m->m_info = kcalloc(m->m_num_mds, sizeof(*m->m_info), GFP_NOFS);
+ /*
+ * Pick out the number of active nodes as m_num_active_mds. It may
+ * be larger than m_max_mds while max_mds is being decreased on the
+ * cluster side; otherwise it is less than or equal to m_max_mds.
+ */
+ m->m_num_active_mds = n = ceph_decode_32(p);
+
+ /*
+ * possible_max_rank is the possible max rank. It may be larger than
+ * m_num_active_mds: for example, if max_mds == 2 in the cluster and
+ * MDS(0) is laggy and being replaced by a new MDS, we will temporarily
+ * receive a new mdsmap with m_num_active_mds == 1 and the active
+ * MDS(1), so the mds rank can be >= m_num_active_mds.
+ */
+ m->possible_max_rank = max(m->m_num_active_mds, m->m_max_mds);
+
+ m->m_info = kcalloc(m->possible_max_rank, sizeof(*m->m_info), GFP_NOFS);
if (!m->m_info)
goto nomem;
/* pick out active nodes from mds_info (state > 0) */
- n = ceph_decode_32(p);
for (i = 0; i < n; i++) {
u64 global_id;
u32 namelen;
@@ -215,18 +239,15 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
ceph_mds_state_name(state),
laggy ? "(laggy)" : "");
- if (mds < 0 || state <= 0)
+ if (mds < 0 || mds >= m->possible_max_rank) {
+ pr_warn("mdsmap_decode got incorrect mds(%d)\n", mds);
continue;
+ }
- if (mds >= m->m_num_mds) {
- int new_num = max(mds + 1, m->m_num_mds * 2);
- void *new_m_info = krealloc(m->m_info,
- new_num * sizeof(*m->m_info),
- GFP_NOFS | __GFP_ZERO);
- if (!new_m_info)
- goto nomem;
- m->m_info = new_m_info;
- m->m_num_mds = new_num;
+ if (state <= 0) {
+ pr_warn("mdsmap_decode got incorrect state(%s)\n",
+ ceph_mds_state_name(state));
+ continue;
}
info = &m->m_info[mds];
@@ -247,14 +268,6 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
info->export_targets = NULL;
}
}
- if (m->m_num_mds > m->m_max_mds) {
- /* find max up mds */
- for (i = m->m_num_mds; i >= m->m_max_mds; i--) {
- if (i == 0 || m->m_info[i-1].state > 0)
- break;
- }
- m->m_num_mds = i;
- }
/* pg_pools */
ceph_decode_32_safe(p, end, n, bad);
@@ -296,14 +309,14 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
for (i = 0; i < n; i++) {
s32 mds = ceph_decode_32(p);
- if (mds >= 0 && mds < m->m_num_mds) {
+ if (mds >= 0 && mds < m->possible_max_rank) {
if (m->m_info[mds].laggy)
num_laggy++;
}
}
m->m_num_laggy = num_laggy;
- if (n > m->m_num_mds) {
+ if (n > m->possible_max_rank) {
void *new_m_info = krealloc(m->m_info,
n * sizeof(*m->m_info),
GFP_NOFS | __GFP_ZERO);
@@ -311,7 +324,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
goto nomem;
m->m_info = new_m_info;
}
- m->m_num_mds = n;
+ m->possible_max_rank = n;
}
/* inc */
@@ -382,7 +395,7 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
{
int i;
- for (i = 0; i < m->m_num_mds; i++)
+ for (i = 0; i < m->possible_max_rank; i++)
kfree(m->m_info[i].export_targets);
kfree(m->m_info);
kfree(m->m_data_pg_pools);
@@ -396,9 +409,9 @@ bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m)
return false;
if (m->m_damaged)
return false;
- if (m->m_num_laggy > 0)
+ if (m->m_num_laggy == m->m_num_active_mds)
return false;
- for (i = 0; i < m->m_num_mds; i++) {
+ for (i = 0; i < m->possible_max_rank; i++) {
if (m->m_info[i].state == CEPH_MDS_STATE_ACTIVE)
nr_active++;
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 29a795f975df..c7f150686a53 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -107,7 +107,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-
static int ceph_sync_fs(struct super_block *sb, int wait)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
@@ -163,13 +162,13 @@ enum ceph_recover_session_mode {
ceph_recover_session_clean
};
-static const struct fs_parameter_enum ceph_mount_param_enums[] = {
- { Opt_recover_session, "no", ceph_recover_session_no },
- { Opt_recover_session, "clean", ceph_recover_session_clean },
+static const struct constant_table ceph_param_recover[] = {
+ { "no", ceph_recover_session_no },
+ { "clean", ceph_recover_session_clean },
{}
};
-static const struct fs_parameter_spec ceph_mount_param_specs[] = {
+static const struct fs_parameter_spec ceph_mount_parameters[] = {
fsparam_flag_no ("acl", Opt_acl),
fsparam_flag_no ("asyncreaddir", Opt_asyncreaddir),
fsparam_s32 ("caps_max", Opt_caps_max),
@@ -179,8 +178,8 @@ static const struct fs_parameter_spec ceph_mount_param_specs[] = {
fsparam_flag_no ("copyfrom", Opt_copyfrom),
fsparam_flag_no ("dcache", Opt_dcache),
fsparam_flag_no ("dirstat", Opt_dirstat),
- __fsparam (fs_param_is_string, "fsc", Opt_fscache,
- fs_param_neg_with_no | fs_param_v_optional),
+ fsparam_flag_no ("fsc", Opt_fscache), // fsc|nofsc
+ fsparam_string ("fsc", Opt_fscache), // fsc=...
fsparam_flag_no ("ino32", Opt_ino32),
fsparam_string ("mds_namespace", Opt_mds_namespace),
fsparam_flag_no ("poolperm", Opt_poolperm),
@@ -189,7 +188,7 @@ static const struct fs_parameter_spec ceph_mount_param_specs[] = {
fsparam_flag_no ("rbytes", Opt_rbytes),
fsparam_u32 ("readdir_max_bytes", Opt_readdir_max_bytes),
fsparam_u32 ("readdir_max_entries", Opt_readdir_max_entries),
- fsparam_enum ("recover_session", Opt_recover_session),
+ fsparam_enum ("recover_session", Opt_recover_session, ceph_param_recover),
fsparam_flag_no ("require_active_mds", Opt_require_active_mds),
fsparam_u32 ("rsize", Opt_rsize),
fsparam_string ("snapdirname", Opt_snapdirname),
@@ -198,20 +197,33 @@ static const struct fs_parameter_spec ceph_mount_param_specs[] = {
{}
};
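
This hunk tracks the fs_parse() rework: enum options now reference a struct constant_table directly from fsparam_enum() instead of going through a separate fs_parameter_enum array. A userspace analogue of the table lookup (the kernel's lookup_constant() has the same shape):

#include <stdio.h>
#include <string.h>

struct constant_table { const char *name; int value; };

static const struct constant_table param_recover[] = {
	{ "no",    0 },
	{ "clean", 1 },
	{ NULL,    0 }
};

static int lookup_constant(const struct constant_table *tbl,
			   const char *name, int not_found)
{
	for (; tbl->name; tbl++)
		if (!strcmp(tbl->name, name))
			return tbl->value;
	return not_found;
}

int main(void)
{
	printf("clean -> %d\n", lookup_constant(param_recover, "clean", -1));
	printf("bogus -> %d\n", lookup_constant(param_recover, "bogus", -1));
	return 0;
}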
-static const struct fs_parameter_description ceph_mount_parameters = {
- .name = "ceph",
- .specs = ceph_mount_param_specs,
- .enums = ceph_mount_param_enums,
-};
-
struct ceph_parse_opts_ctx {
struct ceph_options *copts;
struct ceph_mount_options *opts;
};
/*
+ * Remove adjacent slashes and then the trailing slash, unless it is
+ * the only remaining character.
+ *
+ * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
+ */
+static void canonicalize_path(char *path)
+{
+ int i, j = 0;
+
+ for (i = 0; path[i] != '\0'; i++) {
+ if (path[i] != '/' || j < 1 || path[j - 1] != '/')
+ path[j++] = path[i];
+ }
+
+ if (j > 1 && path[j - 1] == '/')
+ j--;
+ path[j] = '\0';
+}
+
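
Since canonicalize_path() rewrites the buffer in place, the examples from its comment are easy to verify with a userspace copy of the function body:

#include <stdio.h>

/* Verbatim copy of canonicalize_path() above, for a userspace check. */
static void canonicalize_path(char *path)
{
	int i, j = 0;

	for (i = 0; path[i] != '\0'; i++) {
		if (path[i] != '/' || j < 1 || path[j - 1] != '/')
			path[j++] = path[i];
	}

	if (j > 1 && path[j - 1] == '/')
		j--;
	path[j] = '\0';
}

int main(void)
{
	char a[] = "//dir1////dir2///";
	char b[] = "///";

	canonicalize_path(a);
	canonicalize_path(b);
	printf("'%s' '%s'\n", a, b);	/* prints '/dir1/dir2' '/' */
	return 0;
}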
+/*
* Parse the source parameter. Distinguish the server list from the path.
- * Internally we do not include the leading '/' in the path.
*
* The source will look like:
* <server_spec>[,<server_spec>...]:[<path>]
@@ -228,30 +240,34 @@ static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
dout("%s '%s'\n", __func__, dev_name);
if (!dev_name || !*dev_name)
- return invalf(fc, "ceph: Empty source");
+ return invalfc(fc, "Empty source");
dev_name_end = strchr(dev_name, '/');
if (dev_name_end) {
- if (strlen(dev_name_end) > 1) {
- kfree(fsopt->server_path);
- fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
- if (!fsopt->server_path)
- return -ENOMEM;
- }
+ /*
+ * The server_path will include everything passed in from userland,
+ * including the leading '/'.
+ */
+ kfree(fsopt->server_path);
+ fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+ if (!fsopt->server_path)
+ return -ENOMEM;
+
+ canonicalize_path(fsopt->server_path);
} else {
dev_name_end = dev_name + strlen(dev_name);
}
dev_name_end--; /* back up to ':' separator */
if (dev_name_end < dev_name || *dev_name_end != ':')
- return invalf(fc, "ceph: No path or : separator in source");
+ return invalfc(fc, "No path or : separator in source");
dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
if (fsopt->server_path)
dout("server path '%s'\n", fsopt->server_path);
ret = ceph_parse_mon_ips(param->string, dev_name_end - dev_name,
- pctx->copts, fc);
+ pctx->copts, fc->log.log);
if (ret)
return ret;
@@ -269,11 +285,11 @@ static int ceph_parse_mount_param(struct fs_context *fc,
unsigned int mode;
int token, ret;
- ret = ceph_parse_param(param, pctx->copts, fc);
+ ret = ceph_parse_param(param, pctx->copts, fc->log.log);
if (ret != -ENOPARAM)
return ret;
- token = fs_parse(fc, &ceph_mount_parameters, param, &result);
+ token = fs_parse(fc, ceph_mount_parameters, param, &result);
dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
if (token < 0)
return token;
@@ -300,7 +316,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
break;
case Opt_source:
if (fc->source)
- return invalf(fc, "ceph: Multiple sources specified");
+ return invalfc(fc, "Multiple sources specified");
return ceph_parse_source(param, fc);
case Opt_wsize:
if (result.uint_32 < PAGE_SIZE ||
@@ -391,7 +407,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
}
break;
#else
- return invalf(fc, "ceph: fscache support is disabled");
+ return invalfc(fc, "fscache support is disabled");
#endif
case Opt_poolperm:
if (!result.negated)
@@ -422,7 +438,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
#ifdef CONFIG_CEPH_FS_POSIX_ACL
fc->sb_flags |= SB_POSIXACL;
#else
- return invalf(fc, "ceph: POSIX ACL support is disabled");
+ return invalfc(fc, "POSIX ACL support is disabled");
#endif
} else {
fc->sb_flags &= ~SB_POSIXACL;
@@ -434,7 +450,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
return 0;
out_of_range:
- return invalf(fc, "ceph: %s out of range", param->key);
+ return invalfc(fc, "%s out of range", param->key);
}
static void destroy_mount_options(struct ceph_mount_options *args)
@@ -477,12 +493,15 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
if (ret)
return ret;
+
ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
if (ret)
return ret;
+
ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
if (ret)
return ret;
+
ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
if (ret)
return ret;
@@ -637,6 +656,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
fsc->sb = NULL;
fsc->mount_state = CEPH_MOUNT_MOUNTING;
fsc->filp_gen = 1;
+ fsc->have_copy_from2 = true;
atomic_long_set(&fsc->writeback_count, 0);
@@ -788,7 +808,6 @@ static void destroy_caches(void)
ceph_fscache_unregister();
}
-
/*
* ceph_umount_begin - initiate forced umount. Tear down the
* mount, skipping steps that may hang while waiting for server(s).
@@ -868,9 +887,6 @@ out:
return root;
}
-
-
-
/*
* mount: join the ceph cluster, and open root directory.
*/
@@ -885,7 +901,9 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
mutex_lock(&fsc->client->mount_mutex);
if (!fsc->sb->s_root) {
- const char *path;
+ const char *path = fsc->mount_options->server_path ?
+ fsc->mount_options->server_path + 1 : "";
+
err = __ceph_open_session(fsc->client, started);
if (err < 0)
goto out;
@@ -897,13 +915,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
goto out;
}
- if (!fsc->mount_options->server_path) {
- path = "";
- dout("mount opening path \\t\n");
- } else {
- path = fsc->mount_options->server_path + 1;
- dout("mount opening path %s\n", path);
- }
+ dout("mount opening path '%s'\n", path);
ceph_fs_debugfs_init(fsc);
@@ -1018,11 +1030,7 @@ static int ceph_get_tree(struct fs_context *fc)
dout("ceph_get_tree\n");
if (!fc->source)
- return invalf(fc, "ceph: No source");
-
-#ifdef CONFIG_CEPH_FS_POSIX_ACL
- fc->sb_flags |= SB_POSIXACL;
-#endif
+ return invalfc(fc, "No source");
/* create client (which we may/may not use) */
fsc = create_fs_client(pctx->opts, pctx->copts);
@@ -1070,6 +1078,11 @@ static int ceph_get_tree(struct fs_context *fc)
return 0;
out_splat:
+ if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) {
+ pr_info("No mds server is up or the cluster is laggy\n");
+ err = -EHOSTUNREACH;
+ }
+
ceph_mdsc_close_sessions(fsc->mdsc);
deactivate_locked_super(sb);
goto out_final;
@@ -1141,6 +1154,10 @@ static int ceph_init_fs_context(struct fs_context *fc)
fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
fsopt->congestion_kb = default_congestion_kb();
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ fc->sb_flags |= SB_POSIXACL;
+#endif
+
fc->fs_private = pctx;
fc->ops = &ceph_context_ops;
return 0;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3bf1a01cd536..037cdfb2ad4f 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -91,7 +91,7 @@ struct ceph_mount_options {
char *snapdir_name; /* default ".snap" */
char *mds_namespace; /* default NULL */
- char *server_path; /* default "/" */
+ char *server_path; /* default NULL (means "/") */
char *fscache_uniq; /* default NULL */
};
@@ -106,6 +106,8 @@ struct ceph_fs_client {
unsigned long last_auto_reconnect;
bool blacklisted;
+ bool have_copy_from2;
+
u32 filp_gen;
loff_t max_file_size;
diff --git a/fs/ceph/util.c b/fs/ceph/util.c
new file mode 100644
index 000000000000..2c34875675bf
--- /dev/null
+++ b/fs/ceph/util.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Some non-inline ceph helpers
+ */
+#include <linux/module.h>
+#include <linux/ceph/types.h>
+
+/*
+ * return true if @layout appears to be valid
+ */
+int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
+{
+ __u32 su = layout->stripe_unit;
+ __u32 sc = layout->stripe_count;
+ __u32 os = layout->object_size;
+
+ /* stripe unit, object size must be non-zero, 64k increment */
+ if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1)))
+ return 0;
+ if (!os || (os & (CEPH_MIN_STRIPE_UNIT-1)))
+ return 0;
+ /* object size must be a multiple of stripe unit */
+ if (os < su || os % su)
+ return 0;
+ /* stripe count must be non-zero */
+ if (!sc)
+ return 0;
+ return 1;
+}
+
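
The checks above are self-contained and easy to exercise in isolation; assuming CEPH_MIN_STRIPE_UNIT is 64 KiB (per the 64k-increment comment), a condensed userspace copy behaves as follows:

#include <stdio.h>

#define CEPH_MIN_STRIPE_UNIT 65536	/* 64 KiB, assumed per the comment */

/* Condensed userspace copy of ceph_file_layout_is_valid()'s checks. */
static int layout_is_valid(unsigned su, unsigned sc, unsigned os)
{
	if (!su || (su & (CEPH_MIN_STRIPE_UNIT - 1)))
		return 0;
	if (!os || (os & (CEPH_MIN_STRIPE_UNIT - 1)))
		return 0;
	if (os < su || os % su)
		return 0;
	return sc != 0;
}

int main(void)
{
	/* 64 KiB stripe unit, 4 stripes, 1 MiB objects -> valid */
	printf("%d\n", layout_is_valid(65536, 4, 1048576));
	/* object size not 64 KiB aligned -> invalid */
	printf("%d\n", layout_is_valid(65536, 4, 65536 * 3 + 1));
	return 0;
}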
+void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
+ struct ceph_file_layout_legacy *legacy)
+{
+ fl->stripe_unit = le32_to_cpu(legacy->fl_stripe_unit);
+ fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
+ fl->object_size = le32_to_cpu(legacy->fl_object_size);
+ fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
+ if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
+ fl->stripe_count == 0 && fl->object_size == 0)
+ fl->pool_id = -1;
+}
+
+void ceph_file_layout_to_legacy(struct ceph_file_layout *fl,
+ struct ceph_file_layout_legacy *legacy)
+{
+ legacy->fl_stripe_unit = cpu_to_le32(fl->stripe_unit);
+ legacy->fl_stripe_count = cpu_to_le32(fl->stripe_count);
+ legacy->fl_object_size = cpu_to_le32(fl->object_size);
+ if (fl->pool_id >= 0)
+ legacy->fl_pg_pool = cpu_to_le32(fl->pool_id);
+ else
+ legacy->fl_pg_pool = 0;
+}
+
+int ceph_flags_to_mode(int flags)
+{
+ int mode;
+
+#ifdef O_DIRECTORY /* fixme */
+ if ((flags & O_DIRECTORY) == O_DIRECTORY)
+ return CEPH_FILE_MODE_PIN;
+#endif
+
+ switch (flags & O_ACCMODE) {
+ case O_WRONLY:
+ mode = CEPH_FILE_MODE_WR;
+ break;
+ case O_RDONLY:
+ mode = CEPH_FILE_MODE_RD;
+ break;
+ case O_RDWR:
+ case O_ACCMODE: /* this is what the VFS does */
+ mode = CEPH_FILE_MODE_RDWR;
+ break;
+ }
+#ifdef O_LAZY
+ if (flags & O_LAZY)
+ mode |= CEPH_FILE_MODE_LAZY;
+#endif
+
+ return mode;
+}
+
+int ceph_caps_for_mode(int mode)
+{
+ int caps = CEPH_CAP_PIN;
+
+ if (mode & CEPH_FILE_MODE_RD)
+ caps |= CEPH_CAP_FILE_SHARED |
+ CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
+ if (mode & CEPH_FILE_MODE_WR)
+ caps |= CEPH_CAP_FILE_EXCL |
+ CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
+ CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
+ if (mode & CEPH_FILE_MODE_LAZY)
+ caps |= CEPH_CAP_FILE_LAZYIO;
+
+ return caps;
+}
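
Together these helpers translate open(2) flags into a file mode and then into a cap mask. A userspace copy of the O_ACCMODE switch shows the mapping (the CEPH_FILE_MODE_* values below are taken from ceph_fs.h):

#include <fcntl.h>
#include <stdio.h>

#define CEPH_FILE_MODE_PIN	0
#define CEPH_FILE_MODE_RD	1
#define CEPH_FILE_MODE_WR	2
#define CEPH_FILE_MODE_RDWR	3	/* RD | WR */

/* Userspace copy of the O_ACCMODE switch in ceph_flags_to_mode(). */
static int flags_to_mode(int flags)
{
	switch (flags & O_ACCMODE) {
	case O_WRONLY:
		return CEPH_FILE_MODE_WR;
	case O_RDONLY:
		return CEPH_FILE_MODE_RD;
	default:	/* O_RDWR, and O_ACCMODE as the VFS does */
		return CEPH_FILE_MODE_RDWR;
	}
}

int main(void)
{
	printf("O_RDWR   -> mode %d\n", flags_to_mode(O_RDWR));		/* 3 */
	printf("O_WRONLY -> mode %d\n", flags_to_mode(O_WRONLY));	/* 2 */
	return 0;
}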
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index cb18ee637cb7..7b8a070a782d 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -655,7 +655,7 @@ static int __build_xattrs(struct inode *inode)
u32 len;
const char *name, *val;
struct ceph_inode_info *ci = ceph_inode(inode);
- int xattr_version;
+ u64 xattr_version;
struct ceph_inode_xattr **xattrs = NULL;
int err = 0;
int i;
@@ -851,7 +851,7 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
req_mask = __get_request_mask(inode);
spin_lock(&ci->i_ceph_lock);
- dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
+ dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name,
ci->i_xattrs.version, ci->i_xattrs.index_version);
if (ci->i_xattrs.version == 0 ||
@@ -1078,7 +1078,8 @@ retry:
}
}
- dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
+ dout("setxattr %p name '%s' issued %s\n", inode, name,
+ ceph_cap_string(issued));
__build_xattrs(inode);
required_blob_size = __get_required_blob_size(ci, name_len, val_len);
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 19f6e592b941..276e4b5ea8e0 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -611,12 +611,12 @@ static int cifs_stats_proc_open(struct inode *inode, struct file *file)
return single_open(file, cifs_stats_proc_show, NULL);
}
-static const struct file_operations cifs_stats_proc_fops = {
- .open = cifs_stats_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_stats_proc_write,
+static const struct proc_ops cifs_stats_proc_ops = {
+ .proc_open = cifs_stats_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = cifs_stats_proc_write,
};
#ifdef CONFIG_CIFS_SMB_DIRECT
@@ -640,12 +640,12 @@ static int name##_open(struct inode *inode, struct file *file) \
return single_open(file, name##_proc_show, NULL); \
} \
\
-static const struct file_operations cifs_##name##_proc_fops = { \
- .open = name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- .write = name##_write, \
+static const struct proc_ops cifs_##name##_proc_fops = { \
+ .proc_open = name##_open, \
+ .proc_read = seq_read, \
+ .proc_lseek = seq_lseek, \
+ .proc_release = single_release, \
+ .proc_write = name##_write, \
}
PROC_FILE_DEFINE(rdma_readwrite_threshold);
@@ -659,11 +659,11 @@ PROC_FILE_DEFINE(smbd_receive_credit_max);
#endif
static struct proc_dir_entry *proc_fs_cifs;
-static const struct file_operations cifsFYI_proc_fops;
-static const struct file_operations cifs_lookup_cache_proc_fops;
-static const struct file_operations traceSMB_proc_fops;
-static const struct file_operations cifs_security_flags_proc_fops;
-static const struct file_operations cifs_linux_ext_proc_fops;
+static const struct proc_ops cifsFYI_proc_ops;
+static const struct proc_ops cifs_lookup_cache_proc_ops;
+static const struct proc_ops traceSMB_proc_ops;
+static const struct proc_ops cifs_security_flags_proc_ops;
+static const struct proc_ops cifs_linux_ext_proc_ops;
void
cifs_proc_init(void)
@@ -678,18 +678,18 @@ cifs_proc_init(void)
proc_create_single("open_files", 0400, proc_fs_cifs,
cifs_debug_files_proc_show);
- proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_fops);
- proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_fops);
- proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_fops);
+ proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_ops);
+ proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_ops);
+ proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_ops);
proc_create("LinuxExtensionsEnabled", 0644, proc_fs_cifs,
- &cifs_linux_ext_proc_fops);
+ &cifs_linux_ext_proc_ops);
proc_create("SecurityFlags", 0644, proc_fs_cifs,
- &cifs_security_flags_proc_fops);
+ &cifs_security_flags_proc_ops);
proc_create("LookupCacheEnabled", 0644, proc_fs_cifs,
- &cifs_lookup_cache_proc_fops);
+ &cifs_lookup_cache_proc_ops);
#ifdef CONFIG_CIFS_DFS_UPCALL
- proc_create("dfscache", 0644, proc_fs_cifs, &dfscache_proc_fops);
+ proc_create("dfscache", 0644, proc_fs_cifs, &dfscache_proc_ops);
#endif
#ifdef CONFIG_CIFS_SMB_DIRECT
@@ -774,12 +774,12 @@ static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer,
return count;
}
-static const struct file_operations cifsFYI_proc_fops = {
- .open = cifsFYI_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifsFYI_proc_write,
+static const struct proc_ops cifsFYI_proc_ops = {
+ .proc_open = cifsFYI_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = cifsFYI_proc_write,
};
static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
@@ -805,12 +805,12 @@ static ssize_t cifs_linux_ext_proc_write(struct file *file,
return count;
}
-static const struct file_operations cifs_linux_ext_proc_fops = {
- .open = cifs_linux_ext_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_linux_ext_proc_write,
+static const struct proc_ops cifs_linux_ext_proc_ops = {
+ .proc_open = cifs_linux_ext_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = cifs_linux_ext_proc_write,
};
static int cifs_lookup_cache_proc_show(struct seq_file *m, void *v)
@@ -836,12 +836,12 @@ static ssize_t cifs_lookup_cache_proc_write(struct file *file,
return count;
}
-static const struct file_operations cifs_lookup_cache_proc_fops = {
- .open = cifs_lookup_cache_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_lookup_cache_proc_write,
+static const struct proc_ops cifs_lookup_cache_proc_ops = {
+ .proc_open = cifs_lookup_cache_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = cifs_lookup_cache_proc_write,
};
static int traceSMB_proc_show(struct seq_file *m, void *v)
@@ -867,12 +867,12 @@ static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer,
return count;
}
-static const struct file_operations traceSMB_proc_fops = {
- .open = traceSMB_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = traceSMB_proc_write,
+static const struct proc_ops traceSMB_proc_ops = {
+ .proc_open = traceSMB_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = traceSMB_proc_write,
};
static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
@@ -978,12 +978,12 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
return count;
}
-static const struct file_operations cifs_security_flags_proc_fops = {
- .open = cifs_security_flags_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_security_flags_proc_write,
+static const struct proc_ops cifs_security_flags_proc_ops = {
+ .proc_open = cifs_security_flags_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = cifs_security_flags_proc_write,
};
#else
inline void cifs_proc_init(void)
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 41957b82d796..606f26d862dc 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -120,17 +120,17 @@ cifs_build_devname(char *nodename, const char *prepath)
/**
- * cifs_compose_mount_options - creates mount options for refferral
+ * cifs_compose_mount_options - creates mount options for referral
* @sb_mountdata: parent/root DFS mount options (template)
* @fullpath: full path in UNC format
- * @ref: server's referral
+ * @ref: optional server's referral
* @devname: optional pointer for saving device name
*
* creates mount options for submount based on template options sb_mountdata
* and replacing unc,ip,prefixpath options with ones we've got from ref_unc.
*
* Returns: pointer to new mount options or ERR_PTR.
- * Caller is responcible for freeing retunrned value if it is not error.
+ * Caller is responsible for freeing returned value if it is not error.
*/
char *cifs_compose_mount_options(const char *sb_mountdata,
const char *fullpath,
@@ -150,18 +150,27 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
if (sb_mountdata == NULL)
return ERR_PTR(-EINVAL);
- if (strlen(fullpath) - ref->path_consumed) {
- prepath = fullpath + ref->path_consumed;
- /* skip initial delimiter */
- if (*prepath == '/' || *prepath == '\\')
- prepath++;
- }
+ if (ref) {
+ if (strlen(fullpath) - ref->path_consumed) {
+ prepath = fullpath + ref->path_consumed;
+ /* skip initial delimiter */
+ if (*prepath == '/' || *prepath == '\\')
+ prepath++;
+ }
- name = cifs_build_devname(ref->node_name, prepath);
- if (IS_ERR(name)) {
- rc = PTR_ERR(name);
- name = NULL;
- goto compose_mount_options_err;
+ name = cifs_build_devname(ref->node_name, prepath);
+ if (IS_ERR(name)) {
+ rc = PTR_ERR(name);
+ name = NULL;
+ goto compose_mount_options_err;
+ }
+ } else {
+ name = cifs_build_devname((char *)fullpath, NULL);
+ if (IS_ERR(name)) {
+ rc = PTR_ERR(name);
+ name = NULL;
+ goto compose_mount_options_err;
+ }
}
rc = dns_resolve_server_name_to_ip(name, &srvIP);
@@ -225,6 +234,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
if (devname)
*devname = name;
+ else
+ kfree(name);
/*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
/*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
@@ -241,23 +252,23 @@ compose_mount_options_err:
}
/**
- * cifs_dfs_do_refmount - mounts specified path using provided refferal
+ * cifs_dfs_do_mount - mounts specified path using DFS full path
+ *
+ * Always pass down @fullpath to smb3_do_mount() so we can use the root server
+ * to perform failover in case we failed to connect to the first target in the
+ * referral.
+ *
* @cifs_sb: parent/root superblock
* @fullpath: full path in UNC format
- * @ref: server's referral
*/
-static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
- struct cifs_sb_info *cifs_sb,
- const char *fullpath, const struct dfs_info3_param *ref)
+static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
+ struct cifs_sb_info *cifs_sb,
+ const char *fullpath)
{
struct vfsmount *mnt;
char *mountdata;
char *devname;
- /*
- * Always pass down the DFS full path to smb3_do_mount() so we
- * can use it later for failover.
- */
devname = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
if (!devname)
return ERR_PTR(-ENOMEM);
@@ -266,7 +277,7 @@ static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
/* strip first '\' from fullpath */
mountdata = cifs_compose_mount_options(cifs_sb->mountdata,
- fullpath + 1, ref, NULL);
+ fullpath + 1, NULL, NULL);
if (IS_ERR(mountdata)) {
kfree(devname);
return (struct vfsmount *)mountdata;
@@ -278,28 +289,16 @@ static struct vfsmount *cifs_dfs_do_refmount(struct dentry *mntpt,
return mnt;
}
-static void dump_referral(const struct dfs_info3_param *ref)
-{
- cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name);
- cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name);
- cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n",
- ref->flags, ref->server_type);
- cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n",
- ref->ref_flag, ref->path_consumed);
-}
-
/*
* Create a vfsmount that we can automount
*/
static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
{
- struct dfs_info3_param referral = {0};
struct cifs_sb_info *cifs_sb;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
char *full_path, *root_path;
unsigned int xid;
- int len;
int rc;
struct vfsmount *mnt;
@@ -357,7 +356,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
if (!rc) {
rc = dfs_cache_find(xid, ses, cifs_sb->local_nls,
cifs_remap(cifs_sb), full_path + 1,
- &referral, NULL);
+ NULL, NULL);
}
free_xid(xid);
@@ -366,26 +365,16 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
mnt = ERR_PTR(rc);
goto free_root_path;
}
-
- dump_referral(&referral);
-
- len = strlen(referral.node_name);
- if (len < 2) {
- cifs_dbg(VFS, "%s: Net Address path too short: %s\n",
- __func__, referral.node_name);
- mnt = ERR_PTR(-EINVAL);
- goto free_dfs_ref;
- }
/*
- * cifs_mount() will retry every available node server in case
- * of failures.
+ * OK - we were able to get and cache a referral for @full_path.
+ *
+ * Now, pass it down to cifs_mount() and it will retry every available
+ * node server in case of failures - no need to do it here.
*/
- mnt = cifs_dfs_do_refmount(mntpt, cifs_sb, full_path, &referral);
- cifs_dbg(FYI, "%s: cifs_dfs_do_refmount:%s , mnt:%p\n", __func__,
- referral.node_name, mnt);
+ mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
+ cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__,
+ full_path + 1, mnt);
-free_dfs_ref:
- free_dfs_info_param(&referral);
free_root_path:
kfree(root_path);
free_full_path:
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index 0f0dc1c1fe41..153d5c842a9b 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -65,6 +65,11 @@ struct smb3_key_debug_info {
__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
} __packed;
+struct smb3_notify {
+ __u32 completion_filter;
+ bool watch_tree;
+} __packed;
+
#define CIFS_IOCTL_MAGIC 0xCF
#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
#define CIFS_IOC_SET_INTEGRITY _IO(CIFS_IOCTL_MAGIC, 4)
@@ -72,3 +77,4 @@ struct smb3_key_debug_info {
#define CIFS_ENUMERATE_SNAPSHOTS _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)
#define CIFS_QUERY_INFO _IOWR(CIFS_IOCTL_MAGIC, 7, struct smb_query_info)
#define CIFS_DUMP_KEY _IOWR(CIFS_IOCTL_MAGIC, 8, struct smb3_key_debug_info)
+#define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
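
The new CIFS_IOC_NOTIFY ioctl takes the struct smb3_notify defined above. A hedged userspace sketch of issuing it against an open directory; the completion_filter value 0x1 is assumed here to be FILE_NOTIFY_CHANGE_FILE_NAME per MS-SMB2:

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/types.h>

/* Mirrors struct smb3_notify and CIFS_IOC_NOTIFY from fs/cifs/cifs_ioctl.h. */
struct smb3_notify {
	__u32 completion_filter;
	bool watch_tree;
} __attribute__((packed));

#define CIFS_IOCTL_MAGIC 0xCF
#define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)

int main(void)
{
	struct smb3_notify n = {
		.completion_filter = 0x1,	/* FILE_NOTIFY_CHANGE_FILE_NAME (assumed) */
		.watch_tree = false,		/* this directory only, not the subtree */
	};
	int fd = open("/mnt/cifs/dir", O_RDONLY);

	if (fd < 0 || ioctl(fd, CIFS_IOC_NOTIFY, &n) < 0)
		perror("CIFS_IOC_NOTIFY");
	return 0;
}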
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 96ae72b556ac..716574aab3b6 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -601,7 +601,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
*pmode |= (S_IXUGO & (*pbits_to_set));
- cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
+ cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
return;
}
@@ -630,7 +630,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
if (mode & S_IXUGO)
*pace_flags |= SET_FILE_EXEC_RIGHTS;
- cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
+ cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
mode, *pace_flags);
return;
}
@@ -802,6 +802,26 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
return;
}
+unsigned int setup_authusers_ACE(struct cifs_ace *pntace)
+{
+ int i;
+ unsigned int ace_size = 20;
+
+ pntace->type = ACCESS_ALLOWED_ACE_TYPE;
+ pntace->flags = 0x0;
+ pntace->access_req = cpu_to_le32(GENERIC_ALL);
+ pntace->sid.num_subauth = 1;
+ pntace->sid.revision = 1;
+ for (i = 0; i < NUM_AUTHS; i++)
+ pntace->sid.authority[i] = sid_authusers.authority[i];
+
+ pntace->sid.sub_auth[0] = sid_authusers.sub_auth[0];
+
+ /* size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth*4) */
+ pntace->size = cpu_to_le16(ace_size);
+ return ace_size;
+}
+
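
setup_authusers_ACE() fills in a single allow ACE for S-1-5-11 (Authenticated Users) with GENERIC_ALL and returns its 20-byte on-wire size so a caller can advance through the DACL buffer. A simplified userspace mirror (struct layout and constants are assumptions for illustration, not the cifs definitions):

#include <stdint.h>
#include <stdio.h>

#define NUM_AUTHS 6

/* Simplified mirrors of struct cifs_sid / struct cifs_ace (assumed layout). */
struct sid {
	uint8_t revision, num_subauth;
	uint8_t authority[NUM_AUTHS];
	uint32_t sub_auth[5];
};

struct ace {
	uint8_t type, flags;
	uint16_t size;
	uint32_t access_req;
	struct sid sid;
};

/* S-1-5-11, the well-known Authenticated Users SID */
static const struct sid sid_authusers = { 1, 1, { 0, 0, 0, 0, 0, 5 }, { 11 } };

static unsigned int setup_authusers_ace(struct ace *pntace)
{
	unsigned int ace_size = 20;	/* 1+1+2+4 header + 1+1+6+4 SID */

	pntace->type = 0;			/* ACCESS_ALLOWED_ACE_TYPE */
	pntace->flags = 0;
	pntace->access_req = 0x10000000;	/* GENERIC_ALL */
	pntace->sid = sid_authusers;
	pntace->size = (uint16_t)ace_size;
	return ace_size;
}

int main(void)
{
	struct ace a;

	printf("ACE consumes %u bytes in the DACL\n", setup_authusers_ace(&a));
	return 0;
}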
/*
* Fill in the special SID based on the mode. See
* http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
@@ -1064,7 +1084,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
struct cifs_ntsd *pntsd = NULL;
int oplock = 0;
unsigned int xid;
- int rc, create_options = 0;
+ int rc;
struct cifs_tcon *tcon;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
struct cifs_fid fid;
@@ -1076,13 +1096,10 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
tcon = tlink_tcon(tlink);
xid = get_xid();
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = READ_CONTROL;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
@@ -1127,7 +1144,7 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
{
int oplock = 0;
unsigned int xid;
- int rc, access_flags, create_options = 0;
+ int rc, access_flags;
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
@@ -1140,9 +1157,6 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
tcon = tlink_tcon(tlink);
xid = get_xid();
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
access_flags = WRITE_OWNER;
else
@@ -1151,7 +1165,7 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = access_flags;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 5492b9860baa..46ebaf3f0824 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -275,7 +275,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_ffree = 0; /* unlimited */
if (server->ops->queryfs)
- rc = server->ops->queryfs(xid, tcon, buf);
+ rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
free_xid(xid);
return 0;
@@ -414,7 +414,7 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
seq_puts(s, "ntlm");
break;
case Kerberos:
- seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid));
+ seq_puts(s, "krb5");
break;
case RawNTLMSSP:
seq_puts(s, "ntlmssp");
@@ -427,6 +427,10 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
if (ses->sign)
seq_puts(s, "i");
+
+ if (ses->sectype == Kerberos)
+ seq_printf(s, ",cruid=%u",
+ from_kuid_munged(&init_user_ns, ses->cred_uid));
}
static void
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index b59dc7478130..b87456bae1a1 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -149,9 +149,12 @@ extern ssize_t cifs_file_copychunk_range(unsigned int xid,
size_t len, unsigned int flags);
extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+extern void cifs_setsize(struct inode *inode, loff_t offset);
+extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
+
#ifdef CONFIG_CIFS_NFSD_EXPORT
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.24"
+#define CIFS_VERSION "2.25"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 40705e862451..de82cfa44b1a 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -298,7 +298,8 @@ struct smb_version_operations {
const char *, struct dfs_info3_param **,
unsigned int *, const struct nls_table *, int);
/* informational QFS call */
- void (*qfs_tcon)(const unsigned int, struct cifs_tcon *);
+ void (*qfs_tcon)(const unsigned int, struct cifs_tcon *,
+ struct cifs_sb_info *);
/* check if a path is accessible or not */
int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const char *);
@@ -409,7 +410,7 @@ struct smb_version_operations {
struct cifsInodeInfo *);
/* query remote filesystem */
int (*queryfs)(const unsigned int, struct cifs_tcon *,
- struct kstatfs *);
+ struct cifs_sb_info *, struct kstatfs *);
/* send mandatory brlock to the server */
int (*mand_lock)(const unsigned int, struct cifsFileInfo *, __u64,
__u64, __u32, int, int, bool);
@@ -430,6 +431,8 @@ struct smb_version_operations {
struct cifsFileInfo *src_file);
int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *src_file, void __user *);
+ int (*notify)(const unsigned int xid, struct file *pfile,
+ void __user *pbuf);
int (*query_mf_symlink)(unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const unsigned char *,
char *, unsigned int *);
@@ -490,6 +493,7 @@ struct smb_version_operations {
/* ioctl passthrough for query_info */
int (*ioctl_query_info)(const unsigned int xid,
struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
__le16 *path, int is_dir,
unsigned long p);
/* make unix special files (block, char, fifo, socket) */
@@ -1588,6 +1592,7 @@ struct mid_q_entry {
mid_callback_t *callback; /* call completion callback */
mid_handle_t *handle; /* call handle mid callback */
void *callback_data; /* general purpose pointer for callback */
+ struct task_struct *creator;
void *resp_buf; /* pointer to received SMB header */
unsigned int resp_buf_size;
int mid_state; /* wish this were enum but can not pass to wait_event */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 9c229408a251..89eaaf46d1ca 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -213,6 +213,7 @@ extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
const struct cifs_fid *, u32 *);
extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
const char *, int);
+extern unsigned int setup_authusers_ACE(struct cifs_ace *pace);
extern unsigned int setup_special_mode_ACE(struct cifs_ace *pace, __u64 nmode);
extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
@@ -596,6 +597,9 @@ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
void extract_unc_hostname(const char *unc, const char **h, size_t *len);
int copy_path_name(char *dst, const char *src);
+int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov,
+ int resp_buftype,
+ struct cifs_search_info *srch_inf);
#ifdef CONFIG_CIFS_DFS_UPCALL
static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
@@ -608,4 +612,12 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
}
#endif
+static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
+{
+ if (cifs_sb && (backup_cred(cifs_sb)))
+ return options | CREATE_OPEN_BACKUP_INTENT;
+ else
+ return options;
+}
+
#endif /* _CIFSPROTO_H */
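
Call sites throughout this series collapse the backup_cred() boilerplate into this helper. A typical open, as seen in later hunks of this diff (sketch; surrounding declarations assumed):

    struct cifs_open_parms oparms;

    oparms.tcon = tcon;
    oparms.cifs_sb = cifs_sb;
    oparms.desired_access = GENERIC_READ;
    /* CREATE_OPEN_BACKUP_INTENT is ORed in by the helper when backup_cred() */
    oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
    oparms.disposition = FILE_OPEN;
    oparms.path = path;
    oparms.fid = &fid;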
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index cc86a67225d1..3c89569e7210 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -260,7 +260,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
if (server->tcpStatus != CifsNeedReconnect)
break;
- if (--retries)
+ if (retries && --retries)
continue;
/*
@@ -4619,7 +4619,7 @@ findFirstRetry:
psrch_inf->unicode = false;
psrch_inf->ntwrk_buf_start = (char *)pSMBr;
- psrch_inf->smallBuf = 0;
+ psrch_inf->smallBuf = false;
psrch_inf->srch_entries_start =
(char *) &pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
@@ -4753,7 +4753,7 @@ int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
cifs_buf_release(psrch_inf->ntwrk_buf_start);
psrch_inf->srch_entries_start = response_data;
psrch_inf->ntwrk_buf_start = (char *)pSMB;
- psrch_inf->smallBuf = 0;
+ psrch_inf->smallBuf = false;
if (parms->EndofSearch)
psrch_inf->endOfSearch = true;
else
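
The cifs_reconnect_tcon() change fixes a classic pre-decrement pitfall: once retries reaches 0, `--retries` wraps to -1 before the test, which is still truthy, so the loop never gave up. A standalone userspace demonstration (illustrative only):

    #include <stdio.h>

    int main(void)
    {
            int retries = 0;

            /* old test was: if (--retries) continue;
             * 0 pre-decrements to -1, still truthy, so it retried forever;
             * the short-circuit below leaves retries at 0 and stops */
            if (retries && --retries)
                    printf("would retry\n");
            else
                    printf("stop retrying\n");
            return 0;
    }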
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 05ea0e2b7e0e..4804d1df8c1c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3709,8 +3709,10 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
struct cifs_sb_info *old = CIFS_SB(sb);
struct cifs_sb_info *new = mnt_data->cifs_sb;
- bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
- bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
+ bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+ old->prepath;
+ bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+ new->prepath;
if (old_set && new_set && !strcmp(new->prepath, old->prepath))
return 1;
@@ -4149,7 +4151,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_gid = pvolume_info->linux_gid;
cifs_sb->mnt_file_mode = pvolume_info->file_mode;
cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
- cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n",
+ cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
cifs_sb->actimeo = pvolume_info->actimeo;
@@ -4363,7 +4365,7 @@ static int mount_get_conns(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
/* do not care if a following call succeed - informational */
if (!tcon->pipe && server->ops->qfs_tcon) {
- server->ops->qfs_tcon(*xid, tcon);
+ server->ops->qfs_tcon(*xid, tcon, cifs_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
if (tcon->fsDevInfo.DeviceCharacteristics &
cpu_to_le32(FILE_READ_ONLY_DEVICE))
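
The format-string change in cifs_setup_cifs_sb() is not cosmetic: mode bits are conventionally read in octal, and `0x%hx` printed them in hex. A quick userspace check:

    #include <stdio.h>

    int main(void)
    {
            unsigned short mode = 0755;

            printf("old: 0x%hx  new: %04ho\n", mode, mode);
            /* prints: old: 0x1ed  new: 0755 */
            return 0;
    }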
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 2faa05860a48..43c1b43a07ec 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -5,11 +5,10 @@
* Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
*/
-#include <linux/rcupdate.h>
-#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
+#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include "cifsglob.h"
@@ -22,67 +21,68 @@
#include "dfs_cache.h"
-#define DFS_CACHE_HTABLE_SIZE 32
-#define DFS_CACHE_MAX_ENTRIES 64
+#define CACHE_HTABLE_SIZE 32
+#define CACHE_MAX_ENTRIES 64
#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
DFSREF_STORAGE_SERVER))
-struct dfs_cache_tgt {
- char *t_name;
- struct list_head t_list;
+struct cache_dfs_tgt {
+ char *name;
+ struct list_head list;
};
-struct dfs_cache_entry {
- struct hlist_node ce_hlist;
- const char *ce_path;
- int ce_ttl;
- int ce_srvtype;
- int ce_flags;
- struct timespec64 ce_etime;
- int ce_path_consumed;
- int ce_numtgts;
- struct list_head ce_tlist;
- struct dfs_cache_tgt *ce_tgthint;
- struct rcu_head ce_rcu;
+struct cache_entry {
+ struct hlist_node hlist;
+ const char *path;
+ int ttl;
+ int srvtype;
+ int flags;
+ struct timespec64 etime;
+ int path_consumed;
+ int numtgts;
+ struct list_head tlist;
+ struct cache_dfs_tgt *tgthint;
};
-static struct kmem_cache *dfs_cache_slab __read_mostly;
-
-struct dfs_cache_vol_info {
- char *vi_fullpath;
- struct smb_vol vi_vol;
- char *vi_mntdata;
- struct list_head vi_list;
+struct vol_info {
+ char *fullpath;
+ spinlock_t smb_vol_lock;
+ struct smb_vol smb_vol;
+ char *mntdata;
+ struct list_head list;
+ struct list_head rlist;
+ struct kref refcnt;
};
-struct dfs_cache {
- struct mutex dc_lock;
- struct nls_table *dc_nlsc;
- struct list_head dc_vol_list;
- int dc_ttl;
- struct delayed_work dc_refresh;
-};
+static struct kmem_cache *cache_slab __read_mostly;
+static struct workqueue_struct *dfscache_wq __read_mostly;
-static struct dfs_cache dfs_cache;
+static int cache_ttl;
+static DEFINE_SPINLOCK(cache_ttl_lock);
+
+static struct nls_table *cache_nlsc;
/*
* Number of entries in the cache
*/
-static size_t dfs_cache_count;
+static atomic_t cache_count;
+
+static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
+static DECLARE_RWSEM(htable_rw_lock);
-static DEFINE_MUTEX(dfs_cache_list_lock);
-static struct hlist_head dfs_cache_htable[DFS_CACHE_HTABLE_SIZE];
+static LIST_HEAD(vol_list);
+static DEFINE_SPINLOCK(vol_list_lock);
static void refresh_cache_worker(struct work_struct *work);
-static inline bool is_path_valid(const char *path)
-{
- return path && (strchr(path + 1, '\\') || strchr(path + 1, '/'));
-}
+static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
-static inline int get_normalized_path(const char *path, char **npath)
+static int get_normalized_path(const char *path, char **npath)
{
+ if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
+ return -EINVAL;
+
if (*path == '\\') {
*npath = (char *)path;
} else {
@@ -100,57 +100,48 @@ static inline void free_normalized_path(const char *path, char *npath)
kfree(npath);
}
-static inline bool cache_entry_expired(const struct dfs_cache_entry *ce)
+static inline bool cache_entry_expired(const struct cache_entry *ce)
{
struct timespec64 ts;
ktime_get_coarse_real_ts64(&ts);
- return timespec64_compare(&ts, &ce->ce_etime) >= 0;
+ return timespec64_compare(&ts, &ce->etime) >= 0;
}
-static inline void free_tgts(struct dfs_cache_entry *ce)
+static inline void free_tgts(struct cache_entry *ce)
{
- struct dfs_cache_tgt *t, *n;
+ struct cache_dfs_tgt *t, *n;
- list_for_each_entry_safe(t, n, &ce->ce_tlist, t_list) {
- list_del(&t->t_list);
- kfree(t->t_name);
+ list_for_each_entry_safe(t, n, &ce->tlist, list) {
+ list_del(&t->list);
+ kfree(t->name);
kfree(t);
}
}
-static void free_cache_entry(struct rcu_head *rcu)
+static inline void flush_cache_ent(struct cache_entry *ce)
{
- struct dfs_cache_entry *ce = container_of(rcu, struct dfs_cache_entry,
- ce_rcu);
- kmem_cache_free(dfs_cache_slab, ce);
-}
-
-static inline void flush_cache_ent(struct dfs_cache_entry *ce)
-{
- if (hlist_unhashed(&ce->ce_hlist))
- return;
-
- hlist_del_init_rcu(&ce->ce_hlist);
- kfree_const(ce->ce_path);
+ hlist_del_init(&ce->hlist);
+ kfree(ce->path);
free_tgts(ce);
- dfs_cache_count--;
- call_rcu(&ce->ce_rcu, free_cache_entry);
+ atomic_dec(&cache_count);
+ kmem_cache_free(cache_slab, ce);
}
static void flush_cache_ents(void)
{
int i;
- rcu_read_lock();
- for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++) {
- struct hlist_head *l = &dfs_cache_htable[i];
- struct dfs_cache_entry *ce;
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+ struct hlist_head *l = &cache_htable[i];
+ struct hlist_node *n;
+ struct cache_entry *ce;
- hlist_for_each_entry_rcu(ce, l, ce_hlist)
- flush_cache_ent(ce);
+ hlist_for_each_entry_safe(ce, n, l, hlist) {
+ if (!hlist_unhashed(&ce->hlist))
+ flush_cache_ent(ce);
+ }
}
- rcu_read_unlock();
}
/*
@@ -158,36 +149,39 @@ static void flush_cache_ents(void)
*/
static int dfscache_proc_show(struct seq_file *m, void *v)
{
- int bucket;
- struct dfs_cache_entry *ce;
- struct dfs_cache_tgt *t;
+ int i;
+ struct cache_entry *ce;
+ struct cache_dfs_tgt *t;
seq_puts(m, "DFS cache\n---------\n");
- mutex_lock(&dfs_cache_list_lock);
-
- rcu_read_lock();
- hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
- seq_printf(m,
- "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
- "interlink=%s,path_consumed=%d,expired=%s\n",
- ce->ce_path,
- ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link",
- ce->ce_ttl, ce->ce_etime.tv_nsec,
- IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no",
- ce->ce_path_consumed,
- cache_entry_expired(ce) ? "yes" : "no");
-
- list_for_each_entry(t, &ce->ce_tlist, t_list) {
- seq_printf(m, " %s%s\n",
- t->t_name,
- ce->ce_tgthint == t ? " (target hint)" : "");
+ down_read(&htable_rw_lock);
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+ struct hlist_head *l = &cache_htable[i];
+
+ hlist_for_each_entry(ce, l, hlist) {
+ if (hlist_unhashed(&ce->hlist))
+ continue;
+
+ seq_printf(m,
+ "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
+ "interlink=%s,path_consumed=%d,expired=%s\n",
+ ce->path,
+ ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
+ ce->ttl, ce->etime.tv_nsec,
+ IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
+ ce->path_consumed,
+ cache_entry_expired(ce) ? "yes" : "no");
+
+ list_for_each_entry(t, &ce->tlist, list) {
+ seq_printf(m, " %s%s\n",
+ t->name,
+ ce->tgthint == t ? " (target hint)" : "");
+ }
}
-
}
- rcu_read_unlock();
+ up_read(&htable_rw_lock);
- mutex_unlock(&dfs_cache_list_lock);
return 0;
}
@@ -205,9 +199,10 @@ static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
return -EINVAL;
cifs_dbg(FYI, "clearing dfs cache");
- mutex_lock(&dfs_cache_list_lock);
+
+ down_write(&htable_rw_lock);
flush_cache_ents();
- mutex_unlock(&dfs_cache_list_lock);
+ up_write(&htable_rw_lock);
return count;
}
@@ -217,34 +212,34 @@ static int dfscache_proc_open(struct inode *inode, struct file *file)
return single_open(file, dfscache_proc_show, NULL);
}
-const struct file_operations dfscache_proc_fops = {
- .open = dfscache_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = dfscache_proc_write,
+const struct proc_ops dfscache_proc_ops = {
+ .proc_open = dfscache_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = dfscache_proc_write,
};
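
struct proc_ops replaced struct file_operations for procfs entries in v5.6. The registration side lives in fs/cifs/cifs_debug.c (not shown in this hunk); it is assumed to be the usual proc_create() call, now handed the proc_ops, along the lines of:

    /* sketch; entry name and parent directory assumed from cifs_debug.c */
    proc_create("dfscache", 0644, proc_fs_cifs, &dfscache_proc_ops);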
#ifdef CONFIG_CIFS_DEBUG2
-static inline void dump_tgts(const struct dfs_cache_entry *ce)
+static inline void dump_tgts(const struct cache_entry *ce)
{
- struct dfs_cache_tgt *t;
+ struct cache_dfs_tgt *t;
cifs_dbg(FYI, "target list:\n");
- list_for_each_entry(t, &ce->ce_tlist, t_list) {
- cifs_dbg(FYI, " %s%s\n", t->t_name,
- ce->ce_tgthint == t ? " (target hint)" : "");
+ list_for_each_entry(t, &ce->tlist, list) {
+ cifs_dbg(FYI, " %s%s\n", t->name,
+ ce->tgthint == t ? " (target hint)" : "");
}
}
-static inline void dump_ce(const struct dfs_cache_entry *ce)
+static inline void dump_ce(const struct cache_entry *ce)
{
cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
- "interlink=%s,path_consumed=%d,expired=%s\n", ce->ce_path,
- ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ce_ttl,
- ce->ce_etime.tv_nsec,
- IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no",
- ce->ce_path_consumed,
+ "interlink=%s,path_consumed=%d,expired=%s\n", ce->path,
+ ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
+ ce->etime.tv_nsec,
+ IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
+ ce->path_consumed,
cache_entry_expired(ce) ? "yes" : "no");
dump_tgts(ce);
}
@@ -284,25 +279,34 @@ static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
*/
int dfs_cache_init(void)
{
+ int rc;
int i;
- dfs_cache_slab = kmem_cache_create("cifs_dfs_cache",
- sizeof(struct dfs_cache_entry), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!dfs_cache_slab)
+ dfscache_wq = alloc_workqueue("cifs-dfscache",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
+ if (!dfscache_wq)
return -ENOMEM;
- for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++)
- INIT_HLIST_HEAD(&dfs_cache_htable[i]);
+ cache_slab = kmem_cache_create("cifs_dfs_cache",
+ sizeof(struct cache_entry), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!cache_slab) {
+ rc = -ENOMEM;
+ goto out_destroy_wq;
+ }
+
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++)
+ INIT_HLIST_HEAD(&cache_htable[i]);
- INIT_LIST_HEAD(&dfs_cache.dc_vol_list);
- mutex_init(&dfs_cache.dc_lock);
- INIT_DELAYED_WORK(&dfs_cache.dc_refresh, refresh_cache_worker);
- dfs_cache.dc_ttl = -1;
- dfs_cache.dc_nlsc = load_nls_default();
+ atomic_set(&cache_count, 0);
+ cache_nlsc = load_nls_default();
cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
return 0;
+
+out_destroy_wq:
+ destroy_workqueue(dfscache_wq);
+ return rc;
}
static inline unsigned int cache_entry_hash(const void *data, int size)
@@ -310,7 +314,7 @@ static inline unsigned int cache_entry_hash(const void *data, int size)
unsigned int h;
h = jhash(data, size, 0);
- return h & (DFS_CACHE_HTABLE_SIZE - 1);
+ return h & (CACHE_HTABLE_SIZE - 1);
}
/* Check whether second path component of @path is SYSVOL or NETLOGON */
@@ -325,11 +329,11 @@ static inline bool is_sysvol_or_netlogon(const char *path)
}
/* Return target hint of a DFS cache entry */
-static inline char *get_tgt_name(const struct dfs_cache_entry *ce)
+static inline char *get_tgt_name(const struct cache_entry *ce)
{
- struct dfs_cache_tgt *t = ce->ce_tgthint;
+ struct cache_dfs_tgt *t = ce->tgthint;
- return t ? t->t_name : ERR_PTR(-ENOENT);
+ return t ? t->name : ERR_PTR(-ENOENT);
}
/* Return expire time out of a new entry's TTL */
@@ -346,19 +350,19 @@ static inline struct timespec64 get_expire_time(int ttl)
}
/* Allocate a new DFS target */
-static inline struct dfs_cache_tgt *alloc_tgt(const char *name)
+static struct cache_dfs_tgt *alloc_target(const char *name)
{
- struct dfs_cache_tgt *t;
+ struct cache_dfs_tgt *t;
- t = kmalloc(sizeof(*t), GFP_KERNEL);
+ t = kmalloc(sizeof(*t), GFP_ATOMIC);
if (!t)
return ERR_PTR(-ENOMEM);
- t->t_name = kstrndup(name, strlen(name), GFP_KERNEL);
- if (!t->t_name) {
+ t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
+ if (!t->name) {
kfree(t);
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&t->t_list);
+ INIT_LIST_HEAD(&t->list);
return t;
}
@@ -367,180 +371,184 @@ static inline struct dfs_cache_tgt *alloc_tgt(const char *name)
* target hint.
*/
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
- struct dfs_cache_entry *ce, const char *tgthint)
+ struct cache_entry *ce, const char *tgthint)
{
int i;
- ce->ce_ttl = refs[0].ttl;
- ce->ce_etime = get_expire_time(ce->ce_ttl);
- ce->ce_srvtype = refs[0].server_type;
- ce->ce_flags = refs[0].ref_flag;
- ce->ce_path_consumed = refs[0].path_consumed;
+ ce->ttl = refs[0].ttl;
+ ce->etime = get_expire_time(ce->ttl);
+ ce->srvtype = refs[0].server_type;
+ ce->flags = refs[0].ref_flag;
+ ce->path_consumed = refs[0].path_consumed;
for (i = 0; i < numrefs; i++) {
- struct dfs_cache_tgt *t;
+ struct cache_dfs_tgt *t;
- t = alloc_tgt(refs[i].node_name);
+ t = alloc_target(refs[i].node_name);
if (IS_ERR(t)) {
free_tgts(ce);
return PTR_ERR(t);
}
- if (tgthint && !strcasecmp(t->t_name, tgthint)) {
- list_add(&t->t_list, &ce->ce_tlist);
+ if (tgthint && !strcasecmp(t->name, tgthint)) {
+ list_add(&t->list, &ce->tlist);
tgthint = NULL;
} else {
- list_add_tail(&t->t_list, &ce->ce_tlist);
+ list_add_tail(&t->list, &ce->tlist);
}
- ce->ce_numtgts++;
+ ce->numtgts++;
}
- ce->ce_tgthint = list_first_entry_or_null(&ce->ce_tlist,
- struct dfs_cache_tgt, t_list);
+ ce->tgthint = list_first_entry_or_null(&ce->tlist,
+ struct cache_dfs_tgt, list);
return 0;
}
/* Allocate a new cache entry */
-static struct dfs_cache_entry *
-alloc_cache_entry(const char *path, const struct dfs_info3_param *refs,
- int numrefs)
+static struct cache_entry *alloc_cache_entry(const char *path,
+ const struct dfs_info3_param *refs,
+ int numrefs)
{
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
int rc;
- ce = kmem_cache_zalloc(dfs_cache_slab, GFP_KERNEL);
+ ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
if (!ce)
return ERR_PTR(-ENOMEM);
- ce->ce_path = kstrdup_const(path, GFP_KERNEL);
- if (!ce->ce_path) {
- kmem_cache_free(dfs_cache_slab, ce);
+ ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
+ if (!ce->path) {
+ kmem_cache_free(cache_slab, ce);
return ERR_PTR(-ENOMEM);
}
- INIT_HLIST_NODE(&ce->ce_hlist);
- INIT_LIST_HEAD(&ce->ce_tlist);
+ INIT_HLIST_NODE(&ce->hlist);
+ INIT_LIST_HEAD(&ce->tlist);
rc = copy_ref_data(refs, numrefs, ce, NULL);
if (rc) {
- kfree_const(ce->ce_path);
- kmem_cache_free(dfs_cache_slab, ce);
+ kfree(ce->path);
+ kmem_cache_free(cache_slab, ce);
ce = ERR_PTR(rc);
}
return ce;
}
+/* Must be called with htable_rw_lock held */
static void remove_oldest_entry(void)
{
- int bucket;
- struct dfs_cache_entry *ce;
- struct dfs_cache_entry *to_del = NULL;
-
- rcu_read_lock();
- hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
- if (!to_del || timespec64_compare(&ce->ce_etime,
- &to_del->ce_etime) < 0)
- to_del = ce;
+ int i;
+ struct cache_entry *ce;
+ struct cache_entry *to_del = NULL;
+
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+ struct hlist_head *l = &cache_htable[i];
+
+ hlist_for_each_entry(ce, l, hlist) {
+ if (hlist_unhashed(&ce->hlist))
+ continue;
+ if (!to_del || timespec64_compare(&ce->etime,
+ &to_del->etime) < 0)
+ to_del = ce;
+ }
}
+
if (!to_del) {
cifs_dbg(FYI, "%s: no entry to remove", __func__);
- goto out;
+ return;
}
+
cifs_dbg(FYI, "%s: removing entry", __func__);
dump_ce(to_del);
flush_cache_ent(to_del);
-out:
- rcu_read_unlock();
}
/* Add a new DFS cache entry */
-static inline struct dfs_cache_entry *
-add_cache_entry(unsigned int hash, const char *path,
- const struct dfs_info3_param *refs, int numrefs)
+static int add_cache_entry(const char *path, unsigned int hash,
+ struct dfs_info3_param *refs, int numrefs)
{
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
ce = alloc_cache_entry(path, refs, numrefs);
if (IS_ERR(ce))
- return ce;
+ return PTR_ERR(ce);
- hlist_add_head_rcu(&ce->ce_hlist, &dfs_cache_htable[hash]);
-
- mutex_lock(&dfs_cache.dc_lock);
- if (dfs_cache.dc_ttl < 0) {
- dfs_cache.dc_ttl = ce->ce_ttl;
- queue_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh,
- dfs_cache.dc_ttl * HZ);
+ spin_lock(&cache_ttl_lock);
+ if (!cache_ttl) {
+ cache_ttl = ce->ttl;
+ queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
} else {
- dfs_cache.dc_ttl = min_t(int, dfs_cache.dc_ttl, ce->ce_ttl);
- mod_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh,
- dfs_cache.dc_ttl * HZ);
+ cache_ttl = min_t(int, cache_ttl, ce->ttl);
+ mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
}
- mutex_unlock(&dfs_cache.dc_lock);
+ spin_unlock(&cache_ttl_lock);
- return ce;
+ down_write(&htable_rw_lock);
+ hlist_add_head(&ce->hlist, &cache_htable[hash]);
+ dump_ce(ce);
+ up_write(&htable_rw_lock);
+
+ return 0;
}
-static struct dfs_cache_entry *__find_cache_entry(unsigned int hash,
- const char *path)
+/*
+ * Find a DFS cache entry in the hash table, matching the whole normalized
+ * path case-insensitively, and optionally return the computed hash bucket
+ * via @hash.
+ * Must be called with htable_rw_lock held.
+ *
+ * Return ERR_PTR(-ENOENT) if the entry is not found.
+ */
+static struct cache_entry *lookup_cache_entry(const char *path,
+ unsigned int *hash)
{
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
+ unsigned int h;
bool found = false;
- rcu_read_lock();
- hlist_for_each_entry_rcu(ce, &dfs_cache_htable[hash], ce_hlist) {
- if (!strcasecmp(path, ce->ce_path)) {
-#ifdef CONFIG_CIFS_DEBUG2
- char *name = get_tgt_name(ce);
+ h = cache_entry_hash(path, strlen(path));
- if (IS_ERR(name)) {
- rcu_read_unlock();
- return ERR_CAST(name);
- }
- cifs_dbg(FYI, "%s: cache hit\n", __func__);
- cifs_dbg(FYI, "%s: target hint: %s\n", __func__, name);
-#endif
+ hlist_for_each_entry(ce, &cache_htable[h], hlist) {
+ if (!strcasecmp(path, ce->path)) {
found = true;
+ dump_ce(ce);
break;
}
}
- rcu_read_unlock();
- return found ? ce : ERR_PTR(-ENOENT);
-}
-/*
- * Find a DFS cache entry in hash table and optionally check prefix path against
- * @path.
- * Use whole path components in the match.
- * Return ERR_PTR(-ENOENT) if the entry is not found.
- */
-static inline struct dfs_cache_entry *find_cache_entry(const char *path,
- unsigned int *hash)
-{
- *hash = cache_entry_hash(path, strlen(path));
- return __find_cache_entry(*hash, path);
+ if (!found)
+ ce = ERR_PTR(-ENOENT);
+ if (hash)
+ *hash = h;
+
+ return ce;
}
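
Because lookup_cache_entry() no longer takes a lock itself, every caller now brackets it with the rwsem. The reader-side shape used throughout the rest of this patch (sketch, assuming an already-normalized npath):

    down_read(&htable_rw_lock);
    ce = lookup_cache_entry(npath, NULL);
    if (IS_ERR(ce)) {
            up_read(&htable_rw_lock);
            return PTR_ERR(ce);
    }
    /* read-only use of ce while the rwsem is held */
    up_read(&htable_rw_lock);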
-static inline void destroy_slab_cache(void)
+static void __vol_release(struct vol_info *vi)
{
- rcu_barrier();
- kmem_cache_destroy(dfs_cache_slab);
+ kfree(vi->fullpath);
+ kfree(vi->mntdata);
+ cifs_cleanup_volume_info_contents(&vi->smb_vol);
+ kfree(vi);
}
-static inline void free_vol(struct dfs_cache_vol_info *vi)
+static void vol_release(struct kref *kref)
{
- list_del(&vi->vi_list);
- kfree(vi->vi_fullpath);
- kfree(vi->vi_mntdata);
- cifs_cleanup_volume_info_contents(&vi->vi_vol);
- kfree(vi);
+ struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
+
+ spin_lock(&vol_list_lock);
+ list_del(&vi->list);
+ spin_unlock(&vol_list_lock);
+ __vol_release(vi);
}
static inline void free_vol_list(void)
{
- struct dfs_cache_vol_info *vi, *nvi;
+ struct vol_info *vi, *nvi;
- list_for_each_entry_safe(vi, nvi, &dfs_cache.dc_vol_list, vi_list)
- free_vol(vi);
+ list_for_each_entry_safe(vi, nvi, &vol_list, list) {
+ list_del_init(&vi->list);
+ __vol_release(vi);
+ }
}
/**
@@ -548,83 +556,78 @@ static inline void free_vol_list(void)
*/
void dfs_cache_destroy(void)
{
- cancel_delayed_work_sync(&dfs_cache.dc_refresh);
- unload_nls(dfs_cache.dc_nlsc);
+ cancel_delayed_work_sync(&refresh_task);
+ unload_nls(cache_nlsc);
free_vol_list();
- mutex_destroy(&dfs_cache.dc_lock);
-
flush_cache_ents();
- destroy_slab_cache();
- mutex_destroy(&dfs_cache_list_lock);
+ kmem_cache_destroy(cache_slab);
+ destroy_workqueue(dfscache_wq);
cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
-static inline struct dfs_cache_entry *
-__update_cache_entry(const char *path, const struct dfs_info3_param *refs,
- int numrefs)
+/* Must be called with htable_rw_lock held */
+static int __update_cache_entry(const char *path,
+ const struct dfs_info3_param *refs,
+ int numrefs)
{
int rc;
- unsigned int h;
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
char *s, *th = NULL;
- ce = find_cache_entry(path, &h);
+ ce = lookup_cache_entry(path, NULL);
if (IS_ERR(ce))
- return ce;
+ return PTR_ERR(ce);
- if (ce->ce_tgthint) {
- s = ce->ce_tgthint->t_name;
- th = kstrndup(s, strlen(s), GFP_KERNEL);
+ if (ce->tgthint) {
+ s = ce->tgthint->name;
+ th = kstrndup(s, strlen(s), GFP_ATOMIC);
if (!th)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
free_tgts(ce);
- ce->ce_numtgts = 0;
+ ce->numtgts = 0;
rc = copy_ref_data(refs, numrefs, ce, th);
- kfree(th);
- if (rc)
- ce = ERR_PTR(rc);
+ kfree(th);
- return ce;
+ return rc;
}
-/* Update an expired cache entry by getting a new DFS referral from server */
-static struct dfs_cache_entry *
-update_cache_entry(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_codepage, int remap,
- const char *path, struct dfs_cache_entry *ce)
+static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
+ const struct nls_table *nls_codepage, int remap,
+ const char *path, struct dfs_info3_param **refs,
+ int *numrefs)
{
- int rc;
- struct dfs_info3_param *refs = NULL;
- int numrefs = 0;
+ cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path);
- cifs_dbg(FYI, "%s: update expired cache entry\n", __func__);
- /*
- * Check if caller provided enough parameters to update an expired
- * entry.
- */
if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
- return ERR_PTR(-ETIME);
+ return -EOPNOTSUPP;
if (unlikely(!nls_codepage))
- return ERR_PTR(-ETIME);
+ return -EINVAL;
- cifs_dbg(FYI, "%s: DFS referral request for %s\n", __func__, path);
+ *refs = NULL;
+ *numrefs = 0;
- rc = ses->server->ops->get_dfs_refer(xid, ses, path, &refs, &numrefs,
- nls_codepage, remap);
- if (rc)
- ce = ERR_PTR(rc);
- else
- ce = __update_cache_entry(path, refs, numrefs);
+ return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
+ nls_codepage, remap);
+}
- dump_refs(refs, numrefs);
- free_dfs_info_array(refs, numrefs);
+/* Update an expired cache entry by getting a new DFS referral from server */
+static int update_cache_entry(const char *path,
+ const struct dfs_info3_param *refs,
+ int numrefs)
+{
- return ce;
+ int rc;
+
+ down_write(&htable_rw_lock);
+ rc = __update_cache_entry(path, refs, numrefs);
+ up_write(&htable_rw_lock);
+
+ return rc;
}
/*
@@ -636,95 +639,86 @@ update_cache_entry(const unsigned int xid, struct cifs_ses *ses,
* For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
* handle them properly.
*/
-static struct dfs_cache_entry *
-do_dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_codepage, int remap,
- const char *path, bool noreq)
+static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
+ const struct nls_table *nls_codepage, int remap,
+ const char *path, bool noreq)
{
int rc;
- unsigned int h;
- struct dfs_cache_entry *ce;
- struct dfs_info3_param *nrefs;
- int numnrefs;
+ unsigned int hash;
+ struct cache_entry *ce;
+ struct dfs_info3_param *refs = NULL;
+ int numrefs = 0;
+ bool newent = false;
cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
- ce = find_cache_entry(path, &h);
- if (IS_ERR(ce)) {
- cifs_dbg(FYI, "%s: cache miss\n", __func__);
- /*
- * If @noreq is set, no requests will be sent to the server for
- * either updating or getting a new DFS referral.
- */
- if (noreq)
- return ce;
- /*
- * No cache entry was found, so check for valid parameters that
- * will be required to get a new DFS referral and then create a
- * new cache entry.
- */
- if (!ses || !ses->server || !ses->server->ops->get_dfs_refer) {
- ce = ERR_PTR(-EOPNOTSUPP);
- return ce;
- }
- if (unlikely(!nls_codepage)) {
- ce = ERR_PTR(-EINVAL);
- return ce;
- }
+ down_read(&htable_rw_lock);
- nrefs = NULL;
- numnrefs = 0;
+ ce = lookup_cache_entry(path, &hash);
- cifs_dbg(FYI, "%s: DFS referral request for %s\n", __func__,
- path);
+ /*
+ * If @noreq is set, no requests will be sent to the server. Just return
+ * the cache entry.
+ */
+ if (noreq) {
+ up_read(&htable_rw_lock);
+ return PTR_ERR_OR_ZERO(ce);
+ }
- rc = ses->server->ops->get_dfs_refer(xid, ses, path, &nrefs,
- &numnrefs, nls_codepage,
- remap);
- if (rc) {
- ce = ERR_PTR(rc);
- return ce;
+ if (!IS_ERR(ce)) {
+ if (!cache_entry_expired(ce)) {
+ dump_ce(ce);
+ up_read(&htable_rw_lock);
+ return 0;
}
+ } else {
+ newent = true;
+ }
- dump_refs(nrefs, numnrefs);
+ up_read(&htable_rw_lock);
- cifs_dbg(FYI, "%s: new cache entry\n", __func__);
+ /*
+ * Either the entry was not found, or it has expired.
+ *
+ * Request a new DFS referral in order to create a new cache entry, or
+ * update an existing one.
+ */
+ rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
+ &refs, &numrefs);
+ if (rc)
+ return rc;
- if (dfs_cache_count >= DFS_CACHE_MAX_ENTRIES) {
- cifs_dbg(FYI, "%s: reached max cache size (%d)",
- __func__, DFS_CACHE_MAX_ENTRIES);
- remove_oldest_entry();
- }
- ce = add_cache_entry(h, path, nrefs, numnrefs);
- free_dfs_info_array(nrefs, numnrefs);
+ dump_refs(refs, numrefs);
- if (IS_ERR(ce))
- return ce;
+ if (!newent) {
+ rc = update_cache_entry(path, refs, numrefs);
+ goto out_free_refs;
+ }
- dfs_cache_count++;
+ if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
+ cifs_dbg(FYI, "%s: reached max cache size (%d)", __func__,
+ CACHE_MAX_ENTRIES);
+ down_write(&htable_rw_lock);
+ remove_oldest_entry();
+ up_write(&htable_rw_lock);
}
- dump_ce(ce);
+ rc = add_cache_entry(path, hash, refs, numrefs);
+ if (!rc)
+ atomic_inc(&cache_count);
- /* Just return the found cache entry in case @noreq is set */
- if (noreq)
- return ce;
-
- if (cache_entry_expired(ce)) {
- cifs_dbg(FYI, "%s: expired cache entry\n", __func__);
- ce = update_cache_entry(xid, ses, nls_codepage, remap, path,
- ce);
- if (IS_ERR(ce)) {
- cifs_dbg(FYI, "%s: failed to update expired entry\n",
- __func__);
- }
- }
- return ce;
+out_free_refs:
+ free_dfs_info_array(refs, numrefs);
+ return rc;
}
-/* Set up a new DFS referral from a given cache entry */
-static int setup_ref(const char *path, const struct dfs_cache_entry *ce,
- struct dfs_info3_param *ref, const char *tgt)
+/*
+ * Set up a DFS referral from a given cache entry.
+ *
+ * Must be called with htable_rw_lock held.
+ */
+static int setup_referral(const char *path, struct cache_entry *ce,
+ struct dfs_info3_param *ref, const char *target)
{
int rc;
@@ -732,21 +726,20 @@ static int setup_ref(const char *path, const struct dfs_cache_entry *ce,
memset(ref, 0, sizeof(*ref));
- ref->path_name = kstrndup(path, strlen(path), GFP_KERNEL);
+ ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
if (!ref->path_name)
return -ENOMEM;
- ref->path_consumed = ce->ce_path_consumed;
-
- ref->node_name = kstrndup(tgt, strlen(tgt), GFP_KERNEL);
+ ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
if (!ref->node_name) {
rc = -ENOMEM;
goto err_free_path;
}
- ref->ttl = ce->ce_ttl;
- ref->server_type = ce->ce_srvtype;
- ref->ref_flag = ce->ce_flags;
+ ref->path_consumed = ce->path_consumed;
+ ref->ttl = ce->ttl;
+ ref->server_type = ce->srvtype;
+ ref->ref_flag = ce->flags;
return 0;
@@ -757,38 +750,37 @@ err_free_path:
}
/* Return target list of a DFS cache entry */
-static int get_tgt_list(const struct dfs_cache_entry *ce,
- struct dfs_cache_tgt_list *tl)
+static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
int rc;
struct list_head *head = &tl->tl_list;
- struct dfs_cache_tgt *t;
+ struct cache_dfs_tgt *t;
struct dfs_cache_tgt_iterator *it, *nit;
memset(tl, 0, sizeof(*tl));
INIT_LIST_HEAD(head);
- list_for_each_entry(t, &ce->ce_tlist, t_list) {
- it = kzalloc(sizeof(*it), GFP_KERNEL);
+ list_for_each_entry(t, &ce->tlist, list) {
+ it = kzalloc(sizeof(*it), GFP_ATOMIC);
if (!it) {
rc = -ENOMEM;
goto err_free_it;
}
- it->it_name = kstrndup(t->t_name, strlen(t->t_name),
- GFP_KERNEL);
+ it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
if (!it->it_name) {
kfree(it);
rc = -ENOMEM;
goto err_free_it;
}
- if (ce->ce_tgthint == t)
+ if (ce->tgthint == t)
list_add(&it->it_list, head);
else
list_add_tail(&it->it_list, head);
}
- tl->tl_numtgts = ce->ce_numtgts;
+
+ tl->tl_numtgts = ce->numtgts;
return 0;
@@ -829,28 +821,35 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
{
int rc;
char *npath;
- struct dfs_cache_entry *ce;
-
- if (unlikely(!is_path_valid(path)))
- return -EINVAL;
+ struct cache_entry *ce;
rc = get_normalized_path(path, &npath);
if (rc)
return rc;
- mutex_lock(&dfs_cache_list_lock);
- ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
- if (!IS_ERR(ce)) {
- if (ref)
- rc = setup_ref(path, ce, ref, get_tgt_name(ce));
- else
- rc = 0;
- if (!rc && tgt_list)
- rc = get_tgt_list(ce, tgt_list);
- } else {
+ rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
+ if (rc)
+ goto out_free_path;
+
+ down_read(&htable_rw_lock);
+
+ ce = lookup_cache_entry(npath, NULL);
+ if (IS_ERR(ce)) {
+ up_read(&htable_rw_lock);
rc = PTR_ERR(ce);
+ goto out_free_path;
}
- mutex_unlock(&dfs_cache_list_lock);
+
+ if (ref)
+ rc = setup_referral(path, ce, ref, get_tgt_name(ce));
+ else
+ rc = 0;
+ if (!rc && tgt_list)
+ rc = get_targets(ce, tgt_list);
+
+ up_read(&htable_rw_lock);
+
+out_free_path:
free_normalized_path(path, npath);
return rc;
}
@@ -876,31 +875,33 @@ int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
{
int rc;
char *npath;
- struct dfs_cache_entry *ce;
-
- if (unlikely(!is_path_valid(path)))
- return -EINVAL;
+ struct cache_entry *ce;
rc = get_normalized_path(path, &npath);
if (rc)
return rc;
- mutex_lock(&dfs_cache_list_lock);
- ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true);
+ cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
+
+ down_read(&htable_rw_lock);
+
+ ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
- goto out;
+ goto out_unlock;
}
if (ref)
- rc = setup_ref(path, ce, ref, get_tgt_name(ce));
+ rc = setup_referral(path, ce, ref, get_tgt_name(ce));
else
rc = 0;
if (!rc && tgt_list)
- rc = get_tgt_list(ce, tgt_list);
-out:
- mutex_unlock(&dfs_cache_list_lock);
+ rc = get_targets(ce, tgt_list);
+
+out_unlock:
+ up_read(&htable_rw_lock);
free_normalized_path(path, npath);
+
return rc;
}
@@ -929,44 +930,46 @@ int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
{
int rc;
char *npath;
- struct dfs_cache_entry *ce;
- struct dfs_cache_tgt *t;
-
- if (unlikely(!is_path_valid(path)))
- return -EINVAL;
+ struct cache_entry *ce;
+ struct cache_dfs_tgt *t;
rc = get_normalized_path(path, &npath);
if (rc)
return rc;
- cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
+ cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
- mutex_lock(&dfs_cache_list_lock);
- ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
+ rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
+ if (rc)
+ goto out_free_path;
+
+ down_write(&htable_rw_lock);
+
+ ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
- goto out;
+ goto out_unlock;
}
- rc = 0;
-
- t = ce->ce_tgthint;
+ t = ce->tgthint;
- if (likely(!strcasecmp(it->it_name, t->t_name)))
- goto out;
+ if (likely(!strcasecmp(it->it_name, t->name)))
+ goto out_unlock;
- list_for_each_entry(t, &ce->ce_tlist, t_list) {
- if (!strcasecmp(t->t_name, it->it_name)) {
- ce->ce_tgthint = t;
+ list_for_each_entry(t, &ce->tlist, list) {
+ if (!strcasecmp(t->name, it->it_name)) {
+ ce->tgthint = t;
cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
it->it_name);
break;
}
}
-out:
- mutex_unlock(&dfs_cache_list_lock);
+out_unlock:
+ up_write(&htable_rw_lock);
+out_free_path:
free_normalized_path(path, npath);
+
return rc;
}
@@ -989,10 +992,10 @@ int dfs_cache_noreq_update_tgthint(const char *path,
{
int rc;
char *npath;
- struct dfs_cache_entry *ce;
- struct dfs_cache_tgt *t;
+ struct cache_entry *ce;
+ struct cache_dfs_tgt *t;
- if (unlikely(!is_path_valid(path)) || !it)
+ if (!it)
return -EINVAL;
rc = get_normalized_path(path, &npath);
@@ -1001,33 +1004,33 @@ int dfs_cache_noreq_update_tgthint(const char *path,
cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
- mutex_lock(&dfs_cache_list_lock);
+ down_write(&htable_rw_lock);
- ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true);
+ ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
- goto out;
+ goto out_unlock;
}
rc = 0;
+ t = ce->tgthint;
- t = ce->ce_tgthint;
+ if (unlikely(!strcasecmp(it->it_name, t->name)))
+ goto out_unlock;
- if (unlikely(!strcasecmp(it->it_name, t->t_name)))
- goto out;
-
- list_for_each_entry(t, &ce->ce_tlist, t_list) {
- if (!strcasecmp(t->t_name, it->it_name)) {
- ce->ce_tgthint = t;
+ list_for_each_entry(t, &ce->tlist, list) {
+ if (!strcasecmp(t->name, it->it_name)) {
+ ce->tgthint = t;
cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
it->it_name);
break;
}
}
-out:
- mutex_unlock(&dfs_cache_list_lock);
+out_unlock:
+ up_write(&htable_rw_lock);
free_normalized_path(path, npath);
+
return rc;
}
@@ -1047,13 +1050,10 @@ int dfs_cache_get_tgt_referral(const char *path,
{
int rc;
char *npath;
- struct dfs_cache_entry *ce;
- unsigned int h;
+ struct cache_entry *ce;
if (!it || !ref)
return -EINVAL;
- if (unlikely(!is_path_valid(path)))
- return -EINVAL;
rc = get_normalized_path(path, &npath);
if (rc)
@@ -1061,21 +1061,22 @@ int dfs_cache_get_tgt_referral(const char *path,
cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
- mutex_lock(&dfs_cache_list_lock);
+ down_read(&htable_rw_lock);
- ce = find_cache_entry(npath, &h);
+ ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
- goto out;
+ goto out_unlock;
}
cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
- rc = setup_ref(path, ce, ref, it->it_name);
+ rc = setup_referral(path, ce, ref, it->it_name);
-out:
- mutex_unlock(&dfs_cache_list_lock);
+out_unlock:
+ up_read(&htable_rw_lock);
free_normalized_path(path, npath);
+
return rc;
}
@@ -1085,7 +1086,7 @@ static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
if (vol->username) {
new->username = kstrndup(vol->username, strlen(vol->username),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!new->username)
return -ENOMEM;
}
@@ -1103,7 +1104,7 @@ static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
}
if (vol->domainname) {
new->domainname = kstrndup(vol->domainname,
- strlen(vol->domainname), GFP_KERNEL);
+ strlen(vol->domainname), GFP_KERNEL);
if (!new->domainname)
goto err_free_unc;
}
@@ -1150,7 +1151,7 @@ err_free_username:
int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
{
int rc;
- struct dfs_cache_vol_info *vi;
+ struct vol_info *vi;
if (!vol || !fullpath || !mntdata)
return -EINVAL;
@@ -1161,38 +1162,41 @@ int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
if (!vi)
return -ENOMEM;
- vi->vi_fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
- if (!vi->vi_fullpath) {
+ vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
+ if (!vi->fullpath) {
rc = -ENOMEM;
goto err_free_vi;
}
- rc = dup_vol(vol, &vi->vi_vol);
+ rc = dup_vol(vol, &vi->smb_vol);
if (rc)
goto err_free_fullpath;
- vi->vi_mntdata = mntdata;
+ vi->mntdata = mntdata;
+ spin_lock_init(&vi->smb_vol_lock);
+ kref_init(&vi->refcnt);
+
+ spin_lock(&vol_list_lock);
+ list_add_tail(&vi->list, &vol_list);
+ spin_unlock(&vol_list_lock);
- mutex_lock(&dfs_cache.dc_lock);
- list_add_tail(&vi->vi_list, &dfs_cache.dc_vol_list);
- mutex_unlock(&dfs_cache.dc_lock);
return 0;
err_free_fullpath:
- kfree(vi->vi_fullpath);
+ kfree(vi->fullpath);
err_free_vi:
kfree(vi);
return rc;
}
-static inline struct dfs_cache_vol_info *find_vol(const char *fullpath)
+/* Must be called with vol_list_lock held */
+static struct vol_info *find_vol(const char *fullpath)
{
- struct dfs_cache_vol_info *vi;
+ struct vol_info *vi;
- list_for_each_entry(vi, &dfs_cache.dc_vol_list, vi_list) {
- cifs_dbg(FYI, "%s: vi->vi_fullpath: %s\n", __func__,
- vi->vi_fullpath);
- if (!strcasecmp(vi->vi_fullpath, fullpath))
+ list_for_each_entry(vi, &vol_list, list) {
+ cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
+ if (!strcasecmp(vi->fullpath, fullpath))
return vi;
}
return ERR_PTR(-ENOENT);
@@ -1208,30 +1212,31 @@ static inline struct dfs_cache_vol_info *find_vol(const char *fullpath)
*/
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
- int rc;
- struct dfs_cache_vol_info *vi;
+ struct vol_info *vi;
if (!fullpath || !server)
return -EINVAL;
cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
- mutex_lock(&dfs_cache.dc_lock);
-
+ spin_lock(&vol_list_lock);
vi = find_vol(fullpath);
if (IS_ERR(vi)) {
- rc = PTR_ERR(vi);
- goto out;
+ spin_unlock(&vol_list_lock);
+ return PTR_ERR(vi);
}
+ kref_get(&vi->refcnt);
+ spin_unlock(&vol_list_lock);
cifs_dbg(FYI, "%s: updating volume info\n", __func__);
- memcpy(&vi->vi_vol.dstaddr, &server->dstaddr,
- sizeof(vi->vi_vol.dstaddr));
- rc = 0;
+ spin_lock(&vi->smb_vol_lock);
+ memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
+ sizeof(vi->smb_vol.dstaddr));
+ spin_unlock(&vi->smb_vol_lock);
-out:
- mutex_unlock(&dfs_cache.dc_lock);
- return rc;
+ kref_put(&vi->refcnt, vol_release);
+
+ return 0;
}
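
dfs_cache_update_vol() above establishes the lifetime rule for volumes: look up and pin under vol_list_lock, then drop the list lock before touching the volume. As a sketch (hypothetical caller, names from this patch):

    static int with_vol(const char *fullpath)
    {
            struct vol_info *vi;

            spin_lock(&vol_list_lock);
            vi = find_vol(fullpath);
            if (!IS_ERR(vi))
                    kref_get(&vi->refcnt);
            spin_unlock(&vol_list_lock);

            if (IS_ERR(vi))
                    return PTR_ERR(vi);

            /* vi is pinned here; safe to use without vol_list_lock */

            kref_put(&vi->refcnt, vol_release);
            return 0;
    }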
/**
@@ -1241,18 +1246,18 @@ out:
*/
void dfs_cache_del_vol(const char *fullpath)
{
- struct dfs_cache_vol_info *vi;
+ struct vol_info *vi;
if (!fullpath || !*fullpath)
return;
cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
- mutex_lock(&dfs_cache.dc_lock);
+ spin_lock(&vol_list_lock);
vi = find_vol(fullpath);
- if (!IS_ERR(vi))
- free_vol(vi);
- mutex_unlock(&dfs_cache.dc_lock);
+ spin_unlock(&vol_list_lock);
+
+ if (IS_ERR(vi))
+ 	return;
+
+ kref_put(&vi->refcnt, vol_release);
}
/* Get all tcons that are within a DFS namespace and can be refreshed */
@@ -1280,7 +1285,7 @@ static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
spin_unlock(&cifs_tcp_ses_lock);
}
-static inline bool is_dfs_link(const char *path)
+static bool is_dfs_link(const char *path)
{
char *s;
@@ -1290,7 +1295,7 @@ static inline bool is_dfs_link(const char *path)
return !!strchr(s + 1, '\\');
}
-static inline char *get_dfs_root(const char *path)
+static char *get_dfs_root(const char *path)
{
char *s, *npath;
@@ -1309,31 +1314,67 @@ static inline char *get_dfs_root(const char *path)
return npath;
}
+static inline void put_tcp_server(struct TCP_Server_Info *server)
+{
+ cifs_put_tcp_session(server, 0);
+}
+
+static struct TCP_Server_Info *get_tcp_server(struct smb_vol *vol)
+{
+ struct TCP_Server_Info *server;
+
+ server = cifs_find_tcp_session(vol);
+ if (IS_ERR_OR_NULL(server))
+ return NULL;
+
+ spin_lock(&GlobalMid_Lock);
+ if (server->tcpStatus != CifsGood) {
+ spin_unlock(&GlobalMid_Lock);
+ put_tcp_server(server);
+ return NULL;
+ }
+ spin_unlock(&GlobalMid_Lock);
+
+ return server;
+}
+
/* Find root SMB session out of a DFS link path */
-static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
- struct cifs_tcon *tcon, const char *path)
+static struct cifs_ses *find_root_ses(struct vol_info *vi,
+ struct cifs_tcon *tcon,
+ const char *path)
{
char *rpath;
int rc;
+ struct cache_entry *ce;
struct dfs_info3_param ref = {0};
char *mdata = NULL, *devname = NULL;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
- struct smb_vol vol;
+ struct smb_vol vol = {NULL};
rpath = get_dfs_root(path);
if (IS_ERR(rpath))
return ERR_CAST(rpath);
- memset(&vol, 0, sizeof(vol));
+ down_read(&htable_rw_lock);
+
+ ce = lookup_cache_entry(rpath, NULL);
+ if (IS_ERR(ce)) {
+ up_read(&htable_rw_lock);
+ ses = ERR_CAST(ce);
+ goto out;
+ }
- rc = dfs_cache_noreq_find(rpath, &ref, NULL);
+ rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
if (rc) {
+ up_read(&htable_rw_lock);
ses = ERR_PTR(rc);
goto out;
}
- mdata = cifs_compose_mount_options(vi->vi_mntdata, rpath, &ref,
+ up_read(&htable_rw_lock);
+
+ mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
&devname);
free_dfs_info_param(&ref);
@@ -1351,13 +1392,8 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
goto out;
}
- server = cifs_find_tcp_session(&vol);
- if (IS_ERR_OR_NULL(server)) {
- ses = ERR_PTR(-EHOSTDOWN);
- goto out;
- }
- if (server->tcpStatus != CifsGood) {
- cifs_put_tcp_session(server, 0);
+ server = get_tcp_server(&vol);
+ if (!server) {
ses = ERR_PTR(-EHOSTDOWN);
goto out;
}
@@ -1373,17 +1409,15 @@ out:
}
/* Refresh DFS cache entry from a given tcon */
-static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
- struct cifs_tcon *tcon)
+static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
{
int rc = 0;
unsigned int xid;
char *path, *npath;
- unsigned int h;
- struct dfs_cache_entry *ce;
+ struct cache_entry *ce;
+ struct cifs_ses *root_ses = NULL, *ses;
struct dfs_info3_param *refs = NULL;
int numrefs = 0;
- struct cifs_ses *root_ses = NULL, *ses;
xid = get_xid();
@@ -1391,19 +1425,23 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
rc = get_normalized_path(path, &npath);
if (rc)
- goto out;
+ goto out_free_xid;
- mutex_lock(&dfs_cache_list_lock);
- ce = find_cache_entry(npath, &h);
- mutex_unlock(&dfs_cache_list_lock);
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(npath, NULL);
if (IS_ERR(ce)) {
rc = PTR_ERR(ce);
- goto out;
+ up_read(&htable_rw_lock);
+ goto out_free_path;
}
- if (!cache_entry_expired(ce))
- goto out;
+ if (!cache_entry_expired(ce)) {
+ up_read(&htable_rw_lock);
+ goto out_free_path;
+ }
+
+ up_read(&htable_rw_lock);
/* If it's a DFS Link, then use root SMB session for refreshing it */
if (is_dfs_link(npath)) {
@@ -1411,35 +1449,29 @@ static void do_refresh_tcon(struct dfs_cache *dc, struct dfs_cache_vol_info *vi,
if (IS_ERR(ses)) {
rc = PTR_ERR(ses);
root_ses = NULL;
- goto out;
+ goto out_free_path;
}
} else {
ses = tcon->ses;
}
- if (unlikely(!ses->server->ops->get_dfs_refer)) {
- rc = -EOPNOTSUPP;
- } else {
- rc = ses->server->ops->get_dfs_refer(xid, ses, path, &refs,
- &numrefs, dc->dc_nlsc,
- tcon->remap);
- if (!rc) {
- mutex_lock(&dfs_cache_list_lock);
- ce = __update_cache_entry(npath, refs, numrefs);
- mutex_unlock(&dfs_cache_list_lock);
- dump_refs(refs, numrefs);
- free_dfs_info_array(refs, numrefs);
- if (IS_ERR(ce))
- rc = PTR_ERR(ce);
- }
+ rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
+ &numrefs);
+ if (!rc) {
+ dump_refs(refs, numrefs);
+ rc = update_cache_entry(npath, refs, numrefs);
+ free_dfs_info_array(refs, numrefs);
}
-out:
if (root_ses)
cifs_put_smb_ses(root_ses);
- free_xid(xid);
+out_free_path:
free_normalized_path(path, npath);
+
+out_free_xid:
+ free_xid(xid);
+ return rc;
}
/*
@@ -1448,30 +1480,61 @@ out:
*/
static void refresh_cache_worker(struct work_struct *work)
{
- struct dfs_cache *dc = container_of(work, struct dfs_cache,
- dc_refresh.work);
- struct dfs_cache_vol_info *vi;
+ struct vol_info *vi, *nvi;
struct TCP_Server_Info *server;
- LIST_HEAD(list);
+ LIST_HEAD(vols);
+ LIST_HEAD(tcons);
struct cifs_tcon *tcon, *ntcon;
+ int rc;
- mutex_lock(&dc->dc_lock);
-
- list_for_each_entry(vi, &dc->dc_vol_list, vi_list) {
- server = cifs_find_tcp_session(&vi->vi_vol);
- if (IS_ERR_OR_NULL(server))
+ /*
+ * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
+ * for refreshing.
+ */
+ spin_lock(&vol_list_lock);
+ list_for_each_entry(vi, &vol_list, list) {
+ server = get_tcp_server(&vi->smb_vol);
+ if (!server)
continue;
- if (server->tcpStatus != CifsGood)
- goto next;
- get_tcons(server, &list);
- list_for_each_entry_safe(tcon, ntcon, &list, ulist) {
- do_refresh_tcon(dc, vi, tcon);
+
+ kref_get(&vi->refcnt);
+ list_add_tail(&vi->rlist, &vols);
+ put_tcp_server(server);
+ }
+ spin_unlock(&vol_list_lock);
+
+ /* Walk through all TCONs and refresh any expired cache entry */
+ list_for_each_entry_safe(vi, nvi, &vols, rlist) {
+ spin_lock(&vi->smb_vol_lock);
+ server = get_tcp_server(&vi->smb_vol);
+ spin_unlock(&vi->smb_vol_lock);
+
+ if (!server)
+ goto next_vol;
+
+ get_tcons(server, &tcons);
+ rc = 0;
+
+ list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
+ /*
+ * Skip tcp server if any of its tcons failed to refresh
+ * (possibly due to reconnects).
+ */
+ if (!rc)
+ rc = refresh_tcon(vi, tcon);
+
list_del_init(&tcon->ulist);
cifs_put_tcon(tcon);
}
-next:
- cifs_put_tcp_session(server, 0);
+
+ put_tcp_server(server);
+
+next_vol:
+ list_del_init(&vi->rlist);
+ kref_put(&vi->refcnt, vol_release);
}
- queue_delayed_work(cifsiod_wq, &dc->dc_refresh, dc->dc_ttl * HZ);
- mutex_unlock(&dc->dc_lock);
+
+ spin_lock(&cache_ttl_lock);
+ queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+ spin_unlock(&cache_ttl_lock);
}
diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h
index 76c732943f5f..99ee44f8ad07 100644
--- a/fs/cifs/dfs_cache.h
+++ b/fs/cifs/dfs_cache.h
@@ -24,7 +24,7 @@ struct dfs_cache_tgt_iterator {
extern int dfs_cache_init(void);
extern void dfs_cache_destroy(void);
-extern const struct file_operations dfscache_proc_fops;
+extern const struct proc_ops dfscache_proc_ops;
extern int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_codepage, int remap,
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f3b79012ff29..0ef099442f20 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -355,13 +355,10 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
create_options |= CREATE_OPTION_READONLY;
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = desired_access;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.disposition = disposition;
oparms.path = full_path;
oparms.fid = fid;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 043288b5c728..bc9516ab4b34 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -222,9 +222,6 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
if (!buf)
return -ENOMEM;
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
/* O_SYNC also has bit for O_DSYNC so following check picks up either */
if (f_flags & O_SYNC)
create_options |= CREATE_WRITE_THROUGH;
@@ -235,7 +232,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = desired_access;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.disposition = disposition;
oparms.path = full_path;
oparms.fid = fid;
@@ -752,9 +749,6 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
desired_access = cifs_convert_flags(cfile->f_flags);
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
/* O_SYNC also has bit for O_DSYNC so following check picks up either */
if (cfile->f_flags & O_SYNC)
create_options |= CREATE_WRITE_THROUGH;
@@ -768,7 +762,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = desired_access;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.disposition = disposition;
oparms.path = full_path;
oparms.fid = &cfile->fid;
@@ -2599,8 +2593,10 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
rc = file_write_and_wait_range(file, start, end);
- if (rc)
+ if (rc) {
+ trace_cifs_fsync_err(inode->i_ino, rc);
return rc;
+ }
xid = get_xid();
@@ -2638,8 +2634,10 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
rc = file_write_and_wait_range(file, start, end);
- if (rc)
+ if (rc) {
+ trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
return rc;
+ }
xid = get_xid();
@@ -2672,7 +2670,8 @@ int cifs_flush(struct file *file, fl_owner_t id)
rc = filemap_write_and_wait(inode->i_mapping);
cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
-
+ if (rc)
+ trace_cifs_flush_err(inode->i_ino, rc);
return rc;
}
@@ -2921,7 +2920,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
"direct_writev couldn't get user pages "
"(rc=%zd) iter type %d iov_offset %zd "
"count %zd\n",
- result, from->type,
+ result, iov_iter_type(from),
from->iov_offset, from->count);
dump_stack();
@@ -3132,7 +3131,7 @@ static ssize_t __cifs_writev(
* In this case, fall back to non-direct write function.
* this could be improved by getting pages directly in ITER_KVEC
*/
- if (direct && from->type & ITER_KVEC) {
+ if (direct && iov_iter_is_kvec(from)) {
cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
direct = false;
}
@@ -3652,7 +3651,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
"couldn't get user pages (rc=%zd)"
" iter type %d"
" iov_offset %zd count %zd\n",
- result, direct_iov.type,
+ result, iov_iter_type(&direct_iov),
direct_iov.iov_offset,
direct_iov.count);
dump_stack();
@@ -3863,7 +3862,7 @@ static ssize_t __cifs_readv(
* fall back to data copy read path
* this could be improved by getting pages directly in ITER_KVEC
*/
- if (direct && to->type & ITER_KVEC) {
+ if (direct && iov_iter_is_kvec(to)) {
cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
direct = false;
}
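
All four file.c hunks make the same substitution: raw `iter->type` flag tests become the accessors from linux/uio.h, which keep working even if the type encoding inside struct iov_iter changes later. A sketch (assumed helper name):

    #include <linux/uio.h>

    /* decide whether to fall back to the copying read/write path */
    static bool need_copy_fallback(const struct iov_iter *iter)
    {
            /* was: iter->type & ITER_KVEC */
            return iov_iter_is_kvec(iter);
    }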
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ca76a9287456..b5e6635c578e 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -113,6 +113,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
}
/* revalidate if mtime or size have changed */
+ fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) &&
cifs_i->server_eof == fattr->cf_eof) {
cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
@@ -162,6 +163,9 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
cifs_revalidate_cache(inode, fattr);
spin_lock(&inode->i_lock);
+ fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
+ fattr->cf_atime = timestamp_truncate(fattr->cf_atime, inode);
+ fattr->cf_ctime = timestamp_truncate(fattr->cf_ctime, inode);
/* we do not want atime to be less than mtime, it broke some apps */
if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0)
inode->i_atime = fattr->cf_mtime;
@@ -329,8 +333,7 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
fattr->cf_uid = cifs_sb->mnt_uid;
fattr->cf_gid = cifs_sb->mnt_gid;
- ktime_get_real_ts64(&fattr->cf_mtime);
- fattr->cf_mtime = timespec64_trunc(fattr->cf_mtime, sb->s_time_gran);
+ ktime_get_coarse_real_ts64(&fattr->cf_mtime);
fattr->cf_atime = fattr->cf_ctime = fattr->cf_mtime;
fattr->cf_nlink = 2;
fattr->cf_flags = CIFS_FATTR_DFS_REFERRAL;
@@ -472,9 +475,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_READ;
- oparms.create_options = CREATE_NOT_DIR;
- if (backup_cred(cifs_sb))
- oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
@@ -609,10 +610,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
if (info->LastAccessTime)
fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
- else {
- ktime_get_real_ts64(&fattr->cf_atime);
- fattr->cf_atime = timespec64_trunc(fattr->cf_atime, sb->s_time_gran);
- }
+ else
+ ktime_get_coarse_real_ts64(&fattr->cf_atime);
fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
@@ -1284,7 +1283,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = DELETE | FILE_WRITE_ATTRIBUTES;
- oparms.create_options = CREATE_NOT_DIR;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.path = full_path;
oparms.fid = &fid;
@@ -1649,7 +1648,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
struct TCP_Server_Info *server;
char *full_path;
- cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n",
+ cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
mode, inode);
cifs_sb = CIFS_SB(inode->i_sb);
@@ -1822,7 +1821,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
oparms.cifs_sb = cifs_sb;
/* open the file to be renamed -- we need DELETE perms */
oparms.desired_access = DELETE;
- oparms.create_options = CREATE_NOT_DIR;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.path = from_path;
oparms.fid = &fid;
@@ -2228,7 +2227,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
return -ENOTSUPP;
}
-static int cifs_truncate_page(struct address_space *mapping, loff_t from)
+int cifs_truncate_page(struct address_space *mapping, loff_t from)
{
pgoff_t index = from >> PAGE_SHIFT;
unsigned offset = from & (PAGE_SIZE - 1);
@@ -2245,7 +2244,7 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
return rc;
}
-static void cifs_setsize(struct inode *inode, loff_t offset)
+void cifs_setsize(struct inode *inode, loff_t offset)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
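
Every create_options assignment in this series now funnels through cifs_create_options(). The helper itself is added in cifsproto.h (changed by this series per the diffstat, but not quoted here); from the call sites it presumably reduces to folding in the backup-intent check each caller used to open-code:

    /* assumed shape of the new helper; the NULL check matters for callers
     * like smb2_query_info_compound() that may pass a NULL cifs_sb */
    static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
    {
            if (cifs_sb && backup_cred(cifs_sb))
                    return options | CREATE_OPEN_BACKUP_INTENT;
            return options;
    }
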
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 1a01e108d75e..4a73e63c4d43 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -65,7 +65,7 @@ static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
if (tcon->ses->server->ops->ioctl_query_info)
rc = tcon->ses->server->ops->ioctl_query_info(
- xid, tcon, utf16_path,
+ xid, tcon, cifs_sb, utf16_path,
filep->private_data ? 0 : 1, p);
else
rc = -EOPNOTSUPP;
@@ -169,6 +169,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
unsigned int xid;
struct cifsFileInfo *pSMBFile = filep->private_data;
struct cifs_tcon *tcon;
+ struct cifs_sb_info *cifs_sb;
__u64 ExtAttrBits = 0;
__u64 caps;
@@ -299,6 +300,21 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
else
rc = 0;
break;
+ case CIFS_IOC_NOTIFY:
+ if (!S_ISDIR(inode->i_mode)) {
+ /* Notify can only be done on directories */
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ cifs_sb = CIFS_SB(inode->i_sb);
+ tcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+ if (tcon && tcon->ses->server->ops->notify) {
+ rc = tcon->ses->server->ops->notify(xid,
+ filep, (void __user *)arg);
+ cifs_dbg(FYI, "ioctl notify rc %d\n", rc);
+ } else
+ rc = -EOPNOTSUPP;
+ break;
default:
cifs_dbg(FYI, "unsupported ioctl\n");
break;
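
A minimal userspace caller for the new CIFS_IOC_NOTIFY ioctl. The request struct and ioctl number live in fs/cifs/cifs_ioctl.h (touched by this series per the diffstat but not shown), so the definitions below are assumptions reconstructed from the fields the handler reads:

    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>

    /* assumed layout, matching the completion_filter/watch_tree uses below */
    struct smb3_notify {
            unsigned int completion_filter;   /* FILE_NOTIFY_CHANGE_* mask */
            unsigned char watch_tree;         /* nonzero: recurse into subdirs */
    } __attribute__((packed));

    #define CIFS_IOCTL_MAGIC 0xCF             /* assumed magic number */
    #define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)

    int main(int argc, char **argv)
    {
            struct smb3_notify n = {
                    .completion_filter = 0x00000001, /* FILE_NOTIFY_CHANGE_FILE_NAME */
                    .watch_tree = 0,
            };
            int fd = open(argv[1], O_RDONLY | O_DIRECTORY);

            if (fd < 0 || ioctl(fd, CIFS_IOC_NOTIFY, &n) < 0)
                    perror("CIFS_IOC_NOTIFY");
            return 0;
    }

The call blocks until the server reports a matching change, mirroring SMB2 CHANGE_NOTIFY semantics; as the handler above shows, non-directories are rejected with -EOPNOTSUPP.
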
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index b736acd3917b..852aa00ec729 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -315,7 +315,7 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_READ;
- oparms.create_options = CREATE_NOT_DIR;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
@@ -353,15 +353,11 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid fid;
struct cifs_open_parms oparms;
struct cifs_io_parms io_parms;
- int create_options = CREATE_NOT_DIR;
-
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_WRITE;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_CREATE;
oparms.path = path;
oparms.fid = &fid;
@@ -402,9 +398,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_READ;
- oparms.create_options = CREATE_NOT_DIR;
- if (backup_cred(cifs_sb))
- oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.fid = &fid;
oparms.reconnect = false;
@@ -457,14 +451,10 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid fid;
struct cifs_open_parms oparms;
struct cifs_io_parms io_parms;
- int create_options = CREATE_NOT_DIR;
__le16 *utf16_path;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct kvec iov[2];
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
@@ -474,7 +464,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_WRITE;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_CREATE;
oparms.fid = &fid;
oparms.reconnect = false;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index d17587c2c4ab..ba9dadf3be24 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -196,7 +196,8 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
* may look wrong since the inodes may not have timed out by the time
* "ls" does a stat() call on them.
*/
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
+ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID))
fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL &&
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index f0795c856d8f..43a88e26d26b 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -101,7 +101,7 @@ int cifs_try_adding_channels(struct cifs_ses *ses)
iface_count = ses->iface_count;
if (iface_count <= 0) {
spin_unlock(&ses->iface_lock);
- cifs_dbg(FYI, "no iface list available to open channels\n");
+ cifs_dbg(VFS, "no iface list available to open channels\n");
return 0;
}
ifaces = kmemdup(ses->iface_list, iface_count*sizeof(*ifaces),
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index d70a2bb062df..eb994e313c6a 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -504,7 +504,8 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
}
static void
-cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb)
{
CIFSSMBQFSDeviceInfo(xid, tcon);
CIFSSMBQFSAttributeInfo(xid, tcon);
@@ -565,7 +566,7 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = FILE_READ_ATTRIBUTES;
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.disposition = FILE_OPEN;
oparms.path = full_path;
oparms.fid = &fid;
@@ -793,7 +794,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES;
- oparms.create_options = CREATE_NOT_DIR;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
oparms.disposition = FILE_OPEN;
oparms.path = full_path;
oparms.fid = &fid;
@@ -872,7 +873,7 @@ cifs_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
static int
cifs_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
- struct kstatfs *buf)
+ struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
{
int rc = -EOPNOTSUPP;
@@ -970,7 +971,8 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = FILE_READ_ATTRIBUTES;
- oparms.create_options = OPEN_REPARSE_POINT;
+ oparms.create_options = cifs_create_options(cifs_sb,
+ OPEN_REPARSE_POINT);
oparms.disposition = FILE_OPEN;
oparms.path = full_path;
oparms.fid = &fid;
@@ -1029,7 +1031,6 @@ cifs_make_node(unsigned int xid, struct inode *inode,
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct inode *newinode = NULL;
int rc = -EPERM;
- int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
FILE_ALL_INFO *buf = NULL;
struct cifs_io_parms io_parms;
__u32 oplock = 0;
@@ -1090,13 +1091,11 @@ cifs_make_node(unsigned int xid, struct inode *inode,
goto out;
}
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_WRITE;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+ CREATE_OPTION_SPECIAL);
oparms.disposition = FILE_CREATE;
oparms.path = full_path;
oparms.fid = &fid;
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 5ef5e97a6d13..1cf207564ff9 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -99,9 +99,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = desired_access;
oparms.disposition = create_disposition;
- oparms.create_options = create_options;
- if (backup_cred(cifs_sb))
- oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.fid = &fid;
oparms.reconnect = false;
oparms.mode = mode;
@@ -457,7 +455,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
/* If it is a root and its handle is cached then use it */
if (!strlen(full_path) && !no_cached_open) {
- rc = open_shroot(xid, tcon, &fid);
+ rc = open_shroot(xid, tcon, cifs_sb, &fid);
if (rc)
goto out;
@@ -474,9 +472,6 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
goto out;
}
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
cifs_get_readable_path(tcon, full_path, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 0516fc482d43..0511aaf451d4 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -743,7 +743,7 @@ __smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
{
struct close_cancelled_open *cancelled;
- cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
if (!cancelled)
return -ENOMEM;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 6250370c1170..e47190cae163 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -12,6 +12,7 @@
#include <linux/uuid.h>
#include <linux/sort.h>
#include <crypto/aead.h>
+#include "cifsfs.h"
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
@@ -654,7 +655,8 @@ smb2_cached_lease_break(struct work_struct *work)
/*
* Open the directory at the root of a share
*/
-int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
+int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, struct cifs_fid *pfid)
{
struct cifs_ses *ses = tcon->ses;
struct TCP_Server_Info *server = ses->server;
@@ -701,7 +703,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
oparms.tcon = tcon;
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
oparms.fid = pfid;
@@ -804,7 +806,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
sizeof(struct smb2_file_all_info),
&rsp_iov[1], sizeof(struct smb2_file_all_info),
(char *)&tcon->crfid.file_all_info))
- tcon->crfid.file_all_info_is_valid = 1;
+ tcon->crfid.file_all_info_is_valid = true;
oshr_exit:
mutex_unlock(&tcon->crfid.fid_mutex);
@@ -817,7 +819,8 @@ oshr_free:
}
static void
-smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb)
{
int rc;
__le16 srch_path = 0; /* Null - open root of share */
@@ -829,7 +832,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -837,7 +840,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
NULL);
else
- rc = open_shroot(xid, tcon, &fid);
+ rc = open_shroot(xid, tcon, cifs_sb, &fid);
if (rc)
return;
@@ -859,7 +862,8 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
}
static void
-smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb)
{
int rc;
__le16 srch_path = 0; /* Null - open root of share */
@@ -870,7 +874,7 @@ smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -905,10 +909,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -1115,7 +1116,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
void *data[1];
struct smb2_file_full_ea_info *ea = NULL;
struct kvec close_iov[1];
- int rc;
+ struct smb2_query_info_rsp *rsp;
+ int rc, used_len = 0;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -1138,6 +1140,38 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
cifs_sb);
if (rc == -ENODATA)
goto sea_exit;
+ } else {
+		/* If we are adding an attribute we should first check
+ * if there will be enough space available to store
+ * the new EA. If not we should not add it since we
+ * would not be able to even read the EAs back.
+ */
+ rc = smb2_query_info_compound(xid, tcon, utf16_path,
+ FILE_READ_EA,
+ FILE_FULL_EA_INFORMATION,
+ SMB2_O_INFO_FILE,
+ CIFSMaxBufSize -
+ MAX_SMB2_CREATE_RESPONSE_SIZE -
+ MAX_SMB2_CLOSE_RESPONSE_SIZE,
+ &rsp_iov[1], &resp_buftype[1], cifs_sb);
+ if (rc == 0) {
+ rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ used_len = le32_to_cpu(rsp->OutputBufferLength);
+ }
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ resp_buftype[1] = CIFS_NO_BUFFER;
+ memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
+ rc = 0;
+
+ /* Use a fudge factor of 256 bytes in case we collide
+ * with a different set_EAs command.
+ */
+		if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
+ MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
+ used_len + ea_name_len + ea_value_len + 1) {
+ rc = -ENOSPC;
+ goto sea_exit;
+ }
}
}
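
The new guard distills to one comparison. A standalone restatement (the buffer-size constants are kernel-side values not quoted in this hunk, hence the parameter):

    #include <stdbool.h>

    /* max_query is CIFSMaxBufSize minus the create and close response
     * reservations, i.e. the same expression passed to the query above */
    static bool ea_will_fit(unsigned int max_query, unsigned int used_len,
                            unsigned int ea_name_len, unsigned int ea_value_len)
    {
            /* 256-byte fudge factor absorbs a racing set-EA from another
             * client; the +1 presumably covers the NUL after the EA name */
            return max_query - 256 >= used_len + ea_name_len + ea_value_len + 1;
    }
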
@@ -1150,10 +1184,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_WRITE_EA;
oparms.disposition = FILE_OPEN;
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -1421,6 +1452,7 @@ req_res_key_exit:
static int
smb2_ioctl_query_info(const unsigned int xid,
struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
__le16 *path, int is_dir,
unsigned long p)
{
@@ -1446,6 +1478,7 @@ smb2_ioctl_query_info(const unsigned int xid,
struct kvec close_iov[1];
unsigned int size[2];
void *data[2];
+ int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
memset(rqst, 0, sizeof(rqst));
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
@@ -1476,10 +1509,7 @@ smb2_ioctl_query_info(const unsigned int xid,
memset(&oparms, 0, sizeof(oparms));
oparms.tcon = tcon;
oparms.disposition = FILE_OPEN;
- if (is_dir)
- oparms.create_options = CREATE_NOT_FILE;
- else
- oparms.create_options = CREATE_NOT_DIR;
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -1523,7 +1553,9 @@ smb2_ioctl_query_info(const unsigned int xid,
COMPOUND_FID, COMPOUND_FID,
qi.info_type, true, buffer,
qi.output_buffer_length,
- CIFSMaxBufSize);
+ CIFSMaxBufSize -
+ MAX_SMB2_CREATE_RESPONSE_SIZE -
+ MAX_SMB2_CLOSE_RESPONSE_SIZE);
}
} else if (qi.flags == PASSTHRU_SET_INFO) {
/* Can eventually relax perm check since server enforces too */
@@ -2046,6 +2078,66 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
+
+
+static int
+smb3_notify(const unsigned int xid, struct file *pfile,
+ void __user *ioc_buf)
+{
+ struct smb3_notify notify;
+ struct dentry *dentry = pfile->f_path.dentry;
+ struct inode *inode = file_inode(pfile);
+ struct cifs_sb_info *cifs_sb;
+ struct cifs_open_parms oparms;
+ struct cifs_fid fid;
+ struct cifs_tcon *tcon;
+ unsigned char *path = NULL;
+ __le16 *utf16_path = NULL;
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ int rc = 0;
+
+ path = build_path_from_dentry(dentry);
+ if (path == NULL)
+ return -ENOMEM;
+
+ cifs_sb = CIFS_SB(inode->i_sb);
+
+ utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
+ if (utf16_path == NULL) {
+ rc = -ENOMEM;
+ goto notify_exit;
+ }
+
+ if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
+ rc = -EFAULT;
+ goto notify_exit;
+ }
+
+ tcon = cifs_sb_master_tcon(cifs_sb);
+ oparms.tcon = tcon;
+ oparms.desired_access = FILE_READ_ATTRIBUTES;
+ oparms.disposition = FILE_OPEN;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
+ if (rc)
+ goto notify_exit;
+
+ rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+ notify.watch_tree, notify.completion_filter);
+
+ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+
+ cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);
+
+notify_exit:
+ kfree(path);
+ kfree(utf16_path);
+ return rc;
+}
+
static int
smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
const char *path, struct cifs_sb_info *cifs_sb,
@@ -2053,40 +2145,109 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_search_info *srch_inf)
{
__le16 *utf16_path;
- int rc;
- __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ struct smb_rqst rqst[2];
+ struct kvec rsp_iov[2];
+ int resp_buftype[2];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
+ int rc, flags = 0;
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_open_parms oparms;
+ struct smb2_query_directory_rsp *qd_rsp = NULL;
+ struct smb2_create_rsp *op_rsp = NULL;
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path)
return -ENOMEM;
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(rqst, 0, sizeof(rqst));
+ resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+ /* Open */
+ memset(&open_iov, 0, sizeof(open_iov));
+ rqst[0].rq_iov = open_iov;
+ rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
oparms.disposition = FILE_OPEN;
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = fid;
oparms.reconnect = false;
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
- kfree(utf16_path);
- if (rc) {
- cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
- return rc;
- }
+ rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
+ if (rc)
+ goto qdf_free;
+ smb2_set_next_command(tcon, &rqst[0]);
+ /* Query directory */
srch_inf->entries_in_buffer = 0;
srch_inf->index_of_last_entry = 2;
- rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
- fid->volatile_fid, 0, srch_inf);
- if (rc) {
- cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
+ memset(&qd_iov, 0, sizeof(qd_iov));
+ rqst[1].rq_iov = qd_iov;
+ rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
+
+ rc = SMB2_query_directory_init(xid, tcon, &rqst[1],
+ COMPOUND_FID, COMPOUND_FID,
+ 0, srch_inf->info_level);
+ if (rc)
+ goto qdf_free;
+
+ smb2_set_related(&rqst[1]);
+
+ rc = compound_send_recv(xid, tcon->ses, flags, 2, rqst,
+ resp_buftype, rsp_iov);
+
+ /* If the open failed there is nothing to do */
+ op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ if (op_rsp == NULL || op_rsp->sync_hdr.Status != STATUS_SUCCESS) {
+ cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
+ goto qdf_free;
+ }
+ fid->persistent_fid = op_rsp->PersistentFileId;
+ fid->volatile_fid = op_rsp->VolatileFileId;
+
+	/* Anything other than ENODATA means a genuine error */
+ if (rc && rc != -ENODATA) {
SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+ cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
+ trace_smb3_query_dir_err(xid, fid->persistent_fid,
+ tcon->tid, tcon->ses->Suid, 0, 0, rc);
+ goto qdf_free;
}
+
+ qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
+ if (qd_rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
+ trace_smb3_query_dir_done(xid, fid->persistent_fid,
+ tcon->tid, tcon->ses->Suid, 0, 0);
+ srch_inf->endOfSearch = true;
+ rc = 0;
+ goto qdf_free;
+ }
+
+ rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
+ srch_inf);
+ if (rc) {
+ trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
+ tcon->ses->Suid, 0, 0, rc);
+ goto qdf_free;
+ }
+ resp_buftype[1] = CIFS_NO_BUFFER;
+
+ trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
+ tcon->ses->Suid, 0, srch_inf->entries_in_buffer);
+
+ qdf_free:
+ kfree(utf16_path);
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_directory_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
return rc;
}
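
The rewrite turns two network round trips (create, then query-directory) into one compound request. Stripped of error handling, the recipe is the same one smb2ops.c uses elsewhere; COMPOUND_FID is the all-ones placeholder fid telling the server to reuse the handle produced by the previous PDU in the chain:

    memset(rqst, 0, sizeof(rqst));

    rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
    smb2_set_next_command(tcon, &rqst[0]);        /* more PDUs follow */

    rc = SMB2_query_directory_init(xid, tcon, &rqst[1],
                                   COMPOUND_FID, COMPOUND_FID,
                                   0, srch_inf->info_level);
    smb2_set_related(&rqst[1]);                   /* consume PDU 0's fid */

    rc = compound_send_recv(xid, tcon->ses, flags, 2, rqst,
                            resp_buftype, rsp_iov);  /* one round trip */
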
@@ -2268,10 +2429,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = desired_access;
oparms.disposition = FILE_OPEN;
- if (cifs_sb && backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -2327,7 +2485,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
static int
smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
- struct kstatfs *buf)
+ struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
{
struct smb2_query_info_rsp *rsp;
struct smb2_fs_full_size_info *info = NULL;
@@ -2342,7 +2500,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
FS_FULL_SIZE_INFORMATION,
SMB2_O_INFO_FILESYSTEM,
sizeof(struct smb2_fs_full_size_info),
- &rsp_iov, &buftype, NULL);
+ &rsp_iov, &buftype, cifs_sb);
if (rc)
goto qfs_exit;
@@ -2364,7 +2522,7 @@ qfs_exit:
static int
smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
- struct kstatfs *buf)
+ struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
{
int rc;
__le16 srch_path = 0; /* Null - open root of share */
@@ -2373,12 +2531,12 @@ smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid fid;
if (!tcon->posix_extensions)
- return smb2_queryfs(xid, tcon, buf);
+ return smb2_queryfs(xid, tcon, cifs_sb, buf);
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
- oparms.create_options = 0;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -2647,6 +2805,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
struct smb2_create_rsp *create_rsp;
struct smb2_ioctl_rsp *ioctl_rsp;
struct reparse_data_buffer *reparse_buf;
+ int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
u32 plen;
cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
@@ -2673,14 +2832,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
-
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
- if (is_reparse_point)
- oparms.create_options = OPEN_REPARSE_POINT;
-
+ oparms.create_options = cifs_create_options(cifs_sb, create_options);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -2697,7 +2849,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
- true /* is_fctl */, NULL, 0, CIFSMaxBufSize);
+ true /* is_fctl */, NULL, 0,
+ CIFSMaxBufSize -
+ MAX_SMB2_CREATE_RESPONSE_SIZE -
+ MAX_SMB2_CLOSE_RESPONSE_SIZE);
if (rc)
goto querty_exit;
@@ -2856,11 +3011,6 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
tcon = tlink_tcon(tlink);
xid = get_xid();
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
-
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path) {
rc = -ENOMEM;
@@ -2871,6 +3021,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
oparms.tcon = tcon;
oparms.desired_access = READ_CONTROL;
oparms.disposition = FILE_OPEN;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.fid = &fid;
oparms.reconnect = false;
@@ -2912,11 +3063,6 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
tcon = tlink_tcon(tlink);
xid = get_xid();
- if (backup_cred(cifs_sb))
- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
- else
- oparms.create_options = 0;
-
if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
access_flags = WRITE_OWNER;
else
@@ -2931,6 +3077,7 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
oparms.tcon = tcon;
oparms.desired_access = access_flags;
+ oparms.create_options = cifs_create_options(cifs_sb, 0);
oparms.disposition = FILE_OPEN;
oparms.path = path;
oparms.fid = &fid;
@@ -3095,28 +3242,32 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
}
/*
+ * Extending the file
+ */
+ if ((keep_size == false) && i_size_read(inode) < off + len) {
+ if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0)
+ smb2_set_sparse(xid, tcon, cfile, inode, false);
+
+ eof = cpu_to_le64(off + len);
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, cfile->pid, &eof);
+ if (rc == 0) {
+ cifsi->server_eof = off + len;
+ cifs_setsize(inode, off + len);
+ cifs_truncate_page(inode->i_mapping, inode->i_size);
+ truncate_setsize(inode, off + len);
+ }
+ goto out;
+ }
+
+ /*
* Files are non-sparse by default so falloc may be a no-op
- * Must check if file sparse. If not sparse, and not extending
- * then no need to do anything since file already allocated
+	 * Must check if the file is sparse. If not sparse, and since we are not
+	 * extending, no need to do anything since the file is already allocated
*/
if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
- if (keep_size == true)
- rc = 0;
- /* check if extending file */
- else if (i_size_read(inode) >= off + len)
- /* not extending file and already not sparse */
- rc = 0;
- /* BB: in future add else clause to extend file */
- else
- rc = -EOPNOTSUPP;
- if (rc)
- trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
- tcon->tid, tcon->ses->Suid, off, len, rc);
- else
- trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
- tcon->tid, tcon->ses->Suid, off, len);
- free_xid(xid);
- return rc;
+ rc = 0;
+ goto out;
}
if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
@@ -3130,25 +3281,14 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
*/
if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
rc = -EOPNOTSUPP;
- trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
- tcon->tid, tcon->ses->Suid, off, len, rc);
- free_xid(xid);
- return rc;
- }
-
- smb2_set_sparse(xid, tcon, cfile, inode, false);
- rc = 0;
- } else {
- smb2_set_sparse(xid, tcon, cfile, inode, false);
- rc = 0;
- if (i_size_read(inode) < off + len) {
- eof = cpu_to_le64(off + len);
- rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
- cfile->fid.volatile_fid, cfile->pid,
- &eof);
+ goto out;
}
}
+ smb2_set_sparse(xid, tcon, cfile, inode, false);
+ rc = 0;
+
+out:
if (rc)
trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
tcon->ses->Suid, off, len, rc);
@@ -4420,7 +4560,6 @@ smb2_make_node(unsigned int xid, struct inode *inode,
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
int rc = -EPERM;
- int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
FILE_ALL_INFO *buf = NULL;
struct cifs_io_parms io_parms;
__u32 oplock = 0;
@@ -4456,13 +4595,11 @@ smb2_make_node(unsigned int xid, struct inode *inode,
goto out;
}
- if (backup_cred(cifs_sb))
- create_options |= CREATE_OPEN_BACKUP_INTENT;
-
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_WRITE;
- oparms.create_options = create_options;
+ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+ CREATE_OPTION_SPECIAL);
oparms.disposition = FILE_CREATE;
oparms.path = full_path;
oparms.fid = &fid;
@@ -4691,6 +4828,7 @@ struct smb_version_operations smb21_operations = {
.wp_retry_size = smb2_wp_retry_size,
.dir_needs_close = smb2_dir_needs_close,
.enum_snapshots = smb3_enum_snapshots,
+ .notify = smb3_notify,
.get_dfs_refer = smb2_get_dfs_refer,
.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
@@ -4797,6 +4935,7 @@ struct smb_version_operations smb30_operations = {
.dir_needs_close = smb2_dir_needs_close,
.fallocate = smb3_fallocate,
.enum_snapshots = smb3_enum_snapshots,
+ .notify = smb3_notify,
.init_transform_rq = smb3_init_transform_rq,
.is_transform_hdr = smb3_is_transform_hdr,
.receive_transform = smb3_receive_transform,
@@ -4907,6 +5046,7 @@ struct smb_version_operations smb311_operations = {
.dir_needs_close = smb2_dir_needs_close,
.fallocate = smb3_fallocate,
.enum_snapshots = smb3_enum_snapshots,
+ .notify = smb3_notify,
.init_transform_rq = smb3_init_transform_rq,
.is_transform_hdr = smb3_is_transform_hdr,
.receive_transform = smb3_receive_transform,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 9434f6dd8df3..1234f9ccab03 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -312,7 +312,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
if (server->tcpStatus != CifsNeedReconnect)
break;
- if (--retries)
+ if (retries && --retries)
continue;
/*
@@ -350,9 +350,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
}
rc = cifs_negotiate_protocol(0, tcon->ses);
- if (!rc && tcon->ses->need_reconnect)
+ if (!rc && tcon->ses->need_reconnect) {
rc = cifs_setup_session(0, tcon->ses, nls_codepage);
-
+ if ((rc == -EACCES) && !tcon->retry) {
+ rc = -EHOSTDOWN;
+ mutex_unlock(&tcon->ses->session_mutex);
+ goto failed;
+ }
+ }
if (rc || !tcon->need_reconnect) {
mutex_unlock(&tcon->ses->session_mutex);
goto out;
@@ -397,6 +402,7 @@ out:
case SMB2_SET_INFO:
rc = -EAGAIN;
}
+failed:
unload_nls(nls_codepage);
return rc;
}
@@ -1933,6 +1939,16 @@ parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
buf->IndexNumber = pdisk_id->DiskFileId;
}
+static void
+parse_posix_ctxt(struct create_context *cc, struct smb_posix_info *pposix_inf)
+{
+ /* struct smb_posix_info *ppinf = (struct smb_posix_info *)cc; */
+
+ /* TODO: Need to add parsing for the context and return */
+ printk_once(KERN_WARNING
+ "SMB3 3.11 POSIX response context not completed yet\n");
+}
+
void
smb2_parse_contexts(struct TCP_Server_Info *server,
struct smb2_create_rsp *rsp,
@@ -1944,6 +1960,9 @@ smb2_parse_contexts(struct TCP_Server_Info *server,
unsigned int next;
unsigned int remaining;
char *name;
+ const char smb3_create_tag_posix[] = {0x93, 0xAD, 0x25, 0x50, 0x9C,
+ 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
+ 0xDE, 0x96, 0x8B, 0xCD, 0x7C};
*oplock = 0;
data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
@@ -1963,6 +1982,15 @@ smb2_parse_contexts(struct TCP_Server_Info *server,
else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
parse_query_id_ctxt(cc, buf);
+ else if ((le16_to_cpu(cc->NameLength) == 16)) {
+ if (memcmp(name, smb3_create_tag_posix, 16) == 0)
+ parse_posix_ctxt(cc, NULL);
+ }
+ /* else {
+ cifs_dbg(FYI, "Context not matched with len %d\n",
+ le16_to_cpu(cc->NameLength));
+ cifs_dump_mem("Cctxt name: ", name, 4);
+ } */
next = le32_to_cpu(cc->Next);
if (!next)
@@ -2199,13 +2227,14 @@ create_sd_buf(umode_t mode, unsigned int *len)
struct cifs_ace *pace;
unsigned int sdlen, acelen;
- *len = roundup(sizeof(struct crt_sd_ctxt) + sizeof(struct cifs_ace), 8);
+ *len = roundup(sizeof(struct crt_sd_ctxt) + sizeof(struct cifs_ace) * 2,
+ 8);
buf = kzalloc(*len, GFP_KERNEL);
if (buf == NULL)
return buf;
sdlen = sizeof(struct smb3_sd) + sizeof(struct smb3_acl) +
- sizeof(struct cifs_ace);
+ 2 * sizeof(struct cifs_ace);
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct crt_sd_ctxt, sd));
@@ -2232,8 +2261,12 @@ create_sd_buf(umode_t mode, unsigned int *len)
/* create one ACE to hold the mode embedded in reserved special SID */
pace = (struct cifs_ace *)(sizeof(struct crt_sd_ctxt) + (char *)buf);
acelen = setup_special_mode_ACE(pace, (__u64)mode);
+ /* and one more ACE to allow access for authenticated users */
+ pace = (struct cifs_ace *)(acelen + (sizeof(struct crt_sd_ctxt) +
+ (char *)buf));
+ acelen += setup_authusers_ACE(pace);
buf->acl.AclSize = cpu_to_le16(sizeof(struct cifs_acl) + acelen);
- buf->acl.AceCount = cpu_to_le16(1);
+ buf->acl.AceCount = cpu_to_le16(2);
return buf;
}
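
For readability, here is the ACE placement create_sd_buf() now performs, with the pointer arithmetic written in the conventional order (same layout: two ACEs packed immediately after the context header, into space reserved at the maximum ACE size):

    *len = roundup(sizeof(struct crt_sd_ctxt) + 2 * sizeof(struct cifs_ace), 8);

    pace = (struct cifs_ace *)((char *)buf + sizeof(struct crt_sd_ctxt));
    acelen = setup_special_mode_ACE(pace, (__u64)mode);  /* mode in special SID */

    pace = (struct cifs_ace *)((char *)pace + acelen);   /* directly after ACE 1 */
    acelen += setup_authusers_ACE(pace);                 /* authenticated users */

    buf->acl.AclSize = cpu_to_le16(sizeof(struct cifs_acl) + acelen);
    buf->acl.AceCount = cpu_to_le16(2);
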
@@ -3352,6 +3385,7 @@ SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
+ /* See note 354 of MS-SMB2, 64K max */
req->OutputBufferLength =
cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
req->CompletionFilter = cpu_to_le32(completion_filter);
@@ -4018,6 +4052,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
wdata->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid, wdata->offset,
wdata->bytes, wdata->result);
+ if (wdata->result == -ENOSPC)
+ printk_once(KERN_WARNING "Out of space writing to %s\n",
+ tcon->treeName);
} else
trace_smb3_write_done(0 /* no xid */,
wdata->cfile->fid.persistent_fid,
@@ -4296,56 +4333,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
/*
* Readdir/FindFirst
*/
-int
-SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, int index,
- struct cifs_search_info *srch_inf)
+int SMB2_query_directory_init(const unsigned int xid,
+ struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid,
+ int index, int info_level)
{
- struct smb_rqst rqst;
+ struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_query_directory_req *req;
- struct smb2_query_directory_rsp *rsp = NULL;
- struct kvec iov[2];
- struct kvec rsp_iov;
- int rc = 0;
- int len;
- int resp_buftype = CIFS_NO_BUFFER;
unsigned char *bufptr;
- struct TCP_Server_Info *server;
- struct cifs_ses *ses = tcon->ses;
__le16 asteriks = cpu_to_le16('*');
- char *end_of_smb;
- unsigned int output_size = CIFSMaxBufSize;
- size_t info_buf_size;
- int flags = 0;
+ unsigned int output_size = CIFSMaxBufSize -
+ MAX_SMB2_CREATE_RESPONSE_SIZE -
+ MAX_SMB2_CLOSE_RESPONSE_SIZE;
unsigned int total_len;
-
- if (ses && (ses->server))
- server = ses->server;
- else
- return -EIO;
+ struct kvec *iov = rqst->rq_iov;
+ int len, rc;
rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req,
&total_len);
if (rc)
return rc;
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
-
- switch (srch_inf->info_level) {
+ switch (info_level) {
case SMB_FIND_FILE_DIRECTORY_INFO:
req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
- info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
break;
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
- info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
break;
default:
cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
- srch_inf->info_level);
- rc = -EINVAL;
- goto qdir_exit;
+ info_level);
+ return -EINVAL;
}
req->FileIndex = cpu_to_le32(index);
@@ -4374,40 +4393,50 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
- memset(&rqst, 0, sizeof(struct smb_rqst));
- rqst.rq_iov = iov;
- rqst.rq_nvec = 2;
-
trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, output_size);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(req);
- rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
+ return 0;
+}
- if (rc) {
- if (rc == -ENODATA &&
- rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
- trace_smb3_query_dir_done(xid, persistent_fid,
- tcon->tid, tcon->ses->Suid, index, 0);
- srch_inf->endOfSearch = true;
- rc = 0;
- } else {
- trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
- tcon->ses->Suid, index, 0, rc);
- cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
- }
- goto qdir_exit;
+void SMB2_query_directory_free(struct smb_rqst *rqst)
+{
+ if (rqst && rqst->rq_iov) {
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ }
+}
+
+int
+smb2_parse_query_directory(struct cifs_tcon *tcon,
+ struct kvec *rsp_iov,
+ int resp_buftype,
+ struct cifs_search_info *srch_inf)
+{
+ struct smb2_query_directory_rsp *rsp;
+ size_t info_buf_size;
+ char *end_of_smb;
+ int rc;
+
+ rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;
+
+ switch (srch_inf->info_level) {
+ case SMB_FIND_FILE_DIRECTORY_INFO:
+ info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
+ break;
+ case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+ info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
+ break;
+ default:
+ cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
+ srch_inf->info_level);
+ return -EINVAL;
}
rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
- le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
+ le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
info_buf_size);
- if (rc) {
- trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
- tcon->ses->Suid, index, 0, rc);
- goto qdir_exit;
- }
+ if (rc)
+ return rc;
srch_inf->unicode = true;
@@ -4420,7 +4449,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = srch_inf->last_entry =
(char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
- end_of_smb = rsp_iov.iov_len + (char *)rsp;
+ end_of_smb = rsp_iov->iov_len + (char *)rsp;
srch_inf->entries_in_buffer =
num_entries(srch_inf->srch_entries_start, end_of_smb,
&srch_inf->last_entry, info_buf_size);
@@ -4435,11 +4464,72 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
else
cifs_tcon_dbg(VFS, "illegal search buffer type\n");
+ return 0;
+}
+
+int
+SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, int index,
+ struct cifs_search_info *srch_inf)
+{
+ struct smb_rqst rqst;
+ struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
+ struct smb2_query_directory_rsp *rsp = NULL;
+ int resp_buftype = CIFS_NO_BUFFER;
+ struct kvec rsp_iov;
+ int rc = 0;
+ struct cifs_ses *ses = tcon->ses;
+ int flags = 0;
+
+ if (!ses || !(ses->server))
+ return -EIO;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ memset(&iov, 0, sizeof(iov));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
+
+ rc = SMB2_query_directory_init(xid, tcon, &rqst, persistent_fid,
+ volatile_fid, index,
+ srch_inf->info_level);
+ if (rc)
+ goto qdir_exit;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
+
+ if (rc) {
+ if (rc == -ENODATA &&
+ rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
+ trace_smb3_query_dir_done(xid, persistent_fid,
+ tcon->tid, tcon->ses->Suid, index, 0);
+ srch_inf->endOfSearch = true;
+ rc = 0;
+ } else {
+ trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
+ tcon->ses->Suid, index, 0, rc);
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+ }
+ goto qdir_exit;
+ }
+
+ rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
+ srch_inf);
+ if (rc) {
+ trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
+ tcon->ses->Suid, index, 0, rc);
+ goto qdir_exit;
+ }
+ resp_buftype = CIFS_NO_BUFFER;
+
trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
tcon->ses->Suid, index, srch_inf->entries_in_buffer);
- return rc;
qdir_exit:
+ SMB2_query_directory_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 7b1c379fdf7a..fa03df130f1a 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -1282,6 +1282,8 @@ struct smb2_echo_rsp {
#define SMB2_INDEX_SPECIFIED 0x04
#define SMB2_REOPEN 0x10
+#define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
+
struct smb2_query_directory_req {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 33 */
@@ -1517,6 +1519,7 @@ struct smb3_fs_vol_info {
#define FILE_NORMALIZED_NAME_INFORMATION 48
#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
#define FILE_STANDARD_LINK_INFORMATION 54
+#define FILE_ID_INFORMATION 59
struct smb2_file_internal_info {
__le64 IndexNumber;
@@ -1591,6 +1594,21 @@ struct smb2_file_network_open_info {
__le32 Reserved;
} __packed; /* level 34 Query also similar returned in close rsp and open rsp */
+/* See MS-FSCC 2.4.43 */
+struct smb2_file_id_information {
+ __le64 VolumeSerialNumber;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+} __packed; /* level 59 */
+
extern char smb2_padding[7];
+/* equivalent of the contents of SMB3.1.1 POSIX open context response */
+struct smb_posix_info {
+ __le32 nlink;
+ __le32 reparse_tag;
+ __le32 mode;
+ kuid_t uid;
+ kuid_t gid;
+};
#endif /* _SMB2PDU_H */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 27d29f2eb6c8..de6388ef344f 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -68,7 +68,7 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
extern int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_fid *pfid);
+ struct cifs_sb_info *cifs_sb, struct cifs_fid *pfid);
extern void close_shroot(struct cached_fid *cfid);
extern void close_shroot_lease(struct cached_fid *cfid);
extern void close_shroot_lease_locked(struct cached_fid *cfid);
@@ -197,6 +197,11 @@ extern int SMB2_echo(struct TCP_Server_Info *server);
extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf);
+extern int SMB2_query_directory_init(unsigned int xid, struct cifs_tcon *tcon,
+ struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid,
+ int index, int info_level);
+extern void SMB2_query_directory_free(struct smb_rqst *rqst);
extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 pid,
__le64 *eof);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 387c88704c52..08b703b7a15e 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -104,13 +104,14 @@ int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
{
struct cifs_chan *chan;
struct cifs_ses *ses = NULL;
+ struct TCP_Server_Info *it = NULL;
int i;
int rc = 0;
spin_lock(&cifs_tcp_ses_lock);
- list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
- list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(it, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses, &it->smb_ses_list, smb_ses_list) {
if (ses->Suid == ses_id)
goto found;
}
@@ -685,6 +686,8 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
* The default is for the mid to be synchronous, so the
* default callback just wakes up the current task.
*/
+ get_task_struct(current);
+ temp->creator = current;
temp->callback = cifs_wake_up_task;
temp->callback_data = current;
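
The smb2_get_sign_key() change above fixes a subtle bug: the function's server parameter was reused as the list_for_each_entry() cursor, so by the time the function compared per-channel pointers against it (later in the function, outside this hunk) the caller's value was gone. The same pitfall in miniature, as a self-contained toy:

    #include <stdio.h>

    struct node { int id; struct node *next; };

    /* buggy: the 'target' parameter doubles as the loop cursor, so the
     * comparison after the loop never sees the caller's pointer */
    static int walk_then_test_bad(struct node *head, struct node *target)
    {
            for (target = head; target; target = target->next)
                    ;                    /* 'target' clobbered here */
            return target == head;       /* target is NULL by now */
    }

    /* fixed, mirroring the patch: a dedicated cursor does the walking */
    static int walk_then_test_ok(struct node *head, struct node *target)
    {
            struct node *it;

            for (it = head; it; it = it->next)
                    ;
            return target == head;       /* still the caller's value */
    }

    int main(void)
    {
            struct node b = { 2, NULL };
            struct node a = { 1, &b };

            printf("bad=%d ok=%d\n",
                   walk_then_test_bad(&a, &a),   /* 0: comparison corrupted */
                   walk_then_test_ok(&a, &a));   /* 1: as intended */
            return 0;
    }
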
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index e7e350b13d6a..4cb0d5f7ce45 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -547,6 +547,33 @@ DEFINE_EVENT(smb3_exit_err_class, smb3_##name, \
DEFINE_SMB3_EXIT_ERR_EVENT(exit_err);
+
+DECLARE_EVENT_CLASS(smb3_sync_err_class,
+ TP_PROTO(unsigned long ino,
+ int rc),
+ TP_ARGS(ino, rc),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(int, rc)
+ ),
+ TP_fast_assign(
+ __entry->ino = ino;
+ __entry->rc = rc;
+ ),
+ TP_printk("\tino=%lu rc=%d",
+ __entry->ino, __entry->rc)
+)
+
+#define DEFINE_SMB3_SYNC_ERR_EVENT(name) \
+DEFINE_EVENT(smb3_sync_err_class, cifs_##name, \
+ TP_PROTO(unsigned long ino, \
+ int rc), \
+ TP_ARGS(ino, rc))
+
+DEFINE_SMB3_SYNC_ERR_EVENT(fsync_err);
+DEFINE_SMB3_SYNC_ERR_EVENT(flush_err);
+
+
DECLARE_EVENT_CLASS(smb3_enter_exit_class,
TP_PROTO(unsigned int xid,
const char *func_name),
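
The two new events can be armed through tracefs like any other tracepoint. A small C helper (the paths assume this header's TRACE_SYSTEM is "smb3" and that tracefs sits at the usual debugfs location; both are assumptions worth verifying on the target system):

    #include <stdio.h>

    int main(void)
    {
            static const char *events[] = {
                    "/sys/kernel/debug/tracing/events/smb3/cifs_fsync_err/enable",
                    "/sys/kernel/debug/tracing/events/smb3/cifs_flush_err/enable",
            };
            for (int i = 0; i < 2; i++) {
                    FILE *f = fopen(events[i], "w");

                    if (!f) {
                            perror(events[i]);
                            continue;
                    }
                    fputs("1", f);  /* arm the event; hits show up in trace_pipe */
                    fclose(f);
            }
            return 0;
    }
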
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 3d2e11f85cba..cb3ee916f527 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -76,6 +76,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
* The default is for the mid to be synchronous, so the
* default callback just wakes up the current task.
*/
+ get_task_struct(current);
+ temp->creator = current;
temp->callback = cifs_wake_up_task;
temp->callback_data = current;
@@ -158,6 +160,7 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
}
}
#endif
+ put_task_struct(midEntry->creator);
mempool_free(midEntry, cifs_mid_poolp);
}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index db4ba8f6077e..b8299173ea7e 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -32,7 +32,8 @@
#include "cifs_unicode.h"
#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
-#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
+#define CIFS_XATTR_CIFS_ACL "system.cifs_acl" /* DACL only */
+#define CIFS_XATTR_CIFS_NTSD "system.cifs_ntsd" /* owner plus DACL */
#define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
#define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
/*
@@ -40,12 +41,62 @@
* confusing users and using the 20+ year old term 'cifs' when it is no longer
* secure, replaced by SMB2 (then even more highly secure SMB3) many years ago
*/
-#define SMB3_XATTR_CIFS_ACL "system.smb3_acl"
+#define SMB3_XATTR_CIFS_ACL "system.smb3_acl" /* DACL only */
+#define SMB3_XATTR_CIFS_NTSD "system.smb3_ntsd" /* owner plus DACL */
#define SMB3_XATTR_ATTRIB "smb3.dosattrib" /* full name: user.smb3.dosattrib */
#define SMB3_XATTR_CREATETIME "smb3.creationtime" /* user.smb3.creationtime */
/* BB need to add server (Samba e.g) support for security and trusted prefix */
-enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT };
+enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT,
+ XATTR_CIFS_NTSD };
+
+static int cifs_attrib_set(unsigned int xid, struct cifs_tcon *pTcon,
+ struct inode *inode, char *full_path,
+ const void *value, size_t size)
+{
+ ssize_t rc = -EOPNOTSUPP;
+ __u32 *pattrib = (__u32 *)value;
+ __u32 attrib;
+ FILE_BASIC_INFO info_buf;
+
+ if ((value == NULL) || (size != sizeof(__u32)))
+ return -ERANGE;
+
+ memset(&info_buf, 0, sizeof(info_buf));
+ attrib = *pattrib;
+ info_buf.Attributes = cpu_to_le32(attrib);
+ if (pTcon->ses->server->ops->set_file_info)
+ rc = pTcon->ses->server->ops->set_file_info(inode, full_path,
+ &info_buf, xid);
+ if (rc == 0)
+ CIFS_I(inode)->cifsAttrs = attrib;
+
+ return rc;
+}
+
+static int cifs_creation_time_set(unsigned int xid, struct cifs_tcon *pTcon,
+ struct inode *inode, char *full_path,
+ const void *value, size_t size)
+{
+ ssize_t rc = -EOPNOTSUPP;
+ __u64 *pcreation_time = (__u64 *)value;
+ __u64 creation_time;
+ FILE_BASIC_INFO info_buf;
+
+ if ((value == NULL) || (size != sizeof(__u64)))
+ return -ERANGE;
+
+ memset(&info_buf, 0, sizeof(info_buf));
+ creation_time = *pcreation_time;
+ info_buf.CreationTime = cpu_to_le64(creation_time);
+ if (pTcon->ses->server->ops->set_file_info)
+ rc = pTcon->ses->server->ops->set_file_info(inode, full_path,
+ &info_buf, xid);
+ if (rc == 0)
+ CIFS_I(inode)->createtime = creation_time;
+
+ return rc;
+}
static int cifs_xattr_set(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
@@ -86,6 +137,23 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
switch (handler->flags) {
case XATTR_USER:
+ cifs_dbg(FYI, "%s:setting user xattr %s\n", __func__, name);
+ if ((strcmp(name, CIFS_XATTR_ATTRIB) == 0) ||
+ (strcmp(name, SMB3_XATTR_ATTRIB) == 0)) {
+ rc = cifs_attrib_set(xid, pTcon, inode, full_path,
+ value, size);
+ if (rc == 0) /* force revalidate of the inode */
+ CIFS_I(inode)->time = 0;
+ break;
+ } else if ((strcmp(name, CIFS_XATTR_CREATETIME) == 0) ||
+ (strcmp(name, SMB3_XATTR_CREATETIME) == 0)) {
+ rc = cifs_creation_time_set(xid, pTcon, inode,
+ full_path, value, size);
+ if (rc == 0) /* force revalidate of the inode */
+ CIFS_I(inode)->time = 0;
+ break;
+ }
+
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto out;
@@ -95,7 +163,8 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
cifs_sb->local_nls, cifs_sb);
break;
- case XATTR_CIFS_ACL: {
+ case XATTR_CIFS_ACL:
+ case XATTR_CIFS_NTSD: {
struct cifs_ntsd *pacl;
if (!value)
@@ -106,12 +175,25 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
} else {
memcpy(pacl, value, size);
if (value &&
- pTcon->ses->server->ops->set_acl)
- rc = pTcon->ses->server->ops->set_acl(pacl,
- size, inode,
- full_path, CIFS_ACL_DACL);
- else
+ pTcon->ses->server->ops->set_acl) {
+ rc = 0;
+ if (handler->flags == XATTR_CIFS_NTSD) {
+ /* set owner and DACL */
+ rc = pTcon->ses->server->ops->set_acl(
+ pacl, size, inode,
+ full_path,
+ CIFS_ACL_OWNER);
+ }
+ if (rc == 0) {
+ /* set DACL */
+ rc = pTcon->ses->server->ops->set_acl(
+ pacl, size, inode,
+ full_path,
+ CIFS_ACL_DACL);
+ }
+ } else {
rc = -EOPNOTSUPP;
+ }
if (rc == 0) /* force revalidate of the inode */
CIFS_I(inode)->time = 0;
kfree(pacl);
@@ -179,7 +261,7 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
void *value, size_t size)
{
ssize_t rc;
- __u64 * pcreatetime;
+ __u64 *pcreatetime;
rc = cifs_revalidate_dentry_attr(dentry);
if (rc)
@@ -244,7 +326,9 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
full_path, name, value, size, cifs_sb);
break;
- case XATTR_CIFS_ACL: {
+ case XATTR_CIFS_ACL:
+ case XATTR_CIFS_NTSD: {
+ /* the whole ntsd is fetched regardless */
u32 acllen;
struct cifs_ntsd *pacl;
@@ -382,6 +466,26 @@ static const struct xattr_handler smb3_acl_xattr_handler = {
.set = cifs_xattr_set,
};
+static const struct xattr_handler cifs_cifs_ntsd_xattr_handler = {
+ .name = CIFS_XATTR_CIFS_NTSD,
+ .flags = XATTR_CIFS_NTSD,
+ .get = cifs_xattr_get,
+ .set = cifs_xattr_set,
+};
+
+/*
+ * Although this is just an alias for the above, we need to move away from
+ * confusing users with the 20 year old term 'cifs', which is no longer
+ * secure: it was replaced by SMB2, and later the highly secure SMB3,
+ * a long time ago.
+ */
+static const struct xattr_handler smb3_ntsd_xattr_handler = {
+ .name = SMB3_XATTR_CIFS_NTSD,
+ .flags = XATTR_CIFS_NTSD,
+ .get = cifs_xattr_get,
+ .set = cifs_xattr_set,
+};
+
static const struct xattr_handler cifs_posix_acl_access_xattr_handler = {
.name = XATTR_NAME_POSIX_ACL_ACCESS,
.flags = XATTR_ACL_ACCESS,
@@ -401,6 +505,8 @@ const struct xattr_handler *cifs_xattr_handlers[] = {
&cifs_os2_xattr_handler,
&cifs_cifs_acl_xattr_handler,
&smb3_acl_xattr_handler, /* alias for above since avoiding "cifs" */
+ &cifs_cifs_ntsd_xattr_handler,
+ &smb3_ntsd_xattr_handler, /* alias for above since avoiding "cifs" */
&cifs_posix_acl_access_xattr_handler,
&cifs_posix_acl_default_xattr_handler,
NULL
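
Exercising the new attribute setter from userspace. The xattr name comes from the CIFS_XATTR_ATTRIB comment above (full name user.cifs.dosattrib), and the value must be exactly sizeof(__u32) or cifs_attrib_set() returns -ERANGE; FILE_ATTRIBUTE_HIDDEN (0x2) is the standard SMB attribute bit, used here purely for illustration:

    #include <stdio.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
            unsigned int attrib = 0x2;  /* FILE_ATTRIBUTE_HIDDEN */

            if (argc < 2) {
                    fprintf(stderr, "usage: %s <file-on-cifs-mount>\n", argv[0]);
                    return 1;
            }
            /* a 4-byte value, per the sizeof(__u32) check in cifs_attrib_set() */
            if (setxattr(argv[1], "user.cifs.dosattrib",
                         &attrib, sizeof(attrib), 0) != 0)
                    perror("setxattr");
            return 0;
    }
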
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
deleted file mode 100644
index 358ea2ecf36b..000000000000
--- a/fs/compat_ioctl.c
+++ /dev/null
@@ -1,261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- *
- * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
- * Copyright (C) 2003 Pavel Machek (pavel@ucw.cz)
- *
- * These routines maintain argument size conversion between 32bit and 64bit
- * ioctls.
- */
-
-#include <linux/types.h>
-#include <linux/compat.h>
-#include <linux/kernel.h>
-#include <linux/capability.h>
-#include <linux/compiler.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/ioctl.h>
-#include <linux/if.h>
-#include <linux/raid/md_u.h>
-#include <linux/falloc.h>
-#include <linux/file.h>
-#include <linux/ppp-ioctl.h>
-#include <linux/if_pppox.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h>
-#include <linux/blkdev.h>
-#include <linux/serial.h>
-#include <linux/ctype.h>
-#include <linux/syscalls.h>
-#include <linux/gfp.h>
-#include <linux/cec.h>
-
-#include "internal.h"
-
-#ifdef CONFIG_BLOCK
-#include <linux/cdrom.h>
-#include <linux/fd.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_ioctl.h>
-#include <scsi/sg.h>
-#endif
-
-#include <linux/uaccess.h>
-#include <linux/watchdog.h>
-
-#include <linux/hiddev.h>
-
-
-#include <linux/sort.h>
-
-/*
- * simple reversible transform to make our table more evenly
- * distributed after sorting.
- */
-#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
-
-#define COMPATIBLE_IOCTL(cmd) XFORM((u32)cmd),
-static unsigned int ioctl_pointer[] = {
-#ifdef CONFIG_BLOCK
-/* Big S */
-COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
-COMPATIBLE_IOCTL(SCSI_IOCTL_DOORLOCK)
-COMPATIBLE_IOCTL(SCSI_IOCTL_DOORUNLOCK)
-COMPATIBLE_IOCTL(SCSI_IOCTL_TEST_UNIT_READY)
-COMPATIBLE_IOCTL(SCSI_IOCTL_GET_BUS_NUMBER)
-COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
-COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
-COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
-#endif
-#ifdef CONFIG_BLOCK
-/* SG stuff */
-COMPATIBLE_IOCTL(SG_IO)
-COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
-COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
-COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
-COMPATIBLE_IOCTL(SG_EMULATED_HOST)
-COMPATIBLE_IOCTL(SG_GET_TRANSFORM)
-COMPATIBLE_IOCTL(SG_SET_RESERVED_SIZE)
-COMPATIBLE_IOCTL(SG_GET_RESERVED_SIZE)
-COMPATIBLE_IOCTL(SG_GET_SCSI_ID)
-COMPATIBLE_IOCTL(SG_SET_FORCE_LOW_DMA)
-COMPATIBLE_IOCTL(SG_GET_LOW_DMA)
-COMPATIBLE_IOCTL(SG_SET_FORCE_PACK_ID)
-COMPATIBLE_IOCTL(SG_GET_PACK_ID)
-COMPATIBLE_IOCTL(SG_GET_NUM_WAITING)
-COMPATIBLE_IOCTL(SG_SET_DEBUG)
-COMPATIBLE_IOCTL(SG_GET_SG_TABLESIZE)
-COMPATIBLE_IOCTL(SG_GET_COMMAND_Q)
-COMPATIBLE_IOCTL(SG_SET_COMMAND_Q)
-COMPATIBLE_IOCTL(SG_GET_VERSION_NUM)
-COMPATIBLE_IOCTL(SG_NEXT_CMD_LEN)
-COMPATIBLE_IOCTL(SG_SCSI_RESET)
-COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
-COMPATIBLE_IOCTL(SG_SET_KEEP_ORPHAN)
-COMPATIBLE_IOCTL(SG_GET_KEEP_ORPHAN)
-#endif
-};
-
-/*
- * Convert common ioctl arguments based on their command number
- *
- * Please do not add any code in here. Instead, implement
- * a compat_ioctl operation in the place that handles the
- * ioctl for the native case.
- */
-static long do_ioctl_trans(unsigned int cmd,
- unsigned long arg, struct file *file)
-{
- return -ENOIOCTLCMD;
-}
-
-static int compat_ioctl_check_table(unsigned int xcmd)
-{
-#ifdef CONFIG_BLOCK
- int i;
- const int max = ARRAY_SIZE(ioctl_pointer) - 1;
-
- BUILD_BUG_ON(max >= (1 << 16));
-
- /* guess initial offset into table, assuming a
- normalized distribution */
- i = ((xcmd >> 16) * max) >> 16;
-
- /* do linear search up first, until greater or equal */
- while (ioctl_pointer[i] < xcmd && i < max)
- i++;
-
- /* then do linear search down */
- while (ioctl_pointer[i] > xcmd && i > 0)
- i--;
-
- return ioctl_pointer[i] == xcmd;
-#else
- return 0;
-#endif
-}
-
-COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
- compat_ulong_t, arg32)
-{
- unsigned long arg = arg32;
- struct fd f = fdget(fd);
- int error = -EBADF;
- if (!f.file)
- goto out;
-
- /* RED-PEN how should LSM module know it's handling 32bit? */
- error = security_file_ioctl(f.file, cmd, arg);
- if (error)
- goto out_fput;
-
- switch (cmd) {
- /* these are never seen by ->ioctl(), no argument or int argument */
- case FIOCLEX:
- case FIONCLEX:
- case FIFREEZE:
- case FITHAW:
- case FICLONE:
- goto do_ioctl;
- /* these are never seen by ->ioctl(), pointer argument */
- case FIONBIO:
- case FIOASYNC:
- case FIOQSIZE:
- case FS_IOC_FIEMAP:
- case FIGETBSZ:
- case FICLONERANGE:
- case FIDEDUPERANGE:
- goto found_handler;
- /*
- * The next group is the stuff handled inside file_ioctl().
- * For regular files these never reach ->ioctl(); for
- * devices, sockets, etc. they do and one (FIONREAD) is
- * even accepted in some cases. In all those cases
- * argument has the same type, so we can handle these
- * here, shunting them towards do_vfs_ioctl().
- * ->compat_ioctl() will never see any of those.
- */
- /* pointer argument, never actually handled by ->ioctl() */
- case FIBMAP:
- goto found_handler;
- /* handled by some ->ioctl(); always a pointer to int */
- case FIONREAD:
- goto found_handler;
- /* these get messy on amd64 due to alignment differences */
-#if defined(CONFIG_X86_64)
- case FS_IOC_RESVSP_32:
- case FS_IOC_RESVSP64_32:
- error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
- goto out_fput;
- case FS_IOC_UNRESVSP_32:
- case FS_IOC_UNRESVSP64_32:
- error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
- compat_ptr(arg));
- goto out_fput;
- case FS_IOC_ZERO_RANGE_32:
- error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
- compat_ptr(arg));
- goto out_fput;
-#else
- case FS_IOC_RESVSP:
- case FS_IOC_RESVSP64:
- case FS_IOC_UNRESVSP:
- case FS_IOC_UNRESVSP64:
- case FS_IOC_ZERO_RANGE:
- goto found_handler;
-#endif
-
- default:
- if (f.file->f_op->compat_ioctl) {
- error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
- if (error != -ENOIOCTLCMD)
- goto out_fput;
- }
-
- if (!f.file->f_op->unlocked_ioctl)
- goto do_ioctl;
- break;
- }
-
- if (compat_ioctl_check_table(XFORM(cmd)))
- goto found_handler;
-
- error = do_ioctl_trans(cmd, arg, f.file);
- if (error == -ENOIOCTLCMD)
- error = -ENOTTY;
-
- goto out_fput;
-
- found_handler:
- arg = (unsigned long)compat_ptr(arg);
- do_ioctl:
- error = do_vfs_ioctl(f.file, fd, cmd, arg);
- out_fput:
- fdput(f);
- out:
- return error;
-}
-
-static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
-{
- unsigned int a, b;
- a = *(unsigned int *)p;
- b = *(unsigned int *)q;
- if (a > b)
- return 1;
- if (a < b)
- return -1;
- return 0;
-}
-
-static int __init init_sys32_ioctl(void)
-{
- sort(ioctl_pointer, ARRAY_SIZE(ioctl_pointer), sizeof(*ioctl_pointer),
- init_sys32_ioctl_cmp, NULL);
- return 0;
-}
-__initcall(init_sys32_ioctl);
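
For reference, the deleted table lookup is a plain sorted-array search over scrambled command numbers: XFORM() spreads the ioctl values out so an interpolated first guess lands close, and two short linear scans finish the job. A minimal user-space sketch of the same idea (the XFORM constant and search loops mirror the code above; the table contents are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* same reversible scramble as above: spreads values evenly */
    #define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)

    static uint32_t table[] = {  /* hypothetical pre-XFORMed ioctl numbers */
        XFORM(0x1260u), XFORM(0x5331u), XFORM(0x2285u), XFORM(0x5385u),
    };
    #define TABLE_LEN (sizeof(table) / sizeof(table[0]))

    static int cmp_u32(const void *p, const void *q)
    {
        uint32_t a = *(const uint32_t *)p, b = *(const uint32_t *)q;

        return (a > b) - (a < b);
    }

    static int check_table(uint32_t xcmd)
    {
        const int max = TABLE_LEN - 1;
        /* guess an initial index, assuming a uniform distribution */
        int i = (int)(((uint64_t)(xcmd >> 16) * max) >> 16);

        while (table[i] < xcmd && i < max)  /* linear search up... */
            i++;
        while (table[i] > xcmd && i > 0)    /* ...then back down */
            i--;
        return table[i] == xcmd;
    }

    int main(void)
    {
        qsort(table, TABLE_LEN, sizeof(table[0]), cmp_u32);
        printf("%d\n", check_table(XFORM(0x5331u)));  /* prints 1 */
        return 0;
    }
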
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 680aba9c00d5..fd0b5dd68f9e 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -76,14 +76,11 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
if (ia_valid & ATTR_GID)
sd_iattr->ia_gid = iattr->ia_gid;
if (ia_valid & ATTR_ATIME)
- sd_iattr->ia_atime = timestamp_truncate(iattr->ia_atime,
- inode);
+ sd_iattr->ia_atime = iattr->ia_atime;
if (ia_valid & ATTR_MTIME)
- sd_iattr->ia_mtime = timestamp_truncate(iattr->ia_mtime,
- inode);
+ sd_iattr->ia_mtime = iattr->ia_mtime;
if (ia_valid & ATTR_CTIME)
- sd_iattr->ia_ctime = timestamp_truncate(iattr->ia_ctime,
- inode);
+ sd_iattr->ia_ctime = iattr->ia_ctime;
if (ia_valid & ATTR_MODE) {
umode_t mode = iattr->ia_mode;
diff --git a/fs/coredump.c b/fs/coredump.c
index b1ea7dfbd149..f8296a82d01d 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -517,7 +517,7 @@ static void wait_for_dump_helpers(struct file *file)
pipe_lock(pipe);
pipe->readers++;
pipe->writers--;
- wake_up_interruptible_sync(&pipe->wait);
+ wake_up_interruptible_sync(&pipe->rd_wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
pipe_unlock(pipe);
@@ -525,7 +525,7 @@ static void wait_for_dump_helpers(struct file *file)
* We actually want wait_event_freezable() but then we need
* to clear TIF_SIGPENDING and improve dump_interrupted().
*/
- wait_event_interruptible(pipe->wait, pipe->readers == 1);
+ wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);
pipe_lock(pipe);
pipe->readers--;
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 2f04024c3588..912308600d39 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -534,7 +534,7 @@ static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
/* check for wrong endianness */
if (super->magic == CRAMFS_MAGIC_WEND) {
if (!silent)
- errorf(fc, "cramfs: wrong endianness");
+ errorfc(fc, "wrong endianness");
return -EINVAL;
}
@@ -546,22 +546,22 @@ static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
mutex_unlock(&read_mutex);
if (super->magic != CRAMFS_MAGIC) {
if (super->magic == CRAMFS_MAGIC_WEND && !silent)
- errorf(fc, "cramfs: wrong endianness");
+ errorfc(fc, "wrong endianness");
else if (!silent)
- errorf(fc, "cramfs: wrong magic");
+ errorfc(fc, "wrong magic");
return -EINVAL;
}
}
/* get feature flags first */
if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
- errorf(fc, "cramfs: unsupported filesystem features");
+ errorfc(fc, "unsupported filesystem features");
return -EINVAL;
}
/* Check that the root inode is in a sane state */
if (!S_ISDIR(super->root.mode)) {
- errorf(fc, "cramfs: root is not a directory");
+ errorfc(fc, "root is not a directory");
return -EINVAL;
}
/* correct strange, hard-coded permissions of mkcramfs */
@@ -580,12 +580,12 @@ static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
sbi->magic = super->magic;
sbi->flags = super->flags;
if (root_offset == 0)
- infof(fc, "cramfs: empty filesystem");
+ infofc(fc, "empty filesystem");
else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
((root_offset != sizeof(struct cramfs_super)) &&
(root_offset != 512 + sizeof(struct cramfs_super))))
{
- errorf(fc, "cramfs: bad root offset %lu", root_offset);
+ errorfc(fc, "bad root offset %lu", root_offset);
return -EINVAL;
}
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
index ff5a1746cbae..8046d7c7a3e9 100644
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -2,13 +2,8 @@
config FS_ENCRYPTION
bool "FS Encryption (Per-file encryption)"
select CRYPTO
- select CRYPTO_AES
- select CRYPTO_CBC
- select CRYPTO_ECB
- select CRYPTO_XTS
- select CRYPTO_CTS
- select CRYPTO_SHA512
- select CRYPTO_HMAC
+ select CRYPTO_HASH
+ select CRYPTO_SKCIPHER
select KEYS
help
Enable encryption of files and directories. This
@@ -16,3 +11,16 @@ config FS_ENCRYPTION
efficient since it avoids caching the encrypted and
decrypted pages in the page cache. Currently Ext4,
F2FS and UBIFS make use of this feature.
+
+# Filesystems supporting encryption must select this if FS_ENCRYPTION. This
+# allows the algorithms to be built as modules when all the filesystems are.
+config FS_ENCRYPTION_ALGS
+ tristate
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_CTS
+ select CRYPTO_ECB
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_XTS
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 1f4b8a277060..4fa18fff9c4e 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -41,53 +41,101 @@ void fscrypt_decrypt_bio(struct bio *bio)
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
+/**
+ * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
+ * @inode: the file's inode
+ * @lblk: the first file logical block to zero out
+ * @pblk: the first filesystem physical block to zero out
+ * @len: number of blocks to zero out
+ *
+ * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
+ * ciphertext blocks which decrypt to the all-zeroes block. The blocks must be
+ * both logically and physically contiguous. It's also assumed that the
+ * filesystem only uses a single block device, ->s_bdev.
+ *
+ * Note that since each block uses a different IV, this involves writing a
+ * different ciphertext to each block; we can't simply reuse the same one.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
- sector_t pblk, unsigned int len)
+ sector_t pblk, unsigned int len)
{
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocksize = 1 << blockbits;
- struct page *ciphertext_page;
+ const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
+ const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
+ struct page *pages[16]; /* write up to 16 pages at a time */
+ unsigned int nr_pages;
+ unsigned int i;
+ unsigned int offset;
struct bio *bio;
- int ret, err = 0;
+ int ret, err;
- ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
- if (!ciphertext_page)
- return -ENOMEM;
+ if (len == 0)
+ return 0;
- while (len--) {
- err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
- ZERO_PAGE(0), ciphertext_page,
- blocksize, 0, GFP_NOFS);
- if (err)
- goto errout;
+ BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_PAGES);
+ nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
+ (len + blocks_per_page - 1) >> blocks_per_page_bits);
- bio = bio_alloc(GFP_NOWAIT, 1);
- if (!bio) {
- err = -ENOMEM;
- goto errout;
- }
+ /*
+ * We need at least one page for ciphertext. Allocate the first one
+ * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail.
+ *
+ * Any additional page allocations are allowed to fail, as they only
+ * help performance, and waiting on the mempool for them could deadlock.
+ */
+ for (i = 0; i < nr_pages; i++) {
+ pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (!pages[i])
+ break;
+ }
+ nr_pages = i;
+ if (WARN_ON(nr_pages <= 0))
+ return -EINVAL;
+
+ /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
+ bio = bio_alloc(GFP_NOFS, nr_pages);
+
+ do {
bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
- if (WARN_ON(ret != blocksize)) {
- /* should never happen! */
- bio_put(bio);
- err = -EIO;
- goto errout;
- }
+
+ i = 0;
+ offset = 0;
+ do {
+ err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
+ ZERO_PAGE(0), pages[i],
+ blocksize, offset, GFP_NOFS);
+ if (err)
+ goto out;
+ lblk++;
+ pblk++;
+ len--;
+ offset += blocksize;
+ if (offset == PAGE_SIZE || len == 0) {
+ ret = bio_add_page(bio, pages[i++], offset, 0);
+ if (WARN_ON(ret != offset)) {
+ err = -EIO;
+ goto out;
+ }
+ offset = 0;
+ }
+ } while (i != nr_pages && len != 0);
+
err = submit_bio_wait(bio);
- if (err == 0 && bio->bi_status)
- err = -EIO;
- bio_put(bio);
if (err)
- goto errout;
- lblk++;
- pblk++;
- }
+ goto out;
+ bio_reset(bio);
+ } while (len != 0);
err = 0;
-errout:
- fscrypt_free_bounce_page(ciphertext_page);
+out:
+ bio_put(bio);
+ for (i = 0; i < nr_pages; i++)
+ fscrypt_free_bounce_page(pages[i]);
return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
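
The rewritten function batches up to 16 bounce pages into one bio, then resets and reuses the bio until all blocks are written. A stand-alone sketch of just the sizing arithmetic, assuming 4 KiB pages (names here are illustrative, not the kernel's):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assume 4 KiB pages */
    #define MAX_PAGES  16   /* matches the pages[16] array above */

    /* bounce pages needed to cover 'len' contiguous fs blocks */
    static unsigned int zeroout_nr_pages(unsigned int blockbits,
                                         unsigned int len)
    {
        unsigned int per_page_bits = PAGE_SHIFT - blockbits;
        unsigned int per_page = 1u << per_page_bits;
        unsigned int nr = (len + per_page - 1) >> per_page_bits;

        return nr < MAX_PAGES ? nr : MAX_PAGES;
    }

    int main(void)
    {
        /* 1024-byte blocks: 4 blocks fit in each page */
        printf("%u\n", zeroout_nr_pages(10, 3));   /* 1 */
        printf("%u\n", zeroout_nr_pages(10, 65));  /* 17 capped to 16, so the
                                                      bio is reset and reused */
        return 0;
    }
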
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 3719efa546c6..1ecaac7ee3cb 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -25,8 +25,6 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
-#include <linux/dcache.h>
-#include <linux/namei.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
@@ -140,7 +138,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
* multiple of the filesystem's block size.
* @offs: Byte offset within @page of the first block to encrypt. Must be
* a multiple of the filesystem's block size.
- * @gfp_flags: Memory allocation flags
+ * @gfp_flags: Memory allocation flags. See details below.
*
* A new bounce page is allocated, and the specified block(s) are encrypted into
* it. In the bounce page, the ciphertext block(s) will be located at the same
@@ -150,6 +148,11 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
*
* This is for use by the filesystem's ->writepages() method.
*
+ * The bounce page allocation is mempool-backed, so it will always succeed when
+ * @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS. However,
+ * only the first page of each bio can be allocated this way. To prevent
+ * deadlocks, for any additional pages a mask like GFP_NOWAIT must be used.
+ *
* Return: the new encrypted bounce page on success; an ERR_PTR() on failure
*/
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
@@ -286,54 +289,6 @@ int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
-/*
- * Validate dentries in encrypted directories to make sure we aren't potentially
- * caching stale dentries after a key has been added.
- */
-static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
-{
- struct dentry *dir;
- int err;
- int valid;
-
- /*
- * Plaintext names are always valid, since fscrypt doesn't support
- * reverting to ciphertext names without evicting the directory's inode
- * -- which implies eviction of the dentries in the directory.
- */
- if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
- return 1;
-
- /*
- * Ciphertext name; valid if the directory's key is still unavailable.
- *
- * Although fscrypt forbids rename() on ciphertext names, we still must
- * use dget_parent() here rather than use ->d_parent directly. That's
- * because a corrupted fs image may contain directory hard links, which
- * the VFS handles by moving the directory's dentry tree in the dcache
- * each time ->lookup() finds the directory and it already has a dentry
- * elsewhere. Thus ->d_parent can be changing, and we must safely grab
- * a reference to some ->d_parent to prevent it from being freed.
- */
-
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- dir = dget_parent(dentry);
- err = fscrypt_get_encryption_info(d_inode(dir));
- valid = !fscrypt_has_encryption_key(d_inode(dir));
- dput(dir);
-
- if (err < 0)
- return err;
-
- return valid;
-}
-
-const struct dentry_operations fscrypt_d_ops = {
- .d_revalidate = fscrypt_d_revalidate,
-};
-
/**
* fscrypt_initialize() - allocate major buffers for fs encryption.
* @cop_flags: fscrypt operations flags
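
The note added to fscrypt_encrypt_pagecache_blocks() amounts to a calling convention: per bio, only the first bounce page may block on the mempool, and every later one must use a non-waiting mask. A hedged kernel-side sketch of a ->writepages() helper that follows it (the function and its error handling are illustrative; a real caller would submit the bio and retry with GFP_NOFS when a GFP_NOWAIT allocation fails):

    #include <linux/bio.h>
    #include <linux/fscrypt.h>

    /* illustrative: encrypt nr pagecache pages into one bio */
    static int example_encrypt_pages(struct bio *bio, struct page **pages,
                                     unsigned int nr)
    {
        gfp_t gfp = GFP_NOFS;   /* first page: may wait on the mempool */
        unsigned int i;

        for (i = 0; i < nr; i++) {
            struct page *bounce =
                fscrypt_encrypt_pagecache_blocks(pages[i], PAGE_SIZE, 0, gfp);

            if (IS_ERR(bounce))
                return PTR_ERR(bounce);
            bio_add_page(bio, bounce, PAGE_SIZE, 0);
            /* any further pages in this bio must not wait */
            gfp = GFP_NOWAIT | __GFP_NOWARN;
        }
        return 0;
    }
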
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 3da3707c10e3..4c212442a8f7 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -11,10 +11,87 @@
* This has not yet undergone a rigorous security audit.
*/
+#include <linux/namei.h>
#include <linux/scatterlist.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
+/**
+ * struct fscrypt_nokey_name - identifier for directory entry when key is absent
+ *
+ * When userspace lists an encrypted directory without access to the key, the
+ * filesystem must present a unique "no-key name" for each filename that allows
+ * it to find the directory entry again if requested. Naively, that would just
+ * mean using the ciphertext filenames. However, since the ciphertext filenames
+ * can contain illegal characters ('\0' and '/'), they must be encoded in some
+ * way. We use base64. But that can cause names to exceed NAME_MAX (255
+ * bytes), so we also need to use a strong hash to abbreviate long names.
+ *
+ * The filesystem may also need another kind of hash, the "dirhash", to quickly
+ * find the directory entry. Since filesystems normally compute the dirhash
+ * over the on-disk filename (i.e. the ciphertext), it's not computable from
+ * no-key names that abbreviate the ciphertext using the strong hash to fit in
+ * NAME_MAX. It's also not computable if it's a keyed hash taken over the
+ * plaintext (but it may still be available in the on-disk directory entry);
+ * casefolded directories use this type of dirhash. At least in these cases,
+ * each no-key name must include the name's dirhash too.
+ *
+ * To meet all these requirements, we base64-encode the following
+ * variable-length structure. It contains the dirhash, or 0's if the filesystem
+ * didn't provide one; up to 149 bytes of the ciphertext name; and for
+ * ciphertexts longer than 149 bytes, also the SHA-256 of the remaining bytes.
+ *
+ * This ensures that each no-key name contains everything needed to find the
+ * directory entry again, contains only legal characters, doesn't exceed
+ * NAME_MAX, is unambiguous unless there's a SHA-256 collision, and that we only
+ * take the performance hit of SHA-256 on very long filenames (which are rare).
+ */
+struct fscrypt_nokey_name {
+ u32 dirhash[2];
+ u8 bytes[149];
+ u8 sha256[SHA256_DIGEST_SIZE];
+}; /* 189 bytes => 252 bytes base64-encoded, which is <= NAME_MAX (255) */
+
+/*
+ * Decoded size of max-size nokey name, i.e. a name that was abbreviated using
+ * the strong hash and thus includes the 'sha256' field. This isn't simply
+ * sizeof(struct fscrypt_nokey_name), as the padding at the end isn't included.
+ */
+#define FSCRYPT_NOKEY_NAME_MAX offsetofend(struct fscrypt_nokey_name, sha256)
+
+static struct crypto_shash *sha256_hash_tfm;
+
+static int fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result)
+{
+ struct crypto_shash *tfm = READ_ONCE(sha256_hash_tfm);
+
+ if (unlikely(!tfm)) {
+ struct crypto_shash *prev_tfm;
+
+ tfm = crypto_alloc_shash("sha256", 0, 0);
+ if (IS_ERR(tfm)) {
+ fscrypt_err(NULL,
+ "Error allocating SHA-256 transform: %ld",
+ PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ prev_tfm = cmpxchg(&sha256_hash_tfm, NULL, tfm);
+ if (prev_tfm) {
+ crypto_free_shash(tfm);
+ tfm = prev_tfm;
+ }
+ }
+ {
+ SHASH_DESC_ON_STACK(desc, tfm);
+
+ desc->tfm = tfm;
+
+ return crypto_shash_digest(desc, data, data_len, result);
+ }
+}
+
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
if (str->len == 1 && str->name[0] == '.')
@@ -27,19 +104,19 @@ static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
}
/**
- * fname_encrypt() - encrypt a filename
+ * fscrypt_fname_encrypt() - encrypt a filename
*
* The output buffer must be at least as large as the input buffer.
* Any extra space is filled with NUL padding before encryption.
*
* Return: 0 on success, -errno on failure
*/
-int fname_encrypt(struct inode *inode, const struct qstr *iname,
- u8 *out, unsigned int olen)
+int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
+ u8 *out, unsigned int olen)
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
- struct fscrypt_info *ci = inode->i_crypt_info;
+ const struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
union fscrypt_iv iv;
struct scatterlist sg;
@@ -85,14 +162,14 @@ int fname_encrypt(struct inode *inode, const struct qstr *iname,
*
* Return: 0 on success, -errno on failure
*/
-static int fname_decrypt(struct inode *inode,
- const struct fscrypt_str *iname,
- struct fscrypt_str *oname)
+static int fname_decrypt(const struct inode *inode,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
- struct fscrypt_info *ci = inode->i_crypt_info;
+ const struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
union fscrypt_iv iv;
int res;
@@ -206,9 +283,7 @@ int fscrypt_fname_alloc_buffer(const struct inode *inode,
u32 max_encrypted_len,
struct fscrypt_str *crypto_str)
{
- const u32 max_encoded_len =
- max_t(u32, BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE),
- 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name)));
+ const u32 max_encoded_len = BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX);
u32 max_presented_len;
max_presented_len = max(max_encoded_len, max_encrypted_len);
@@ -241,19 +316,21 @@ EXPORT_SYMBOL(fscrypt_fname_free_buffer);
*
* The caller must have allocated sufficient memory for the @oname string.
*
- * If the key is available, we'll decrypt the disk name; otherwise, we'll encode
- * it for presentation. Short names are directly base64-encoded, while long
- * names are encoded in fscrypt_digested_name format.
+ * If the key is available, we'll decrypt the disk name. Otherwise, we'll
+ * encode it for presentation in fscrypt_nokey_name format.
+ * See struct fscrypt_nokey_name for details.
*
* Return: 0 on success, -errno on failure
*/
-int fscrypt_fname_disk_to_usr(struct inode *inode,
- u32 hash, u32 minor_hash,
- const struct fscrypt_str *iname,
- struct fscrypt_str *oname)
+int fscrypt_fname_disk_to_usr(const struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
{
const struct qstr qname = FSTR_TO_QSTR(iname);
- struct fscrypt_digested_name digested_name;
+ struct fscrypt_nokey_name nokey_name;
+ u32 size; /* size of the unencoded no-key name */
+ int err;
if (fscrypt_is_dot_dotdot(&qname)) {
oname->name[0] = '.';
@@ -268,24 +345,37 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
if (fscrypt_has_encryption_key(inode))
return fname_decrypt(inode, iname, oname);
- if (iname->len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) {
- oname->len = base64_encode(iname->name, iname->len,
- oname->name);
- return 0;
- }
+ /*
+ * Sanity check that struct fscrypt_nokey_name doesn't have padding
+ * between fields and that its encoded size never exceeds NAME_MAX.
+ */
+ BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, dirhash) !=
+ offsetof(struct fscrypt_nokey_name, bytes));
+ BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, bytes) !=
+ offsetof(struct fscrypt_nokey_name, sha256));
+ BUILD_BUG_ON(BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX) > NAME_MAX);
+
if (hash) {
- digested_name.hash = hash;
- digested_name.minor_hash = minor_hash;
+ nokey_name.dirhash[0] = hash;
+ nokey_name.dirhash[1] = minor_hash;
+ } else {
+ nokey_name.dirhash[0] = 0;
+ nokey_name.dirhash[1] = 0;
+ }
+ if (iname->len <= sizeof(nokey_name.bytes)) {
+ memcpy(nokey_name.bytes, iname->name, iname->len);
+ size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]);
} else {
- digested_name.hash = 0;
- digested_name.minor_hash = 0;
+ memcpy(nokey_name.bytes, iname->name, sizeof(nokey_name.bytes));
+ /* Compute strong hash of remaining part of name. */
+ err = fscrypt_do_sha256(&iname->name[sizeof(nokey_name.bytes)],
+ iname->len - sizeof(nokey_name.bytes),
+ nokey_name.sha256);
+ if (err)
+ return err;
+ size = FSCRYPT_NOKEY_NAME_MAX;
}
- memcpy(digested_name.digest,
- FSCRYPT_FNAME_DIGEST(iname->name, iname->len),
- FSCRYPT_FNAME_DIGEST_SIZE);
- oname->name[0] = '_';
- oname->len = 1 + base64_encode((const u8 *)&digested_name,
- sizeof(digested_name), oname->name + 1);
+ oname->len = base64_encode((const u8 *)&nokey_name, size, oname->name);
return 0;
}
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
@@ -306,8 +396,7 @@ EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
* get the disk_name.
*
* Else, for keyless @lookup operations, @iname is the presented ciphertext, so
- * we decode it to get either the ciphertext disk_name (for short names) or the
- * fscrypt_digested_name (for long names). Non-@lookup operations will be
+ * we decode it to get the fscrypt_nokey_name. Non-@lookup operations will be
* impossible in this case, so we fail them with ENOKEY.
*
* If successful, fscrypt_free_filename() must be called later to clean up.
@@ -317,8 +406,8 @@ EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
int lookup, struct fscrypt_name *fname)
{
+ struct fscrypt_nokey_name *nokey_name;
int ret;
- int digested;
memset(fname, 0, sizeof(struct fscrypt_name));
fname->usr_fname = iname;
@@ -342,8 +431,8 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
if (!fname->crypto_buf.name)
return -ENOMEM;
- ret = fname_encrypt(dir, iname, fname->crypto_buf.name,
- fname->crypto_buf.len);
+ ret = fscrypt_fname_encrypt(dir, iname, fname->crypto_buf.name,
+ fname->crypto_buf.len);
if (ret)
goto errout;
fname->disk_name.name = fname->crypto_buf.name;
@@ -358,40 +447,31 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
* We don't have the key and we are doing a lookup; decode the
* user-supplied name
*/
- if (iname->name[0] == '_') {
- if (iname->len !=
- 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name)))
- return -ENOENT;
- digested = 1;
- } else {
- if (iname->len >
- BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE))
- return -ENOENT;
- digested = 0;
- }
- fname->crypto_buf.name =
- kmalloc(max_t(size_t, FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE,
- sizeof(struct fscrypt_digested_name)),
- GFP_KERNEL);
+ if (iname->len > BASE64_CHARS(FSCRYPT_NOKEY_NAME_MAX))
+ return -ENOENT;
+
+ fname->crypto_buf.name = kmalloc(FSCRYPT_NOKEY_NAME_MAX, GFP_KERNEL);
if (fname->crypto_buf.name == NULL)
return -ENOMEM;
- ret = base64_decode(iname->name + digested, iname->len - digested,
- fname->crypto_buf.name);
- if (ret < 0) {
+ ret = base64_decode(iname->name, iname->len, fname->crypto_buf.name);
+ if (ret < (int)offsetof(struct fscrypt_nokey_name, bytes[1]) ||
+ (ret > offsetof(struct fscrypt_nokey_name, sha256) &&
+ ret != FSCRYPT_NOKEY_NAME_MAX)) {
ret = -ENOENT;
goto errout;
}
fname->crypto_buf.len = ret;
- if (digested) {
- const struct fscrypt_digested_name *n =
- (const void *)fname->crypto_buf.name;
- fname->hash = n->hash;
- fname->minor_hash = n->minor_hash;
- } else {
- fname->disk_name.name = fname->crypto_buf.name;
- fname->disk_name.len = fname->crypto_buf.len;
+
+ nokey_name = (void *)fname->crypto_buf.name;
+ fname->hash = nokey_name->dirhash[0];
+ fname->minor_hash = nokey_name->dirhash[1];
+ if (ret != FSCRYPT_NOKEY_NAME_MAX) {
+ /* The full ciphertext filename is available. */
+ fname->disk_name.name = nokey_name->bytes;
+ fname->disk_name.len =
+ ret - offsetof(struct fscrypt_nokey_name, bytes);
}
return 0;
@@ -400,3 +480,109 @@ errout:
return ret;
}
EXPORT_SYMBOL(fscrypt_setup_filename);
+
+/**
+ * fscrypt_match_name() - test whether the given name matches a directory entry
+ * @fname: the name being searched for
+ * @de_name: the name from the directory entry
+ * @de_name_len: the length of @de_name in bytes
+ *
+ * Normally @fname->disk_name will be set, and in that case we simply compare
+ * that to the name stored in the directory entry. The only exception is that
+ * if we don't have the key for an encrypted directory and the name we're
+ * looking for is very long, then we won't have the full disk_name and instead
+ * we'll need to match against a fscrypt_nokey_name that includes a strong hash.
+ *
+ * Return: %true if the name matches, otherwise %false.
+ */
+bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ const struct fscrypt_nokey_name *nokey_name =
+ (const void *)fname->crypto_buf.name;
+ u8 sha256[SHA256_DIGEST_SIZE];
+
+ if (likely(fname->disk_name.name)) {
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, de_name_len);
+ }
+ if (de_name_len <= sizeof(nokey_name->bytes))
+ return false;
+ if (memcmp(de_name, nokey_name->bytes, sizeof(nokey_name->bytes)))
+ return false;
+ if (fscrypt_do_sha256(&de_name[sizeof(nokey_name->bytes)],
+ de_name_len - sizeof(nokey_name->bytes), sha256))
+ return false;
+ return !memcmp(sha256, nokey_name->sha256, sizeof(sha256));
+}
+EXPORT_SYMBOL_GPL(fscrypt_match_name);
+
+/**
+ * fscrypt_fname_siphash() - calculate the SipHash of a filename
+ * @dir: the parent directory
+ * @name: the filename to calculate the SipHash of
+ *
+ * Given a plaintext filename @name and a directory @dir which uses SipHash as
+ * its dirhash method and has had its fscrypt key set up, this function
+ * calculates the SipHash of that name using the directory's secret dirhash key.
+ *
+ * Return: the SipHash of @name using the hash key of @dir
+ */
+u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name)
+{
+ const struct fscrypt_info *ci = dir->i_crypt_info;
+
+ WARN_ON(!ci->ci_dirhash_key_initialized);
+
+ return siphash(name->name, name->len, &ci->ci_dirhash_key);
+}
+EXPORT_SYMBOL_GPL(fscrypt_fname_siphash);
+
+/*
+ * Validate dentries in encrypted directories to make sure we aren't potentially
+ * caching stale dentries after a key has been added.
+ */
+static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ struct dentry *dir;
+ int err;
+ int valid;
+
+ /*
+ * Plaintext names are always valid, since fscrypt doesn't support
+ * reverting to ciphertext names without evicting the directory's inode
+ * -- which implies eviction of the dentries in the directory.
+ */
+ if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
+ return 1;
+
+ /*
+ * Ciphertext name; valid if the directory's key is still unavailable.
+ *
+ * Although fscrypt forbids rename() on ciphertext names, we still must
+ * use dget_parent() here rather than use ->d_parent directly. That's
+ * because a corrupted fs image may contain directory hard links, which
+ * the VFS handles by moving the directory's dentry tree in the dcache
+ * each time ->lookup() finds the directory and it already has a dentry
+ * elsewhere. Thus ->d_parent can be changing, and we must safely grab
+ * a reference to some ->d_parent to prevent it from being freed.
+ */
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ dir = dget_parent(dentry);
+ err = fscrypt_get_encryption_info(d_inode(dir));
+ valid = !fscrypt_has_encryption_key(d_inode(dir));
+ dput(dir);
+
+ if (err < 0)
+ return err;
+
+ return valid;
+}
+
+const struct dentry_operations fscrypt_d_ops = {
+ .d_revalidate = fscrypt_d_revalidate,
+};
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 130b50e5a011..9aae851409e5 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -12,6 +12,7 @@
#define _FSCRYPT_PRIVATE_H
#include <linux/fscrypt.h>
+#include <linux/siphash.h>
#include <crypto/hash.h>
#define CONST_STRLEN(str) (sizeof(str) - 1)
@@ -136,12 +137,6 @@ fscrypt_policy_flags(const union fscrypt_policy *policy)
BUG();
}
-static inline bool
-fscrypt_is_direct_key_policy(const union fscrypt_policy *policy)
-{
- return fscrypt_policy_flags(policy) & FSCRYPT_POLICY_FLAG_DIRECT_KEY;
-}
-
/**
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
@@ -194,6 +189,14 @@ struct fscrypt_info {
*/
struct fscrypt_direct_key *ci_direct_key;
+ /*
+ * This inode's hash key for filenames. This is a 128-bit SipHash-2-4
+ * key. This is only set for directories that use a keyed dirhash over
+ * the plaintext filenames -- currently just casefolded directories.
+ */
+ siphash_key_t ci_dirhash_key;
+ bool ci_dirhash_key_initialized;
+
/* The encryption policy used by this inode */
union fscrypt_policy ci_policy;
@@ -206,24 +209,6 @@ typedef enum {
FS_ENCRYPT,
} fscrypt_direction_t;
-static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
- u32 filenames_mode)
-{
- if (contents_mode == FSCRYPT_MODE_AES_128_CBC &&
- filenames_mode == FSCRYPT_MODE_AES_128_CTS)
- return true;
-
- if (contents_mode == FSCRYPT_MODE_AES_256_XTS &&
- filenames_mode == FSCRYPT_MODE_AES_256_CTS)
- return true;
-
- if (contents_mode == FSCRYPT_MODE_ADIANTUM &&
- filenames_mode == FSCRYPT_MODE_ADIANTUM)
- return true;
-
- return false;
-}
-
/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
extern int fscrypt_initialize(unsigned int cop_flags);
@@ -233,7 +218,6 @@ extern int fscrypt_crypt_block(const struct inode *inode,
unsigned int len, unsigned int offs,
gfp_t gfp_flags);
extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
-extern const struct dentry_operations fscrypt_d_ops;
extern void __printf(3, 4) __cold
fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...);
@@ -260,11 +244,13 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
const struct fscrypt_info *ci);
/* fname.c */
-extern int fname_encrypt(struct inode *inode, const struct qstr *iname,
- u8 *out, unsigned int olen);
+extern int fscrypt_fname_encrypt(const struct inode *inode,
+ const struct qstr *iname,
+ u8 *out, unsigned int olen);
extern bool fscrypt_fname_encrypted_size(const struct inode *inode,
u32 orig_len, u32 max_len,
u32 *encrypted_len_ret);
+extern const struct dentry_operations fscrypt_d_ops;
/* hkdf.c */
@@ -283,11 +269,12 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
* output doesn't reveal another.
*/
#define HKDF_CONTEXT_KEY_IDENTIFIER 1
-#define HKDF_CONTEXT_PER_FILE_KEY 2
+#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2
#define HKDF_CONTEXT_DIRECT_KEY 3
#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4
+#define HKDF_CONTEXT_DIRHASH_KEY 5
-extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context,
+extern int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context,
const u8 *info, unsigned int infolen,
u8 *okm, unsigned int okmlen);
@@ -448,18 +435,17 @@ struct fscrypt_mode {
int logged_impl_name;
};
-static inline bool
-fscrypt_mode_supports_direct_key(const struct fscrypt_mode *mode)
-{
- return mode->ivsize >= offsetofend(union fscrypt_iv, nonce);
-}
+extern struct fscrypt_mode fscrypt_modes[];
extern struct crypto_skcipher *
fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key,
const struct inode *inode);
-extern int fscrypt_set_derived_key(struct fscrypt_info *ci,
- const u8 *derived_key);
+extern int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci,
+ const u8 *raw_key);
+
+extern int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
+ const struct fscrypt_master_key *mk);
/* keysetup_v1.c */
diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c
index f21873e1b467..efb95bd19a89 100644
--- a/fs/crypto/hkdf.c
+++ b/fs/crypto/hkdf.c
@@ -112,7 +112,7 @@ out:
* adds to its application-specific info strings to guarantee that it doesn't
* accidentally repeat an info string when using HKDF for different purposes.)
*/
-int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context,
+int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context,
const u8 *info, unsigned int infolen,
u8 *okm, unsigned int okmlen)
{
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index bb3b7fcfdd48..5ef861742921 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -5,6 +5,8 @@
* Encryption hooks for higher-level filesystem operations.
*/
+#include <linux/key.h>
+
#include "fscrypt_private.h"
/**
@@ -122,6 +124,48 @@ int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
}
EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup);
+/**
+ * fscrypt_prepare_setflags() - prepare to change flags with FS_IOC_SETFLAGS
+ * @inode: the inode on which flags are being changed
+ * @oldflags: the old flags
+ * @flags: the new flags
+ *
+ * The caller should be holding i_rwsem for write.
+ *
+ * Return: 0 on success; -errno if the flags change isn't allowed or if
+ * another error occurs.
+ */
+int fscrypt_prepare_setflags(struct inode *inode,
+ unsigned int oldflags, unsigned int flags)
+{
+ struct fscrypt_info *ci;
+ struct fscrypt_master_key *mk;
+ int err;
+
+ /*
+ * When the CASEFOLD flag is set on an encrypted directory, we must
+ * derive the secret key needed for the dirhash. This is only possible
+ * if the directory uses a v2 encryption policy.
+ */
+ if (IS_ENCRYPTED(inode) && (flags & ~oldflags & FS_CASEFOLD_FL)) {
+ err = fscrypt_require_key(inode);
+ if (err)
+ return err;
+ ci = inode->i_crypt_info;
+ if (ci->ci_policy.version != FSCRYPT_POLICY_V2)
+ return -EINVAL;
+ mk = ci->ci_master_key->payload.data[0];
+ down_read(&mk->mk_secret_sem);
+ if (is_master_key_secret_present(&mk->mk_secret))
+ err = fscrypt_derive_dirhash_key(ci, mk);
+ else
+ err = -ENOKEY;
+ up_read(&mk->mk_secret_sem);
+ return err;
+ }
+ return 0;
+}
+
int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link)
@@ -188,7 +232,8 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
ciphertext_len = disk_link->len - sizeof(*sd);
sd->len = cpu_to_le16(ciphertext_len);
- err = fname_encrypt(inode, &iname, sd->encrypted_path, ciphertext_len);
+ err = fscrypt_fname_encrypt(inode, &iname, sd->encrypted_path,
+ ciphertext_len);
if (err)
goto err_free_sd;
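
fscrypt_prepare_setflags() fires on the transition that newly sets FS_CASEFOLD_FL on an encrypted directory. From user space the trigger is the plain FS_IOC_SETFLAGS ioctl; a hedged sketch (error handling trimmed; the kernel reads and writes these flags as an int):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>  /* FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_CASEFOLD_FL */

    int main(int argc, char **argv)
    {
        int fd, flags;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY | O_DIRECTORY)) < 0)
            return 1;
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
            flags |= FS_CASEFOLD_FL;
            /*
             * On an encrypted directory this now requires the key to be
             * present and a v2 policy so the dirhash key can be derived;
             * otherwise it fails with ENOKEY or EINVAL per the hook above.
             */
            if (ioctl(fd, FS_IOC_SETFLAGS, &flags) != 0)
                perror("FS_IOC_SETFLAGS");
        }
        close(fd);
        return 0;
    }
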
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 40cca351273f..ab41b25d4fa1 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -465,6 +465,109 @@ out_unlock:
return err;
}
+static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep)
+{
+ const struct fscrypt_provisioning_key_payload *payload = prep->data;
+
+ if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE ||
+ prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE)
+ return -EINVAL;
+
+ if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
+ payload->type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER)
+ return -EINVAL;
+
+ if (payload->__reserved)
+ return -EINVAL;
+
+ prep->payload.data[0] = kmemdup(payload, prep->datalen, GFP_KERNEL);
+ if (!prep->payload.data[0])
+ return -ENOMEM;
+
+ prep->quotalen = prep->datalen;
+ return 0;
+}
+
+static void fscrypt_provisioning_key_free_preparse(
+ struct key_preparsed_payload *prep)
+{
+ kzfree(prep->payload.data[0]);
+}
+
+static void fscrypt_provisioning_key_describe(const struct key *key,
+ struct seq_file *m)
+{
+ seq_puts(m, key->description);
+ if (key_is_positive(key)) {
+ const struct fscrypt_provisioning_key_payload *payload =
+ key->payload.data[0];
+
+ seq_printf(m, ": %u [%u]", key->datalen, payload->type);
+ }
+}
+
+static void fscrypt_provisioning_key_destroy(struct key *key)
+{
+ kzfree(key->payload.data[0]);
+}
+
+static struct key_type key_type_fscrypt_provisioning = {
+ .name = "fscrypt-provisioning",
+ .preparse = fscrypt_provisioning_key_preparse,
+ .free_preparse = fscrypt_provisioning_key_free_preparse,
+ .instantiate = generic_key_instantiate,
+ .describe = fscrypt_provisioning_key_describe,
+ .destroy = fscrypt_provisioning_key_destroy,
+};
+
+/*
+ * Retrieve the raw key from the Linux keyring key specified by 'key_id', and
+ * store it into 'secret'.
+ *
+ * The key must be of type "fscrypt-provisioning" and must have the field
+ * fscrypt_provisioning_key_payload::type set to 'type', indicating that it's
+ * only usable with fscrypt with the particular KDF version identified by
+ * 'type'. We don't use the "logon" key type because there's no way to
+ * completely restrict the use of such keys; they can be used by any kernel API
+ * that accepts "logon" keys and doesn't require a specific service prefix.
+ *
+ * The ability to specify the key via Linux keyring key is intended for cases
+ * where userspace needs to re-add keys after the filesystem is unmounted and
+ * re-mounted. Most users should just provide the raw key directly instead.
+ */
+static int get_keyring_key(u32 key_id, u32 type,
+ struct fscrypt_master_key_secret *secret)
+{
+ key_ref_t ref;
+ struct key *key;
+ const struct fscrypt_provisioning_key_payload *payload;
+ int err;
+
+ ref = lookup_user_key(key_id, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(ref))
+ return PTR_ERR(ref);
+ key = key_ref_to_ptr(ref);
+
+ if (key->type != &key_type_fscrypt_provisioning)
+ goto bad_key;
+ payload = key->payload.data[0];
+
+ /* Don't allow fscrypt v1 keys to be used as v2 keys and vice versa. */
+ if (payload->type != type)
+ goto bad_key;
+
+ secret->size = key->datalen - sizeof(*payload);
+ memcpy(secret->raw, payload->raw, secret->size);
+ err = 0;
+ goto out_put;
+
+bad_key:
+ err = -EKEYREJECTED;
+out_put:
+ key_ref_put(ref);
+ return err;
+}
+
/*
* Add a master encryption key to the filesystem, causing all files which were
* encrypted with it to appear "unlocked" (decrypted) when accessed.
@@ -503,18 +606,25 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg)
if (!valid_key_spec(&arg.key_spec))
return -EINVAL;
- if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE ||
- arg.raw_size > FSCRYPT_MAX_KEY_SIZE)
- return -EINVAL;
-
if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved)))
return -EINVAL;
memset(&secret, 0, sizeof(secret));
- secret.size = arg.raw_size;
- err = -EFAULT;
- if (copy_from_user(secret.raw, uarg->raw, secret.size))
- goto out_wipe_secret;
+ if (arg.key_id) {
+ if (arg.raw_size != 0)
+ return -EINVAL;
+ err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret);
+ if (err)
+ goto out_wipe_secret;
+ } else {
+ if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE ||
+ arg.raw_size > FSCRYPT_MAX_KEY_SIZE)
+ return -EINVAL;
+ secret.size = arg.raw_size;
+ err = -EFAULT;
+ if (copy_from_user(secret.raw, uarg->raw, secret.size))
+ goto out_wipe_secret;
+ }
switch (arg.key_spec.type) {
case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR:
@@ -666,9 +776,6 @@ static int check_for_busy_inodes(struct super_block *sb,
struct list_head *pos;
size_t busy_count = 0;
unsigned long ino;
- struct dentry *dentry;
- char _path[256];
- char *path = NULL;
spin_lock(&mk->mk_decrypted_inodes_lock);
@@ -687,22 +794,14 @@ static int check_for_busy_inodes(struct super_block *sb,
struct fscrypt_info,
ci_master_key_link)->ci_inode;
ino = inode->i_ino;
- dentry = d_find_alias(inode);
}
spin_unlock(&mk->mk_decrypted_inodes_lock);
- if (dentry) {
- path = dentry_path(dentry, _path, sizeof(_path));
- dput(dentry);
- }
- if (IS_ERR_OR_NULL(path))
- path = "(unknown)";
-
fscrypt_warn(NULL,
- "%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu (%s)",
+ "%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu",
sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec),
master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u,
- ino, path);
+ ino);
return -EBUSY;
}
@@ -978,8 +1077,14 @@ int __init fscrypt_init_keyring(void)
if (err)
goto err_unregister_fscrypt;
+ err = register_key_type(&key_type_fscrypt_provisioning);
+ if (err)
+ goto err_unregister_fscrypt_user;
+
return 0;
+err_unregister_fscrypt_user:
+ unregister_key_type(&key_type_fscrypt_user);
err_unregister_fscrypt:
unregister_key_type(&key_type_fscrypt);
return err;
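
The intended flow for the new "fscrypt-provisioning" key type is: stash the raw key in the kernel keyring once, then pass the returned serial in fscrypt_add_key_arg::key_id with raw_size left at 0, as the ioctl now checks. A hedged user-space sketch using the keyutils add_key() wrapper (the payload struct and type value 2, FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER, come from this patch; both are redeclared locally to avoid depending on updated uapi headers):

    #include <string.h>
    #include <stdint.h>
    #include <keyutils.h>

    #define FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER 2  /* v2 policy keys */

    struct fscrypt_provisioning_key_payload {   /* layout from this patch */
        uint32_t type;
        uint32_t __reserved;
        uint8_t raw[];
    };

    /* returns the key serial to put in fscrypt_add_key_arg::key_id */
    static key_serial_t stash_key(const uint8_t *raw, size_t size /* <= 64 */)
    {
        uint8_t buf[sizeof(struct fscrypt_provisioning_key_payload) + 64] = { 0 };
        struct fscrypt_provisioning_key_payload *p = (void *)buf;

        p->type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
        memcpy(p->raw, raw, size);
        return add_key("fscrypt-provisioning", "example-desc", buf,
                       sizeof(*p) + size, KEY_SPEC_SESSION_KEYRING);
    }
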
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index f577bb6613f9..65cb09fa6ead 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -13,7 +13,7 @@
#include "fscrypt_private.h"
-static struct fscrypt_mode available_modes[] = {
+struct fscrypt_mode fscrypt_modes[] = {
[FSCRYPT_MODE_AES_256_XTS] = {
.friendly_name = "AES-256-XTS",
.cipher_str = "xts(aes)",
@@ -51,10 +51,10 @@ select_encryption_mode(const union fscrypt_policy *policy,
const struct inode *inode)
{
if (S_ISREG(inode->i_mode))
- return &available_modes[fscrypt_policy_contents_mode(policy)];
+ return &fscrypt_modes[fscrypt_policy_contents_mode(policy)];
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- return &available_modes[fscrypt_policy_fnames_mode(policy)];
+ return &fscrypt_modes[fscrypt_policy_fnames_mode(policy)];
WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
inode->i_ino, (inode->i_mode & S_IFMT));
@@ -89,8 +89,11 @@ struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode,
* first time a mode is used.
*/
pr_info("fscrypt: %s using implementation \"%s\"\n",
- mode->friendly_name,
- crypto_skcipher_alg(tfm)->base.cra_driver_name);
+ mode->friendly_name, crypto_skcipher_driver_name(tfm));
+ }
+ if (WARN_ON(crypto_skcipher_ivsize(tfm) != mode->ivsize)) {
+ err = -EINVAL;
+ goto err_free_tfm;
}
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize);
@@ -104,12 +107,12 @@ err_free_tfm:
return ERR_PTR(err);
}
-/* Given the per-file key, set up the file's crypto transform object */
-int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
+/* Given a per-file encryption key, set up the file's crypto transform object */
+int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key)
{
struct crypto_skcipher *tfm;
- tfm = fscrypt_allocate_skcipher(ci->ci_mode, derived_key, ci->ci_inode);
+ tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
@@ -118,15 +121,15 @@ int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
return 0;
}
-static int setup_per_mode_key(struct fscrypt_info *ci,
- struct fscrypt_master_key *mk,
- struct crypto_skcipher **tfms,
- u8 hkdf_context, bool include_fs_uuid)
+static int setup_per_mode_enc_key(struct fscrypt_info *ci,
+ struct fscrypt_master_key *mk,
+ struct crypto_skcipher **tfms,
+ u8 hkdf_context, bool include_fs_uuid)
{
const struct inode *inode = ci->ci_inode;
const struct super_block *sb = inode->i_sb;
struct fscrypt_mode *mode = ci->ci_mode;
- u8 mode_num = mode - available_modes;
+ const u8 mode_num = mode - fscrypt_modes;
struct crypto_skcipher *tfm, *prev_tfm;
u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
@@ -171,29 +174,37 @@ done:
return 0;
}
+int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
+ const struct fscrypt_master_key *mk)
+{
+ int err;
+
+ err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY,
+ ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE,
+ (u8 *)&ci->ci_dirhash_key,
+ sizeof(ci->ci_dirhash_key));
+ if (err)
+ return err;
+ ci->ci_dirhash_key_initialized = true;
+ return 0;
+}
+
static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
struct fscrypt_master_key *mk)
{
- u8 derived_key[FSCRYPT_MAX_KEY_SIZE];
int err;
if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
/*
- * DIRECT_KEY: instead of deriving per-file keys, the per-file
- * nonce will be included in all the IVs. But unlike v1
- * policies, for v2 policies in this case we don't encrypt with
- * the master key directly but rather derive a per-mode key.
- * This ensures that the master key is consistently used only
- * for HKDF, avoiding key reuse issues.
+ * DIRECT_KEY: instead of deriving per-file encryption keys, the
+ * per-file nonce will be included in all the IVs. But unlike
+ * v1 policies, for v2 policies in this case we don't encrypt
+ * with the master key directly but rather derive a per-mode
+ * encryption key. This ensures that the master key is
+ * consistently used only for HKDF, avoiding key reuse issues.
*/
- if (!fscrypt_mode_supports_direct_key(ci->ci_mode)) {
- fscrypt_warn(ci->ci_inode,
- "Direct key flag not allowed with %s",
- ci->ci_mode->friendly_name);
- return -EINVAL;
- }
- return setup_per_mode_key(ci, mk, mk->mk_direct_tfms,
- HKDF_CONTEXT_DIRECT_KEY, false);
+ err = setup_per_mode_enc_key(ci, mk, mk->mk_direct_tfms,
+ HKDF_CONTEXT_DIRECT_KEY, false);
} else if (ci->ci_policy.v2.flags &
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
/*
@@ -202,21 +213,34 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
* the IVs. This format is optimized for use with inline
* encryption hardware compliant with the UFS or eMMC standards.
*/
- return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms,
- HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
- true);
+ err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms,
+ HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
+ true);
+ } else {
+ u8 derived_key[FSCRYPT_MAX_KEY_SIZE];
+
+ err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
+ HKDF_CONTEXT_PER_FILE_ENC_KEY,
+ ci->ci_nonce,
+ FS_KEY_DERIVATION_NONCE_SIZE,
+ derived_key, ci->ci_mode->keysize);
+ if (err)
+ return err;
+
+ err = fscrypt_set_per_file_enc_key(ci, derived_key);
+ memzero_explicit(derived_key, ci->ci_mode->keysize);
}
-
- err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
- HKDF_CONTEXT_PER_FILE_KEY,
- ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE,
- derived_key, ci->ci_mode->keysize);
if (err)
return err;
- err = fscrypt_set_derived_key(ci, derived_key);
- memzero_explicit(derived_key, ci->ci_mode->keysize);
- return err;
+ /* Derive a secret dirhash key for directories that need it. */
+ if (S_ISDIR(ci->ci_inode->i_mode) && IS_CASEFOLDED(ci->ci_inode)) {
+ err = fscrypt_derive_dirhash_key(ci, mk);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
/*
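
Every v2 subkey above (per-file, per-mode, DIRECT_KEY, IV_INO_LBLK_64, dirhash) comes out of the same HKDF-SHA512 master secret, kept apart only by the context byte and application info. A sketch of the info layout, assuming the "fscrypt\0" prefix that fscrypt_hkdf_expand() prepends in hkdf.c (not shown in this hunk):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define HKDF_CONTEXT_PER_FILE_ENC_KEY 2
    #define HKDF_CONTEXT_DIRHASH_KEY      5
    #define NONCE_SIZE                    16  /* FS_KEY_DERIVATION_NONCE_SIZE */

    /* full HKDF-Expand info = "fscrypt\0" || context byte || caller info */
    static size_t build_hkdf_info(uint8_t context, const uint8_t *nonce,
                                  uint8_t *out)
    {
        memcpy(out, "fscrypt\0", 8);         /* 8 bytes, including the NUL */
        out[8] = context;
        memcpy(out + 9, nonce, NONCE_SIZE);  /* per-file nonce as info */
        return 9 + NONCE_SIZE;
    }

    int main(void)
    {
        uint8_t nonce[NONCE_SIZE] = { 0 }, info[9 + NONCE_SIZE];

        printf("%zu\n", build_hkdf_info(HKDF_CONTEXT_DIRHASH_KEY, nonce, info));
        return 0;  /* prints 25; distinct per context byte and nonce */
    }
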
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index 5298ef22aa85..801b48c0cd7f 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -9,7 +9,7 @@
* This file implements compatibility functions for the original encryption
* policy version ("v1"), including:
*
- * - Deriving per-file keys using the AES-128-ECB based KDF
+ * - Deriving per-file encryption keys using the AES-128-ECB based KDF
* (rather than the new method of using HKDF-SHA512)
*
* - Retrieving fscrypt master keys from process-subscribed keyrings
@@ -253,23 +253,8 @@ err_free_dk:
static int setup_v1_file_key_direct(struct fscrypt_info *ci,
const u8 *raw_master_key)
{
- const struct fscrypt_mode *mode = ci->ci_mode;
struct fscrypt_direct_key *dk;
- if (!fscrypt_mode_supports_direct_key(mode)) {
- fscrypt_warn(ci->ci_inode,
- "Direct key mode not allowed with %s",
- mode->friendly_name);
- return -EINVAL;
- }
-
- if (ci->ci_policy.v1.contents_encryption_mode !=
- ci->ci_policy.v1.filenames_encryption_mode) {
- fscrypt_warn(ci->ci_inode,
- "Direct key mode not allowed with different contents and filenames modes");
- return -EINVAL;
- }
-
dk = fscrypt_get_direct_key(ci, raw_master_key);
if (IS_ERR(dk))
return PTR_ERR(dk);
@@ -298,7 +283,7 @@ static int setup_v1_file_key_derived(struct fscrypt_info *ci,
if (err)
goto out;
- err = fscrypt_set_derived_key(ci, derived_key);
+ err = fscrypt_set_per_file_enc_key(ci, derived_key);
out:
kzfree(derived_key);
return err;

diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 96f528071bed..cf2a9d26ef7d 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -29,6 +29,43 @@ bool fscrypt_policies_equal(const union fscrypt_policy *policy1,
return !memcmp(policy1, policy2, fscrypt_policy_size(policy1));
}
+static bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode)
+{
+ if (contents_mode == FSCRYPT_MODE_AES_256_XTS &&
+ filenames_mode == FSCRYPT_MODE_AES_256_CTS)
+ return true;
+
+ if (contents_mode == FSCRYPT_MODE_AES_128_CBC &&
+ filenames_mode == FSCRYPT_MODE_AES_128_CTS)
+ return true;
+
+ if (contents_mode == FSCRYPT_MODE_ADIANTUM &&
+ filenames_mode == FSCRYPT_MODE_ADIANTUM)
+ return true;
+
+ return false;
+}
+
+static bool supported_direct_key_modes(const struct inode *inode,
+ u32 contents_mode, u32 filenames_mode)
+{
+ const struct fscrypt_mode *mode;
+
+ if (contents_mode != filenames_mode) {
+ fscrypt_warn(inode,
+ "Direct key flag not allowed with different contents and filenames modes");
+ return false;
+ }
+ mode = &fscrypt_modes[contents_mode];
+
+ if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) {
+ fscrypt_warn(inode, "Direct key flag not allowed with %s",
+ mode->friendly_name);
+ return false;
+ }
+ return true;
+}
+
static bool supported_iv_ino_lblk_64_policy(
const struct fscrypt_policy_v2 *policy,
const struct inode *inode)
@@ -63,13 +100,82 @@ static bool supported_iv_ino_lblk_64_policy(
return true;
}
+static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy,
+ const struct inode *inode)
+{
+ if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
+ policy->filenames_encryption_mode)) {
+ fscrypt_warn(inode,
+ "Unsupported encryption modes (contents %d, filenames %d)",
+ policy->contents_encryption_mode,
+ policy->filenames_encryption_mode);
+ return false;
+ }
+
+ if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
+ FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
+ fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)",
+ policy->flags);
+ return false;
+ }
+
+ if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) &&
+ !supported_direct_key_modes(inode, policy->contents_encryption_mode,
+ policy->filenames_encryption_mode))
+ return false;
+
+ if (IS_CASEFOLDED(inode)) {
+ /* With v1, there's no way to derive dirhash keys. */
+ fscrypt_warn(inode,
+ "v1 policies can't be used on casefolded directories");
+ return false;
+ }
+
+ return true;
+}
+
+static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
+ const struct inode *inode)
+{
+ if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
+ policy->filenames_encryption_mode)) {
+ fscrypt_warn(inode,
+ "Unsupported encryption modes (contents %d, filenames %d)",
+ policy->contents_encryption_mode,
+ policy->filenames_encryption_mode);
+ return false;
+ }
+
+ if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
+ fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)",
+ policy->flags);
+ return false;
+ }
+
+ if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) &&
+ !supported_direct_key_modes(inode, policy->contents_encryption_mode,
+ policy->filenames_encryption_mode))
+ return false;
+
+ if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
+ !supported_iv_ino_lblk_64_policy(policy, inode))
+ return false;
+
+ if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
+ fscrypt_warn(inode, "Reserved bits set in encryption policy");
+ return false;
+ }
+
+ return true;
+}
+
/**
* fscrypt_supported_policy - check whether an encryption policy is supported
*
* Given an encryption policy, check whether all its encryption modes and other
- * settings are supported by this kernel. (But we don't currently don't check
- * for crypto API support here, so attempting to use an algorithm not configured
- * into the crypto API will still fail later.)
+ * settings are supported by this kernel on the given inode. (But we don't
+ * currently check for crypto API support here, so attempting to use an
+ * algorithm not configured into the crypto API will still fail later.)
*
* Return: %true if supported, else %false
*/
@@ -77,60 +183,10 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
const struct inode *inode)
{
switch (policy_u->version) {
- case FSCRYPT_POLICY_V1: {
- const struct fscrypt_policy_v1 *policy = &policy_u->v1;
-
- if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
- policy->filenames_encryption_mode)) {
- fscrypt_warn(inode,
- "Unsupported encryption modes (contents %d, filenames %d)",
- policy->contents_encryption_mode,
- policy->filenames_encryption_mode);
- return false;
- }
-
- if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
- FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
- fscrypt_warn(inode,
- "Unsupported encryption flags (0x%02x)",
- policy->flags);
- return false;
- }
-
- return true;
- }
- case FSCRYPT_POLICY_V2: {
- const struct fscrypt_policy_v2 *policy = &policy_u->v2;
-
- if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode,
- policy->filenames_encryption_mode)) {
- fscrypt_warn(inode,
- "Unsupported encryption modes (contents %d, filenames %d)",
- policy->contents_encryption_mode,
- policy->filenames_encryption_mode);
- return false;
- }
-
- if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
- fscrypt_warn(inode,
- "Unsupported encryption flags (0x%02x)",
- policy->flags);
- return false;
- }
-
- if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
- !supported_iv_ino_lblk_64_policy(policy, inode))
- return false;
-
- if (memchr_inv(policy->__reserved, 0,
- sizeof(policy->__reserved))) {
- fscrypt_warn(inode,
- "Reserved bits set in encryption policy");
- return false;
- }
-
- return true;
- }
+ case FSCRYPT_POLICY_V1:
+ return fscrypt_supported_v1_policy(&policy_u->v1, inode);
+ case FSCRYPT_POLICY_V2:
+ return fscrypt_supported_v2_policy(&policy_u->v2, inode);
}
return false;
}
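
These checks run when FS_IOC_SET_ENCRYPTION_POLICY installs a policy. For reference, a hedged user-space sketch of a v2 policy that passes fscrypt_supported_v2_policy() (struct and constants from the uapi header linux/fscrypt.h; the 16-byte identifier is the one FS_IOC_ADD_ENCRYPTION_KEY returned):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/fscrypt.h>

    int set_v2_policy(int dirfd, const unsigned char identifier[16])
    {
        struct fscrypt_policy_v2 p;

        memset(&p, 0, sizeof(p));  /* __reserved must stay zero */
        p.version = FSCRYPT_POLICY_V2;
        p.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
        p.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;  /* valid pair */
        p.flags = FSCRYPT_POLICY_FLAGS_PAD_32;
        memcpy(p.master_key_identifier, identifier, 16);
        return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &p);
    }
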
diff --git a/fs/dax.c b/fs/dax.c
index 1f1f0201cad1..35da144375a0 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -937,12 +937,11 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
* on persistent storage prior to completion of the operation.
*/
int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc)
+ struct dax_device *dax_dev, struct writeback_control *wbc)
{
XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
struct inode *inode = mapping->host;
pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
- struct dax_device *dax_dev;
void *entry;
int ret = 0;
unsigned int scanned = 0;
@@ -953,10 +952,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
return 0;
- dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
- if (!dax_dev)
- return -EIO;
-
trace_dax_writeback_range(inode, xas.xa_index, end_index);
tag_pages_for_writeback(mapping, xas.xa_index, end_index);
@@ -977,7 +972,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
xas_lock_irq(&xas);
}
xas_unlock_irq(&xas);
- put_dax(dax_dev);
trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
return ret;
}
@@ -1207,6 +1201,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
lockdep_assert_held(&inode->i_rwsem);
}
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ flags |= IOMAP_NOWAIT;
+
while (iov_iter_count(iter)) {
ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
iter, dax_iomap_actor);
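
With the IOCB_NOWAIT hunk above, an RWF_NOWAIT write to a DAX file now
reaches the iomap layer with IOMAP_NOWAIT set. A hedged userspace sketch of
exercising that path (plain pwritev2(2); nothing below comes from the patch
itself):

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>

/*
 * Sketch: issue a write that must not block on locks or allocation.
 * On a DAX mapping the request fails with EAGAIN instead of waiting.
 */
static ssize_t write_nowait(int fd, const void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };

	return pwritev2(fd, &iov, 1, off, RWF_NOWAIT);
}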
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index dede25247b81..634b09d18b77 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -142,18 +142,21 @@ EXPORT_SYMBOL_GPL(debugfs_file_put);
* We also need to exclude any file that has ways to write or alter it as root
* can bypass the permissions check.
*/
-static bool debugfs_is_locked_down(struct inode *inode,
- struct file *filp,
- const struct file_operations *real_fops)
+static int debugfs_locked_down(struct inode *inode,
+ struct file *filp,
+ const struct file_operations *real_fops)
{
if ((inode->i_mode & 07777) == 0444 &&
!(filp->f_mode & FMODE_WRITE) &&
!real_fops->unlocked_ioctl &&
!real_fops->compat_ioctl &&
!real_fops->mmap)
- return false;
+ return 0;
- return security_locked_down(LOCKDOWN_DEBUGFS);
+ if (security_locked_down(LOCKDOWN_DEBUGFS))
+ return -EPERM;
+
+ return 0;
}
static int open_proxy_open(struct inode *inode, struct file *filp)
@@ -168,7 +171,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
real_fops = debugfs_real_fops(filp);
- r = debugfs_is_locked_down(inode, filp, real_fops);
+ r = debugfs_locked_down(inode, filp, real_fops);
if (r)
goto out;
@@ -298,7 +301,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
real_fops = debugfs_real_fops(filp);
- r = debugfs_is_locked_down(inode, filp, real_fops);
+ r = debugfs_locked_down(inode, filp, real_fops);
if (r)
goto out;
@@ -496,10 +499,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n");
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
+ * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
* be returned.
*/
struct dentry *debugfs_create_u32(const char *name, umode_t mode,
@@ -581,10 +584,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong_wo, NULL, debugfs_ulong_set, "%llu\n");
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
+ * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
* be returned.
*/
struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
@@ -846,10 +849,10 @@ static const struct file_operations fops_bool_wo = {
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
+ * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
* be returned.
*/
struct dentry *debugfs_create_bool(const char *name, umode_t mode,
@@ -899,10 +902,10 @@ static const struct file_operations fops_blob = {
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
+ * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
* be returned.
*/
struct dentry *debugfs_create_blob(const char *name, umode_t mode,
@@ -1091,10 +1094,10 @@ static const struct file_operations fops_regset32 = {
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
+ * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
* be returned.
*/
struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
@@ -1158,4 +1161,3 @@ struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
&debugfs_devm_entry_ops);
}
EXPORT_SYMBOL_GPL(debugfs_create_devm_seqfile);
-
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index f4d8df5e4714..e742dfc66933 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -332,7 +332,10 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
parent = debugfs_mount->mnt_root;
inode_lock(d_inode(parent));
- dentry = lookup_one_len(name, parent, strlen(name));
+ if (unlikely(IS_DEADDIR(d_inode(parent))))
+ dentry = ERR_PTR(-ENOENT);
+ else
+ dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry) && d_really_is_positive(dentry)) {
if (d_is_dir(dentry))
pr_err("Directory '%s' with parent '%s' already present!\n",
@@ -423,7 +426,7 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
@@ -502,7 +505,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
@@ -534,7 +537,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
* returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
@@ -627,7 +630,7 @@ EXPORT_SYMBOL(debugfs_create_automount);
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the symbolic
* link is to be removed (no automatic cleanup happens if your module is
- * unloaded, you are responsible here.) If an error occurs, %ERR_PTR(-ERROR)
+ * unloaded, you are responsible here.) If an error occurs, ERR_PTR(-ERROR)
* will be returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
@@ -681,62 +684,15 @@ static void __debugfs_file_removed(struct dentry *dentry)
wait_for_completion(&fsd->active_users_drained);
}
-static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
+static void remove_one(struct dentry *victim)
{
- int ret = 0;
-
- if (simple_positive(dentry)) {
- dget(dentry);
- if (d_is_dir(dentry)) {
- ret = simple_rmdir(d_inode(parent), dentry);
- if (!ret)
- fsnotify_rmdir(d_inode(parent), dentry);
- } else {
- simple_unlink(d_inode(parent), dentry);
- fsnotify_unlink(d_inode(parent), dentry);
- }
- if (!ret)
- d_delete(dentry);
- if (d_is_reg(dentry))
- __debugfs_file_removed(dentry);
- dput(dentry);
- }
- return ret;
-}
-
-/**
- * debugfs_remove - removes a file or directory from the debugfs filesystem
- * @dentry: a pointer to a the dentry of the file or directory to be
- * removed. If this parameter is NULL or an error value, nothing
- * will be done.
- *
- * This function removes a file or directory in debugfs that was previously
- * created with a call to another debugfs function (like
- * debugfs_create_file() or variants thereof.)
- *
- * This function is required to be called in order for the file to be
- * removed, no automatic cleanup of files will happen when a module is
- * removed, you are responsible here.
- */
-void debugfs_remove(struct dentry *dentry)
-{
- struct dentry *parent;
- int ret;
-
- if (IS_ERR_OR_NULL(dentry))
- return;
-
- parent = dentry->d_parent;
- inode_lock(d_inode(parent));
- ret = __debugfs_remove(dentry, parent);
- inode_unlock(d_inode(parent));
- if (!ret)
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ if (d_is_reg(victim))
+ __debugfs_file_removed(victim);
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
-EXPORT_SYMBOL_GPL(debugfs_remove);
/**
- * debugfs_remove_recursive - recursively removes a directory
+ * debugfs_remove - recursively removes a directory
* @dentry: a pointer to the dentry of the directory to be removed. If this
* parameter is NULL or an error value, nothing will be done.
*
@@ -748,65 +704,16 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
* removed, no automatic cleanup of files will happen when a module is
* removed, you are responsible here.
*/
-void debugfs_remove_recursive(struct dentry *dentry)
+void debugfs_remove(struct dentry *dentry)
{
- struct dentry *child, *parent;
-
if (IS_ERR_OR_NULL(dentry))
return;
- parent = dentry;
- down:
- inode_lock(d_inode(parent));
- loop:
- /*
- * The parent->d_subdirs is protected by the d_lock. Outside that
- * lock, the child can be unlinked and set to be freed which can
- * use the d_u.d_child as the rcu head and corrupt this list.
- */
- spin_lock(&parent->d_lock);
- list_for_each_entry(child, &parent->d_subdirs, d_child) {
- if (!simple_positive(child))
- continue;
-
- /* perhaps simple_empty(child) makes more sense */
- if (!list_empty(&child->d_subdirs)) {
- spin_unlock(&parent->d_lock);
- inode_unlock(d_inode(parent));
- parent = child;
- goto down;
- }
-
- spin_unlock(&parent->d_lock);
-
- if (!__debugfs_remove(child, parent))
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
-
- /*
- * The parent->d_lock protects agaist child from unlinking
- * from d_subdirs. When releasing the parent->d_lock we can
- * no longer trust that the next pointer is valid.
- * Restart the loop. We'll skip this one with the
- * simple_positive() check.
- */
- goto loop;
- }
- spin_unlock(&parent->d_lock);
-
- inode_unlock(d_inode(parent));
- child = parent;
- parent = parent->d_parent;
- inode_lock(d_inode(parent));
-
- if (child != dentry)
- /* go up */
- goto loop;
-
- if (!__debugfs_remove(child, parent))
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
- inode_unlock(d_inode(parent));
+ simple_pin_fs(&debug_fs_type, &debugfs_mount, &debugfs_mount_count);
+ simple_recursive_removal(dentry, remove_one);
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
-EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
+EXPORT_SYMBOL_GPL(debugfs_remove);
/**
* debugfs_rename - rename a file/directory in the debugfs filesystem
@@ -906,4 +813,3 @@ static int __init debugfs_init(void)
return retval;
}
core_initcall(debugfs_init);
-
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 3951d39b9b75..cdfaf4f0e11a 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1035,7 +1035,7 @@ static void sctp_connect_to_sock(struct connection *con)
int result;
int addr_len;
struct socket *sock;
- struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
+ struct __kernel_sock_timeval tv = { .tv_sec = 5, .tv_usec = 0 };
if (con->nodeid == 0) {
log_print("attempt to connect sock 0 foiled");
@@ -1087,12 +1087,12 @@ static void sctp_connect_to_sock(struct connection *con)
* since O_NONBLOCK argument in connect() function does not work here,
* then, we should restore the default value of this attribute.
*/
- kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_OLD, (char *)&tv,
+ kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW, (char *)&tv,
sizeof(tv));
result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
0);
memset(&tv, 0, sizeof(tv));
- kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_OLD, (char *)&tv,
+ kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW, (char *)&tv,
sizeof(tv));
if (result == -EINPROGRESS)
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f91db24bbf3b..db1ef144c63a 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1586,7 +1586,7 @@ ecryptfs_process_key_cipher(struct crypto_skcipher **key_tfm,
}
crypto_skcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
if (*key_size == 0)
- *key_size = crypto_skcipher_default_keysize(*key_tfm);
+ *key_size = crypto_skcipher_max_keysize(*key_tfm);
get_random_bytes(dummy_key, *key_size);
rc = crypto_skcipher_setkey(*key_tfm, dummy_key, *key_size);
if (rc) {
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 216fbe6a4837..7d326aa0308e 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -2204,9 +2204,9 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
if (mount_crypt_stat->global_default_cipher_key_size == 0) {
printk(KERN_WARNING "No key size specified at mount; "
"defaulting to [%d]\n",
- crypto_skcipher_default_keysize(tfm));
+ crypto_skcipher_max_keysize(tfm));
mount_crypt_stat->global_default_cipher_key_size =
- crypto_skcipher_default_keysize(tfm);
+ crypto_skcipher_max_keysize(tfm);
}
if (crypt_stat->key_size == 0)
crypt_stat->key_size =
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index cffa0c1ec829..019572c6b39a 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -524,16 +524,12 @@ out:
static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block)
{
- int rc = 0;
- struct inode *inode;
- struct inode *lower_inode;
-
- inode = (struct inode *)mapping->host;
- lower_inode = ecryptfs_inode_to_lower(inode);
- if (lower_inode->i_mapping->a_ops->bmap)
- rc = lower_inode->i_mapping->a_ops->bmap(lower_inode->i_mapping,
- block);
- return rc;
+ struct inode *lower_inode = ecryptfs_inode_to_lower(mapping->host);
+ int ret = bmap(lower_inode, &block);
+
+ if (ret)
+ return 0;
+ return block;
}
const struct address_space_operations ecryptfs_aops = {
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 2890a67a1ded..5779a15c2cd6 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -306,24 +306,22 @@ static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
}
src = kmap_atomic(*rq->in);
- if (!rq->out[0]) {
- dst = NULL;
- } else {
+ if (rq->out[0]) {
dst = kmap_atomic(rq->out[0]);
memcpy(dst + rq->pageofs_out, src, righthalf);
+ kunmap_atomic(dst);
}
- if (rq->out[1] == *rq->in) {
- memmove(src, src + righthalf, rq->pageofs_out);
- } else if (nrpages_out == 2) {
- if (dst)
- kunmap_atomic(dst);
+ if (nrpages_out == 2) {
DBG_BUGON(!rq->out[1]);
- dst = kmap_atomic(rq->out[1]);
- memcpy(dst, src + righthalf, rq->pageofs_out);
+ if (rq->out[1] == *rq->in) {
+ memmove(src, src + righthalf, rq->pageofs_out);
+ } else {
+ dst = kmap_atomic(rq->out[1]);
+ memcpy(dst, src + righthalf, rq->pageofs_out);
+ kunmap_atomic(dst);
+ }
}
- if (dst)
- kunmap_atomic(dst);
kunmap_atomic(src);
return 0;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 1ed5beff7d11..c4c6dcdc89ad 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -401,9 +401,9 @@ static inline void *erofs_get_pcpubuf(unsigned int pagenr)
#ifdef CONFIG_EROFS_FS_ZIP
int erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
- pgoff_t index, bool *tag);
+ pgoff_t index);
int erofs_register_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp, bool tag);
+ struct erofs_workgroup *grp);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 1e8e1450d5b0..fddc5059c930 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -59,7 +59,7 @@ repeat:
}
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
- pgoff_t index, bool *tag)
+ pgoff_t index)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_workgroup *grp;
@@ -68,9 +68,6 @@ repeat:
rcu_read_lock();
grp = radix_tree_lookup(&sbi->workstn_tree, index);
if (grp) {
- *tag = xa_pointer_tag(grp);
- grp = xa_untag_pointer(grp);
-
if (erofs_workgroup_get(grp)) {
/* prefer to relax rcu read side */
rcu_read_unlock();
@@ -84,8 +81,7 @@ repeat:
}
int erofs_register_workgroup(struct super_block *sb,
- struct erofs_workgroup *grp,
- bool tag)
+ struct erofs_workgroup *grp)
{
struct erofs_sb_info *sbi;
int err;
@@ -103,8 +99,6 @@ int erofs_register_workgroup(struct super_block *sb,
sbi = EROFS_SB(sb);
xa_lock(&sbi->workstn_tree);
- grp = xa_tag_pointer(grp, tag);
-
/*
* Bump up reference count before making this workgroup
* visible to other users in order to avoid potential UAF
@@ -175,8 +169,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
* however in order to avoid some race conditions, add a
* DBG_BUGON to observe this in advance.
*/
- DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
- grp->index)) != grp);
+ DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);
/*
* If managed cache is on, last refcount should indicate
@@ -201,7 +194,7 @@ repeat:
batch, first_index, PAGEVEC_SIZE);
for (i = 0; i < found; ++i) {
- struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
+ struct erofs_workgroup *grp = batch[i];
first_index = grp->index + 1;
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index 3585b84d2f20..50966f1c676e 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -46,18 +46,19 @@ extern const struct xattr_handler erofs_xattr_security_handler;
static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
{
-static const struct xattr_handler *xattr_handler_map[] = {
- [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
+ static const struct xattr_handler *xattr_handler_map[] = {
+ [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
- [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
- [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
- &posix_acl_default_xattr_handler,
+ [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] =
+ &posix_acl_access_xattr_handler,
+ [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+ &posix_acl_default_xattr_handler,
#endif
- [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
+ [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
- [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
+ [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
#endif
-};
+ };
return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
xattr_handler_map[idx] : NULL;
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index ca99425a4536..80e47f07d946 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -345,9 +345,8 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
struct z_erofs_pcluster *pcl;
struct z_erofs_collection *cl;
unsigned int length;
- bool tag;
- grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
+ grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
if (!grp)
return -ENOENT;
@@ -438,7 +437,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
*/
mutex_trylock(&cl->lock);
- err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
+ err = erofs_register_workgroup(inode->i_sb, &pcl->obj);
if (err) {
mutex_unlock(&cl->lock);
kmem_cache_free(pcluster_cachep, pcl);
@@ -1149,21 +1148,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
qtail[JQ_BYPASS] = &pcl->next;
}
-static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
- unsigned int nr_bios, bool force_fg)
-{
- /*
- * although background is preferred, no one is pending for submission.
- * don't issue workqueue for decompression but drop it directly instead.
- */
- if (force_fg || nr_bios)
- return false;
-
- kvfree(q[JQ_SUBMIT]);
- return true;
-}
-
-static bool z_erofs_submit_queue(struct super_block *sb,
+static void z_erofs_submit_queue(struct super_block *sb,
z_erofs_next_pcluster_t owned_head,
struct list_head *pagepool,
struct z_erofs_decompressqueue *fgq,
@@ -1172,19 +1157,12 @@ static bool z_erofs_submit_queue(struct super_block *sb,
struct erofs_sb_info *const sbi = EROFS_SB(sb);
z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
- struct bio *bio;
void *bi_private;
/* since bio will be NULL, no need to initialize last_index */
pgoff_t uninitialized_var(last_index);
- bool force_submit = false;
- unsigned int nr_bios;
-
- if (owned_head == Z_EROFS_PCLUSTER_TAIL)
- return false;
+ unsigned int nr_bios = 0;
+ struct bio *bio = NULL;
- force_submit = false;
- bio = NULL;
- nr_bios = 0;
bi_private = jobqueueset_init(sb, q, fgq, force_fg);
qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
@@ -1194,11 +1172,9 @@ static bool z_erofs_submit_queue(struct super_block *sb,
do {
struct z_erofs_pcluster *pcl;
- unsigned int clusterpages;
- pgoff_t first_index;
- struct page *page;
- unsigned int i = 0, bypass = 0;
- int err;
+ pgoff_t cur, end;
+ unsigned int i = 0;
+ bool bypass = true;
/* no possible 'owned_head' equals the following */
DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
@@ -1206,55 +1182,50 @@ static bool z_erofs_submit_queue(struct super_block *sb,
pcl = container_of(owned_head, struct z_erofs_pcluster, next);
- clusterpages = BIT(pcl->clusterbits);
+ cur = pcl->obj.index;
+ end = cur + BIT(pcl->clusterbits);
/* close the main owned chain at first */
owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
Z_EROFS_PCLUSTER_TAIL_CLOSED);
- first_index = pcl->obj.index;
- force_submit |= (first_index != last_index + 1);
+ do {
+ struct page *page;
+ int err;
-repeat:
- page = pickup_page_for_submission(pcl, i, pagepool,
- MNGD_MAPPING(sbi),
- GFP_NOFS);
- if (!page) {
- force_submit = true;
- ++bypass;
- goto skippage;
- }
+ page = pickup_page_for_submission(pcl, i++, pagepool,
+ MNGD_MAPPING(sbi),
+ GFP_NOFS);
+ if (!page)
+ continue;
- if (bio && force_submit) {
+ if (bio && cur != last_index + 1) {
submit_bio_retry:
- submit_bio(bio);
- bio = NULL;
- }
-
- if (!bio) {
- bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+ submit_bio(bio);
+ bio = NULL;
+ }
- bio->bi_end_io = z_erofs_decompressqueue_endio;
- bio_set_dev(bio, sb->s_bdev);
- bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
- LOG_SECTORS_PER_BLOCK;
- bio->bi_private = bi_private;
- bio->bi_opf = REQ_OP_READ;
+ if (!bio) {
+ bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
- ++nr_bios;
- }
+ bio->bi_end_io = z_erofs_decompressqueue_endio;
+ bio_set_dev(bio, sb->s_bdev);
+ bio->bi_iter.bi_sector = (sector_t)cur <<
+ LOG_SECTORS_PER_BLOCK;
+ bio->bi_private = bi_private;
+ bio->bi_opf = REQ_OP_READ;
+ ++nr_bios;
+ }
- err = bio_add_page(bio, page, PAGE_SIZE, 0);
- if (err < PAGE_SIZE)
- goto submit_bio_retry;
+ err = bio_add_page(bio, page, PAGE_SIZE, 0);
+ if (err < PAGE_SIZE)
+ goto submit_bio_retry;
- force_submit = false;
- last_index = first_index + i;
-skippage:
- if (++i < clusterpages)
- goto repeat;
+ last_index = cur;
+ bypass = false;
+ } while (++cur < end);
- if (bypass < clusterpages)
+ if (!bypass)
qtail[JQ_SUBMIT] = &pcl->next;
else
move_to_bypass_jobqueue(pcl, qtail, owned_head);
@@ -1263,11 +1234,15 @@ skippage:
if (bio)
submit_bio(bio);
- if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
- return true;
-
+ /*
+ * Although background decompression is preferred, nothing is pending
+ * for submission; don't kick off the decompression workqueue, just
+ * drop the queue directly instead.
+ */
+ if (!*force_fg && !nr_bios) {
+ kvfree(q[JQ_SUBMIT]);
+ return;
+ }
z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
- return true;
}
static void z_erofs_runqueue(struct super_block *sb,
@@ -1276,9 +1251,9 @@ static void z_erofs_runqueue(struct super_block *sb,
{
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
- if (!z_erofs_submit_queue(sb, clt->owned_head,
- pagepool, io, &force_fg))
+ if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
return;
+ z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
/* handle bypass queue (no i/o pclusters) immediately */
z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 8aa0ea8c55e8..78e41c7c3d05 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -24,6 +24,8 @@
#include <linux/seq_file.h>
#include <linux/idr.h>
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
static DEFINE_IDA(eventfd_ida);
struct eventfd_ctx {
@@ -60,12 +62,25 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
unsigned long flags;
+ /*
+ * Deadlock or stack overflow issues can happen if we recurse here
+ * through waitqueue wakeup handlers. If the caller users potentially
+ * nested waitqueues with custom wakeup handlers, then it should
+ * check eventfd_signal_count() before calling this function. If
+ * it returns true, the eventfd_signal() call should be deferred to a
+ * safe context.
+ */
+ if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+ return 0;
+
spin_lock_irqsave(&ctx->wqh.lock, flags);
+ this_cpu_inc(eventfd_wake_count);
if (ULLONG_MAX - ctx->count < n)
n = ULLONG_MAX - ctx->count;
ctx->count += n;
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+ this_cpu_dec(eventfd_wake_count);
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return n;
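
A sketch of the deferral pattern the new comment asks callers to follow
(struct my_ctx and both functions are hypothetical; eventfd_signal_count()
is the helper the comment names, true while a signal is in flight on the
current CPU):

#include <linux/eventfd.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct eventfd_ctx *evfd;
	struct work_struct signal_work;
};

static void my_signal_workfn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, signal_work);

	eventfd_signal(ctx->evfd, 1);	/* safe: ordinary process context */
}

/* called from a (possibly nested) waitqueue wakeup handler */
static void my_wakeup_path(struct my_ctx *ctx)
{
	if (eventfd_signal_count())
		schedule_work(&ctx->signal_work);	/* defer to safe context */
	else
		eventfd_signal(ctx->evfd, 1);
}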
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 67a395039268..b041b66002db 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -354,12 +354,6 @@ static inline struct epitem *ep_item_from_epqueue(poll_table *p)
return container_of(p, struct ep_pqueue, pt)->epi;
}
-/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
-static inline int ep_op_has_event(int op)
-{
- return op != EPOLL_CTL_DEL;
-}
-
/* Initialize the poll safe wake up structure */
static void ep_nested_calls_init(struct nested_calls *ncalls)
{
@@ -2074,27 +2068,28 @@ SYSCALL_DEFINE1(epoll_create, int, size)
return do_epoll_create(0);
}
-/*
- * The following function implements the controller interface for
- * the eventpoll file that enables the insertion/removal/change of
- * file descriptors inside the interest set.
- */
-SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
- struct epoll_event __user *, event)
+static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
+ bool nonblock)
+{
+ if (!nonblock) {
+ mutex_lock_nested(mutex, depth);
+ return 0;
+ }
+ if (mutex_trylock(mutex))
+ return 0;
+ return -EAGAIN;
+}
+
+int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
+ bool nonblock)
{
int error;
int full_check = 0;
struct fd f, tf;
struct eventpoll *ep;
struct epitem *epi;
- struct epoll_event epds;
struct eventpoll *tep = NULL;
- error = -EFAULT;
- if (ep_op_has_event(op) &&
- copy_from_user(&epds, event, sizeof(struct epoll_event)))
- goto error_return;
-
error = -EBADF;
f = fdget(epfd);
if (!f.file)
@@ -2112,7 +2107,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
/* Check if EPOLLWAKEUP is allowed */
if (ep_op_has_event(op))
- ep_take_care_of_epollwakeup(&epds);
+ ep_take_care_of_epollwakeup(epds);
/*
* We have to check that the file structure underneath the file descriptor
@@ -2128,11 +2123,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
* so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
* Also, we do not currently support nested exclusive wakeups.
*/
- if (ep_op_has_event(op) && (epds.events & EPOLLEXCLUSIVE)) {
+ if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
if (op == EPOLL_CTL_MOD)
goto error_tgt_fput;
if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
- (epds.events & ~EPOLLEXCLUSIVE_OK_BITS)))
+ (epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
goto error_tgt_fput;
}
@@ -2157,13 +2152,17 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
* deep wakeup paths from forming in parallel through multiple
* EPOLL_CTL_ADD operations.
*/
- mutex_lock_nested(&ep->mtx, 0);
+ error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
+ if (error)
+ goto error_tgt_fput;
if (op == EPOLL_CTL_ADD) {
if (!list_empty(&f.file->f_ep_links) ||
is_file_epoll(tf.file)) {
- full_check = 1;
mutex_unlock(&ep->mtx);
- mutex_lock(&epmutex);
+ error = epoll_mutex_lock(&epmutex, 0, nonblock);
+ if (error)
+ goto error_tgt_fput;
+ full_check = 1;
if (is_file_epoll(tf.file)) {
error = -ELOOP;
if (ep_loop_check(ep, tf.file) != 0) {
@@ -2173,10 +2172,19 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
} else
list_add(&tf.file->f_tfile_llink,
&tfile_check_list);
- mutex_lock_nested(&ep->mtx, 0);
+ error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
+ if (error) {
+out_del:
+ list_del(&tf.file->f_tfile_llink);
+ goto error_tgt_fput;
+ }
if (is_file_epoll(tf.file)) {
tep = tf.file->private_data;
- mutex_lock_nested(&tep->mtx, 1);
+ error = epoll_mutex_lock(&tep->mtx, 1, nonblock);
+ if (error) {
+ mutex_unlock(&ep->mtx);
+ goto out_del;
+ }
}
}
}
@@ -2192,8 +2200,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
- epds.events |= EPOLLERR | EPOLLHUP;
- error = ep_insert(ep, &epds, tf.file, fd, full_check);
+ epds->events |= EPOLLERR | EPOLLHUP;
+ error = ep_insert(ep, epds, tf.file, fd, full_check);
} else
error = -EEXIST;
if (full_check)
@@ -2208,8 +2216,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
case EPOLL_CTL_MOD:
if (epi) {
if (!(epi->event.events & EPOLLEXCLUSIVE)) {
- epds.events |= EPOLLERR | EPOLLHUP;
- error = ep_modify(ep, epi, &epds);
+ epds->events |= EPOLLERR | EPOLLHUP;
+ error = ep_modify(ep, epi, epds);
}
} else
error = -ENOENT;
@@ -2232,6 +2240,23 @@ error_return:
}
/*
+ * The following function implements the controller interface for
+ * the eventpoll file that enables the insertion/removal/change of
+ * file descriptors inside the interest set.
+ */
+SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ struct epoll_event __user *, event)
+{
+ struct epoll_event epds;
+
+ if (ep_op_has_event(op) &&
+ copy_from_user(&epds, event, sizeof(struct epoll_event)))
+ return -EFAULT;
+
+ return do_epoll_ctl(epfd, op, fd, &epds, false);
+}
+
+/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_wait(2).
*/
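
A sketch of an in-kernel caller using the new nonblock argument
(example_epoll_ctl is illustrative, not part of this patch):

/*
 * With nonblock=true the ep->mtx/epmutex acquisitions become trylocks
 * and a contended call returns -EAGAIN; the caller may then retry from
 * a context where sleeping is acceptable.
 */
static int example_epoll_ctl(int epfd, int op, int fd,
			     struct epoll_event *ev)
{
	int ret = do_epoll_ctl(epfd, op, fd, ev, true);	/* nonblocking */

	if (ret == -EAGAIN)
		ret = do_epoll_ctl(epfd, op, fd, ev, false);	/* may sleep */
	return ret;
}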
diff --git a/fs/exec.c b/fs/exec.c
index 74d88dab98dd..db17be51b112 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -272,7 +272,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
goto err;
mm->stack_vm = mm->total_vm = 1;
- arch_bprm_mm_init(mm, vma);
up_write(&mm->mmap_sem);
bprm->p = vma->vm_end - sizeof(void *);
return 0;
@@ -761,6 +760,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
goto out_unlock;
BUG_ON(prev != vma);
+ if (unlikely(vm_flags & VM_EXEC)) {
+ pr_warn_once("process '%pD4' started with executable stack\n",
+ bprm->file);
+ }
+
/* Move stack pages down in memory. */
if (stack_shift) {
ret = shift_arg_pages(vma, stack_shift);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 119667e65890..c885cf7d724b 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -960,8 +960,9 @@ ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
static int
ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
- return dax_writeback_mapping_range(mapping,
- mapping->host->i_sb->s_bdev, wbc);
+ struct ext2_sb_info *sbi = EXT2_SB(mapping->host->i_sb);
+
+ return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}
const struct address_space_operations ext2_aops = {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index bcffe25da2f0..4a4ab683250d 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1073,9 +1073,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext2;
- sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
- le32_to_cpu(es->s_first_data_block) - 1)
- / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
+ sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
+ le32_to_cpu(es->s_first_data_block) - 1)
+ / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc_array (db_count,
@@ -1138,6 +1138,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
ext2_count_dirs(sb), GFP_KERNEL);
}
if (err) {
+ ret = err;
ext2_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index ef42ab040905..2a592e38cdfe 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -4,12 +4,7 @@
# kernels after the removal of ext3 driver.
config EXT3_FS
tristate "The Extended 3 (ext3) filesystem"
- # These must match EXT4_FS selects...
select EXT4_FS
- select JBD2
- select CRC16
- select CRYPTO
- select CRYPTO_CRC32C
help
This config option is here only for backward compatibility. ext3
filesystem is now handled by the ext4 driver.
@@ -33,12 +28,12 @@ config EXT3_FS_SECURITY
config EXT4_FS
tristate "The Extended 4 (ext4) filesystem"
- # Please update EXT3_FS selects when changing these
select JBD2
select CRC16
select CRYPTO
select CRYPTO_CRC32C
select FS_IOMAP
+ select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
help
This is the next generation of the ext3 filesystem.
@@ -108,7 +103,7 @@ config EXT4_DEBUG
echo 1 > /sys/module/ext4/parameters/mballoc_debug
config EXT4_KUNIT_TESTS
- bool "KUnit tests for ext4"
+ tristate "KUnit tests for ext4"
select EXT4_FS
depends on KUNIT
help
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 840b91d040f1..4ccb3c9189d8 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -13,5 +13,6 @@ ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
-ext4-$(CONFIG_EXT4_KUNIT_TESTS) += inode-test.o
+ext4-inode-test-objs += inode-test.o
+obj-$(CONFIG_EXT4_KUNIT_TESTS) += ext4-inode-test.o
ext4-$(CONFIG_FS_VERITY) += verity.o
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 0b202e00d93f..5f993a411251 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -371,7 +371,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
if (buffer_verified(bh))
goto verified;
if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
- desc, bh))) {
+ desc, bh) ||
+ ext4_simulate_fail(sb, EXT4_SIM_BBITMAP_CRC))) {
ext4_unlock_group(sb, block_group);
ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
ext4_mark_group_bitmap_corrupted(sb, block_group,
@@ -505,7 +506,9 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
if (!desc)
return -EFSCORRUPTED;
wait_on_buffer(bh);
+ ext4_simulate_fail_bh(sb, bh, EXT4_SIM_BBITMAP_EIO);
if (!buffer_uptodate(bh)) {
+ ext4_set_errno(sb, EIO);
ext4_error(sb, "Cannot read block bitmap - "
"block_group = %u, block_bitmap = %llu",
block_group, (unsigned long long) bh->b_blocknr);
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 1ee04e76bbe0..0a734ffb4310 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -207,6 +207,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
return PTR_ERR(inode);
num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
while (i < num) {
+ cond_resched();
map.m_lblk = i;
map.m_len = num - i;
n = ext4_map_blocks(NULL, inode, &map, 0);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 9f00fc0bf21d..9aa1f75409b0 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -120,7 +120,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (IS_ENCRYPTED(inode)) {
err = fscrypt_get_encryption_info(inode);
- if (err && err != -ENOKEY)
+ if (err)
return err;
}
@@ -129,12 +129,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (err != ERR_BAD_DX_DIR) {
return err;
}
- /*
- * We don't set the inode dirty flag since it's not
- * critical that it get flushed back to the disk.
- */
- ext4_clear_inode_flag(file_inode(file),
- EXT4_INODE_INDEX);
+ /* Can we just clear INDEX flag to ignore htree information? */
+ if (!ext4_has_metadata_csum(sb)) {
+ /*
+ * We don't set the inode dirty flag since it's not
+ * critical that it gets flushed back to the disk.
+ */
+ ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+ }
}
if (ext4_has_inline_data(inode)) {
@@ -462,7 +464,6 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
new_fn->name_len = ent_name->len;
new_fn->file_type = dirent->file_type;
memcpy(new_fn->name, ent_name->name, ent_name->len);
- new_fn->name[ent_name->len] = 0;
while (*p) {
parent = *p;
@@ -672,9 +673,11 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name)
{
struct qstr qstr = {.name = str, .len = len };
- struct inode *inode = dentry->d_parent->d_inode;
+ const struct dentry *parent = READ_ONCE(dentry->d_parent);
+ const struct inode *inode = READ_ONCE(parent->d_inode);
- if (!IS_CASEFOLDED(inode) || !EXT4_SB(inode->i_sb)->s_encoding) {
+ if (!inode || !IS_CASEFOLDED(inode) ||
+ !EXT4_SB(inode->i_sb)->s_encoding) {
if (len != name->len)
return -1;
return memcmp(str, name->name, len);
@@ -687,10 +690,11 @@ static int ext4_d_hash(const struct dentry *dentry, struct qstr *str)
{
const struct ext4_sb_info *sbi = EXT4_SB(dentry->d_sb);
const struct unicode_map *um = sbi->s_encoding;
+ const struct inode *inode = READ_ONCE(dentry->d_inode);
unsigned char *norm;
int len, ret = 0;
- if (!IS_CASEFOLDED(dentry->d_inode) || !um)
+ if (!inode || !IS_CASEFOLDED(inode) || !um)
return 0;
norm = kmalloc(PATH_MAX, GFP_ATOMIC);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f8578caba40d..4441331d06cc 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1052,8 +1052,6 @@ struct ext4_inode_info {
/* allocation reservation info for delalloc */
/* In case of bigalloc, this refer to clusters rather than blocks */
unsigned int i_reserved_data_blocks;
- ext4_lblk_t i_da_metadata_calc_last_lblock;
- int i_da_metadata_calc_len;
/* pending cluster reservations for bigalloc file systems */
struct ext4_pending_tree i_pending_tree;
@@ -1343,7 +1341,8 @@ struct ext4_super_block {
__u8 s_lastcheck_hi;
__u8 s_first_error_time_hi;
__u8 s_last_error_time_hi;
- __u8 s_pad[2];
+ __u8 s_first_error_errcode;
+ __u8 s_last_error_errcode;
__le16 s_encoding; /* Filename charset encoding */
__le16 s_encoding_flags; /* Filename charset encoding flags */
__le32 s_reserved[95]; /* Padding to the end of the block */
@@ -1556,6 +1555,9 @@ struct ext4_sb_info {
/* Barrier between changing inodes' journal flags and writepages ops. */
struct percpu_rw_semaphore s_journal_flag_rwsem;
struct dax_device *s_daxdev;
+#ifdef CONFIG_EXT4_DEBUG
+ unsigned long s_simulate_fail;
+#endif
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1575,6 +1577,66 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
}
/*
+ * Simulate_fail codes
+ */
+#define EXT4_SIM_BBITMAP_EIO 1
+#define EXT4_SIM_BBITMAP_CRC 2
+#define EXT4_SIM_IBITMAP_EIO 3
+#define EXT4_SIM_IBITMAP_CRC 4
+#define EXT4_SIM_INODE_EIO 5
+#define EXT4_SIM_INODE_CRC 6
+#define EXT4_SIM_DIRBLOCK_EIO 7
+#define EXT4_SIM_DIRBLOCK_CRC 8
+
+static inline bool ext4_simulate_fail(struct super_block *sb,
+ unsigned long code)
+{
+#ifdef CONFIG_EXT4_DEBUG
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (unlikely(sbi->s_simulate_fail == code)) {
+ sbi->s_simulate_fail = 0;
+ return true;
+ }
+#endif
+ return false;
+}
+
+static inline void ext4_simulate_fail_bh(struct super_block *sb,
+ struct buffer_head *bh,
+ unsigned long code)
+{
+ if (!IS_ERR(bh) && ext4_simulate_fail(sb, code))
+ clear_buffer_uptodate(bh);
+}
+
+/*
+ * Error number codes for s_{first,last}_error_errcode
+ *
+ * Linux errno numbers are architecture specific, so we need to translate
+ * them into something which is architecture independent. We don't define
+ * codes for all errno's; just the ones which are most likely to be the cause
+ * of an ext4_error() call.
+ */
+#define EXT4_ERR_UNKNOWN 1
+#define EXT4_ERR_EIO 2
+#define EXT4_ERR_ENOMEM 3
+#define EXT4_ERR_EFSBADCRC 4
+#define EXT4_ERR_EFSCORRUPTED 5
+#define EXT4_ERR_ENOSPC 6
+#define EXT4_ERR_ENOKEY 7
+#define EXT4_ERR_EROFS 8
+#define EXT4_ERR_EFBIG 9
+#define EXT4_ERR_EEXIST 10
+#define EXT4_ERR_ERANGE 11
+#define EXT4_ERR_EOVERFLOW 12
+#define EXT4_ERR_EBUSY 13
+#define EXT4_ERR_ENOTDIR 14
+#define EXT4_ERR_ENOTEMPTY 15
+#define EXT4_ERR_ESHUTDOWN 16
+#define EXT4_ERR_EFAULT 17
+
+/*
* Inode dynamic state flags
*/
enum {
@@ -2482,8 +2544,11 @@ void ext4_insert_dentry(struct inode *inode,
struct ext4_filename *fname);
static inline void ext4_update_dx_flag(struct inode *inode)
{
- if (!ext4_has_feature_dir_index(inode->i_sb))
+ if (!ext4_has_feature_dir_index(inode->i_sb)) {
+ /* ext4_iget() should have caught this... */
+ WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+ }
}
static const unsigned char ext4_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
@@ -2628,7 +2693,6 @@ extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
/* indirect.c */
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
-extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
extern void ext4_ind_truncate(handle_t *, struct inode *inode);
extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
@@ -2679,8 +2743,6 @@ extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
extern int ext4_calculate_overhead(struct super_block *sb);
extern void ext4_superblock_csum_set(struct super_block *sb);
-extern void *ext4_kvmalloc(size_t size, gfp_t flags);
-extern void *ext4_kvzalloc(size_t size, gfp_t flags);
extern int ext4_alloc_flex_bg_array(struct super_block *sb,
ext4_group_t ngroup);
extern const char *ext4_decode_error(struct super_block *sb, int errno,
@@ -2688,6 +2750,7 @@ extern const char *ext4_decode_error(struct super_block *sb, int errno,
extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
ext4_group_t block_group,
unsigned int flags);
+extern void ext4_set_errno(struct super_block *sb, int err);
extern __printf(4, 5)
void __ext4_error(struct super_block *, const char *, unsigned int,
@@ -3254,7 +3317,6 @@ struct ext4_extent;
#define EXT_MAX_BLOCKS 0xffffffff
extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
-extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
@@ -3271,14 +3333,9 @@ extern int ext4_convert_unwritten_io_end_vec(handle_t *handle,
ext4_io_end_t *io_end);
extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
-extern int ext4_ext_calc_metadata_amount(struct inode *inode,
- ext4_lblk_t lblocks);
extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
int num,
struct ext4_ext_path *path);
-extern int ext4_can_extents_be_merged(struct inode *inode,
- struct ext4_extent *ex1,
- struct ext4_extent *ex2);
extern int ext4_ext_insert_extent(handle_t *, struct inode *,
struct ext4_ext_path **,
struct ext4_extent *, int);
@@ -3294,8 +3351,6 @@ extern int ext4_get_es_cache(struct inode *inode,
struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
extern int ext4_ext_precache(struct inode *inode);
-extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
-extern int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
extern int ext4_swap_extents(handle_t *handle, struct inode *inode1,
struct inode *inode2, ext4_lblk_t lblk1,
ext4_lblk_t lblk2, ext4_lblk_t count,
@@ -3390,6 +3445,7 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
}
extern const struct iomap_ops ext4_iomap_ops;
+extern const struct iomap_ops ext4_iomap_overwrite_ops;
extern const struct iomap_ops ext4_iomap_report_ops;
static inline int ext4_buffer_uptodate(struct buffer_head *bh)
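
A sketch of the injection pattern these helpers enable, mirroring the
balloc.c hunk earlier in this diff (example_read_metadata is hypothetical):

/*
 * One-shot failure injection in a hypothetical metadata read path.
 * The armed code is cleared by the first ext4_simulate_fail() hit,
 * so exactly one read appears to fail.
 */
static int example_read_metadata(struct super_block *sb,
				 struct buffer_head *bh)
{
	wait_on_buffer(bh);	/* I/O was submitted by the caller */
	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_INODE_EIO);
	if (!buffer_uptodate(bh)) {
		ext4_set_errno(sb, EIO);
		return -EIO;
	}
	return 0;
}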
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 98bd0e9ee7df..1c216fcc202a 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -267,10 +267,5 @@ static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix,
0xffff);
}
-#define ext4_ext_dirty(handle, inode, path) \
- __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
-int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
- struct inode *inode, struct ext4_ext_path *path);
-
#endif /* _EXT4_EXTENTS */
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index d3b8cdea5df7..1f53d64e42a5 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -7,6 +7,28 @@
#include <trace/events/ext4.h>
+int ext4_inode_journal_mode(struct inode *inode)
+{
+ if (EXT4_JOURNAL(inode) == NULL)
+ return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
+ /* We do not support data journalling with delayed allocation */
+ if (!S_ISREG(inode->i_mode) ||
+ ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) ||
+ test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
+ (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
+ !test_opt(inode->i_sb, DELALLOC))) {
+ /* We do not support data journalling for encrypted data */
+ if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
+ return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
+ return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
+ }
+ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
+ return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
+ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
+ return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
+ BUG();
+}
+
/* Just increment the non-pointer handle value */
static handle_t *ext4_get_nojournal(void)
{
@@ -58,6 +80,7 @@ static int ext4_journal_check_start(struct super_block *sb)
* take the FS itself readonly cleanly.
*/
if (journal && is_journal_aborted(journal)) {
+ ext4_set_errno(sb, -journal->j_errno);
ext4_abort(sb, "Detected aborted journal");
return -EROFS;
}
@@ -249,6 +272,7 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
if (err) {
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
+ ext4_set_errno(inode->i_sb, -err);
__ext4_abort(inode->i_sb, where, line,
"error %d when attempting revoke", err);
}
@@ -320,6 +344,7 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_block =
cpu_to_le64(bh->b_blocknr);
+ ext4_set_errno(inode->i_sb, EIO);
ext4_error_inode(inode, where, line,
bh->b_blocknr,
"IO error syncing itable block");
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index a6b9b66dbfad..7ea4f6fa173b 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -463,27 +463,7 @@ int ext4_force_commit(struct super_block *sb);
#define EXT4_INODE_ORDERED_DATA_MODE 0x02 /* ordered data mode */
#define EXT4_INODE_WRITEBACK_DATA_MODE 0x04 /* writeback data mode */
-static inline int ext4_inode_journal_mode(struct inode *inode)
-{
- if (EXT4_JOURNAL(inode) == NULL)
- return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
- /* We do not support data journalling with delayed allocation */
- if (!S_ISREG(inode->i_mode) ||
- ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) ||
- test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
- (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
- !test_opt(inode->i_sb, DELALLOC))) {
- /* We do not support data journalling for encrypted data */
- if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode))
- return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
- return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
- }
- if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
- return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
- if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
- return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
- BUG();
-}
+int ext4_inode_journal_mode(struct inode *inode);
static inline int ext4_should_journal_data(struct inode *inode)
{
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0e8708b77da6..954013d6076b 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -161,8 +161,9 @@ static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
* - ENOMEM
* - EIO
*/
-int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
- struct inode *inode, struct ext4_ext_path *path)
+static int __ext4_ext_dirty(const char *where, unsigned int line,
+ handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path)
{
int err;
@@ -179,6 +180,9 @@ int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
return err;
}
+#define ext4_ext_dirty(handle, inode, path) \
+ __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
+
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t block)
@@ -309,53 +313,6 @@ ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
}
-/*
- * Calculate the number of metadata blocks needed
- * to allocate @blocks
- * Worse case is one block per extent
- */
-int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- int idxs;
-
- idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
- / sizeof(struct ext4_extent_idx));
-
- /*
- * If the new delayed allocation block is contiguous with the
- * previous da block, it can share index blocks with the
- * previous block, so we only need to allocate a new index
- * block every idxs leaf blocks. At ldxs**2 blocks, we need
- * an additional index block, and at ldxs**3 blocks, yet
- * another index blocks.
- */
- if (ei->i_da_metadata_calc_len &&
- ei->i_da_metadata_calc_last_lblock+1 == lblock) {
- int num = 0;
-
- if ((ei->i_da_metadata_calc_len % idxs) == 0)
- num++;
- if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
- num++;
- if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
- num++;
- ei->i_da_metadata_calc_len = 0;
- } else
- ei->i_da_metadata_calc_len++;
- ei->i_da_metadata_calc_last_lblock++;
- return num;
- }
-
- /*
- * In the worst case we need a new set of index blocks at
- * every level of the inode's extent tree.
- */
- ei->i_da_metadata_calc_len = 1;
- ei->i_da_metadata_calc_last_lblock = lblock;
- return ext_depth(inode) + 1;
-}
-
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
@@ -492,6 +449,7 @@ static int __ext4_ext_check(const char *function, unsigned int line,
return 0;
corrupted:
+ ext4_set_errno(inode->i_sb, -err);
ext4_error_inode(inode, function, line, 0,
"pblk %llu bad header/extent: %s - magic %x, "
"entries %u, max %u(%u), depth %u(%u)",
@@ -510,6 +468,30 @@ int ext4_ext_check_inode(struct inode *inode)
return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
+static void ext4_cache_extents(struct inode *inode,
+ struct ext4_extent_header *eh)
+{
+ struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
+ ext4_lblk_t prev = 0;
+ int i;
+
+ for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
+ unsigned int status = EXTENT_STATUS_WRITTEN;
+ ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
+ int len = ext4_ext_get_actual_len(ex);
+
+ if (prev && (prev != lblk))
+ ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
+ EXTENT_STATUS_HOLE);
+
+ if (ext4_ext_is_unwritten(ex))
+ status = EXTENT_STATUS_UNWRITTEN;
+ ext4_es_cache_extent(inode, lblk, len,
+ ext4_ext_pblock(ex), status);
+ prev = lblk + len;
+ }
+}
+
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
struct inode *inode, ext4_fsblk_t pblk, int depth,
@@ -544,26 +526,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
*/
if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
struct ext4_extent_header *eh = ext_block_hdr(bh);
- struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
- ext4_lblk_t prev = 0;
- int i;
-
- for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
- unsigned int status = EXTENT_STATUS_WRITTEN;
- ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
- int len = ext4_ext_get_actual_len(ex);
-
- if (prev && (prev != lblk))
- ext4_es_cache_extent(inode, prev,
- lblk - prev, ~0,
- EXTENT_STATUS_HOLE);
-
- if (ext4_ext_is_unwritten(ex))
- status = EXTENT_STATUS_UNWRITTEN;
- ext4_es_cache_extent(inode, lblk, len,
- ext4_ext_pblock(ex), status);
- prev = lblk + len;
- }
+ ext4_cache_extents(inode, eh);
}
return bh;
errout:
@@ -649,8 +612,9 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
ext_debug("path:");
for (k = 0; k <= l; k++, path++) {
if (path->p_idx) {
- ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
- ext4_idx_pblock(path->p_idx));
+ ext_debug(" %d->%llu",
+ le32_to_cpu(path->p_idx->ei_block),
+ ext4_idx_pblock(path->p_idx));
} else if (path->p_ext) {
ext_debug(" %d:[%d]%d:%llu ",
le32_to_cpu(path->p_ext->ee_block),
@@ -731,11 +695,12 @@ void ext4_ext_drop_refs(struct ext4_ext_path *path)
if (!path)
return;
depth = path->p_depth;
- for (i = 0; i <= depth; i++, path++)
+ for (i = 0; i <= depth; i++, path++) {
if (path->p_bh) {
brelse(path->p_bh);
path->p_bh = NULL;
}
+ }
}
/*
@@ -777,8 +742,8 @@ ext4_ext_binsearch_idx(struct inode *inode,
chix = ix = EXT_FIRST_INDEX(eh);
for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
- if (k != 0 &&
- le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
+ if (k != 0 && le32_to_cpu(ix->ei_block) <=
+ le32_to_cpu(ix[-1].ei_block)) {
printk(KERN_DEBUG "k=%d, ix=0x%p, "
"first=0x%p\n", k,
ix, EXT_FIRST_INDEX(eh));
@@ -911,6 +876,8 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
path[0].p_bh = NULL;
i = depth;
+ if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
+ ext4_cache_extents(inode, eh);
/* walk through the tree */
while (i) {
ext_debug("depth %d: num %d, max %d\n",
@@ -1632,17 +1599,16 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
return EXT_MAX_BLOCKS;
while (depth >= 0) {
+ struct ext4_ext_path *p = &path[depth];
+
if (depth == path->p_depth) {
/* leaf */
- if (path[depth].p_ext &&
- path[depth].p_ext !=
- EXT_LAST_EXTENT(path[depth].p_hdr))
- return le32_to_cpu(path[depth].p_ext[1].ee_block);
+ if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
+ return le32_to_cpu(p->p_ext[1].ee_block);
} else {
/* index */
- if (path[depth].p_idx !=
- EXT_LAST_INDEX(path[depth].p_hdr))
- return le32_to_cpu(path[depth].p_idx[1].ei_block);
+ if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
+ return le32_to_cpu(p->p_idx[1].ei_block);
}
depth--;
}
@@ -1742,9 +1708,9 @@ static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
return err;
}
-int
-ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
- struct ext4_extent *ex2)
+static int ext4_can_extents_be_merged(struct inode *inode,
+ struct ext4_extent *ex1,
+ struct ext4_extent *ex2)
{
unsigned short ext1_ee_len, ext2_ee_len;
@@ -1758,11 +1724,6 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
le32_to_cpu(ex2->ee_block))
return 0;
- /*
- * To allow future support for preallocated extents to be added
- * as an RO_COMPAT feature, refuse to merge to extents if
- * this can result in the top bit of ee_len being set.
- */
if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
return 0;
@@ -1870,13 +1831,14 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
}
/*
- * This function tries to merge the @ex extent to neighbours in the tree.
- * return 1 if merge left else 0.
+ * This function tries to merge the @ex extent with its neighbours in the
+ * tree, then tries to collapse the extent tree into the inode.
*/
static void ext4_ext_try_to_merge(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path,
- struct ext4_extent *ex) {
+ struct ext4_extent *ex)
+{
struct ext4_extent_header *eh;
unsigned int depth;
int merge_done = 0;
@@ -3718,9 +3680,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
max_zeroout = sbi->s_extent_max_zeroout_kb >>
(inode->i_sb->s_blocksize_bits - 10);
- if (IS_ENCRYPTED(inode))
- max_zeroout = 0;
-
/*
* five cases:
* 1. split the extent into three extents.
@@ -4706,6 +4665,10 @@ retry:
return ret > 0 ? ret2 : ret;
}
+static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
+
+static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
+
static long ext4_zero_range(struct file *file, loff_t offset,
loff_t len, int mode)
{
@@ -4723,9 +4686,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
trace_ext4_zero_range(inode, offset, len, mode);
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
-
/* Call ext4_force_commit to flush all data in case of data=journal. */
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
@@ -4765,7 +4725,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- (offset + len > i_size_read(inode) ||
+ (offset + len > inode->i_size ||
offset + len > EXT4_I(inode)->i_disksize)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
@@ -4849,7 +4809,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
* Mark that we allocate beyond EOF so the subsequent truncate
* can proceed even if the new size is the same as i_size.
*/
- if ((offset + len) > i_size_read(inode))
+ if (offset + len > inode->i_size)
ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
}
ext4_mark_inode_dirty(handle, inode);
@@ -4890,14 +4850,9 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
* range since we would need to re-encrypt blocks with a
* different IV or XTS tweak (which are based on the logical
* block number).
- *
- * XXX It's not clear why zero range isn't working, but we'll
- * leave it disabled for encrypted inodes for now. This is a
- * bug we should fix....
*/
if (IS_ENCRYPTED(inode) &&
- (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE |
- FALLOC_FL_ZERO_RANGE)))
+ (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
/* Return error if mode is not supported */
@@ -4941,7 +4896,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- (offset + len > i_size_read(inode) ||
+ (offset + len > inode->i_size ||
offset + len > EXT4_I(inode)->i_disksize)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
@@ -5268,7 +5223,7 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
{
int depth, err = 0;
struct ext4_extent *ex_start, *ex_last;
- bool update = 0;
+ bool update = false;
depth = path->p_depth;
while (depth >= 0) {
@@ -5284,7 +5239,7 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
goto out;
if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
- update = 1;
+ update = true;
while (ex_start <= ex_last) {
if (SHIFT == SHIFT_LEFT) {
@@ -5472,7 +5427,7 @@ out:
* This implements fallocate's collapse range functionality for ext4.
* Returns 0 on success and a non-zero value on error.
*/
-int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t punch_start, punch_stop;
@@ -5489,12 +5444,8 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return -EOPNOTSUPP;
- /* Collapse range works only on fs block size aligned offsets. */
- if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
- len & (EXT4_CLUSTER_SIZE(sb) - 1))
- return -EINVAL;
-
- if (!S_ISREG(inode->i_mode))
+ /* Collapse range works only on fs cluster size aligned regions. */
+ if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
return -EINVAL;
trace_ext4_collapse_range(inode, offset, len);
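The rewritten alignment check folds two tests into one: for a power-of-two cluster size, offset and len are both aligned exactly when their bitwise OR is, because the OR has a low bit set iff either operand does. A standalone sketch of the idiom (IS_ALIGNED is redefined here for userspace; the kernel supplies its own in <linux/kernel.h>):

#include <assert.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)      /* userspace stand-in */

int main(void)
{
        unsigned int cluster = 4096;    /* must be a power of two */

        assert(IS_ALIGNED(8192u | 4096u, cluster));     /* both aligned */
        assert(!IS_ALIGNED(8192u | 512u, cluster));     /* len unaligned */
        assert(!IS_ALIGNED(100u | 4096u, cluster));     /* offset unaligned */
        return 0;
}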
@@ -5514,7 +5465,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
* There is no need to let the collapse range overlap EOF, since that
* case is effectively a truncate operation
*/
- if (offset + len >= i_size_read(inode)) {
+ if (offset + len >= inode->i_size) {
ret = -EINVAL;
goto out_mutex;
}
@@ -5592,7 +5543,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
goto out_stop;
}
- new_size = i_size_read(inode) - len;
+ new_size = inode->i_size - len;
i_size_write(inode, new_size);
EXT4_I(inode)->i_disksize = new_size;
@@ -5620,7 +5571,7 @@ out_mutex:
* by len bytes.
* Returns 0 on success, error otherwise.
*/
-int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
struct super_block *sb = inode->i_sb;
handle_t *handle;
@@ -5639,14 +5590,10 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return -EOPNOTSUPP;
- /* Insert range works only on fs block size aligned offsets. */
- if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
- len & (EXT4_CLUSTER_SIZE(sb) - 1))
+ /* Insert range works only on fs cluster size aligned regions. */
+ if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
return -EINVAL;
- if (!S_ISREG(inode->i_mode))
- return -EOPNOTSUPP;
-
trace_ext4_insert_range(inode, offset, len);
offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
@@ -5666,14 +5613,14 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
goto out_mutex;
}
- /* Check for wrap through zero */
- if (inode->i_size + len > inode->i_sb->s_maxbytes) {
+ /* Check whether the maximum file size would be exceeded */
+ if (len > inode->i_sb->s_maxbytes - inode->i_size) {
ret = -EFBIG;
goto out_mutex;
}
- /* Offset should be less than i_size */
- if (offset >= i_size_read(inode)) {
+ /* Offset must be less than i_size */
+ if (offset >= inode->i_size) {
ret = -EINVAL;
goto out_mutex;
}
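The reworked size check above sidesteps integer overflow: i_size + len can wrap past the maximum representable value and make an oversized request look small, while len > s_maxbytes - i_size cannot overflow because i_size never exceeds s_maxbytes. A compact unsigned-arithmetic demonstration (the kernel uses signed loff_t, but the wraparound hazard is the same):

#include <stdio.h>

int main(void)
{
        unsigned long long max = ~0ULL;         /* stand-in for s_maxbytes */
        unsigned long long size = max - 10;     /* current i_size */
        unsigned long long len = 100;           /* requested length */

        /*
         * Naive form: size + len wraps around to 89, which is <= max,
         * so the over-limit request goes undetected.
         */
        printf("naive check fires: %d\n", size + len > max);    /* 0 */

        /* Rearranged form: never overflows because size <= max. */
        printf("safe check fires:  %d\n", len > max - size);    /* 1 */
        return 0;
}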
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index 825313c59752..4ec30a798260 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -209,6 +209,12 @@ static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es)
return es->es_pblk & ~ES_MASK;
}
+static inline ext4_fsblk_t ext4_es_show_pblock(struct extent_status *es)
+{
+ ext4_fsblk_t pblock = ext4_es_pblock(es);
+ return pblock == ~ES_MASK ? 0 : pblock;
+}
+
static inline void ext4_es_store_pblock(struct extent_status *es,
ext4_fsblk_t pb)
{
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6a7293a5cda2..5f225881176b 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -88,9 +88,10 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
- if (!inode_trylock_shared(inode)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock_shared(inode))
return -EAGAIN;
+ } else {
inode_lock_shared(inode);
}
/*
@@ -165,19 +166,25 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
* threads are at work on the same unwritten block, they must be synchronized
* or one thread will zero the other's data, causing corruption.
*/
-static int
-ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
+static bool
+ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
struct super_block *sb = inode->i_sb;
- int blockmask = sb->s_blocksize - 1;
-
- if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
- return 0;
+ unsigned long blockmask = sb->s_blocksize - 1;
if ((pos | iov_iter_alignment(from)) & blockmask)
- return 1;
+ return true;
- return 0;
+ return false;
+}
+
+static bool
+ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
+{
+ if (offset + len > i_size_read(inode) ||
+ offset + len > EXT4_I(inode)->i_disksize)
+ return true;
+ return false;
}
/* Is IO overwriting allocated and initialized blocks? */
@@ -203,7 +210,8 @@ static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}
-static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
+ struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
@@ -227,11 +235,21 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
}
+ return iov_iter_count(from);
+}
+
+static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+ ssize_t ret, count;
+
+ count = ext4_generic_write_checks(iocb, from);
+ if (count <= 0)
+ return count;
+
ret = file_modified(iocb->ki_filp);
if (ret)
return ret;
-
- return iov_iter_count(from);
+ return count;
}
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
@@ -363,62 +381,137 @@ static const struct iomap_dio_ops ext4_dio_write_ops = {
.end_io = ext4_dio_write_end_io,
};
+/*
+ * The intention here is to start with the shared lock acquired and then see
+ * if any condition requires an exclusive inode lock. If so, we restart the
+ * whole operation by releasing the shared lock and acquiring the exclusive
+ * one.
+ *
+ * - For unaligned IO we never take the shared lock, as it may cause data
+ * corruption when two unaligned IOs try to modify the same block, e.g.
+ * while zeroing.
+ *
+ * - For the extending-write case we don't take the shared lock, since it
+ * requires updating i_disksize and/or orphan handling under the exclusive
+ * lock.
+ *
+ * - Shared locking is therefore mostly limited to overwrites; otherwise we
+ * switch to the exclusive i_rwsem lock.
+ */
+static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
+ bool *ilock_shared, bool *extend)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ loff_t offset;
+ size_t count;
+ ssize_t ret;
+
+restart:
+ ret = ext4_generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out;
+
+ offset = iocb->ki_pos;
+ count = ret;
+ if (ext4_extending_io(inode, offset, count))
+ *extend = true;
+ /*
+ * Determine whether the IO operation will overwrite allocated
+ * and initialized blocks.
+ * We need exclusive i_rwsem for changing security info
+ * in file_modified().
+ */
+ if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
+ !ext4_overwrite_io(inode, offset, count))) {
+ inode_unlock_shared(inode);
+ *ilock_shared = false;
+ inode_lock(inode);
+ goto restart;
+ }
+
+ ret = file_modified(file);
+ if (ret < 0)
+ goto out;
+
+ return count;
+out:
+ if (*ilock_shared)
+ inode_unlock_shared(inode);
+ else
+ inode_unlock(inode);
+ return ret;
+}
+
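ext4_dio_write_checks() is an instance of optimistic lock selection: take the cheap shared lock, validate the conditions that make shared mode safe, and restart under the exclusive lock when they fail, since a reader-writer lock cannot be upgraded atomically. A stripped-down sketch of the same shape, using a POSIX rwlock as a stand-in for i_rwsem and a placeholder needs_exclusive() for the overwrite/extend tests:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* placeholder for "extending, unaligned, or not a pure overwrite" */
static bool needs_exclusive(void)
{
        return false;
}

static void dio_write(void)
{
        bool shared = true;

        pthread_rwlock_rdlock(&rwsem);
restart:
        if (shared && needs_exclusive()) {
                /*
                 * No atomic upgrade exists: drop shared, take exclusive,
                 * and re-check, since the world may have changed while
                 * the lock was not held.
                 */
                pthread_rwlock_unlock(&rwsem);
                shared = false;
                pthread_rwlock_wrlock(&rwsem);
                goto restart;
        }

        /* ... issue the IO under the (shared or exclusive) lock ... */

        pthread_rwlock_unlock(&rwsem);
}

int main(void)
{
        dio_write();
        return 0;
}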
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
ssize_t ret;
- size_t count;
- loff_t offset;
handle_t *handle;
struct inode *inode = file_inode(iocb->ki_filp);
- bool extend = false, overwrite = false, unaligned_aio = false;
+ loff_t offset = iocb->ki_pos;
+ size_t count = iov_iter_count(from);
+ const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
+ bool extend = false, unaligned_io = false;
+ bool ilock_shared = true;
+
+ /*
+ * We initially start with the shared inode lock, unless it is
+ * unaligned IO, which needs the exclusive lock anyway.
+ */
+ if (ext4_unaligned_io(inode, from, offset)) {
+ unaligned_io = true;
+ ilock_shared = false;
+ }
+ /*
+ * Quick check here without any i_rwsem lock to see if it is extending
+ * IO. A more reliable check is done in ext4_dio_write_checks() with
+ * proper locking in place.
+ */
+ if (offset + count > i_size_read(inode))
+ ilock_shared = false;
if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!inode_trylock(inode))
- return -EAGAIN;
+ if (ilock_shared) {
+ if (!inode_trylock_shared(inode))
+ return -EAGAIN;
+ } else {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ }
} else {
- inode_lock(inode);
+ if (ilock_shared)
+ inode_lock_shared(inode);
+ else
+ inode_lock(inode);
}
+ /* Fallback to buffered I/O if the inode does not support direct I/O. */
if (!ext4_dio_supported(inode)) {
- inode_unlock(inode);
- /*
- * Fallback to buffered I/O if the inode does not support
- * direct I/O.
- */
+ if (ilock_shared)
+ inode_unlock_shared(inode);
+ else
+ inode_unlock(inode);
return ext4_buffered_write_iter(iocb, from);
}
- ret = ext4_write_checks(iocb, from);
- if (ret <= 0) {
- inode_unlock(inode);
+ ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
+ if (ret <= 0)
return ret;
- }
- /*
- * Unaligned asynchronous direct I/O must be serialized among each
- * other as the zeroing of partial blocks of two competing unaligned
- * asynchronous direct I/O writes can result in data corruption.
- */
offset = iocb->ki_pos;
- count = iov_iter_count(from);
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
- !is_sync_kiocb(iocb) && ext4_unaligned_aio(inode, from, offset)) {
- unaligned_aio = true;
- inode_dio_wait(inode);
- }
+ count = ret;
/*
- * Determine whether the I/O will overwrite allocated and initialized
- * blocks. If so, check to see whether it is possible to take the
- * dioread_nolock path.
+ * Unaligned direct IO must be serialized against other unaligned IO, as
+ * the zeroing of partial blocks by two competing unaligned IOs can
+ * result in data corruption.
+ *
+ * So we make sure no unaligned IO is in flight. For IOs where we need
+ * not wait (such as unaligned non-AIO DIO), the inode_dio_wait() below
+ * may be a no-op anyway, since we start with the exclusive lock.
*/
- if (!unaligned_aio && ext4_overwrite_io(inode, offset, count) &&
- ext4_should_dioread_nolock(inode)) {
- overwrite = true;
- downgrade_write(&inode->i_rwsem);
- }
+ if (unaligned_io)
+ inode_dio_wait(inode);
- if (offset + count > EXT4_I(inode)->i_disksize) {
+ if (extend) {
handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
@@ -431,18 +524,19 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
- extend = true;
ext4_journal_stop(handle);
}
- ret = iomap_dio_rw(iocb, from, &ext4_iomap_ops, &ext4_dio_write_ops,
- is_sync_kiocb(iocb) || unaligned_aio || extend);
+ if (ilock_shared)
+ iomap_ops = &ext4_iomap_overwrite_ops;
+ ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
+ is_sync_kiocb(iocb) || unaligned_io || extend);
if (extend)
ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
- if (overwrite)
+ if (ilock_shared)
inode_unlock_shared(inode);
else
inode_unlock(inode);
@@ -487,9 +581,10 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
bool extend = false;
struct inode *inode = file_inode(iocb->ki_filp);
- if (!inode_trylock(inode)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
return -EAGAIN;
+ } else {
inode_lock(inode);
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 8ca4a23129aa..c66e8f9451a2 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -94,7 +94,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
goto verified;
blk = ext4_inode_bitmap(sb, desc);
if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
- EXT4_INODES_PER_GROUP(sb) / 8)) {
+ EXT4_INODES_PER_GROUP(sb) / 8) ||
+ ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_CRC)) {
ext4_unlock_group(sb, block_group);
ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
"inode_bitmap = %llu", block_group, blk);
@@ -192,8 +193,10 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
get_bh(bh);
submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
+ ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
if (!buffer_uptodate(bh)) {
put_bh(bh);
+ ext4_set_errno(sb, EIO);
ext4_error(sb, "Cannot read inode bitmap - "
"block_group = %u, inode_bitmap = %llu",
block_group, bitmap_blk);
@@ -1223,6 +1226,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ ext4_set_errno(sb, -err);
ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
ino, err);
return inode;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 3a4ab70fe9e0..569fc68e8975 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -660,32 +660,6 @@ out:
}
/*
- * Calculate the number of metadata blocks need to reserve
- * to allocate a new block at @lblocks for non extent file based file
- */
-int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
- int blk_bits;
-
- if (lblock < EXT4_NDIR_BLOCKS)
- return 0;
-
- lblock -= EXT4_NDIR_BLOCKS;
-
- if (ei->i_da_metadata_calc_len &&
- (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
- ei->i_da_metadata_calc_len++;
- return 0;
- }
- ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
- ei->i_da_metadata_calc_len = 1;
- blk_bits = order_base_2(lblock);
- return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
-}
-
-/*
* Calculate number of indirect blocks touched by mapping @nrblocks logically
* contiguous blocks
*/
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 2fec62d764fa..fad82d08fca5 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -98,6 +98,7 @@ int ext4_get_max_inline_size(struct inode *inode)
error = ext4_get_inode_loc(inode, &iloc);
if (error) {
+ ext4_set_errno(inode->i_sb, -error);
ext4_error_inode(inode, __func__, __LINE__, 0,
"can't get inode location %lu",
inode->i_ino);
@@ -849,7 +850,7 @@ out:
/*
* Prepare the write for the inline data.
- * If the the data can be written into the inode, we just read
+ * If the data can be written into the inode, we just read
* the page and make it uptodate, and start the journal.
* Otherwise read the page, make it dirty so that it can be
* handled in writepages() (the i_disksize update is left to the
@@ -1761,6 +1762,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
err = ext4_get_inode_loc(dir, &iloc);
if (err) {
+ ext4_set_errno(dir->i_sb, -err);
EXT4_ERROR_INODE(dir, "error %d getting inode %lu block",
err, dir->i_ino);
return true;
diff --git a/fs/ext4/inode-test.c b/fs/ext4/inode-test.c
index bbce1c328d85..d62d802c9c12 100644
--- a/fs/ext4/inode-test.c
+++ b/fs/ext4/inode-test.c
@@ -269,4 +269,6 @@ static struct kunit_suite ext4_inode_test_suite = {
.test_cases = ext4_inode_test_cases,
};
-kunit_test_suite(ext4_inode_test_suite);
+kunit_test_suites(&ext4_inode_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 629a25d999f0..e60aca791d3f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -48,8 +48,6 @@
#include <trace/events/ext4.h>
-#define MPAGE_DA_EXTENT_TAIL 0x01
-
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
@@ -271,6 +269,7 @@ void ext4_evict_inode(struct inode *inode)
if (inode->i_blocks) {
err = ext4_truncate(inode);
if (err) {
+ ext4_set_errno(inode->i_sb, -err);
ext4_error(inode->i_sb,
"couldn't truncate inode %lu (err %d)",
inode->i_ino, err);
@@ -402,7 +401,7 @@ int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
{
int ret;
- if (IS_ENCRYPTED(inode))
+ if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
return fscrypt_zeroout_range(inode, lblk, pblk, len);
ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
@@ -2478,10 +2477,12 @@ update_disksize:
EXT4_I(inode)->i_disksize = disksize;
up_write(&EXT4_I(inode)->i_data_sem);
err2 = ext4_mark_inode_dirty(handle, inode);
- if (err2)
+ if (err2) {
+ ext4_set_errno(inode->i_sb, -err2);
ext4_error(inode->i_sb,
"Failed to mark inode %lu dirty",
inode->i_ino);
+ }
if (!err)
err = err2;
}
@@ -2866,7 +2867,7 @@ static int ext4_dax_writepages(struct address_space *mapping,
percpu_down_read(&sbi->s_journal_flag_rwsem);
trace_ext4_writepages(inode, wbc);
- ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
+ ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
percpu_up_read(&sbi->s_journal_flag_rwsem);
@@ -3448,6 +3449,22 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
return 0;
}
+static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
+ loff_t length, unsigned flags, struct iomap *iomap,
+ struct iomap *srcmap)
+{
+ int ret;
+
+ /*
+ * Even for writes we don't need to allocate blocks, so just pretend
+ * we are reading to avoid the overhead of starting a transaction.
+ */
+ flags &= ~IOMAP_WRITE;
+ ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
+ WARN_ON_ONCE(iomap->type != IOMAP_MAPPED);
+ return ret;
+}
+
static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
ssize_t written, unsigned flags, struct iomap *iomap)
{
@@ -3469,6 +3486,11 @@ const struct iomap_ops ext4_iomap_ops = {
.iomap_end = ext4_iomap_end,
};
+const struct iomap_ops ext4_iomap_overwrite_ops = {
+ .iomap_begin = ext4_iomap_overwrite_begin,
+ .iomap_end = ext4_iomap_end,
+};
+
static bool ext4_iomap_is_delalloc(struct inode *inode,
struct ext4_map_blocks *map)
{
@@ -3701,8 +3723,12 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
- WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
- page, blocksize, bh_offset(bh)));
+ err = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+ bh_offset(bh));
+ if (err) {
+ clear_buffer_uptodate(bh);
+ goto unlock;
+ }
}
}
if (ext4_should_journal_data(inode)) {
@@ -3912,9 +3938,6 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
unsigned int credits;
int ret = 0;
- if (!S_ISREG(inode->i_mode))
- return -EOPNOTSUPP;
-
trace_ext4_punch_hole(inode, offset, length, 0);
ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
@@ -4240,6 +4263,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
bh = sb_getblk(sb, block);
if (unlikely(!bh))
return -ENOMEM;
+ if (ext4_simulate_fail(sb, EXT4_SIM_INODE_EIO))
+ goto simulate_eio;
if (!buffer_uptodate(bh)) {
lock_buffer(bh);
@@ -4338,6 +4363,8 @@ make_io:
blk_finish_plug(&plug);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
+ simulate_eio:
+ ext4_set_errno(inode->i_sb, EIO);
EXT4_ERROR_INODE_BLOCK(inode, block,
"unable to read itable block");
brelse(bh);
@@ -4551,7 +4578,9 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
sizeof(gen));
}
- if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
+ if (!ext4_inode_csum_verify(inode, raw_inode, ei) ||
+ ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) {
+ ext4_set_errno(inode->i_sb, EFSBADCRC);
ext4_error_inode(inode, function, line, 0,
"iget: checksum invalid");
ret = -EFSBADCRC;
@@ -4615,6 +4644,18 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ret = -EFSCORRUPTED;
goto bad_inode;
}
+ /*
+ * If dir_index is not enabled but there is a dir with the INDEX flag
+ * set, we'd normally treat its htree data as empty space. But with
+ * metadata checksumming, doing so would corrupt the checksums, so
+ * forbid it.
+ */
+ if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
+ ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
+ ext4_error_inode(inode, function, line, 0,
+ "iget: Dir with htree data on filesystem without dir_index feature.");
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
@@ -5090,6 +5131,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
sync_dirty_buffer(iloc.bh);
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
+ ext4_set_errno(inode->i_sb, EIO);
EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
"IO error syncing inode");
err = -EIO;
@@ -5368,7 +5410,8 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int flags;
- if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
+ if ((request_mask & STATX_BTIME) &&
+ EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
stat->result_mask |= STATX_BTIME;
stat->btime.tv_sec = ei->i_crtime.tv_sec;
stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index e8870fff8224..a0ec750018dd 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -1377,6 +1377,8 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case EXT4_IOC_CLEAR_ES_CACHE:
case EXT4_IOC_GETSTATE:
case EXT4_IOC_GET_ES_CACHE:
+ case EXT4_IOC_FSGETXATTR:
+ case EXT4_IOC_FSSETXATTR:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index a3e2767bdf2f..f64838187559 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3895,6 +3895,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (IS_ERR(bitmap_bh)) {
err = PTR_ERR(bitmap_bh);
+ ext4_set_errno(sb, -err);
ext4_error(sb, "Error %d reading block bitmap for %u",
err, group);
return 0;
@@ -4063,6 +4064,7 @@ repeat:
err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
GFP_NOFS|__GFP_NOFAIL);
if (err) {
+ ext4_set_errno(sb, -err);
ext4_error(sb, "Error %d loading buddy information for %u",
err, group);
continue;
@@ -4071,6 +4073,7 @@ repeat:
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (IS_ERR(bitmap_bh)) {
err = PTR_ERR(bitmap_bh);
+ ext4_set_errno(sb, -err);
ext4_error(sb, "Error %d reading block bitmap for %u",
err, group);
ext4_mb_unload_buddy(&e4b);
@@ -4325,6 +4328,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
GFP_NOFS|__GFP_NOFAIL);
if (err) {
+ ext4_set_errno(sb, -err);
ext4_error(sb, "Error %d loading buddy information for %u",
err, group);
continue;
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 2305b4374fd3..87f7551c5132 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -120,10 +120,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
{
__ext4_warning(sb, function, line, "%s", msg);
__ext4_warning(sb, function, line,
- "MMP failure info: last update time: %llu, last update "
- "node: %s, last update device: %s",
- (long long unsigned int) le64_to_cpu(mmp->mmp_time),
- mmp->mmp_nodename, mmp->mmp_bdevname);
+ "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
+ (unsigned long long)le64_to_cpu(mmp->mmp_time),
+ (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
+ (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
}
/*
@@ -154,6 +154,7 @@ static int kmmpd(void *data)
mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
EXT4_MMP_MIN_CHECK_INTERVAL);
mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+ BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
bdevname(bh->b_bdev, mmp->mmp_bdevname);
memcpy(mmp->mmp_nodename, init_utsname()->nodename,
@@ -173,8 +174,10 @@ static int kmmpd(void *data)
* (s_mmp_update_interval * 60) seconds.
*/
if (retval) {
- if ((failed_writes % 60) == 0)
+ if ((failed_writes % 60) == 0) {
+ ext4_set_errno(sb, -retval);
ext4_error(sb, "Error writing to MMP block");
+ }
failed_writes++;
}
@@ -205,6 +208,7 @@ static int kmmpd(void *data)
retval = read_mmp_block(sb, &bh_check, mmp_block);
if (retval) {
+ ext4_set_errno(sb, -retval);
ext4_error(sb, "error reading MMP data: %d",
retval);
goto exit_thread;
@@ -218,6 +222,7 @@ static int kmmpd(void *data)
"Error while updating MMP info. "
"The filesystem seems to have been"
" multiply mounted.");
+ ext4_set_errno(sb, EBUSY);
ext4_error(sb, "abort");
put_bh(bh_check);
retval = -EBUSY;
@@ -375,7 +380,8 @@ skip:
/*
* Start a kernel thread to update the MMP block periodically.
*/
- EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
+ EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
+ (int)sizeof(mmp->mmp_bdevname),
bdevname(bh->b_bdev,
mmp->mmp_bdevname));
if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
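The switch to "%.*s" above matters because mmp_nodename and mmp_bdevname are fixed-size on-disk arrays with no guaranteed NUL terminator, so a plain "%s" could read past the array. The precision argument caps how many bytes printf may examine. A tiny illustration with an invented struct layout:

#include <stdio.h>
#include <string.h>

struct mmp_like {
        char bdevname[8];       /* hypothetical size, no NUL guaranteed */
};

int main(void)
{
        struct mmp_like m;

        memcpy(m.bdevname, "sda1sda1", 8);      /* fills the array, no NUL */
        /* "%.*s" stops after sizeof(m.bdevname) bytes even without a NUL */
        printf("device: %.*s\n", (int)sizeof(m.bdevname), m.bdevname);
        return 0;
}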
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 1cb42d940784..ceff4b4b1877 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -109,7 +109,10 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
struct ext4_dir_entry *dirent;
int is_dx_block = 0;
- bh = ext4_bread(NULL, inode, block, 0);
+ if (ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_EIO))
+ bh = ERR_PTR(-EIO);
+ else
+ bh = ext4_bread(NULL, inode, block, 0);
if (IS_ERR(bh)) {
__ext4_warning(inode->i_sb, func, line,
"inode #%lu: lblock %lu: comm %s: "
@@ -153,9 +156,11 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
* caller is sure it should be an index block.
*/
if (is_dx_block && type == INDEX) {
- if (ext4_dx_csum_verify(inode, dirent))
+ if (ext4_dx_csum_verify(inode, dirent) &&
+ !ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_CRC))
set_buffer_verified(bh);
else {
+ ext4_set_errno(inode->i_sb, EFSBADCRC);
ext4_error_inode(inode, func, line, block,
"Directory index failed checksum");
brelse(bh);
@@ -163,9 +168,11 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
}
}
if (!is_dx_block) {
- if (ext4_dirblock_csum_verify(inode, bh))
+ if (ext4_dirblock_csum_verify(inode, bh) &&
+ !ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_CRC))
set_buffer_verified(bh);
else {
+ ext4_set_errno(inode->i_sb, EFSBADCRC);
ext4_error_inode(inode, func, line, block,
"Directory block failed checksum");
brelse(bh);
@@ -1002,7 +1009,6 @@ static int htree_dirblock_to_tree(struct file *dir_file,
top = (struct ext4_dir_entry_2 *) ((char *) de +
dir->i_sb->s_blocksize -
EXT4_DIR_REC_LEN(0));
-#ifdef CONFIG_FS_ENCRYPTION
/* Check if the directory is encrypted */
if (IS_ENCRYPTED(dir)) {
err = fscrypt_get_encryption_info(dir);
@@ -1017,7 +1023,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
return err;
}
}
-#endif
+
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
bh->b_data, bh->b_size,
@@ -1065,9 +1071,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
}
errout:
brelse(bh);
-#ifdef CONFIG_FS_ENCRYPTION
fscrypt_fname_free_buffer(&fname_crypto_str);
-#endif
return count;
}
@@ -1527,6 +1531,7 @@ restart:
goto next;
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
+ ext4_set_errno(sb, EIO);
EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
(unsigned long) block);
brelse(bh);
@@ -1537,6 +1542,7 @@ restart:
!is_dx_internal_node(dir, block,
(struct ext4_dir_entry *)bh->b_data) &&
!ext4_dirblock_csum_verify(dir, bh)) {
+ ext4_set_errno(sb, EFSBADCRC);
EXT4_ERROR_INODE(dir, "checksumming directory "
"block %lu", (unsigned long)block);
brelse(bh);
@@ -2207,6 +2213,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
retval = ext4_dx_add_entry(handle, &fname, dir, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
goto out;
+ /* Can we just ignore htree data? */
+ if (ext4_has_metadata_csum(sb)) {
+ EXT4_ERROR_INODE(dir,
+ "Directory has corrupted htree index.");
+ retval = -EFSCORRUPTED;
+ goto out;
+ }
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 24aeedb8fc75..68b39e75446a 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -512,17 +512,26 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
gfp_t gfp_flags = GFP_NOFS;
unsigned int enc_bytes = round_up(len, i_blocksize(inode));
+ /*
+ * Since bounce page allocation uses a mempool, we can only use
+ * a waiting mask (i.e. request guaranteed allocation) on the
+ * first page of the bio. Otherwise it can deadlock.
+ */
+ if (io->io_bio)
+ gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
retry_encrypt:
bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
0, gfp_flags);
if (IS_ERR(bounce_page)) {
ret = PTR_ERR(bounce_page);
- if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
- if (io->io_bio) {
+ if (ret == -ENOMEM &&
+ (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
+ gfp_flags = GFP_NOFS;
+ if (io->io_bio)
ext4_io_submit(io);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- }
- gfp_flags |= __GFP_NOFAIL;
+ else
+ gfp_flags |= __GFP_NOFAIL;
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry_encrypt;
}
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index fef7755300c3..c1769afbf799 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -57,6 +57,7 @@ enum bio_post_read_step {
STEP_INITIAL = 0,
STEP_DECRYPT,
STEP_VERITY,
+ STEP_MAX,
};
struct bio_post_read_ctx {
@@ -106,10 +107,22 @@ static void verity_work(struct work_struct *work)
{
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
+ struct bio *bio = ctx->bio;
- fsverity_verify_bio(ctx->bio);
+ /*
+ * fsverity_verify_bio() may call readpages() again, and although verity
+ * will be disabled for that, decryption may still be needed, causing
+ * another bio_post_read_ctx to be allocated. So to guarantee that
+ * mempool_alloc() never deadlocks, we must free the current ctx first.
+ * This is safe because verity is the last post-read step.
+ */
+ BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
+ mempool_free(ctx, bio_post_read_ctx_pool);
+ bio->bi_private = NULL;
- bio_post_read_processing(ctx);
+ fsverity_verify_bio(bio);
+
+ __read_end_io(bio);
}
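Freeing the ctx before fsverity_verify_bio() is only safe while verity remains the last post-read step, and the BUILD_BUG_ON pins that assumption at compile time so a later reordering of the enum breaks the build instead of reintroducing the mempool deadlock. The kernel macro behaves like C11 static_assert; a userspace rendering of the same check:

#include <assert.h>

enum bio_post_read_step {
        STEP_INITIAL = 0,
        STEP_DECRYPT,
        STEP_VERITY,
        STEP_MAX,
};

/* fails the build if a step is ever added after STEP_VERITY */
static_assert(STEP_VERITY + 1 == STEP_MAX,
              "verity must remain the last post-read step");

int main(void)
{
        return 0;
}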
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
@@ -176,12 +189,11 @@ static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
-static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
- struct bio *bio,
- pgoff_t first_idx)
+static void ext4_set_bio_post_read_ctx(struct bio *bio,
+ const struct inode *inode,
+ pgoff_t first_idx)
{
unsigned int post_read_steps = 0;
- struct bio_post_read_ctx *ctx = NULL;
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
post_read_steps |= 1 << STEP_DECRYPT;
@@ -190,14 +202,14 @@ static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
post_read_steps |= 1 << STEP_VERITY;
if (post_read_steps) {
- ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ /* Due to the mempool, this never fails. */
+ struct bio_post_read_ctx *ctx =
+ mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
+
ctx->bio = bio;
ctx->enabled_steps = post_read_steps;
bio->bi_private = ctx;
}
- return ctx;
}
static inline loff_t ext4_readpage_limit(struct inode *inode)
@@ -358,24 +370,16 @@ int ext4_mpage_readpages(struct address_space *mapping,
bio = NULL;
}
if (bio == NULL) {
- struct bio_post_read_ctx *ctx;
-
/*
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
*/
bio = bio_alloc(GFP_KERNEL,
min_t(int, nr_pages, BIO_MAX_PAGES));
- ctx = get_bio_post_read_ctx(inode, bio, page->index);
- if (IS_ERR(ctx)) {
- bio_put(bio);
- bio = NULL;
- goto set_error_page;
- }
+ ext4_set_bio_post_read_ctx(bio, inode, page->index);
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
- bio->bi_private = ctx;
bio_set_op_attrs(bio, REQ_OP_READ,
is_readahead ? REQ_RAHEAD : 0);
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index a8c0f2b5b6e1..86a2500ed292 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -824,9 +824,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
if (unlikely(err))
goto errout;
- n_group_desc = ext4_kvmalloc((gdb_num + 1) *
- sizeof(struct buffer_head *),
- GFP_NOFS);
+ n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
+ GFP_KERNEL);
if (!n_group_desc) {
err = -ENOMEM;
ext4_warning(sb, "not enough memory for %lu groups",
@@ -900,9 +899,8 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
gdb_bh = ext4_sb_bread(sb, gdblock, 0);
if (IS_ERR(gdb_bh))
return PTR_ERR(gdb_bh);
- n_group_desc = ext4_kvmalloc((gdb_num + 1) *
- sizeof(struct buffer_head *),
- GFP_NOFS);
+ n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
+ GFP_KERNEL);
if (!n_group_desc) {
brelse(gdb_bh);
err = -ENOMEM;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2937a8873fe1..f464dff09774 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -154,7 +154,7 @@ ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
if (bh == NULL)
return ERR_PTR(-ENOMEM);
- if (buffer_uptodate(bh))
+ if (ext4_buffer_uptodate(bh))
return bh;
ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
wait_on_buffer(bh);
@@ -204,26 +204,6 @@ void ext4_superblock_csum_set(struct super_block *sb)
es->s_checksum = ext4_superblock_csum(sb, es);
}
-void *ext4_kvmalloc(size_t size, gfp_t flags)
-{
- void *ret;
-
- ret = kmalloc(size, flags | __GFP_NOWARN);
- if (!ret)
- ret = __vmalloc(size, flags, PAGE_KERNEL);
- return ret;
-}
-
-void *ext4_kvzalloc(size_t size, gfp_t flags)
-{
- void *ret;
-
- ret = kzalloc(size, flags | __GFP_NOWARN);
- if (!ret)
- ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
- return ret;
-}
-
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
struct ext4_group_desc *bg)
{
@@ -367,6 +347,8 @@ static void __save_error_info(struct super_block *sb, const char *func,
ext4_update_tstamp(es, s_last_error_time);
strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
es->s_last_error_line = cpu_to_le32(line);
+ if (es->s_last_error_errcode == 0)
+ es->s_last_error_errcode = EXT4_ERR_EFSCORRUPTED;
if (!es->s_first_error_time) {
es->s_first_error_time = es->s_last_error_time;
es->s_first_error_time_hi = es->s_last_error_time_hi;
@@ -375,6 +357,7 @@ static void __save_error_info(struct super_block *sb, const char *func,
es->s_first_error_line = cpu_to_le32(line);
es->s_first_error_ino = es->s_last_error_ino;
es->s_first_error_block = es->s_last_error_block;
+ es->s_first_error_errcode = es->s_last_error_errcode;
}
/*
* Start the daily error reporting function if it hasn't been
@@ -631,6 +614,66 @@ const char *ext4_decode_error(struct super_block *sb, int errno,
return errstr;
}
+void ext4_set_errno(struct super_block *sb, int err)
+{
+ if (err < 0)
+ err = -err;
+
+ switch (err) {
+ case EIO:
+ err = EXT4_ERR_EIO;
+ break;
+ case ENOMEM:
+ err = EXT4_ERR_ENOMEM;
+ break;
+ case EFSBADCRC:
+ err = EXT4_ERR_EFSBADCRC;
+ break;
+ case EFSCORRUPTED:
+ err = EXT4_ERR_EFSCORRUPTED;
+ break;
+ case ENOSPC:
+ err = EXT4_ERR_ENOSPC;
+ break;
+ case ENOKEY:
+ err = EXT4_ERR_ENOKEY;
+ break;
+ case EROFS:
+ err = EXT4_ERR_EROFS;
+ break;
+ case EFBIG:
+ err = EXT4_ERR_EFBIG;
+ break;
+ case EEXIST:
+ err = EXT4_ERR_EEXIST;
+ break;
+ case ERANGE:
+ err = EXT4_ERR_ERANGE;
+ break;
+ case EOVERFLOW:
+ err = EXT4_ERR_EOVERFLOW;
+ break;
+ case EBUSY:
+ err = EXT4_ERR_EBUSY;
+ break;
+ case ENOTDIR:
+ err = EXT4_ERR_ENOTDIR;
+ break;
+ case ENOTEMPTY:
+ err = EXT4_ERR_ENOTEMPTY;
+ break;
+ case ESHUTDOWN:
+ err = EXT4_ERR_ESHUTDOWN;
+ break;
+ case EFAULT:
+ err = EXT4_ERR_EFAULT;
+ break;
+ default:
+ err = EXT4_ERR_UNKNOWN;
+ }
+ EXT4_SB(sb)->s_es->s_last_error_errcode = err;
+}
+
/* __ext4_std_error decodes expected errors from journaling functions
* automatically and invokes the appropriate error response. */
@@ -655,6 +698,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
sb->s_id, function, line, errstr);
}
+ ext4_set_errno(sb, -errno);
save_error_info(sb, function, line);
ext4_handle_error(sb);
}
@@ -982,8 +1026,10 @@ static void ext4_put_super(struct super_block *sb)
aborted = is_journal_aborted(sbi->s_journal);
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
- if ((err < 0) && !aborted)
+ if ((err < 0) && !aborted) {
+ ext4_set_errno(sb, -err);
ext4_abort(sb, "Couldn't clean up the journal");
+ }
}
ext4_unregister_sysfs(sb);
@@ -1085,8 +1131,6 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_es_shk_nr = 0;
ei->i_es_shrink_lblk = 0;
ei->i_reserved_data_blocks = 0;
- ei->i_da_metadata_calc_len = 0;
- ei->i_da_metadata_calc_last_lblock = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
@@ -1548,6 +1592,7 @@ static const match_table_t tokens = {
{Opt_auto_da_alloc, "auto_da_alloc"},
{Opt_noauto_da_alloc, "noauto_da_alloc"},
{Opt_dioread_nolock, "dioread_nolock"},
+ {Opt_dioread_lock, "nodioread_nolock"},
{Opt_dioread_lock, "dioread_lock"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
@@ -2964,17 +3009,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
return 0;
}
-#ifndef CONFIG_QUOTA
- if (ext4_has_feature_quota(sb) && !readonly) {
+#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
+ if (!readonly && (ext4_has_feature_quota(sb) ||
+ ext4_has_feature_project(sb))) {
ext4_msg(sb, KERN_ERR,
- "Filesystem with quota feature cannot be mounted RDWR "
- "without CONFIG_QUOTA");
- return 0;
- }
- if (ext4_has_feature_project(sb) && !readonly) {
- ext4_msg(sb, KERN_ERR,
- "Filesystem with project quota feature cannot be mounted RDWR "
- "without CONFIG_QUOTA");
+ "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
return 0;
}
#endif /* CONFIG_QUOTA */
@@ -3720,6 +3759,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
set_opt(sb, NO_UID32);
/* xattr user namespace & acls are now defaulted on */
set_opt(sb, XATTR_USER);
+ set_opt(sb, DIOREAD_NOLOCK);
#ifdef CONFIG_EXT4_FS_POSIX_ACL
set_opt(sb, POSIX_ACL);
#endif
@@ -3768,6 +3808,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
+ blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+ if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+ blocksize > EXT4_MAX_BLOCK_SIZE) {
+ ext4_msg(sb, KERN_ERR,
+ "Unsupported filesystem blocksize %d (%d log_block_size)",
+ blocksize, le32_to_cpu(es->s_log_block_size));
+ goto failed_mount;
+ }
+
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
@@ -3785,6 +3834,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ext4_msg(sb, KERN_ERR,
"unsupported inode size: %d",
sbi->s_inode_size);
+ ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
goto failed_mount;
}
/*
@@ -3887,9 +3937,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
#endif
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
- printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
- "with data=journal disables delayed "
- "allocation and O_DIRECT support!\n");
+ printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, and O_DIRECT support!\n");
+ clear_opt(sb, DIOREAD_NOLOCK);
if (test_opt2(sb, EXPLICIT_DELALLOC)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"both data=journal and delalloc");
@@ -3988,14 +4037,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
goto failed_mount;
- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
- if (blocksize < EXT4_MIN_BLOCK_SIZE ||
- blocksize > EXT4_MAX_BLOCK_SIZE) {
- ext4_msg(sb, KERN_ERR,
- "Unsupported filesystem blocksize %d (%d log_block_size)",
- blocksize, le32_to_cpu(es->s_log_block_size));
- goto failed_mount;
- }
if (le32_to_cpu(es->s_log_block_size) >
(EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
ext4_msg(sb, KERN_ERR,
@@ -5540,9 +5581,12 @@ static int ext4_statfs_project(struct super_block *sb,
return PTR_ERR(dquot);
spin_lock(&dquot->dq_dqb_lock);
- limit = (dquot->dq_dqb.dqb_bsoftlimit ?
- dquot->dq_dqb.dqb_bsoftlimit :
- dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
+ limit = dquot->dq_dqb.dqb_bsoftlimit;
+ if (dquot->dq_dqb.dqb_bhardlimit &&
+ (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
+ limit = dquot->dq_dqb.dqb_bhardlimit;
+ limit >>= sb->s_blocksize_bits;
+
if (limit && buf->f_blocks > limit) {
curblock = (dquot->dq_dqb.dqb_curspace +
dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
@@ -5552,9 +5596,11 @@ static int ext4_statfs_project(struct super_block *sb,
(buf->f_blocks - curblock) : 0;
}
- limit = dquot->dq_dqb.dqb_isoftlimit ?
- dquot->dq_dqb.dqb_isoftlimit :
- dquot->dq_dqb.dqb_ihardlimit;
+ limit = dquot->dq_dqb.dqb_isoftlimit;
+ if (dquot->dq_dqb.dqb_ihardlimit &&
+ (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
+ limit = dquot->dq_dqb.dqb_ihardlimit;
+
if (limit && buf->f_files > limit) {
buf->f_files = limit;
buf->f_ffree =
@@ -5987,7 +6033,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
bh = ext4_bread(handle, inode, blk,
EXT4_GET_BLOCKS_CREATE |
EXT4_GET_BLOCKS_METADATA_NOFAIL);
- } while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) &&
+ } while (PTR_ERR(bh) == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries));
if (IS_ERR(bh))
return PTR_ERR(bh);
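The statfs changes above replace "soft limit if set, else hard limit" with the stricter of the two: zero means unlimited, and when both limits are set the smaller one governs. Distilled into a hypothetical helper (names are ours, not ext4's):

#include <stdio.h>

/* mirrors the limit selection logic in ext4_statfs_project */
static unsigned long long effective_limit(unsigned long long soft,
                                          unsigned long long hard)
{
        unsigned long long limit = soft;

        if (hard && (!limit || hard < limit))
                limit = hard;
        return limit;   /* 0 when both are 0, i.e. unlimited */
}

int main(void)
{
        printf("%llu\n", effective_limit(0, 100));      /* 100 */
        printf("%llu\n", effective_limit(150, 100));    /* 100 */
        printf("%llu\n", effective_limit(50, 100));     /* 50  */
        return 0;
}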
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index eb1efad0e20a..d218ebdafa4a 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -29,6 +29,10 @@ typedef enum {
attr_last_error_time,
attr_feature,
attr_pointer_ui,
+ attr_pointer_ul,
+ attr_pointer_u64,
+ attr_pointer_u8,
+ attr_pointer_string,
attr_pointer_atomic,
attr_journal_task,
} attr_id_t;
@@ -46,6 +50,7 @@ struct ext4_attr {
struct attribute attr;
short attr_id;
short attr_ptr;
+ unsigned short attr_size;
union {
int offset;
void *explicit_ptr;
@@ -154,12 +159,35 @@ static struct ext4_attr ext4_attr_##_name = { \
}, \
}
+#define EXT4_ATTR_STRING(_name,_mode,_size,_struct,_elname) \
+static struct ext4_attr ext4_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .attr_id = attr_pointer_string, \
+ .attr_size = _size, \
+ .attr_ptr = ptr_##_struct##_offset, \
+ .u = { \
+ .offset = offsetof(struct _struct, _elname),\
+ }, \
+}
+
#define EXT4_RO_ATTR_ES_UI(_name,_elname) \
EXT4_ATTR_OFFSET(_name, 0444, pointer_ui, ext4_super_block, _elname)
+#define EXT4_RO_ATTR_ES_U8(_name,_elname) \
+ EXT4_ATTR_OFFSET(_name, 0444, pointer_u8, ext4_super_block, _elname)
+
+#define EXT4_RO_ATTR_ES_U64(_name,_elname) \
+ EXT4_ATTR_OFFSET(_name, 0444, pointer_u64, ext4_super_block, _elname)
+
+#define EXT4_RO_ATTR_ES_STRING(_name,_elname,_size) \
+ EXT4_ATTR_STRING(_name, 0444, _size, ext4_super_block, _elname)
+
#define EXT4_RW_ATTR_SBI_UI(_name,_elname) \
EXT4_ATTR_OFFSET(_name, 0644, pointer_ui, ext4_sb_info, _elname)
+#define EXT4_RW_ATTR_SBI_UL(_name,_elname) \
+ EXT4_ATTR_OFFSET(_name, 0644, pointer_ul, ext4_sb_info, _elname)
+
#define EXT4_ATTR_PTR(_name,_mode,_id,_ptr) \
static struct ext4_attr ext4_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
@@ -194,7 +222,20 @@ EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.int
EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
+#ifdef CONFIG_EXT4_DEBUG
+EXT4_RW_ATTR_SBI_UL(simulate_fail, s_simulate_fail);
+#endif
EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
+EXT4_RO_ATTR_ES_U8(first_error_errcode, s_first_error_errcode);
+EXT4_RO_ATTR_ES_U8(last_error_errcode, s_last_error_errcode);
+EXT4_RO_ATTR_ES_UI(first_error_ino, s_first_error_ino);
+EXT4_RO_ATTR_ES_UI(last_error_ino, s_last_error_ino);
+EXT4_RO_ATTR_ES_U64(first_error_block, s_first_error_block);
+EXT4_RO_ATTR_ES_U64(last_error_block, s_last_error_block);
+EXT4_RO_ATTR_ES_UI(first_error_line, s_first_error_line);
+EXT4_RO_ATTR_ES_UI(last_error_line, s_last_error_line);
+EXT4_RO_ATTR_ES_STRING(first_error_func, s_first_error_func, 32);
+EXT4_RO_ATTR_ES_STRING(last_error_func, s_last_error_func, 32);
EXT4_ATTR(first_error_time, 0444, first_error_time);
EXT4_ATTR(last_error_time, 0444, last_error_time);
EXT4_ATTR(journal_task, 0444, journal_task);
@@ -225,9 +266,22 @@ static struct attribute *ext4_attrs[] = {
ATTR_LIST(msg_ratelimit_interval_ms),
ATTR_LIST(msg_ratelimit_burst),
ATTR_LIST(errors_count),
+ ATTR_LIST(first_error_ino),
+ ATTR_LIST(last_error_ino),
+ ATTR_LIST(first_error_block),
+ ATTR_LIST(last_error_block),
+ ATTR_LIST(first_error_line),
+ ATTR_LIST(last_error_line),
+ ATTR_LIST(first_error_func),
+ ATTR_LIST(last_error_func),
+ ATTR_LIST(first_error_errcode),
+ ATTR_LIST(last_error_errcode),
ATTR_LIST(first_error_time),
ATTR_LIST(last_error_time),
ATTR_LIST(journal_task),
+#ifdef CONFIG_EXT4_DEBUG
+ ATTR_LIST(simulate_fail),
+#endif
NULL,
};
ATTRIBUTE_GROUPS(ext4);
@@ -280,7 +334,7 @@ static void *calc_ptr(struct ext4_attr *a, struct ext4_sb_info *sbi)
static ssize_t __print_tstamp(char *buf, __le32 lo, __u8 hi)
{
- return snprintf(buf, PAGE_SIZE, "%lld",
+ return snprintf(buf, PAGE_SIZE, "%lld\n",
((time64_t)hi << 32) + le32_to_cpu(lo));
}
@@ -318,6 +372,30 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
else
return snprintf(buf, PAGE_SIZE, "%u\n",
*((unsigned int *) ptr));
+ case attr_pointer_ul:
+ if (!ptr)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%lu\n",
+ *((unsigned long *) ptr));
+ case attr_pointer_u8:
+ if (!ptr)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ *((unsigned char *) ptr));
+ case attr_pointer_u64:
+ if (!ptr)
+ return 0;
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ le64_to_cpup(ptr));
+ else
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ *((unsigned long long *) ptr));
+ case attr_pointer_string:
+ if (!ptr)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%.*s\n", a->attr_size,
+ (char *) ptr);
case attr_pointer_atomic:
if (!ptr)
return 0;
@@ -361,6 +439,14 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
else
*((unsigned int *) ptr) = t;
return len;
+ case attr_pointer_ul:
+ if (!ptr)
+ return 0;
+ ret = kstrtoul(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ *((unsigned long *) ptr) = t;
+ return len;
case attr_inode_readahead:
return inode_readahead_blks_store(sbi, buf, len);
case attr_trigger_test_error:
diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
index d0d8a9795dd6..dc5ec724d889 100644
--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -342,12 +342,55 @@ static int ext4_get_verity_descriptor(struct inode *inode, void *buf,
return desc_size;
}
+/*
+ * Prefetch some pages from the file's Merkle tree.
+ *
+ * This is basically a stripped-down version of __do_page_cache_readahead()
+ * which works on pages past i_size.
+ */
+static void ext4_merkle_tree_readahead(struct address_space *mapping,
+ pgoff_t start_index, unsigned long count)
+{
+ LIST_HEAD(pages);
+ unsigned int nr_pages = 0;
+ struct page *page;
+ pgoff_t index;
+ struct blk_plug plug;
+
+ for (index = start_index; index < start_index + count; index++) {
+ page = xa_load(&mapping->i_pages, index);
+ if (!page || xa_is_value(page)) {
+ page = __page_cache_alloc(readahead_gfp_mask(mapping));
+ if (!page)
+ break;
+ page->index = index;
+ list_add(&page->lru, &pages);
+ nr_pages++;
+ }
+ }
+ blk_start_plug(&plug);
+ ext4_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
+ blk_finish_plug(&plug);
+}
+
static struct page *ext4_read_merkle_tree_page(struct inode *inode,
- pgoff_t index)
+ pgoff_t index,
+ unsigned long num_ra_pages)
{
+ struct page *page;
+
index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;
- return read_mapping_page(inode->i_mapping, index, NULL);
+ page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
+ if (!page || !PageUptodate(page)) {
+ if (page)
+ put_page(page);
+ else if (num_ra_pages > 1)
+ ext4_merkle_tree_readahead(inode->i_mapping, index,
+ num_ra_pages);
+ page = read_mapping_page(inode->i_mapping, index, NULL);
+ }
+ return page;
}
static int ext4_write_merkle_tree_block(struct inode *inode, const void *buf,
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 8966a5439a22..8cac7d95c3ad 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1456,7 +1456,7 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
if (!ce)
return NULL;
- ea_data = ext4_kvmalloc(value_len, GFP_NOFS);
+ ea_data = kvmalloc(value_len, GFP_KERNEL);
if (!ea_data) {
mb_cache_entry_put(ea_inode_cache, ce);
return NULL;
@@ -2879,9 +2879,11 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
if (IS_ERR(bh)) {
error = PTR_ERR(bh);
- if (error == -EIO)
+ if (error == -EIO) {
+ ext4_set_errno(inode->i_sb, EIO);
EXT4_ERROR_INODE(inode, "block %llu read error",
EXT4_I(inode)->i_file_acl);
+ }
bh = NULL;
goto cleanup;
}
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 652fd2e2b23d..f0faada30f30 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -6,6 +6,7 @@ config F2FS_FS
select CRYPTO
select CRYPTO_CRC32
select F2FS_FS_XATTR if FS_ENCRYPTION
+ select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
help
F2FS is based on Log-structured File System (LFS), which supports
versatile "flash-friendly" features. The design has been focused on
@@ -21,7 +22,7 @@ config F2FS_FS
config F2FS_STAT_FS
bool "F2FS Status Information"
- depends on F2FS_FS && DEBUG_FS
+ depends on F2FS_FS
default y
help
/sys/kernel/debug/f2fs/ contains information about all the partitions
@@ -92,3 +93,28 @@ config F2FS_FAULT_INJECTION
Test F2FS to inject faults such as ENOMEM, ENOSPC, and so on.
If unsure, say N.
+
+config F2FS_FS_COMPRESSION
+ bool "F2FS compression feature"
+ depends on F2FS_FS
+ help
+ Enable filesystem-level compression on f2fs regular files;
+ multiple back-end compression algorithms are supported.
+
+config F2FS_FS_LZO
+ bool "LZO compression support"
+ depends on F2FS_FS_COMPRESSION
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+ default y
+ help
+ Support the LZO compression algorithm. If unsure, say Y.
+
+config F2FS_FS_LZ4
+ bool "LZ4 compression support"
+ depends on F2FS_FS_COMPRESSION
+ select LZ4_COMPRESS
+ select LZ4_DECOMPRESS
+ default y
+ help
+ Support the LZ4 compression algorithm. If unsure, say Y.
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
index 2aaecc63834f..ee7316b42f69 100644
--- a/fs/f2fs/Makefile
+++ b/fs/f2fs/Makefile
@@ -9,3 +9,4 @@ f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
f2fs-$(CONFIG_FS_VERITY) += verity.o
+f2fs-$(CONFIG_F2FS_FS_COMPRESSION) += compress.o
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index ffdaba0c55d2..44e84ac5c941 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1509,10 +1509,10 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_wait_on_all_pages_writeback(sbi);
/*
- * invalidate intermediate page cache borrowed from meta inode
- * which are used for migration of encrypted inode's blocks.
+ * invalidate intermediate page cache pages borrowed from the meta inode,
+ * which are used for migration of encrypted or verity inodes' blocks.
*/
- if (f2fs_sb_has_encrypt(sbi))
+ if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi))
invalidate_mapping_pages(META_MAPPING(sbi),
MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
new file mode 100644
index 000000000000..d8a64be90a50
--- /dev/null
+++ b/fs/f2fs/compress.c
@@ -0,0 +1,1176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * f2fs compress support
+ *
+ * Copyright (c) 2019 Chao Yu <chao@kernel.org>
+ */
+
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+#include <linux/lzo.h>
+#include <linux/lz4.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include <trace/events/f2fs.h>
+
+struct f2fs_compress_ops {
+ int (*init_compress_ctx)(struct compress_ctx *cc);
+ void (*destroy_compress_ctx)(struct compress_ctx *cc);
+ int (*compress_pages)(struct compress_ctx *cc);
+ int (*decompress_pages)(struct decompress_io_ctx *dic);
+};
+
+static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
+{
+ return index & (cc->cluster_size - 1);
+}
+
+static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
+{
+ return index >> cc->log_cluster_size;
+}
+
+static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
+{
+ return cc->cluster_idx << cc->log_cluster_size;
+}
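
The three helpers above assume cluster_size is always a power of two
(cluster_size == 1 << log_cluster_size), so masking and shifting stand in
for modulo and division. A minimal userspace sketch of the same arithmetic
(names are illustrative, not f2fs symbols):

    #include <assert.h>
    #include <stdio.h>

    /* Mirror of the cluster helpers, for a power-of-two cluster size. */
    static unsigned long off_in_cluster(unsigned long idx, unsigned log_sz)
    {
        return idx & ((1UL << log_sz) - 1);   /* idx % cluster_size */
    }

    static unsigned long clu_idx(unsigned long idx, unsigned log_sz)
    {
        return idx >> log_sz;                 /* idx / cluster_size */
    }

    int main(void)
    {
        /* log_cluster_size == 2: 4 pages per cluster, so page 11 is
         * offset 3 within cluster 2. */
        assert(off_in_cluster(11, 2) == 3);
        assert(clu_idx(11, 2) == 2);
        printf("page 11 -> cluster %lu, offset %lu\n",
               clu_idx(11, 2), off_in_cluster(11, 2));
        return 0;
    }
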
+
+bool f2fs_is_compressed_page(struct page *page)
+{
+ if (!PagePrivate(page))
+ return false;
+ if (!page_private(page))
+ return false;
+ if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
+ return false;
+ f2fs_bug_on(F2FS_M_SB(page->mapping),
+ *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
+ return true;
+}
+
+static void f2fs_set_compressed_page(struct page *page,
+ struct inode *inode, pgoff_t index, void *data, refcount_t *r)
+{
+ SetPagePrivate(page);
+ set_page_private(page, (unsigned long)data);
+
+ /* i_crypto_info and iv index */
+ page->index = index;
+ page->mapping = inode->i_mapping;
+ if (r)
+ refcount_inc(r);
+}
+
+static void f2fs_put_compressed_page(struct page *page)
+{
+ set_page_private(page, (unsigned long)NULL);
+ ClearPagePrivate(page);
+ page->mapping = NULL;
+ unlock_page(page);
+ put_page(page);
+}
+
+static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (!cc->rpages[i])
+ continue;
+ if (unlock)
+ unlock_page(cc->rpages[i]);
+ else
+ put_page(cc->rpages[i]);
+ }
+}
+
+static void f2fs_put_rpages(struct compress_ctx *cc)
+{
+ f2fs_drop_rpages(cc, cc->cluster_size, false);
+}
+
+static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
+{
+ f2fs_drop_rpages(cc, len, true);
+}
+
+static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
+ struct address_space *mapping,
+ pgoff_t start, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ struct page *page = find_get_page(mapping, start + i);
+
+ put_page(page);
+ put_page(page);
+ }
+}
+
+static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
+ struct writeback_control *wbc, bool redirty, int unlock)
+{
+ unsigned int i;
+
+ for (i = 0; i < cc->cluster_size; i++) {
+ if (!cc->rpages[i])
+ continue;
+ if (redirty)
+ redirty_page_for_writepage(wbc, cc->rpages[i]);
+ f2fs_put_page(cc->rpages[i], unlock);
+ }
+}
+
+struct page *f2fs_compress_control_page(struct page *page)
+{
+ return ((struct compress_io_ctx *)page_private(page))->rpages[0];
+}
+
+int f2fs_init_compress_ctx(struct compress_ctx *cc)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+
+ if (cc->nr_rpages)
+ return 0;
+
+ cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
+ cc->log_cluster_size, GFP_NOFS);
+ return cc->rpages ? 0 : -ENOMEM;
+}
+
+void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
+{
+ kfree(cc->rpages);
+ cc->rpages = NULL;
+ cc->nr_rpages = 0;
+ cc->nr_cpages = 0;
+ cc->cluster_idx = NULL_CLUSTER;
+}
+
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
+{
+ unsigned int cluster_ofs;
+
+ if (!f2fs_cluster_can_merge_page(cc, page->index))
+ f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
+
+ cluster_ofs = offset_in_cluster(cc, page->index);
+ cc->rpages[cluster_ofs] = page;
+ cc->nr_rpages++;
+ cc->cluster_idx = cluster_idx(cc, page->index);
+}
+
+#ifdef CONFIG_F2FS_FS_LZO
+static int lzo_init_compress_ctx(struct compress_ctx *cc)
+{
+ cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
+ LZO1X_MEM_COMPRESS, GFP_NOFS);
+ if (!cc->private)
+ return -ENOMEM;
+
+ cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
+ return 0;
+}
+
+static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
+{
+ kvfree(cc->private);
+ cc->private = NULL;
+}
+
+static int lzo_compress_pages(struct compress_ctx *cc)
+{
+ int ret;
+
+ ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
+ &cc->clen, cc->private);
+ if (ret != LZO_E_OK) {
+ printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
+ KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int lzo_decompress_pages(struct decompress_io_ctx *dic)
+{
+ int ret;
+
+ ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
+ dic->rbuf, &dic->rlen);
+ if (ret != LZO_E_OK) {
+ printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
+ KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
+ return -EIO;
+ }
+
+ if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
+ printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
+ "expected:%lu\n", KERN_ERR,
+ F2FS_I_SB(dic->inode)->sb->s_id,
+ dic->rlen,
+ PAGE_SIZE << dic->log_cluster_size);
+ return -EIO;
+ }
+ return 0;
+}
+
+static const struct f2fs_compress_ops f2fs_lzo_ops = {
+ .init_compress_ctx = lzo_init_compress_ctx,
+ .destroy_compress_ctx = lzo_destroy_compress_ctx,
+ .compress_pages = lzo_compress_pages,
+ .decompress_pages = lzo_decompress_pages,
+};
+#endif
+
+#ifdef CONFIG_F2FS_FS_LZ4
+static int lz4_init_compress_ctx(struct compress_ctx *cc)
+{
+ cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
+ LZ4_MEM_COMPRESS, GFP_NOFS);
+ if (!cc->private)
+ return -ENOMEM;
+
+ cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size);
+ return 0;
+}
+
+static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
+{
+ kvfree(cc->private);
+ cc->private = NULL;
+}
+
+static int lz4_compress_pages(struct compress_ctx *cc)
+{
+ int len;
+
+ len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
+ cc->clen, cc->private);
+ if (!len) {
+ printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n",
+ KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id);
+ return -EIO;
+ }
+ cc->clen = len;
+ return 0;
+}
+
+static int lz4_decompress_pages(struct decompress_io_ctx *dic)
+{
+ int ret;
+
+ ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
+ dic->clen, dic->rlen);
+ if (ret < 0) {
+ printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
+ KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
+ return -EIO;
+ }
+
+ if (ret != PAGE_SIZE << dic->log_cluster_size) {
+ printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
+ "expected:%lu\n", KERN_ERR,
+ F2FS_I_SB(dic->inode)->sb->s_id,
+ dic->rlen,
+ PAGE_SIZE << dic->log_cluster_size);
+ return -EIO;
+ }
+ return 0;
+}
+
+static const struct f2fs_compress_ops f2fs_lz4_ops = {
+ .init_compress_ctx = lz4_init_compress_ctx,
+ .destroy_compress_ctx = lz4_destroy_compress_ctx,
+ .compress_pages = lz4_compress_pages,
+ .decompress_pages = lz4_decompress_pages,
+};
+#endif
+
+static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
+#ifdef CONFIG_F2FS_FS_LZO
+ &f2fs_lzo_ops,
+#else
+ NULL,
+#endif
+#ifdef CONFIG_F2FS_FS_LZ4
+ &f2fs_lz4_ops,
+#else
+ NULL,
+#endif
+};
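
The NULL slots in f2fs_cops are the point of the table: an algorithm
compiled out of the build leaves a hole, which is exactly what
f2fs_is_compress_backend_ready() below tests before a file marked with that
algorithm can be read or written. A hedged sketch of the same dispatch
pattern in plain C (all names invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    struct ops { int (*compress)(void); };

    static int lzo_compress_stub(void) { return 0; }

    static const struct ops lzo_ops = { .compress = lzo_compress_stub };

    /* Indexed by algorithm id; a NULL entry means "compiled out". */
    static const struct ops *ops_table[2] = {
        &lzo_ops,   /* algorithm 0 built in */
        NULL,       /* algorithm 1 missing from this build */
    };

    static int backend_ready(int algo)
    {
        return ops_table[algo] != NULL;
    }

    int main(void)
    {
        printf("algo 0 ready: %d, algo 1 ready: %d\n",
               backend_ready(0), backend_ready(1));
        return 0;
    }
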
+
+bool f2fs_is_compress_backend_ready(struct inode *inode)
+{
+ if (!f2fs_compressed_file(inode))
+ return true;
+ return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
+}
+
+static struct page *f2fs_grab_page(void)
+{
+ struct page *page;
+
+ page = alloc_page(GFP_NOFS);
+ if (!page)
+ return NULL;
+ lock_page(page);
+ return page;
+}
+
+static int f2fs_compress_pages(struct compress_ctx *cc)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+ struct f2fs_inode_info *fi = F2FS_I(cc->inode);
+ const struct f2fs_compress_ops *cops =
+ f2fs_cops[fi->i_compress_algorithm];
+ unsigned int max_len, nr_cpages;
+ int i, ret;
+
+ trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
+ cc->cluster_size, fi->i_compress_algorithm);
+
+ ret = cops->init_compress_ctx(cc);
+ if (ret)
+ goto out;
+
+ max_len = COMPRESS_HEADER_SIZE + cc->clen;
+ cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
+
+ cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
+ cc->nr_cpages, GFP_NOFS);
+ if (!cc->cpages) {
+ ret = -ENOMEM;
+ goto destroy_compress_ctx;
+ }
+
+ for (i = 0; i < cc->nr_cpages; i++) {
+ cc->cpages[i] = f2fs_grab_page();
+ if (!cc->cpages[i]) {
+ ret = -ENOMEM;
+ goto out_free_cpages;
+ }
+ }
+
+ cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
+ if (!cc->rbuf) {
+ ret = -ENOMEM;
+ goto out_free_cpages;
+ }
+
+ cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
+ if (!cc->cbuf) {
+ ret = -ENOMEM;
+ goto out_vunmap_rbuf;
+ }
+
+ ret = cops->compress_pages(cc);
+ if (ret)
+ goto out_vunmap_cbuf;
+
+ max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
+
+ if (cc->clen > max_len) {
+ ret = -EAGAIN;
+ goto out_vunmap_cbuf;
+ }
+
+ cc->cbuf->clen = cpu_to_le32(cc->clen);
+ cc->cbuf->chksum = cpu_to_le32(0);
+
+ for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
+ cc->cbuf->reserved[i] = cpu_to_le32(0);
+
+ vunmap(cc->cbuf);
+ vunmap(cc->rbuf);
+
+ nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+
+ for (i = nr_cpages; i < cc->nr_cpages; i++) {
+ f2fs_put_compressed_page(cc->cpages[i]);
+ cc->cpages[i] = NULL;
+ }
+
+ cc->nr_cpages = nr_cpages;
+
+ trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
+ cc->clen, ret);
+ return 0;
+
+out_vunmap_cbuf:
+ vunmap(cc->cbuf);
+out_vunmap_rbuf:
+ vunmap(cc->rbuf);
+out_free_cpages:
+ for (i = 0; i < cc->nr_cpages; i++) {
+ if (cc->cpages[i])
+ f2fs_put_compressed_page(cc->cpages[i]);
+ }
+ kfree(cc->cpages);
+ cc->cpages = NULL;
+destroy_compress_ctx:
+ cops->destroy_compress_ctx(cc);
+out:
+ trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
+ cc->clen, ret);
+ return ret;
+}
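
The max_len test above is the space-saving policy in one line: a cluster is
only stored compressed when payload plus header fits in at most
cluster_size - 1 pages; otherwise -EAGAIN sends the caller back to the raw
write path. A small worked example of that accounting (the real
COMPRESS_HEADER_SIZE is derived from struct compress_data in f2fs.h; 16 is
an assumed stand-in):

    #include <stdio.h>

    #define PG   4096u
    #define HDR  16u   /* assumed header size; see f2fs.h */

    int main(void)
    {
        unsigned cluster_pages = 4;             /* 16 KiB cluster */
        unsigned max_len = PG * (cluster_pages - 1) - HDR;
        unsigned clen = 9000;                   /* compressor output */

        if (clen > max_len)
            printf("clen %u > %u: keep raw (-EAGAIN)\n", clen, max_len);
        else
            printf("clen %u fits: %u compressed pages instead of %u\n",
                   clen, (clen + HDR + PG - 1) / PG, cluster_pages);
        return 0;
    }
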
+
+void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
+{
+ struct decompress_io_ctx *dic =
+ (struct decompress_io_ctx *)page_private(page);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
+ const struct f2fs_compress_ops *cops =
+ f2fs_cops[fi->i_compress_algorithm];
+ int ret;
+
+ dec_page_count(sbi, F2FS_RD_DATA);
+
+ if (bio->bi_status || PageError(page))
+ dic->failed = true;
+
+ if (refcount_dec_not_one(&dic->ref))
+ return;
+
+ trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
+ dic->cluster_size, fi->i_compress_algorithm);
+
+ /* submit partial compressed pages */
+ if (dic->failed) {
+ ret = -EIO;
+ goto out_free_dic;
+ }
+
+ dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
+ if (!dic->rbuf) {
+ ret = -ENOMEM;
+ goto out_free_dic;
+ }
+
+ dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
+ if (!dic->cbuf) {
+ ret = -ENOMEM;
+ goto out_vunmap_rbuf;
+ }
+
+ dic->clen = le32_to_cpu(dic->cbuf->clen);
+ dic->rlen = PAGE_SIZE << dic->log_cluster_size;
+
+ if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
+ ret = -EFSCORRUPTED;
+ goto out_vunmap_cbuf;
+ }
+
+ ret = cops->decompress_pages(dic);
+
+out_vunmap_cbuf:
+ vunmap(dic->cbuf);
+out_vunmap_rbuf:
+ vunmap(dic->rbuf);
+out_free_dic:
+ if (!verity)
+ f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
+ ret, false);
+
+ trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
+ dic->clen, ret);
+ if (!verity)
+ f2fs_free_dic(dic);
+}
+
+static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
+{
+ if (cc->cluster_idx == NULL_CLUSTER)
+ return true;
+ return cc->cluster_idx == cluster_idx(cc, index);
+}
+
+bool f2fs_cluster_is_empty(struct compress_ctx *cc)
+{
+ return cc->nr_rpages == 0;
+}
+
+static bool f2fs_cluster_is_full(struct compress_ctx *cc)
+{
+ return cc->cluster_size == cc->nr_rpages;
+}
+
+bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
+{
+ if (f2fs_cluster_is_empty(cc))
+ return true;
+ return is_page_in_cluster(cc, index);
+}
+
+static bool __cluster_may_compress(struct compress_ctx *cc)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+ loff_t i_size = i_size_read(cc->inode);
+ unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
+ int i;
+
+ for (i = 0; i < cc->cluster_size; i++) {
+ struct page *page = cc->rpages[i];
+
+ f2fs_bug_on(sbi, !page);
+
+ if (unlikely(f2fs_cp_error(sbi)))
+ return false;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ return false;
+
+ /* beyond EOF */
+ if (page->index >= nr_pages)
+ return false;
+ }
+ return true;
+}
+
+/* return # of compressed block addresses */
+static int f2fs_compressed_blocks(struct compress_ctx *cc)
+{
+ struct dnode_of_data dn;
+ int ret;
+
+ set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
+ LOOKUP_NODE);
+ if (ret) {
+ if (ret == -ENOENT)
+ ret = 0;
+ goto fail;
+ }
+
+ if (dn.data_blkaddr == COMPRESS_ADDR) {
+ int i;
+
+ ret = 1;
+ for (i = 1; i < cc->cluster_size; i++) {
+ block_t blkaddr;
+
+ blkaddr = datablock_addr(dn.inode,
+ dn.node_page, dn.ofs_in_node + i);
+ if (blkaddr != NULL_ADDR)
+ ret++;
+ }
+ }
+fail:
+ f2fs_put_dnode(&dn);
+ return ret;
+}
+
+int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
+{
+ struct compress_ctx cc = {
+ .inode = inode,
+ .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+ .cluster_size = F2FS_I(inode)->i_cluster_size,
+ .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
+ };
+
+ return f2fs_compressed_blocks(&cc);
+}
+
+static bool cluster_may_compress(struct compress_ctx *cc)
+{
+ if (!f2fs_compressed_file(cc->inode))
+ return false;
+ if (f2fs_is_atomic_file(cc->inode))
+ return false;
+ if (f2fs_is_mmap_file(cc->inode))
+ return false;
+ if (!f2fs_cluster_is_full(cc))
+ return false;
+ return __cluster_may_compress(cc);
+}
+
+static void set_cluster_writeback(struct compress_ctx *cc)
+{
+ int i;
+
+ for (i = 0; i < cc->cluster_size; i++) {
+ if (cc->rpages[i])
+ set_page_writeback(cc->rpages[i]);
+ }
+}
+
+static void set_cluster_dirty(struct compress_ctx *cc)
+{
+ int i;
+
+ for (i = 0; i < cc->cluster_size; i++)
+ if (cc->rpages[i])
+ set_page_dirty(cc->rpages[i]);
+}
+
+static int prepare_compress_overwrite(struct compress_ctx *cc,
+ struct page **pagep, pgoff_t index, void **fsdata)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+ struct address_space *mapping = cc->inode->i_mapping;
+ struct page *page;
+ struct dnode_of_data dn;
+ sector_t last_block_in_bio;
+ unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
+ pgoff_t start_idx = start_idx_of_cluster(cc);
+ int i, ret;
+ bool prealloc;
+
+retry:
+ ret = f2fs_compressed_blocks(cc);
+ if (ret <= 0)
+ return ret;
+
+ /* compressed case */
+ prealloc = (ret < cc->cluster_size);
+
+ ret = f2fs_init_compress_ctx(cc);
+ if (ret)
+ return ret;
+
+ /* keep page reference to avoid page reclaim */
+ for (i = 0; i < cc->cluster_size; i++) {
+ page = f2fs_pagecache_get_page(mapping, start_idx + i,
+ fgp_flag, GFP_NOFS);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unlock_pages;
+ }
+
+ if (PageUptodate(page))
+ unlock_page(page);
+ else
+ f2fs_compress_ctx_add_page(cc, page);
+ }
+
+ if (!f2fs_cluster_is_empty(cc)) {
+ struct bio *bio = NULL;
+
+ ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
+ &last_block_in_bio, false);
+ f2fs_destroy_compress_ctx(cc);
+ if (ret)
+ goto release_pages;
+ if (bio)
+ f2fs_submit_bio(sbi, bio, DATA);
+
+ ret = f2fs_init_compress_ctx(cc);
+ if (ret)
+ goto release_pages;
+ }
+
+ for (i = 0; i < cc->cluster_size; i++) {
+ f2fs_bug_on(sbi, cc->rpages[i]);
+
+ page = find_lock_page(mapping, start_idx + i);
+ f2fs_bug_on(sbi, !page);
+
+ f2fs_wait_on_page_writeback(page, DATA, true, true);
+
+ f2fs_compress_ctx_add_page(cc, page);
+ f2fs_put_page(page, 0);
+
+ if (!PageUptodate(page)) {
+ f2fs_unlock_rpages(cc, i + 1);
+ f2fs_put_rpages_mapping(cc, mapping, start_idx,
+ cc->cluster_size);
+ f2fs_destroy_compress_ctx(cc);
+ goto retry;
+ }
+ }
+
+ if (prealloc) {
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+
+ set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
+
+ for (i = cc->cluster_size - 1; i > 0; i--) {
+ ret = f2fs_get_block(&dn, start_idx + i);
+ if (ret) {
+ i = cc->cluster_size;
+ break;
+ }
+
+ if (dn.data_blkaddr != NEW_ADDR)
+ break;
+ }
+
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ }
+
+ if (likely(!ret)) {
+ *fsdata = cc->rpages;
+ *pagep = cc->rpages[offset_in_cluster(cc, index)];
+ return cc->cluster_size;
+ }
+
+unlock_pages:
+ f2fs_unlock_rpages(cc, i);
+release_pages:
+ f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
+ f2fs_destroy_compress_ctx(cc);
+ return ret;
+}
+
+int f2fs_prepare_compress_overwrite(struct inode *inode,
+ struct page **pagep, pgoff_t index, void **fsdata)
+{
+ struct compress_ctx cc = {
+ .inode = inode,
+ .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+ .cluster_size = F2FS_I(inode)->i_cluster_size,
+ .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
+ .rpages = NULL,
+ .nr_rpages = 0,
+ };
+
+ return prepare_compress_overwrite(&cc, pagep, index, fsdata);
+}
+
+bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
+ pgoff_t index, unsigned copied)
+{
+ struct compress_ctx cc = {
+ .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+ .cluster_size = F2FS_I(inode)->i_cluster_size,
+ .rpages = fsdata,
+ };
+ bool first_index = (index == cc.rpages[0]->index);
+
+ if (copied)
+ set_cluster_dirty(&cc);
+
+ f2fs_put_rpages_wbc(&cc, NULL, false, 1);
+ f2fs_destroy_compress_ctx(&cc);
+
+ return first_index;
+}
+
+static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+ int *submitted,
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
+{
+ struct inode *inode = cc->inode;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_io_info fio = {
+ .sbi = sbi,
+ .ino = cc->inode->i_ino,
+ .type = DATA,
+ .op = REQ_OP_WRITE,
+ .op_flags = wbc_to_write_flags(wbc),
+ .old_blkaddr = NEW_ADDR,
+ .page = NULL,
+ .encrypted_page = NULL,
+ .compressed_page = NULL,
+ .submitted = false,
+ .need_lock = LOCK_RETRY,
+ .io_type = io_type,
+ .io_wbc = wbc,
+ .encrypted = f2fs_encrypted_file(cc->inode),
+ };
+ struct dnode_of_data dn;
+ struct node_info ni;
+ struct compress_io_ctx *cic;
+ pgoff_t start_idx = start_idx_of_cluster(cc);
+ unsigned int last_index = cc->cluster_size - 1;
+ loff_t psize;
+ int i, err;
+
+ set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
+
+ f2fs_lock_op(sbi);
+
+ err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
+ if (err)
+ goto out_unlock_op;
+
+ for (i = 0; i < cc->cluster_size; i++) {
+ if (datablock_addr(dn.inode, dn.node_page,
+ dn.ofs_in_node + i) == NULL_ADDR)
+ goto out_put_dnode;
+ }
+
+ psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
+
+ err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ if (err)
+ goto out_put_dnode;
+
+ fio.version = ni.version;
+
+ cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
+ if (!cic)
+ goto out_put_dnode;
+
+ cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
+ cic->inode = inode;
+ refcount_set(&cic->ref, 1);
+ cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
+ cc->log_cluster_size, GFP_NOFS);
+ if (!cic->rpages)
+ goto out_put_cic;
+
+ cic->nr_rpages = cc->cluster_size;
+
+ for (i = 0; i < cc->nr_cpages; i++) {
+ f2fs_set_compressed_page(cc->cpages[i], inode,
+ cc->rpages[i + 1]->index,
+ cic, i ? &cic->ref : NULL);
+ fio.compressed_page = cc->cpages[i];
+ if (fio.encrypted) {
+ fio.page = cc->rpages[i + 1];
+ err = f2fs_encrypt_one_page(&fio);
+ if (err)
+ goto out_destroy_crypt;
+ cc->cpages[i] = fio.encrypted_page;
+ }
+ }
+
+ set_cluster_writeback(cc);
+
+ for (i = 0; i < cc->cluster_size; i++)
+ cic->rpages[i] = cc->rpages[i];
+
+ for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
+ block_t blkaddr;
+
+ blkaddr = datablock_addr(dn.inode, dn.node_page,
+ dn.ofs_in_node);
+ fio.page = cic->rpages[i];
+ fio.old_blkaddr = blkaddr;
+
+ /* cluster header */
+ if (i == 0) {
+ if (blkaddr == COMPRESS_ADDR)
+ fio.compr_blocks++;
+ if (__is_valid_data_blkaddr(blkaddr))
+ f2fs_invalidate_blocks(sbi, blkaddr);
+ f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
+ goto unlock_continue;
+ }
+
+ if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
+ fio.compr_blocks++;
+
+ if (i > cc->nr_cpages) {
+ if (__is_valid_data_blkaddr(blkaddr)) {
+ f2fs_invalidate_blocks(sbi, blkaddr);
+ f2fs_update_data_blkaddr(&dn, NEW_ADDR);
+ }
+ goto unlock_continue;
+ }
+
+ f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
+
+ if (fio.encrypted)
+ fio.encrypted_page = cc->cpages[i - 1];
+ else
+ fio.compressed_page = cc->cpages[i - 1];
+
+ cc->cpages[i - 1] = NULL;
+ f2fs_outplace_write_data(&dn, &fio);
+ (*submitted)++;
+unlock_continue:
+ inode_dec_dirty_pages(cc->inode);
+ unlock_page(fio.page);
+ }
+
+ if (fio.compr_blocks)
+ f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
+ f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
+
+ set_inode_flag(cc->inode, FI_APPEND_WRITE);
+ if (cc->cluster_idx == 0)
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+
+ f2fs_put_dnode(&dn);
+ f2fs_unlock_op(sbi);
+
+ down_write(&fi->i_sem);
+ if (fi->last_disk_size < psize)
+ fi->last_disk_size = psize;
+ up_write(&fi->i_sem);
+
+ f2fs_put_rpages(cc);
+ f2fs_destroy_compress_ctx(cc);
+ return 0;
+
+out_destroy_crypt:
+ kfree(cic->rpages);
+
+ for (--i; i >= 0; i--)
+ fscrypt_finalize_bounce_page(&cc->cpages[i]);
+ for (i = 0; i < cc->nr_cpages; i++) {
+ if (!cc->cpages[i])
+ continue;
+ f2fs_put_page(cc->cpages[i], 1);
+ }
+out_put_cic:
+ kfree(cic);
+out_put_dnode:
+ f2fs_put_dnode(&dn);
+out_unlock_op:
+ f2fs_unlock_op(sbi);
+ return -EAGAIN;
+}
+
+void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+{
+ struct f2fs_sb_info *sbi = bio->bi_private;
+ struct compress_io_ctx *cic =
+ (struct compress_io_ctx *)page_private(page);
+ int i;
+
+ if (unlikely(bio->bi_status))
+ mapping_set_error(cic->inode->i_mapping, -EIO);
+
+ f2fs_put_compressed_page(page);
+
+ dec_page_count(sbi, F2FS_WB_DATA);
+
+ if (refcount_dec_not_one(&cic->ref))
+ return;
+
+ for (i = 0; i < cic->nr_rpages; i++) {
+ WARN_ON(!cic->rpages[i]);
+ clear_cold_data(cic->rpages[i]);
+ end_page_writeback(cic->rpages[i]);
+ }
+
+ kfree(cic->rpages);
+ kfree(cic);
+}
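
Completion here relies on the refcount scheme set up in
f2fs_write_compressed_pages(): the context starts at 1, each additional
in-flight page takes a reference, and only the completion that finds the
count at 1 performs the teardown. A userspace sketch of the
refcount_dec_not_one() idiom using C11 atomics (a simplification of the
kernel primitive, which also saturates):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool dec_not_one(atomic_int *r)
    {
        int old = atomic_load(r);

        while (old != 1) {
            if (atomic_compare_exchange_weak(r, &old, old - 1))
                return true;    /* decremented; not the last completion */
        }
        return false;           /* count was 1: caller does the teardown */
    }

    int main(void)
    {
        atomic_int ref = 3;     /* base ref + two extra pages in flight */

        printf("%d\n", dec_not_one(&ref));  /* 1: not last */
        printf("%d\n", dec_not_one(&ref));  /* 1: not last */
        printf("%d\n", dec_not_one(&ref));  /* 0: last, free the context */
        return 0;
    }
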
+
+static int f2fs_write_raw_pages(struct compress_ctx *cc,
+ int *submitted,
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
+{
+ struct address_space *mapping = cc->inode->i_mapping;
+ int _submitted, compr_blocks, ret;
+ int i = -1, err = 0;
+
+ compr_blocks = f2fs_compressed_blocks(cc);
+ if (compr_blocks < 0) {
+ err = compr_blocks;
+ goto out_err;
+ }
+
+ for (i = 0; i < cc->cluster_size; i++) {
+ if (!cc->rpages[i])
+ continue;
+retry_write:
+ if (cc->rpages[i]->mapping != mapping) {
+ unlock_page(cc->rpages[i]);
+ continue;
+ }
+
+ BUG_ON(!PageLocked(cc->rpages[i]));
+
+ ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
+ NULL, NULL, wbc, io_type,
+ compr_blocks);
+ if (ret) {
+ if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ unlock_page(cc->rpages[i]);
+ ret = 0;
+ } else if (ret == -EAGAIN) {
+ ret = 0;
+ cond_resched();
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ lock_page(cc->rpages[i]);
+ clear_page_dirty_for_io(cc->rpages[i]);
+ goto retry_write;
+ }
+ err = ret;
+ goto out_fail;
+ }
+
+ *submitted += _submitted;
+ }
+ return 0;
+
+out_fail:
+ /* TODO: revoke partially updated block addresses */
+ BUG_ON(compr_blocks);
+out_err:
+ for (++i; i < cc->cluster_size; i++) {
+ if (!cc->rpages[i])
+ continue;
+ redirty_page_for_writepage(wbc, cc->rpages[i]);
+ unlock_page(cc->rpages[i]);
+ }
+ return err;
+}
+
+int f2fs_write_multi_pages(struct compress_ctx *cc,
+ int *submitted,
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
+{
+ struct f2fs_inode_info *fi = F2FS_I(cc->inode);
+ const struct f2fs_compress_ops *cops =
+ f2fs_cops[fi->i_compress_algorithm];
+ int err;
+
+ *submitted = 0;
+ if (cluster_may_compress(cc)) {
+ err = f2fs_compress_pages(cc);
+ if (err == -EAGAIN) {
+ goto write;
+ } else if (err) {
+ f2fs_put_rpages_wbc(cc, wbc, true, 1);
+ goto destroy_out;
+ }
+
+ err = f2fs_write_compressed_pages(cc, submitted,
+ wbc, io_type);
+ cops->destroy_compress_ctx(cc);
+ if (!err)
+ return 0;
+ f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
+ }
+write:
+ f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
+
+ err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
+ f2fs_put_rpages_wbc(cc, wbc, false, 0);
+destroy_out:
+ f2fs_destroy_compress_ctx(cc);
+ return err;
+}
+
+struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+ struct decompress_io_ctx *dic;
+ pgoff_t start_idx = start_idx_of_cluster(cc);
+ int i;
+
+ dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
+ if (!dic)
+ return ERR_PTR(-ENOMEM);
+
+ dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
+ cc->log_cluster_size, GFP_NOFS);
+ if (!dic->rpages) {
+ kfree(dic);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
+ dic->inode = cc->inode;
+ refcount_set(&dic->ref, 1);
+ dic->cluster_idx = cc->cluster_idx;
+ dic->cluster_size = cc->cluster_size;
+ dic->log_cluster_size = cc->log_cluster_size;
+ dic->nr_cpages = cc->nr_cpages;
+ dic->failed = false;
+
+ for (i = 0; i < dic->cluster_size; i++)
+ dic->rpages[i] = cc->rpages[i];
+ dic->nr_rpages = cc->cluster_size;
+
+ dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
+ dic->nr_cpages, GFP_NOFS);
+ if (!dic->cpages)
+ goto out_free;
+
+ for (i = 0; i < dic->nr_cpages; i++) {
+ struct page *page;
+
+ page = f2fs_grab_page();
+ if (!page)
+ goto out_free;
+
+ f2fs_set_compressed_page(page, cc->inode,
+ start_idx + i + 1,
+ dic, i ? &dic->ref : NULL);
+ dic->cpages[i] = page;
+ }
+
+ dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
+ dic->cluster_size, GFP_NOFS);
+ if (!dic->tpages)
+ goto out_free;
+
+ for (i = 0; i < dic->cluster_size; i++) {
+ if (cc->rpages[i])
+ continue;
+
+ dic->tpages[i] = f2fs_grab_page();
+ if (!dic->tpages[i])
+ goto out_free;
+ }
+
+ for (i = 0; i < dic->cluster_size; i++) {
+ if (dic->tpages[i])
+ continue;
+ dic->tpages[i] = cc->rpages[i];
+ }
+
+ return dic;
+
+out_free:
+ f2fs_free_dic(dic);
+ return ERR_PTR(-ENOMEM);
+}
+
+void f2fs_free_dic(struct decompress_io_ctx *dic)
+{
+ int i;
+
+ if (dic->tpages) {
+ for (i = 0; i < dic->cluster_size; i++) {
+ if (dic->rpages[i])
+ continue;
+ f2fs_put_page(dic->tpages[i], 1);
+ }
+ kfree(dic->tpages);
+ }
+
+ if (dic->cpages) {
+ for (i = 0; i < dic->nr_cpages; i++) {
+ if (!dic->cpages[i])
+ continue;
+ f2fs_put_compressed_page(dic->cpages[i]);
+ }
+ kfree(dic->cpages);
+ }
+
+ kfree(dic->rpages);
+ kfree(dic);
+}
+
+void f2fs_decompress_end_io(struct page **rpages,
+ unsigned int cluster_size, bool err, bool verity)
+{
+ int i;
+
+ for (i = 0; i < cluster_size; i++) {
+ struct page *rpage = rpages[i];
+
+ if (!rpage)
+ continue;
+
+ if (err || PageError(rpage)) {
+ ClearPageUptodate(rpage);
+ ClearPageError(rpage);
+ } else {
+ if (!verity || fsverity_verify_page(rpage))
+ SetPageUptodate(rpage);
+ else
+ SetPageError(rpage);
+ }
+ unlock_page(rpage);
+ }
+}
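
That closes the new compress.c. For orientation, the payload written by
f2fs_compress_pages() starts with the clen/chksum/reserved words seen
above; a hedged sketch of that header as a C struct (field widths inferred
from the cpu_to_le32() calls, reserved count assumed — the authoritative
definition is struct compress_data in fs/f2fs/f2fs.h):

    #include <stdint.h>
    #include <stdio.h>

    #define COMPRESS_DATA_RESERVED_SIZE 5    /* assumed; check f2fs.h */

    struct compress_data_sketch {
        uint32_t clen;      /* compressed length, little-endian on disk */
        uint32_t chksum;    /* written as 0 for now */
        uint32_t reserved[COMPRESS_DATA_RESERVED_SIZE];
        uint8_t  cdata[];   /* compressed payload follows */
    };

    int main(void)
    {
        /* COMPRESS_HEADER_SIZE in the code above is the size of this
         * fixed part. */
        printf("header: %zu bytes\n", sizeof(struct compress_data_sketch));
        return 0;
    }
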
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index a034cd0ce021..b27b72107911 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -31,6 +31,47 @@
static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
+static struct bio_set f2fs_bioset;
+
+#define F2FS_BIO_POOL_SIZE NR_CURSEG_TYPE
+
+int __init f2fs_init_bioset(void)
+{
+ if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
+ 0, BIOSET_NEED_BVECS))
+ return -ENOMEM;
+ return 0;
+}
+
+void f2fs_destroy_bioset(void)
+{
+ bioset_exit(&f2fs_bioset);
+}
+
+static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
+ unsigned int nr_iovecs)
+{
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
+}
+
+struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool no_fail)
+{
+ struct bio *bio;
+
+ if (no_fail) {
+ /* No failure on bio allocation */
+ bio = __f2fs_bio_alloc(GFP_NOIO, npages);
+ if (!bio)
+ bio = __f2fs_bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
+ return bio;
+ }
+ if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
+ f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
+ return NULL;
+ }
+
+ return __f2fs_bio_alloc(GFP_KERNEL, npages);
+}
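
f2fs_bio_alloc() now draws from a dedicated bio_set, and the no_fail path
retries with __GFP_NOFAIL so writeback can always make forward progress. A
userspace sketch of that two-tier fallback, with malloc standing in for
bio_alloc_bioset():

    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_nofail(size_t n)
    {
        void *p;

        while (!(p = malloc(n)))
            ;               /* __GFP_NOFAIL analogue: never give up */
        return p;
    }

    static void *grab(size_t n, int no_fail)
    {
        void *p = malloc(n);            /* normal attempt first */

        if (!p && no_fail)
            p = alloc_nofail(n);        /* guaranteed on the nofail path */
        return p;
    }

    int main(void)
    {
        free(grab(64, 1));
        puts("allocated via nofail fallback path");
        return 0;
    }
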
static bool __is_cp_guaranteed(struct page *page)
{
@@ -41,6 +82,9 @@ static bool __is_cp_guaranteed(struct page *page)
if (!mapping)
return false;
+ if (f2fs_is_compressed_page(page))
+ return false;
+
inode = mapping->host;
sbi = F2FS_I_SB(inode);
@@ -73,19 +117,19 @@ static enum count_type __read_io_type(struct page *page)
/* postprocessing steps for read bios */
enum bio_post_read_step {
- STEP_INITIAL = 0,
STEP_DECRYPT,
+ STEP_DECOMPRESS,
STEP_VERITY,
};
struct bio_post_read_ctx {
struct bio *bio;
+ struct f2fs_sb_info *sbi;
struct work_struct work;
- unsigned int cur_step;
unsigned int enabled_steps;
};
-static void __read_end_io(struct bio *bio)
+static void __read_end_io(struct bio *bio, bool compr, bool verity)
{
struct page *page;
struct bio_vec *bv;
@@ -94,6 +138,13 @@ static void __read_end_io(struct bio *bio)
bio_for_each_segment_all(bv, bio, iter_all) {
page = bv->bv_page;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (compr && f2fs_is_compressed_page(page)) {
+ f2fs_decompress_pages(bio, page, verity);
+ continue;
+ }
+#endif
+
/* PG_error was set if any post_read step failed */
if (bio->bi_status || PageError(page)) {
ClearPageUptodate(page);
@@ -105,31 +156,107 @@ static void __read_end_io(struct bio *bio)
dec_page_count(F2FS_P_SB(page), __read_io_type(page));
unlock_page(page);
}
- if (bio->bi_private)
- mempool_free(bio->bi_private, bio_post_read_ctx_pool);
- bio_put(bio);
+}
+
+static void f2fs_release_read_bio(struct bio *bio);
+static void __f2fs_read_end_io(struct bio *bio, bool compr, bool verity)
+{
+ if (!compr)
+ __read_end_io(bio, false, verity);
+ f2fs_release_read_bio(bio);
+}
+
+static void f2fs_decompress_bio(struct bio *bio, bool verity)
+{
+ __read_end_io(bio, true, verity);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
-static void decrypt_work(struct work_struct *work)
+static void f2fs_decrypt_work(struct bio_post_read_ctx *ctx)
+{
+ fscrypt_decrypt_bio(ctx->bio);
+}
+
+static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
+{
+ f2fs_decompress_bio(ctx->bio, ctx->enabled_steps & (1 << STEP_VERITY));
+}
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
+{
+ f2fs_decompress_end_io(rpages, cluster_size, false, true);
+}
+
+static void f2fs_verify_bio(struct bio *bio)
+{
+ struct page *page = bio_first_page_all(bio);
+ struct decompress_io_ctx *dic =
+ (struct decompress_io_ctx *)page_private(page);
+
+ f2fs_verify_pages(dic->rpages, dic->cluster_size);
+ f2fs_free_dic(dic);
+}
+#endif
+
+static void f2fs_verity_work(struct work_struct *work)
{
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
+ struct bio *bio = ctx->bio;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ unsigned int enabled_steps = ctx->enabled_steps;
+#endif
- fscrypt_decrypt_bio(ctx->bio);
+ /*
+ * fsverity_verify_bio() may call readpages() again, and while verity
+ * will be disabled for this, decryption may still be needed, resulting
+ * in another bio_post_read_ctx being allocated. So to prevent
+ * deadlocks we need to release the current ctx to the mempool first.
+ * This assumes that verity is the last post-read step.
+ */
+ mempool_free(ctx, bio_post_read_ctx_pool);
+ bio->bi_private = NULL;
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ /* previous step is decompression */
+ if (enabled_steps & (1 << STEP_DECOMPRESS)) {
+ f2fs_verify_bio(bio);
+ f2fs_release_read_bio(bio);
+ return;
+ }
+#endif
- bio_post_read_processing(ctx);
+ fsverity_verify_bio(bio);
+ __f2fs_read_end_io(bio, false, false);
}
-static void verity_work(struct work_struct *work)
+static void f2fs_post_read_work(struct work_struct *work)
{
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
- fsverity_verify_bio(ctx->bio);
+ if (ctx->enabled_steps & (1 << STEP_DECRYPT))
+ f2fs_decrypt_work(ctx);
- bio_post_read_processing(ctx);
+ if (ctx->enabled_steps & (1 << STEP_DECOMPRESS))
+ f2fs_decompress_work(ctx);
+
+ if (ctx->enabled_steps & (1 << STEP_VERITY)) {
+ INIT_WORK(&ctx->work, f2fs_verity_work);
+ fsverity_enqueue_verify_work(&ctx->work);
+ return;
+ }
+
+ __f2fs_read_end_io(ctx->bio,
+ ctx->enabled_steps & (1 << STEP_DECOMPRESS), false);
+}
+
+static void f2fs_enqueue_post_read_work(struct f2fs_sb_info *sbi,
+ struct work_struct *work)
+{
+ queue_work(sbi->post_read_wq, work);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
@@ -139,31 +266,26 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
* verity may require reading metadata pages that need decryption, and
* we shouldn't recurse to the same workqueue.
*/
- switch (++ctx->cur_step) {
- case STEP_DECRYPT:
- if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
- INIT_WORK(&ctx->work, decrypt_work);
- fscrypt_enqueue_decrypt_work(&ctx->work);
- return;
- }
- ctx->cur_step++;
- /* fall-through */
- case STEP_VERITY:
- if (ctx->enabled_steps & (1 << STEP_VERITY)) {
- INIT_WORK(&ctx->work, verity_work);
- fsverity_enqueue_verify_work(&ctx->work);
- return;
- }
- ctx->cur_step++;
- /* fall-through */
- default:
- __read_end_io(ctx->bio);
+
+ if (ctx->enabled_steps & (1 << STEP_DECRYPT) ||
+ ctx->enabled_steps & (1 << STEP_DECOMPRESS)) {
+ INIT_WORK(&ctx->work, f2fs_post_read_work);
+ f2fs_enqueue_post_read_work(ctx->sbi, &ctx->work);
+ return;
}
+
+ if (ctx->enabled_steps & (1 << STEP_VERITY)) {
+ INIT_WORK(&ctx->work, f2fs_verity_work);
+ fsverity_enqueue_verify_work(&ctx->work);
+ return;
+ }
+
+ __f2fs_read_end_io(ctx->bio, false, false);
}
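
The rewritten dispatcher replaces the old cur_step state machine with one
test over the enabled_steps bitmask: decrypt and decompress share the new
per-superblock post_read_wq, while verity keeps its own workqueue. A
compact sketch of that routing decision (print statements stand in for the
queue_work() calls):

    #include <stdio.h>

    enum { STEP_DECRYPT, STEP_DECOMPRESS, STEP_VERITY };

    static void route(unsigned enabled_steps)
    {
        if (enabled_steps &
            ((1u << STEP_DECRYPT) | (1u << STEP_DECOMPRESS)))
            puts("-> sbi->post_read_wq (decrypt and/or decompress)");
        else if (enabled_steps & (1u << STEP_VERITY))
            puts("-> fs-verity workqueue");
        else
            puts("-> end the read bio directly");
    }

    int main(void)
    {
        route(1u << STEP_DECRYPT);
        route((1u << STEP_DECOMPRESS) | (1u << STEP_VERITY));
        route(0);
        return 0;
    }
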
static bool f2fs_bio_post_read_required(struct bio *bio)
{
- return bio->bi_private && !bio->bi_status;
+ return bio->bi_private;
}
static void f2fs_read_end_io(struct bio *bio)
@@ -178,12 +300,11 @@ static void f2fs_read_end_io(struct bio *bio)
if (f2fs_bio_post_read_required(bio)) {
struct bio_post_read_ctx *ctx = bio->bi_private;
- ctx->cur_step = STEP_INITIAL;
bio_post_read_processing(ctx);
return;
}
- __read_end_io(bio);
+ __f2fs_read_end_io(bio, false, false);
}
static void f2fs_write_end_io(struct bio *bio)
@@ -214,6 +335,13 @@ static void f2fs_write_end_io(struct bio *bio)
fscrypt_finalize_bounce_page(&page);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_is_compressed_page(page)) {
+ f2fs_compress_write_end_io(bio, page);
+ continue;
+ }
+#endif
+
if (unlikely(bio->bi_status)) {
mapping_set_error(page->mapping, -EIO);
if (type == F2FS_WB_CP_DATA)
@@ -358,6 +486,12 @@ submit_io:
submit_bio(bio);
}
+void f2fs_submit_bio(struct f2fs_sb_info *sbi,
+ struct bio *bio, enum page_type type)
+{
+ __submit_bio(sbi, bio, type);
+}
+
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
@@ -380,7 +514,6 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
struct page *page, nid_t ino)
{
struct bio_vec *bvec;
- struct page *target;
struct bvec_iter_all iter_all;
if (!bio)
@@ -390,10 +523,18 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
return true;
bio_for_each_segment_all(bvec, bio, iter_all) {
+ struct page *target = bvec->bv_page;
- target = bvec->bv_page;
- if (fscrypt_is_bounce_page(target))
+ if (fscrypt_is_bounce_page(target)) {
target = fscrypt_pagecache_page(target);
+ if (IS_ERR(target))
+ continue;
+ }
+ if (f2fs_is_compressed_page(target)) {
+ target = f2fs_compress_control_page(target);
+ if (IS_ERR(target))
+ continue;
+ }
if (inode && inode == target->mapping->host)
return true;
@@ -588,7 +729,8 @@ static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio,
found = true;
- if (bio_add_page(*bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
+ if (bio_add_page(*bio, page, PAGE_SIZE, 0) ==
+ PAGE_SIZE) {
ret = 0;
break;
}
@@ -728,7 +870,12 @@ next:
verify_fio_blkaddr(fio);
- bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ if (fio->encrypted_page)
+ bio_page = fio->encrypted_page;
+ else if (fio->compressed_page)
+ bio_page = fio->compressed_page;
+ else
+ bio_page = fio->page;
/* set submitted = true as a return value */
fio->submitted = true;
@@ -797,17 +944,16 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
if (f2fs_encrypted_file(inode))
post_read_steps |= 1 << STEP_DECRYPT;
-
+ if (f2fs_compressed_file(inode))
+ post_read_steps |= 1 << STEP_DECOMPRESS;
if (f2fs_need_verity(inode, first_idx))
post_read_steps |= 1 << STEP_VERITY;
if (post_read_steps) {
+ /* Due to the mempool, this never fails. */
ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
- if (!ctx) {
- bio_put(bio);
- return ERR_PTR(-ENOMEM);
- }
ctx->bio = bio;
+ ctx->sbi = sbi;
ctx->enabled_steps = post_read_steps;
bio->bi_private = ctx;
}
@@ -815,6 +961,13 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
return bio;
}
+static void f2fs_release_read_bio(struct bio *bio)
+{
+ if (bio->bi_private)
+ mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+ bio_put(bio);
+}
+
/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
block_t blkaddr)
@@ -1180,19 +1333,6 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
int err = 0;
bool direct_io = iocb->ki_flags & IOCB_DIRECT;
- /* convert inline data for Direct I/O*/
- if (direct_io) {
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
-
- if (direct_io && allow_outplace_dio(inode, iocb, from))
- return 0;
-
- if (is_inode_flag_set(inode, FI_NO_PREALLOC))
- return 0;
-
map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
if (map.m_len > map.m_lblk)
@@ -1872,6 +2012,144 @@ out:
return ret;
}
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+ unsigned nr_pages, sector_t *last_block_in_bio,
+ bool is_readahead)
+{
+ struct dnode_of_data dn;
+ struct inode *inode = cc->inode;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct bio *bio = *bio_ret;
+ unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
+ sector_t last_block_in_file;
+ const unsigned blkbits = inode->i_blkbits;
+ const unsigned blocksize = 1 << blkbits;
+ struct decompress_io_ctx *dic = NULL;
+ int i;
+ int ret = 0;
+
+ f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
+
+ last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+
+ /* get rid of pages beyond EOF */
+ for (i = 0; i < cc->cluster_size; i++) {
+ struct page *page = cc->rpages[i];
+
+ if (!page)
+ continue;
+ if ((sector_t)page->index >= last_block_in_file) {
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
+ } else if (!PageUptodate(page)) {
+ continue;
+ }
+ unlock_page(page);
+ cc->rpages[i] = NULL;
+ cc->nr_rpages--;
+ }
+
+ /* we are done since all pages are beyond EOF */
+ if (f2fs_cluster_is_empty(cc))
+ goto out;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
+ if (ret)
+ goto out;
+
+ /* cluster was overwritten as normal cluster */
+ if (dn.data_blkaddr != COMPRESS_ADDR)
+ goto out;
+
+ for (i = 1; i < cc->cluster_size; i++) {
+ block_t blkaddr;
+
+ blkaddr = datablock_addr(dn.inode, dn.node_page,
+ dn.ofs_in_node + i);
+
+ if (!__is_valid_data_blkaddr(blkaddr))
+ break;
+
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+ ret = -EFAULT;
+ goto out_put_dnode;
+ }
+ cc->nr_cpages++;
+ }
+
+ /* nothing to decompress */
+ if (cc->nr_cpages == 0) {
+ ret = 0;
+ goto out_put_dnode;
+ }
+
+ dic = f2fs_alloc_dic(cc);
+ if (IS_ERR(dic)) {
+ ret = PTR_ERR(dic);
+ goto out_put_dnode;
+ }
+
+ for (i = 0; i < dic->nr_cpages; i++) {
+ struct page *page = dic->cpages[i];
+ block_t blkaddr;
+
+ blkaddr = datablock_addr(dn.inode, dn.node_page,
+ dn.ofs_in_node + i + 1);
+
+ if (bio && !page_is_mergeable(sbi, bio,
+ *last_block_in_bio, blkaddr)) {
+submit_and_realloc:
+ __submit_bio(sbi, bio, DATA);
+ bio = NULL;
+ }
+
+ if (!bio) {
+ bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
+ is_readahead ? REQ_RAHEAD : 0,
+ page->index);
+ if (IS_ERR(bio)) {
+ ret = PTR_ERR(bio);
+ bio = NULL;
+ dic->failed = true;
+ if (refcount_sub_and_test(dic->nr_cpages - i,
+ &dic->ref))
+ f2fs_decompress_end_io(dic->rpages,
+ cc->cluster_size, true,
+ false);
+ f2fs_free_dic(dic);
+ f2fs_put_dnode(&dn);
+ *bio_ret = bio;
+ return ret;
+ }
+ }
+
+ f2fs_wait_on_block_writeback(inode, blkaddr);
+
+ if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+ goto submit_and_realloc;
+
+ inc_page_count(sbi, F2FS_RD_DATA);
+ ClearPageError(page);
+ *last_block_in_bio = blkaddr;
+ }
+
+ f2fs_put_dnode(&dn);
+
+ *bio_ret = bio;
+ return 0;
+
+out_put_dnode:
+ f2fs_put_dnode(&dn);
+out:
+ f2fs_decompress_end_io(cc->rpages, cc->cluster_size, true, false);
+ *bio_ret = bio;
+ return ret;
+}
+#endif
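
f2fs_read_multi_pages() is driven from the readpages loop below, which
batches pages per cluster and flushes the batch as soon as a page from a
different cluster shows up. A toy model of that batching (page indexes and
cluster size chosen for illustration):

    #include <stdio.h>

    #define LOG_CLUSTER 2   /* 4 pages per cluster, as an example */

    static void flush(unsigned long cluster, int n)
    {
        if (n)
            printf("submit cluster %lu as one request (%d pages)\n",
                   cluster, n);
    }

    int main(void)
    {
        unsigned long pages[] = { 0, 1, 2, 3, 8, 9 };
        unsigned long cur = (unsigned long)-1;
        int i, n = 0;

        for (i = 0; i < 6; i++) {
            unsigned long c = pages[i] >> LOG_CLUSTER;

            if (c != cur) {     /* cannot merge: flush previous batch */
                flush(cur, n);
                cur = c;
                n = 0;
            }
            n++;
        }
        flush(cur, n);          /* final batch at end of the list */
        return 0;
    }
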
+
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
@@ -1881,7 +2159,7 @@ out:
* use ->readpage() or do the necessary surgery to decouple ->readpages()
* from read-ahead.
*/
-static int f2fs_mpage_readpages(struct address_space *mapping,
+int f2fs_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
unsigned nr_pages, bool is_readahead)
{
@@ -1889,6 +2167,19 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
sector_t last_block_in_bio = 0;
struct inode *inode = mapping->host;
struct f2fs_map_blocks map;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct compress_ctx cc = {
+ .inode = inode,
+ .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+ .cluster_size = F2FS_I(inode)->i_cluster_size,
+ .cluster_idx = NULL_CLUSTER,
+ .rpages = NULL,
+ .cpages = NULL,
+ .nr_rpages = 0,
+ .nr_cpages = 0,
+ };
+#endif
+ unsigned max_nr_pages = nr_pages;
int ret = 0;
map.m_pblk = 0;
@@ -1912,9 +2203,41 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
goto next_page;
}
- ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
- &last_block_in_bio, is_readahead);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+			/* there are remaining compressed pages, submit them */
+ if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
+ ret = f2fs_read_multi_pages(&cc, &bio,
+ max_nr_pages,
+ &last_block_in_bio,
+ is_readahead);
+ f2fs_destroy_compress_ctx(&cc);
+ if (ret)
+ goto set_error_page;
+ }
+ ret = f2fs_is_compressed_cluster(inode, page->index);
+ if (ret < 0)
+ goto set_error_page;
+ else if (!ret)
+ goto read_single_page;
+
+ ret = f2fs_init_compress_ctx(&cc);
+ if (ret)
+ goto set_error_page;
+
+ f2fs_compress_ctx_add_page(&cc, page);
+
+ goto next_page;
+ }
+read_single_page:
+#endif
+
+ ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
+ &bio, &last_block_in_bio, is_readahead);
if (ret) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+set_error_page:
+#endif
SetPageError(page);
zero_user_segment(page, 0, PAGE_SIZE);
unlock_page(page);
@@ -1922,6 +2245,19 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
next_page:
if (pages)
put_page(page);
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ /* last page */
+ if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
+ ret = f2fs_read_multi_pages(&cc, &bio,
+ max_nr_pages,
+ &last_block_in_bio,
+ is_readahead);
+ f2fs_destroy_compress_ctx(&cc);
+ }
+ }
+#endif
}
BUG_ON(pages && !list_empty(pages));
if (bio)
@@ -1936,6 +2272,11 @@ static int f2fs_read_data_page(struct file *file, struct page *page)
trace_f2fs_readpage(page, DATA);
+ if (!f2fs_is_compress_backend_ready(inode)) {
+ unlock_page(page);
+ return -EOPNOTSUPP;
+ }
+
/* If the file has inline data, try to read it directly */
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
@@ -1954,6 +2295,9 @@ static int f2fs_read_data_pages(struct file *file,
trace_f2fs_readpages(inode, page, nr_pages);
+ if (!f2fs_is_compress_backend_ready(inode))
+ return 0;
+
/* If the file has inline data, skip readpages */
if (f2fs_has_inline_data(inode))
return 0;
@@ -1961,22 +2305,23 @@ static int f2fs_read_data_pages(struct file *file,
return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}
-static int encrypt_one_page(struct f2fs_io_info *fio)
+int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
{
struct inode *inode = fio->page->mapping->host;
- struct page *mpage;
+ struct page *mpage, *page;
gfp_t gfp_flags = GFP_NOFS;
if (!f2fs_encrypted_file(inode))
return 0;
+ page = fio->compressed_page ? fio->compressed_page : fio->page;
+
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
retry_encrypt:
- fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
- PAGE_SIZE, 0,
- gfp_flags);
+ fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
+ PAGE_SIZE, 0, gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
/* flush pending IOs and wait for a while in the ENOMEM case */
if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
@@ -2136,7 +2481,7 @@ got_it:
if (ipu_force ||
(__is_valid_data_blkaddr(fio->old_blkaddr) &&
need_inplace_update(fio))) {
- err = encrypt_one_page(fio);
+ err = f2fs_encrypt_one_page(fio);
if (err)
goto out_writepage;
@@ -2172,13 +2517,16 @@ got_it:
fio->version = ni.version;
- err = encrypt_one_page(fio);
+ err = f2fs_encrypt_one_page(fio);
if (err)
goto out_writepage;
set_page_writeback(page);
ClearPageError(page);
+ if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
+ f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
+
/* LFS mode write path */
f2fs_outplace_write_data(&dn, fio);
trace_f2fs_do_write_data_page(page, OPU);
@@ -2193,16 +2541,17 @@ out:
return err;
}
-static int __write_data_page(struct page *page, bool *submitted,
+int f2fs_write_single_data_page(struct page *page, int *submitted,
struct bio **bio,
sector_t *last_block,
struct writeback_control *wbc,
- enum iostat_type io_type)
+ enum iostat_type io_type,
+ int compr_blocks)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
- const pgoff_t end_index = ((unsigned long long) i_size)
+ const pgoff_t end_index = ((unsigned long long)i_size)
>> PAGE_SHIFT;
loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
@@ -2218,6 +2567,7 @@ static int __write_data_page(struct page *page, bool *submitted,
.page = page,
.encrypted_page = NULL,
.submitted = false,
+ .compr_blocks = compr_blocks,
.need_lock = LOCK_RETRY,
.io_type = io_type,
.io_wbc = wbc,
@@ -2242,7 +2592,9 @@ static int __write_data_page(struct page *page, bool *submitted,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (page->index < end_index || f2fs_verity_in_progress(inode))
+ if (page->index < end_index ||
+ f2fs_verity_in_progress(inode) ||
+ compr_blocks)
goto write;
/*
@@ -2318,7 +2670,6 @@ out:
f2fs_remove_dirty_inode(inode);
submitted = NULL;
}
-
unlock_page(page);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
!F2FS_I(inode)->cp_task)
@@ -2331,7 +2682,7 @@ out:
}
if (submitted)
- *submitted = fio.submitted;
+ *submitted = fio.submitted ? 1 : 0;
return 0;
@@ -2352,7 +2703,23 @@ redirty_out:
static int f2fs_write_data_page(struct page *page,
struct writeback_control *wbc)
{
- return __write_data_page(page, NULL, NULL, NULL, wbc, FS_DATA_IO);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct inode *inode = page->mapping->host;
+
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ goto out;
+
+ if (f2fs_compressed_file(inode)) {
+ if (f2fs_is_compressed_cluster(inode, page->index)) {
+ redirty_page_for_writepage(wbc, page);
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
+ }
+out:
+#endif
+
+ return f2fs_write_single_data_page(page, NULL, NULL, NULL,
+ wbc, FS_DATA_IO, 0);
}
/*
@@ -2365,11 +2732,27 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
enum iostat_type io_type)
{
int ret = 0;
- int done = 0;
+ int done = 0, retry = 0;
struct pagevec pvec;
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
struct bio *bio = NULL;
sector_t last_block;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct inode *inode = mapping->host;
+ struct compress_ctx cc = {
+ .inode = inode,
+ .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
+ .cluster_size = F2FS_I(inode)->i_cluster_size,
+ .cluster_idx = NULL_CLUSTER,
+ .rpages = NULL,
+ .nr_rpages = 0,
+ .cpages = NULL,
+ .rbuf = NULL,
+ .cbuf = NULL,
+ .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
+ .private = NULL,
+ };
+#endif
int nr_pages;
pgoff_t uninitialized_var(writeback_index);
pgoff_t index;
@@ -2379,6 +2762,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int range_whole = 0;
xa_mark_t tag;
int nwritten = 0;
+ int submitted = 0;
+ int i;
pagevec_init(&pvec);
@@ -2408,12 +2793,11 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
else
tag = PAGECACHE_TAG_DIRTY;
retry:
+ retry = 0;
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
- while (!done && (index <= end)) {
- int i;
-
+ while (!done && !retry && (index <= end)) {
nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
tag);
if (nr_pages == 0)
@@ -2421,15 +2805,62 @@ retry:
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- bool submitted = false;
+ bool need_readd;
+readd:
+ need_readd = false;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ ret = f2fs_init_compress_ctx(&cc);
+ if (ret) {
+ done = 1;
+ break;
+ }
+ if (!f2fs_cluster_can_merge_page(&cc,
+ page->index)) {
+ ret = f2fs_write_multi_pages(&cc,
+ &submitted, wbc, io_type);
+ if (!ret)
+ need_readd = true;
+ goto result;
+ }
+
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto lock_page;
+
+ if (f2fs_cluster_is_empty(&cc)) {
+ void *fsdata = NULL;
+ struct page *pagep;
+ int ret2;
+
+ ret2 = f2fs_prepare_compress_overwrite(
+ inode, &pagep,
+ page->index, &fsdata);
+ if (ret2 < 0) {
+ ret = ret2;
+ done = 1;
+ break;
+ } else if (ret2 &&
+ !f2fs_compress_write_end(inode,
+ fsdata, page->index,
+ 1)) {
+ retry = 1;
+ break;
+ }
+ } else {
+ goto lock_page;
+ }
+ }
+#endif
/* give a priority to WB_SYNC threads */
if (atomic_read(&sbi->wb_sync_req[DATA]) &&
wbc->sync_mode == WB_SYNC_NONE) {
done = 1;
break;
}
-
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+lock_page:
+#endif
done_index = page->index;
retry_write:
lock_page(page);
@@ -2456,45 +2887,71 @@ continue_unlock:
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- ret = __write_data_page(page, &submitted, &bio,
- &last_block, wbc, io_type);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ get_page(page);
+ f2fs_compress_ctx_add_page(&cc, page);
+ continue;
+ }
+#endif
+ ret = f2fs_write_single_data_page(page, &submitted,
+ &bio, &last_block, wbc, io_type, 0);
+ if (ret == AOP_WRITEPAGE_ACTIVATE)
+ unlock_page(page);
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+result:
+#endif
+ nwritten += submitted;
+ wbc->nr_to_write -= submitted;
+
if (unlikely(ret)) {
/*
* keep nr_to_write, since vfs uses this to
* get # of written pages.
*/
if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(page);
ret = 0;
- continue;
+ goto next;
} else if (ret == -EAGAIN) {
ret = 0;
if (wbc->sync_mode == WB_SYNC_ALL) {
cond_resched();
congestion_wait(BLK_RW_ASYNC,
- HZ/50);
+ HZ/50);
goto retry_write;
}
- continue;
+ goto next;
}
done_index = page->index + 1;
done = 1;
break;
- } else if (submitted) {
- nwritten++;
}
- if (--wbc->nr_to_write <= 0 &&
+ if (wbc->nr_to_write <= 0 &&
wbc->sync_mode == WB_SYNC_NONE) {
done = 1;
break;
}
+next:
+ if (need_readd)
+ goto readd;
}
pagevec_release(&pvec);
cond_resched();
}
-
- if (!cycled && !done) {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	/* flush remaining pages in the compress cluster */
+ if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
+ ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
+ nwritten += submitted;
+ wbc->nr_to_write -= submitted;
+ if (ret) {
+ done = 1;
+ retry = 0;
+ }
+ }
+#endif
+ if ((!cycled && !done) || retry) {
cycled = 1;
index = 0;
end = writeback_index - 1;
@@ -2518,6 +2975,8 @@ static inline bool __should_serialize_io(struct inode *inode,
{
if (!S_ISREG(inode->i_mode))
return false;
+ if (f2fs_compressed_file(inode))
+ return true;
if (IS_NOQUOTA(inode))
return false;
/* to avoid deadlock in path of data flush */
@@ -2613,14 +3072,16 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
struct inode *inode = mapping->host;
loff_t i_size = i_size_read(inode);
+ if (IS_NOQUOTA(inode))
+ return;
+
/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
if (to > i_size && !f2fs_verity_in_progress(inode)) {
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_pagecache(inode, i_size);
- if (!IS_NOQUOTA(inode))
- f2fs_truncate_blocks(inode, i_size, true);
+ f2fs_truncate_blocks(inode, i_size, true);
up_write(&F2FS_I(inode)->i_mmap_sem);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -2660,6 +3121,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
__do_map_lock(sbi, flag, true);
locked = true;
}
+
restart:
/* check inline_data */
ipage = f2fs_get_node_page(sbi, inode->i_ino);
@@ -2750,6 +3212,24 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
if (err)
goto fail;
}
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ int ret;
+
+ *fsdata = NULL;
+
+ ret = f2fs_prepare_compress_overwrite(inode, pagep,
+ index, fsdata);
+ if (ret < 0) {
+ err = ret;
+ goto fail;
+ } else if (ret) {
+ return 0;
+ }
+ }
+#endif
+
repeat:
/*
* Do not use grab_cache_page_write_begin() to avoid deadlock due to
@@ -2762,6 +3242,8 @@ repeat:
goto fail;
}
+ /* TODO: cluster can be compressed due to race with .writepage */
+
*pagep = page;
err = prepare_write_begin(sbi, page, pos, len,
@@ -2845,6 +3327,16 @@ static int f2fs_write_end(struct file *file,
else
SetPageUptodate(page);
}
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ /* overwrite compressed file */
+ if (f2fs_compressed_file(inode) && fsdata) {
+ f2fs_compress_write_end(inode, fsdata, page->index, copied);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ return copied;
+ }
+#endif
+
if (!copied)
goto unlock_out;
@@ -3145,7 +3637,8 @@ int f2fs_migrate_page(struct address_space *mapping,
#ifdef CONFIG_SWAP
/* Copied from generic_swapfile_activate() to check any holes */
-static int check_swap_activate(struct file *swap_file, unsigned int max)
+static int check_swap_activate(struct swap_info_struct *sis,
+ struct file *swap_file, sector_t *span)
{
struct address_space *mapping = swap_file->f_mapping;
struct inode *inode = mapping->host;
@@ -3156,6 +3649,8 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
sector_t last_block;
sector_t lowest_block = -1;
sector_t highest_block = 0;
+ int nr_extents = 0;
+ int ret;
blkbits = inode->i_blkbits;
blocks_per_page = PAGE_SIZE >> blkbits;
@@ -3167,15 +3662,20 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
probe_block = 0;
page_no = 0;
last_block = i_size_read(inode) >> blkbits;
- while ((probe_block + blocks_per_page) <= last_block && page_no < max) {
+ while ((probe_block + blocks_per_page) <= last_block &&
+ page_no < sis->max) {
unsigned block_in_page;
sector_t first_block;
+ sector_t block = 0;
+ int err = 0;
cond_resched();
- first_block = bmap(inode, probe_block);
- if (first_block == 0)
+ block = probe_block;
+ err = bmap(inode, &block);
+ if (err || !block)
goto bad_bmap;
+ first_block = block;
/*
* It must be PAGE_SIZE aligned on-disk
@@ -3187,11 +3687,13 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
for (block_in_page = 1; block_in_page < blocks_per_page;
block_in_page++) {
- sector_t block;
- block = bmap(inode, probe_block + block_in_page);
- if (block == 0)
+ block = probe_block + block_in_page;
+ err = bmap(inode, &block);
+
+ if (err || !block)
goto bad_bmap;
+
if (block != first_block + block_in_page) {
/* Discontiguity */
probe_block++;
@@ -3207,13 +3709,27 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
highest_block = first_block;
}
+ /*
+ * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
+ */
+ ret = add_swap_extent(sis, page_no, 1, first_block);
+ if (ret < 0)
+ goto out;
+ nr_extents += ret;
page_no++;
probe_block += blocks_per_page;
reprobe:
continue;
}
- return 0;
-
+ ret = nr_extents;
+ *span = 1 + highest_block - lowest_block;
+ if (page_no == 0)
+ page_no = 1; /* force Empty message */
+ sis->max = page_no;
+ sis->pages = page_no - 1;
+ sis->highest_bit = page_no - 1;
+out:
+ return ret;
bad_bmap:
pr_err("swapon: swapfile has holes\n");
return -EINVAL;
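
These hunks track the VFS bmap() signature change: instead of returning the
mapped sector (with 0 meaning a hole), bmap() now returns an error code and
carries the block number in and out through a pointer. A userspace mock of
the new contract (mock_bmap() is invented; the real prototype is
int bmap(struct inode *, sector_t *)):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    static int mock_bmap(sector_t *block)
    {
        if (*block == 5)
            *block = 0;        /* pretend block 5 is a hole */
        else
            *block += 1000;    /* pretend mapping */
        return 0;
    }

    int main(void)
    {
        sector_t probe;

        for (probe = 4; probe <= 6; probe++) {
            sector_t block = probe;
            int err = mock_bmap(&block);

            if (err || !block) {
                printf("block %llu: hole -> swapfile rejected\n", probe);
                continue;
            }
            printf("block %llu maps to %llu\n", probe, block);
        }
        return 0;
    }
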
@@ -3235,14 +3751,17 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (ret)
return ret;
- ret = check_swap_activate(file, sis->max);
- if (ret)
+ if (f2fs_disable_compressed_file(inode))
+ return -EINVAL;
+
+ ret = check_swap_activate(sis, file, span);
+ if (ret < 0)
return ret;
set_inode_flag(inode, FI_PIN_FILE);
f2fs_precache_extents(inode);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
- return 0;
+ return ret;
}
static void f2fs_swap_deactivate(struct file *file)
@@ -3319,6 +3838,27 @@ void f2fs_destroy_post_read_processing(void)
kmem_cache_destroy(bio_post_read_ctx_cache);
}
+int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
+{
+ if (!f2fs_sb_has_encrypt(sbi) &&
+ !f2fs_sb_has_verity(sbi) &&
+ !f2fs_sb_has_compression(sbi))
+ return 0;
+
+ sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
+ WQ_UNBOUND | WQ_HIGHPRI,
+ num_online_cpus());
+ if (!sbi->post_read_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
+{
+ if (sbi->post_read_wq)
+ destroy_workqueue(sbi->post_read_wq);
+}
+
int __init f2fs_init_bio_entry_cache(void)
{
bio_entry_slab = f2fs_kmem_cache_create("bio_entry_slab",
@@ -3328,7 +3868,7 @@ int __init f2fs_init_bio_entry_cache(void)
return 0;
}
-void __exit f2fs_destroy_bio_entry_cache(void)
+void f2fs_destroy_bio_entry_cache(void)
{
kmem_cache_destroy(bio_entry_slab);
}
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 9b0bedd82581..6b89eae5e4ca 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -21,9 +21,45 @@
#include "gc.h"
static LIST_HEAD(f2fs_stat_list);
-static struct dentry *f2fs_debugfs_root;
static DEFINE_MUTEX(f2fs_stat_mutex);
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *f2fs_debugfs_root;
+#endif
+
+/*
+ * This function calculates the BDF of every segment
+ */
+void f2fs_update_sit_info(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_stat_info *si = F2FS_STAT(sbi);
+ unsigned long long blks_per_sec, hblks_per_sec, total_vblocks;
+ unsigned long long bimodal, dist;
+ unsigned int segno, vblocks;
+ int ndirty = 0;
+
+ bimodal = 0;
+ total_vblocks = 0;
+ blks_per_sec = BLKS_PER_SEC(sbi);
+ hblks_per_sec = blks_per_sec / 2;
+ for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
+ vblocks = get_valid_blocks(sbi, segno, true);
+ dist = abs(vblocks - hblks_per_sec);
+ bimodal += dist * dist;
+
+ if (vblocks > 0 && vblocks < blks_per_sec) {
+ total_vblocks += vblocks;
+ ndirty++;
+ }
+ }
+ dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
+ si->bimodal = div64_u64(bimodal, dist);
+ if (si->dirty_count)
+ si->avg_vblocks = div_u64(total_vblocks, ndirty);
+ else
+ si->avg_vblocks = 0;
+}
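In effect, f2fs_update_sit_info() above accumulates the squared distance of each section's valid-block count from the half-full point, then normalizes by MAIN_SECS * hblks_per_sec^2 / 100, so a fully bimodal layout (sections either empty or full) scores about 100 and uniformly half-full sections score 0. A small user-space check of that arithmetic, using assumed toy numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long vblocks[2] = { 0, 512 };	/* per-section valid blocks */
	unsigned long long blks_per_sec = 512;
	unsigned long long hblks = blks_per_sec / 2;	/* 256 */
	unsigned long long bimodal = 0, dist;
	int i;

	for (i = 0; i < 2; i++) {
		long long d = (long long)vblocks[i] - (long long)hblks;
		bimodal += (unsigned long long)(d * d);
	}
	dist = 2 * hblks * hblks / 100;			/* MAIN_SECS = 2 */
	printf("BDF = %llu\n", bimodal / dist);		/* prints 100 */
	return 0;
}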
+#ifdef CONFIG_DEBUG_FS
static void update_general_status(struct f2fs_sb_info *sbi)
{
struct f2fs_stat_info *si = F2FS_STAT(sbi);
@@ -56,7 +92,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->nquota_files = sbi->nquota_files;
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
- si->aw_cnt = atomic_read(&sbi->aw_cnt);
+ si->aw_cnt = sbi->atomic_files;
si->vw_cnt = atomic_read(&sbi->vw_cnt);
si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
@@ -94,6 +130,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->inline_xattr = atomic_read(&sbi->inline_xattr);
si->inline_inode = atomic_read(&sbi->inline_inode);
si->inline_dir = atomic_read(&sbi->inline_dir);
+ si->compr_inode = atomic_read(&sbi->compr_inode);
+ si->compr_blocks = atomic_read(&sbi->compr_blocks);
si->append = sbi->im[APPEND_INO].ino_num;
si->update = sbi->im[UPDATE_INO].ino_num;
si->orphans = sbi->im[ORPHAN_INO].ino_num;
@@ -114,7 +152,6 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID];
si->avail_nids = NM_I(sbi)->available_nids;
si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
- si->bg_gc = sbi->bg_gc;
si->io_skip_bggc = sbi->io_skip_bggc;
si->other_skip_bggc = sbi->other_skip_bggc;
si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
@@ -146,39 +183,6 @@ static void update_general_status(struct f2fs_sb_info *sbi)
}
/*
- * This function calculates BDF of every segments
- */
-static void update_sit_info(struct f2fs_sb_info *sbi)
-{
- struct f2fs_stat_info *si = F2FS_STAT(sbi);
- unsigned long long blks_per_sec, hblks_per_sec, total_vblocks;
- unsigned long long bimodal, dist;
- unsigned int segno, vblocks;
- int ndirty = 0;
-
- bimodal = 0;
- total_vblocks = 0;
- blks_per_sec = BLKS_PER_SEC(sbi);
- hblks_per_sec = blks_per_sec / 2;
- for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
- vblocks = get_valid_blocks(sbi, segno, true);
- dist = abs(vblocks - hblks_per_sec);
- bimodal += dist * dist;
-
- if (vblocks > 0 && vblocks < blks_per_sec) {
- total_vblocks += vblocks;
- ndirty++;
- }
- }
- dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
- si->bimodal = div64_u64(bimodal, dist);
- if (si->dirty_count)
- si->avg_vblocks = div_u64(total_vblocks, ndirty);
- else
- si->avg_vblocks = 0;
-}
-
-/*
* This function calculates memory footprint.
*/
static void update_mem_info(struct f2fs_sb_info *sbi)
@@ -315,6 +319,8 @@ static int stat_show(struct seq_file *s, void *v)
si->inline_inode);
seq_printf(s, " - Inline_dentry Inode: %u\n",
si->inline_dir);
+ seq_printf(s, " - Compressed Inode: %u, Blocks: %u\n",
+ si->compr_inode, si->compr_blocks);
seq_printf(s, " - Orphan/Append/Update Inode: %u, %u, %u\n",
si->orphans, si->append, si->update);
seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
@@ -441,7 +447,7 @@ static int stat_show(struct seq_file *s, void *v)
si->block_count[LFS], si->segment_count[LFS]);
/* segment usage info */
- update_sit_info(si->sbi);
+ f2fs_update_sit_info(si->sbi);
seq_printf(s, "\nBDF: %u, avg. vblocks: %u\n",
si->bimodal, si->avg_vblocks);
@@ -461,6 +467,7 @@ static int stat_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(stat);
+#endif
int f2fs_build_stats(struct f2fs_sb_info *sbi)
{
@@ -491,11 +498,12 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
atomic_set(&sbi->inline_xattr, 0);
atomic_set(&sbi->inline_inode, 0);
atomic_set(&sbi->inline_dir, 0);
+ atomic_set(&sbi->compr_inode, 0);
+ atomic_set(&sbi->compr_blocks, 0);
atomic_set(&sbi->inplace_count, 0);
for (i = META_CP; i < META_MAX; i++)
atomic_set(&sbi->meta_count[i], 0);
- atomic_set(&sbi->aw_cnt, 0);
atomic_set(&sbi->vw_cnt, 0);
atomic_set(&sbi->max_aw_cnt, 0);
atomic_set(&sbi->max_vw_cnt, 0);
@@ -520,14 +528,18 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
void __init f2fs_create_root_stats(void)
{
+#ifdef CONFIG_DEBUG_FS
f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root, NULL,
&stat_fops);
+#endif
}
void f2fs_destroy_root_stats(void)
{
+#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(f2fs_debugfs_root);
f2fs_debugfs_root = NULL;
+#endif
}
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index c967cacf979e..27d0dd7a16d6 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -578,6 +578,20 @@ next:
goto next;
}
+bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
+ struct fscrypt_name *fname)
+{
+ struct f2fs_dentry_ptr d;
+ unsigned int bit_pos;
+ int slots = GET_DENTRY_SLOTS(fname_len(fname));
+
+ make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage));
+
+ bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
+
+ return bit_pos < d.max;
+}
+
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
const struct qstr *name, f2fs_hash_t name_hash,
unsigned int bit_pos)
@@ -987,7 +1001,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
if (IS_ENCRYPTED(inode)) {
err = fscrypt_get_encryption_info(inode);
- if (err && err != -ENOKEY)
+ if (err)
goto out;
err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr);
@@ -1069,24 +1083,27 @@ static int f2fs_d_compare(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name)
{
struct qstr qstr = {.name = str, .len = len };
+ const struct dentry *parent = READ_ONCE(dentry->d_parent);
+ const struct inode *inode = READ_ONCE(parent->d_inode);
- if (!IS_CASEFOLDED(dentry->d_parent->d_inode)) {
+ if (!inode || !IS_CASEFOLDED(inode)) {
if (len != name->len)
return -1;
- return memcmp(str, name, len);
+ return memcmp(str, name->name, len);
}
- return f2fs_ci_compare(dentry->d_parent->d_inode, name, &qstr, false);
+ return f2fs_ci_compare(inode, name, &qstr, false);
}
static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str)
{
struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
const struct unicode_map *um = sbi->s_encoding;
+ const struct inode *inode = READ_ONCE(dentry->d_inode);
unsigned char *norm;
int len, ret = 0;
- if (!IS_CASEFOLDED(dentry->d_inode))
+ if (!inode || !IS_CASEFOLDED(inode))
return 0;
norm = f2fs_kmalloc(sbi, PATH_MAX, GFP_ATOMIC);
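The two dir.c hunks above harden ->d_compare() and ->d_hash() for lockless RCU-walk lookups, where dentry->d_parent and ->d_inode may change or be NULL underneath the caller. The defensive shape, sketched out of context (exact_compare() is a hypothetical fallback, not an f2fs symbol):

	/* Sample racy fields once with READ_ONCE() and tolerate NULL,
	 * instead of chasing dentry->d_parent->d_inode directly. */
	const struct dentry *parent = READ_ONCE(dentry->d_parent);
	const struct inode *dir = READ_ONCE(parent->d_inode);

	if (!dir || !IS_CASEFOLDED(dir))
		return exact_compare(str, name, len);	/* hypothetical */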
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 5a888a063c7f..5355be6b6755 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -116,6 +116,8 @@ typedef u32 block_t; /*
*/
typedef u32 nid_t;
+#define COMPRESS_EXT_NUM 16
+
struct f2fs_mount_info {
unsigned int opt;
int write_io_size_bits; /* Write IO size bits */
@@ -140,6 +142,12 @@ struct f2fs_mount_info {
block_t unusable_cap; /* Amount of space allowed to be
* unusable when disabling checkpoint
*/
+
+ /* For compression */
+ unsigned char compress_algorithm; /* algorithm type */
+ unsigned compress_log_size; /* cluster log size */
+ unsigned char compress_ext_cnt; /* extension count */
+ unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
};
#define F2FS_FEATURE_ENCRYPT 0x0001
@@ -155,6 +163,7 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_VERITY 0x0400
#define F2FS_FEATURE_SB_CHKSUM 0x0800
#define F2FS_FEATURE_CASEFOLD 0x1000
+#define F2FS_FEATURE_COMPRESSION 0x2000
#define __F2FS_HAS_FEATURE(raw_super, mask) \
((raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -712,6 +721,12 @@ struct f2fs_inode_info {
int i_inline_xattr_size; /* inline xattr size */
struct timespec64 i_crtime; /* inode creation time */
struct timespec64 i_disk_time[4];/* inode disk times */
+
+ /* for file compress */
+ u64 i_compr_blocks; /* # of compressed blocks */
+ unsigned char i_compress_algorithm; /* algorithm type */
+ unsigned char i_log_cluster_size; /* log of cluster size */
+ unsigned int i_cluster_size; /* cluster size */
};
static inline void get_extent_info(struct extent_info *ext,
@@ -1018,6 +1033,7 @@ enum need_lock_type {
enum cp_reason_type {
CP_NO_NEEDED,
CP_NON_REGULAR,
+ CP_COMPRESSED,
CP_HARDLINK,
CP_SB_NEED_CP,
CP_WRONG_PINO,
@@ -1056,12 +1072,15 @@ struct f2fs_io_info {
block_t old_blkaddr; /* old block address before Cow */
struct page *page; /* page to be written */
struct page *encrypted_page; /* encrypted page */
+ struct page *compressed_page; /* compressed page */
struct list_head list; /* serialize IOs */
bool submitted; /* indicate IO submission */
int need_lock; /* indicate we need to lock cp_rwsem */
bool in_list; /* indicate fio is in io_list */
bool is_por; /* indicate IO is from recovery or not */
bool retry; /* need to reallocate block address */
+ int compr_blocks; /* # of compressed block addresses */
+ bool encrypted; /* indicate file is encrypted */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
struct bio **bio; /* bio for ipu */
@@ -1169,6 +1188,18 @@ enum fsync_mode {
FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */
};
+/*
+ * this value is set in page as a private data which indicate that
+ * the page is atomically written, and it is in inmem_pages list.
+ */
+#define ATOMIC_WRITTEN_PAGE ((unsigned long)-1)
+#define DUMMY_WRITTEN_PAGE ((unsigned long)-2)
+
+#define IS_ATOMIC_WRITTEN_PAGE(page) \
+ (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
+#define IS_DUMMY_WRITTEN_PAGE(page) \
+ (page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
+
#ifdef CONFIG_FS_ENCRYPTION
#define DUMMY_ENCRYPTION_ENABLED(sbi) \
(unlikely(F2FS_OPTION(sbi).test_dummy_encryption))
@@ -1176,6 +1207,75 @@ enum fsync_mode {
#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
#endif
+/* For compression */
+enum compress_algorithm_type {
+ COMPRESS_LZO,
+ COMPRESS_LZ4,
+ COMPRESS_MAX,
+};
+
+#define COMPRESS_DATA_RESERVED_SIZE 4
+struct compress_data {
+ __le32 clen; /* compressed data size */
+ __le32 chksum; /* checksum of compressed data */
+ __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */
+ u8 cdata[]; /* compressed data */
+};
+
+#define COMPRESS_HEADER_SIZE (sizeof(struct compress_data))
+
+#define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000
+
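For orientation, the on-disk cluster header declared just above is fixed-size: clen (4 bytes) + chksum (4) + four reserved __le32 words (16), i.e. 24 bytes ahead of the compressed payload. A stand-alone size check mirroring that layout (user-space types assumed in place of __le32):

#include <stdint.h>
#include <stdio.h>

struct compress_data {
	uint32_t clen;		/* compressed data size */
	uint32_t chksum;	/* checksum of compressed data */
	uint32_t reserved[4];	/* COMPRESS_DATA_RESERVED_SIZE */
	uint8_t  cdata[];	/* flexible array: compressed payload */
};

int main(void)
{
	printf("%zu\n", sizeof(struct compress_data));	/* prints 24 */
	return 0;
}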
+/* compress context */
+struct compress_ctx {
+ struct inode *inode; /* inode the context belongs to */
+ pgoff_t cluster_idx; /* cluster index number */
+ unsigned int cluster_size; /* page count in cluster */
+ unsigned int log_cluster_size; /* log of cluster size */
+ struct page **rpages; /* pages store raw data in cluster */
+ unsigned int nr_rpages; /* total page number in rpages */
+ struct page **cpages; /* pages store compressed data in cluster */
+ unsigned int nr_cpages; /* total page number in cpages */
+ void *rbuf; /* virtually mapped address of rpages */
+ struct compress_data *cbuf; /* virtually mapped address of cpages */
+ size_t rlen; /* valid data length in rbuf */
+ size_t clen; /* valid data length in cbuf */
+ void *private; /* payload buffer for specified compression algorithm */
+};
+
+/* compress context for write IO path */
+struct compress_io_ctx {
+ u32 magic; /* magic number to indicate page is compressed */
+ struct inode *inode; /* inode the context belong to */
+ struct page **rpages; /* pages store raw data in cluster */
+ unsigned int nr_rpages; /* total page number in rpages */
+ refcount_t ref; /* reference count of raw page */
+};
+
+/* decompress io context for read IO path */
+struct decompress_io_ctx {
+ u32 magic; /* magic number to indicate page is compressed */
+ struct inode *inode; /* inode the context belongs to */
+ pgoff_t cluster_idx; /* cluster index number */
+ unsigned int cluster_size; /* page count in cluster */
+ unsigned int log_cluster_size; /* log of cluster size */
+ struct page **rpages; /* pages store raw data in cluster */
+ unsigned int nr_rpages; /* total page number in rpages */
+ struct page **cpages; /* pages store compressed data in cluster */
+ unsigned int nr_cpages; /* total page number in cpages */
+ struct page **tpages; /* temp pages to pad holes in cluster */
+ void *rbuf; /* virtually mapped address of rpages */
+ struct compress_data *cbuf; /* virtually mapped address of cpages */
+ size_t rlen; /* valid data length in rbuf */
+ size_t clen; /* valid data length in cbuf */
+ refcount_t ref; /* reference count of compressed page */
+ bool failed; /* indicate IO error during decompression */
+};
+
+#define NULL_CLUSTER ((unsigned int)(~0))
+#define MIN_COMPRESS_LOG_SIZE 2
+#define MAX_COMPRESS_LOG_SIZE 8
+
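Given the bounds just above, a cluster spans 2^i_log_cluster_size pages, so valid clusters run from 4 to 256 pages; with the common 4 KiB page size (an assumption here, not something this header fixes) that is 16 KiB to 1 MiB. A quick enumeration:

#include <stdio.h>

int main(void)
{
	/* MIN_COMPRESS_LOG_SIZE = 2, MAX_COMPRESS_LOG_SIZE = 8 */
	for (int lg = 2; lg <= 8; lg++)
		printf("log=%d -> %3d pages (%4d KiB)\n",
		       lg, 1 << lg, (1 << lg) * 4);
	return 0;
}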
struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
struct proc_dir_entry *s_proc; /* proc entry */
@@ -1291,7 +1391,10 @@ struct f2fs_sb_info {
struct f2fs_mount_info mount_opt; /* mount options */
/* for cleaning operations */
- struct mutex gc_mutex; /* mutex for GC */
+ struct rw_semaphore gc_lock; /*
+ * semaphore for GC, avoid
+ * race between GC and GC or CP
+ */
struct f2fs_gc_kthread *gc_thread; /* GC thread */
unsigned int cur_victim_sec; /* current victim section num */
unsigned int gc_mode; /* current GC state */
@@ -1327,11 +1430,11 @@ struct f2fs_sb_info {
atomic_t inline_xattr; /* # of inline_xattr inodes */
atomic_t inline_inode; /* # of inline_data inodes */
atomic_t inline_dir; /* # of inline_dentry inodes */
- atomic_t aw_cnt; /* # of atomic writes */
+ atomic_t compr_inode; /* # of compressed inodes */
+ atomic_t compr_blocks; /* # of compressed blocks */
atomic_t vw_cnt; /* # of volatile writes */
atomic_t max_aw_cnt; /* max # of atomic writes */
atomic_t max_vw_cnt; /* max # of volatile writes */
- int bg_gc; /* background gc calls */
unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
unsigned int other_skip_bggc; /* skip background gc for other reasons */
unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
@@ -1365,6 +1468,8 @@ struct f2fs_sb_info {
/* Precomputed FS UUID checksum for seeding other checksums */
__u32 s_chksum_seed;
+
+ struct workqueue_struct *post_read_wq; /* post read workqueue */
};
struct f2fs_private_dio {
@@ -2222,26 +2327,6 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
return entry;
}
-static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
- int npages, bool no_fail)
-{
- struct bio *bio;
-
- if (no_fail) {
- /* No failure on bio allocation */
- bio = bio_alloc(GFP_NOIO, npages);
- if (!bio)
- bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
- return bio;
- }
- if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
- f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
- return NULL;
- }
-
- return bio_alloc(GFP_KERNEL, npages);
-}
-
static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
if (sbi->gc_mode == GC_URGENT)
@@ -2378,11 +2463,13 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
/*
* On-disk inode flags (f2fs_inode::i_flags)
*/
+#define F2FS_COMPR_FL 0x00000004 /* Compress file */
#define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */
#define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */
#define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */
#define F2FS_NODUMP_FL 0x00000040 /* do not dump file */
#define F2FS_NOATIME_FL 0x00000080 /* do not update atime */
+#define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */
#define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */
#define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
#define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
@@ -2391,7 +2478,7 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
/* Flags that should be inherited by new inodes from their parent. */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
- F2FS_CASEFOLD_FL)
+ F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
@@ -2443,6 +2530,8 @@ enum {
FI_PIN_FILE, /* indicate file should not be gced */
FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
+ FI_COMPRESSED_FILE, /* indicate file's data can be compressed */
+ FI_MMAP_FILE, /* indicate file was mmapped */
};
static inline void __mark_inode_dirty_flag(struct inode *inode,
@@ -2459,6 +2548,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
case FI_DATA_EXIST:
case FI_INLINE_DOTS:
case FI_PIN_FILE:
+ case FI_COMPRESSED_FILE:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
@@ -2614,16 +2704,27 @@ static inline int f2fs_has_inline_xattr(struct inode *inode)
return is_inode_flag_set(inode, FI_INLINE_XATTR);
}
+static inline int f2fs_compressed_file(struct inode *inode)
+{
+ return S_ISREG(inode->i_mode) &&
+ is_inode_flag_set(inode, FI_COMPRESSED_FILE);
+}
+
static inline unsigned int addrs_per_inode(struct inode *inode)
{
unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
get_inline_xattr_addrs(inode);
- return ALIGN_DOWN(addrs, 1);
+
+ if (!f2fs_compressed_file(inode))
+ return addrs;
+ return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
}
static inline unsigned int addrs_per_block(struct inode *inode)
{
- return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, 1);
+ if (!f2fs_compressed_file(inode))
+ return DEF_ADDRS_PER_BLOCK;
+ return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
}
static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
@@ -2656,6 +2757,11 @@ static inline int f2fs_has_inline_dots(struct inode *inode)
return is_inode_flag_set(inode, FI_INLINE_DOTS);
}
+static inline int f2fs_is_mmap_file(struct inode *inode)
+{
+ return is_inode_flag_set(inode, FI_MMAP_FILE);
+}
+
static inline bool f2fs_is_pinned_file(struct inode *inode)
{
return is_inode_flag_set(inode, FI_PIN_FILE);
@@ -2783,7 +2889,8 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (!test_opt(sbi, EXTENT_CACHE) ||
- is_inode_flag_set(inode, FI_NO_EXTENT))
+ is_inode_flag_set(inode, FI_NO_EXTENT) ||
+ is_inode_flag_set(inode, FI_COMPRESSED_FILE))
return false;
/*
@@ -2903,7 +3010,8 @@ static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
- if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
+ blkaddr == COMPRESS_ADDR)
return false;
return true;
}
@@ -3001,6 +3109,8 @@ ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
struct page *page, struct inode *inode);
+bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
+ struct fscrypt_name *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
const struct qstr *name, f2fs_hash_t name_hash,
unsigned int bit_pos);
@@ -3155,6 +3265,8 @@ void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
+int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
@@ -3205,10 +3317,13 @@ void f2fs_destroy_checkpoint_caches(void);
/*
* data.c
*/
-int f2fs_init_post_read_processing(void);
-void f2fs_destroy_post_read_processing(void);
+int __init f2fs_init_bioset(void);
+void f2fs_destroy_bioset(void);
+struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool no_fail);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
+void f2fs_submit_bio(struct f2fs_sb_info *sbi,
+ struct bio *bio, enum page_type type);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page,
@@ -3229,6 +3344,9 @@ int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
+int f2fs_mpage_readpages(struct address_space *mapping,
+ struct list_head *pages, struct page *page,
+ unsigned nr_pages, bool is_readahead);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
int op_flags, bool for_write);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
@@ -3242,8 +3360,14 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
+int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
+int f2fs_write_single_data_page(struct page *page, int *submitted,
+ struct bio **bio, sector_t *last_block,
+ struct writeback_control *wbc,
+ enum iostat_type io_type,
+ int compr_blocks);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
@@ -3253,6 +3377,10 @@ int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
#endif
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
+int f2fs_init_post_read_processing(void);
+void f2fs_destroy_post_read_processing(void);
+int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
+void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
/*
* gc.c
@@ -3299,6 +3427,7 @@ struct f2fs_stat_info {
int nr_discard_cmd;
unsigned int undiscard_blks;
int inline_xattr, inline_inode, inline_dir, append, update, orphans;
+ int compr_inode, compr_blocks;
int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks;
@@ -3330,7 +3459,7 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
#define stat_inc_cp_count(si) ((si)->cp_count++)
#define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
#define stat_inc_call_count(si) ((si)->call_count++)
-#define stat_inc_bggc_count(sbi) ((sbi)->bg_gc++)
+#define stat_inc_bggc_count(si) ((si)->bg_gc++)
#define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
@@ -3369,6 +3498,20 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
if (f2fs_has_inline_dentry(inode)) \
(atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
} while (0)
+#define stat_inc_compr_inode(inode) \
+ do { \
+ if (f2fs_compressed_file(inode)) \
+ (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \
+ } while (0)
+#define stat_dec_compr_inode(inode) \
+ do { \
+ if (f2fs_compressed_file(inode)) \
+ (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \
+ } while (0)
+#define stat_add_compr_blocks(inode, blocks) \
+ (atomic_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
+#define stat_sub_compr_blocks(inode, blocks) \
+ (atomic_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
#define stat_inc_meta_count(sbi, blkaddr) \
do { \
if (blkaddr < SIT_I(sbi)->sit_base_addr) \
@@ -3386,13 +3529,9 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
((sbi)->block_count[(curseg)->alloc_type]++)
#define stat_inc_inplace_blocks(sbi) \
(atomic_inc(&(sbi)->inplace_count))
-#define stat_inc_atomic_write(inode) \
- (atomic_inc(&F2FS_I_SB(inode)->aw_cnt))
-#define stat_dec_atomic_write(inode) \
- (atomic_dec(&F2FS_I_SB(inode)->aw_cnt))
#define stat_update_max_atomic_write(inode) \
do { \
- int cur = atomic_read(&F2FS_I_SB(inode)->aw_cnt); \
+ int cur = F2FS_I_SB(inode)->atomic_files; \
int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
if (cur > max) \
atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
@@ -3444,6 +3583,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
+void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si) do { } while (0)
#define stat_inc_bg_cp_count(si) do { } while (0)
@@ -3453,8 +3593,8 @@ void f2fs_destroy_root_stats(void);
#define stat_other_skip_bggc_count(sbi) do { } while (0)
#define stat_inc_dirty_inode(sbi, type) do { } while (0)
#define stat_dec_dirty_inode(sbi, type) do { } while (0)
-#define stat_inc_total_hit(sb) do { } while (0)
-#define stat_inc_rbtree_node_hit(sb) do { } while (0)
+#define stat_inc_total_hit(sbi) do { } while (0)
+#define stat_inc_rbtree_node_hit(sbi) do { } while (0)
#define stat_inc_largest_node_hit(sbi) do { } while (0)
#define stat_inc_cached_node_hit(sbi) do { } while (0)
#define stat_inc_inline_xattr(inode) do { } while (0)
@@ -3463,6 +3603,10 @@ void f2fs_destroy_root_stats(void);
#define stat_dec_inline_inode(inode) do { } while (0)
#define stat_inc_inline_dir(inode) do { } while (0)
#define stat_dec_inline_dir(inode) do { } while (0)
+#define stat_inc_compr_inode(inode) do { } while (0)
+#define stat_dec_compr_inode(inode) do { } while (0)
+#define stat_add_compr_blocks(inode, blocks) do { } while (0)
+#define stat_sub_compr_blocks(inode, blocks) do { } while (0)
#define stat_inc_atomic_write(inode) do { } while (0)
#define stat_dec_atomic_write(inode) do { } while (0)
#define stat_update_max_atomic_write(inode) do { } while (0)
@@ -3482,6 +3626,7 @@ static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
+static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
#endif
extern const struct file_operations f2fs_dir_operations;
@@ -3510,6 +3655,7 @@ void f2fs_truncate_inline_inode(struct inode *inode,
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
+int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
@@ -3602,7 +3748,85 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
*/
static inline bool f2fs_post_read_required(struct inode *inode)
{
- return f2fs_encrypted_file(inode) || fsverity_active(inode);
+ return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
+ f2fs_compressed_file(inode);
+}
+
+/*
+ * compress.c
+ */
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+bool f2fs_is_compressed_page(struct page *page);
+struct page *f2fs_compress_control_page(struct page *page);
+int f2fs_prepare_compress_overwrite(struct inode *inode,
+ struct page **pagep, pgoff_t index, void **fsdata);
+bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
+ pgoff_t index, unsigned copied);
+void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
+bool f2fs_is_compress_backend_ready(struct inode *inode);
+void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
+bool f2fs_cluster_is_empty(struct compress_ctx *cc);
+bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
+void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
+int f2fs_write_multi_pages(struct compress_ctx *cc,
+ int *submitted,
+ struct writeback_control *wbc,
+ enum iostat_type io_type);
+int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
+int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
+ unsigned nr_pages, sector_t *last_block_in_bio,
+ bool is_readahead);
+struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
+void f2fs_free_dic(struct decompress_io_ctx *dic);
+void f2fs_decompress_end_io(struct page **rpages,
+ unsigned int cluster_size, bool err, bool verity);
+int f2fs_init_compress_ctx(struct compress_ctx *cc);
+void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
+void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
+#else
+static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
+static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
+{
+ if (!f2fs_compressed_file(inode))
+ return true;
+ /* compression is not supported */
+ return false;
+}
+static inline struct page *f2fs_compress_control_page(struct page *page)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static inline void set_compress_context(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ F2FS_I(inode)->i_compress_algorithm =
+ F2FS_OPTION(sbi).compress_algorithm;
+ F2FS_I(inode)->i_log_cluster_size =
+ F2FS_OPTION(sbi).compress_log_size;
+ F2FS_I(inode)->i_cluster_size =
+ 1 << F2FS_I(inode)->i_log_cluster_size;
+ F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
+ set_inode_flag(inode, FI_COMPRESSED_FILE);
+ stat_inc_compr_inode(inode);
+}
+
+static inline u64 f2fs_disable_compressed_file(struct inode *inode)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
+ if (!f2fs_compressed_file(inode))
+ return 0;
+ if (fi->i_compr_blocks)
+ return fi->i_compr_blocks;
+
+ fi->i_flags &= ~F2FS_COMPR_FL;
+ clear_inode_flag(inode, FI_COMPRESSED_FILE);
+ stat_dec_compr_inode(inode);
+ return 0;
}
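Note the calling convention of f2fs_disable_compressed_file() above: a non-zero return is the count of compressed blocks still owned by the inode, meaning the file cannot be reverted to a normal one. Both call sites in this patch (swap activation and file pinning) use the same shape, sketched here:

	if (f2fs_disable_compressed_file(inode))
		return -EINVAL;	/* inode still holds compressed blocks */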
#define F2FS_FEATURE_FUNCS(name, flagname) \
@@ -3623,6 +3847,7 @@ F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
+F2FS_FEATURE_FUNCS(compression, COMPRESSION);
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
@@ -3704,6 +3929,30 @@ static inline bool f2fs_may_encrypt(struct inode *inode)
#endif
}
+static inline bool f2fs_may_compress(struct inode *inode)
+{
+ if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
+ f2fs_is_atomic_file(inode) ||
+ f2fs_is_volatile_file(inode))
+ return false;
+ return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
+}
+
+static inline void f2fs_i_compr_blocks_update(struct inode *inode,
+ u64 blocks, bool add)
+{
+ int diff = F2FS_I(inode)->i_cluster_size - blocks;
+
+ if (add) {
+ F2FS_I(inode)->i_compr_blocks += diff;
+ stat_add_compr_blocks(inode, diff);
+ } else {
+ F2FS_I(inode)->i_compr_blocks -= diff;
+ stat_sub_compr_blocks(inode, diff);
+ }
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
static inline int block_unaligned_IO(struct inode *inode,
struct kiocb *iocb, struct iov_iter *iter)
{
@@ -3735,6 +3984,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
return true;
if (f2fs_is_multi_device(sbi))
return true;
+ if (f2fs_compressed_file(inode))
+ return true;
/*
* for blkzoned device, fallback direct IO to buffered IO, so
* all IOs can be serialized by log-structured write.
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 85af112e868d..0d4da644df3b 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -50,8 +50,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct dnode_of_data dn = { .node_changed = false };
- int err;
+ struct dnode_of_data dn;
+ bool need_alloc = true;
+ int err = 0;
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
@@ -63,6 +64,26 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
goto err;
}
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode)) {
+ int ret = f2fs_is_compressed_cluster(inode, page->index);
+
+ if (ret < 0) {
+ err = ret;
+ goto err;
+ } else if (ret) {
+ if (ret < F2FS_I(inode)->i_cluster_size) {
+ err = -EAGAIN;
+ goto err;
+ }
+ need_alloc = false;
+ }
+ }
+#endif
+ /* should be done outside of any locked page */
+ if (need_alloc)
+ f2fs_balance_fs(sbi, true);
+
sb_start_pagefault(inode->i_sb);
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
@@ -78,15 +99,17 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
goto out_sem;
}
- /* block allocation */
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_get_block(&dn, page->index);
- f2fs_put_dnode(&dn);
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
- if (err) {
- unlock_page(page);
- goto out_sem;
+ if (need_alloc) {
+ /* block allocation */
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = f2fs_get_block(&dn, page->index);
+ f2fs_put_dnode(&dn);
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ if (err) {
+ unlock_page(page);
+ goto out_sem;
+ }
}
/* fill the page */
@@ -120,8 +143,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
out_sem:
up_read(&F2FS_I(inode)->i_mmap_sem);
- f2fs_balance_fs(sbi, dn.node_changed);
-
sb_end_pagefault(inode->i_sb);
err:
return block_page_mkwrite_return(err);
@@ -155,6 +176,8 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
if (!S_ISREG(inode->i_mode))
cp_reason = CP_NON_REGULAR;
+ else if (f2fs_compressed_file(inode))
+ cp_reason = CP_COMPRESSED;
else if (inode->i_nlink != 1)
cp_reason = CP_HARDLINK;
else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
@@ -485,6 +508,9 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
+ if (!f2fs_is_compress_backend_ready(inode))
+ return -EOPNOTSUPP;
+
/* we don't need to use inline_data strictly */
err = f2fs_convert_inline_inode(inode);
if (err)
@@ -492,6 +518,7 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
vma->vm_ops = &f2fs_file_vm_ops;
+ set_inode_flag(inode, FI_MMAP_FILE);
return 0;
}
@@ -502,6 +529,9 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
if (err)
return err;
+ if (!f2fs_is_compress_backend_ready(inode))
+ return -EOPNOTSUPP;
+
err = fsverity_file_open(inode, filp);
if (err)
return err;
@@ -518,6 +548,9 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
int nr_free = 0, ofs = dn->ofs_in_node, len = count;
__le32 *addr;
int base = 0;
+ bool compressed_cluster = false;
+ int cluster_index = 0, valid_blocks = 0;
+ int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
base = get_extra_isize(dn->inode);
@@ -525,26 +558,43 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
raw_node = F2FS_NODE(dn->node_page);
addr = blkaddr_in_node(raw_node) + base + ofs;
- for (; count > 0; count--, addr++, dn->ofs_in_node++) {
+ /* Assumption: truncation starts at a cluster boundary */
+ for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
block_t blkaddr = le32_to_cpu(*addr);
+ if (f2fs_compressed_file(dn->inode) &&
+ !(cluster_index & (cluster_size - 1))) {
+ if (compressed_cluster)
+ f2fs_i_compr_blocks_update(dn->inode,
+ valid_blocks, false);
+ compressed_cluster = (blkaddr == COMPRESS_ADDR);
+ valid_blocks = 0;
+ }
+
if (blkaddr == NULL_ADDR)
continue;
dn->data_blkaddr = NULL_ADDR;
f2fs_set_data_blkaddr(dn);
- if (__is_valid_data_blkaddr(blkaddr) &&
- !f2fs_is_valid_blkaddr(sbi, blkaddr,
+ if (__is_valid_data_blkaddr(blkaddr)) {
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
DATA_GENERIC_ENHANCE))
- continue;
+ continue;
+ if (compressed_cluster)
+ valid_blocks++;
+ }
- f2fs_invalidate_blocks(sbi, blkaddr);
if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
+
+ f2fs_invalidate_blocks(sbi, blkaddr);
nr_free++;
}
+ if (compressed_cluster)
+ f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
+
if (nr_free) {
pgoff_t fofs;
/*
@@ -587,6 +637,9 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
return 0;
}
+ if (f2fs_compressed_file(inode))
+ return 0;
+
page = f2fs_get_lock_data_page(inode, index, true);
if (IS_ERR(page))
return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
@@ -602,7 +655,7 @@ truncate_out:
return 0;
}
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
+static int do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
@@ -667,6 +720,28 @@ free_partial:
return err;
}
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
+{
+ u64 free_from = from;
+
+ /*
+ * for a compressed file, only cluster-size-aligned
+ * truncation is supported.
+ */
+ if (f2fs_compressed_file(inode)) {
+ size_t cluster_shift = PAGE_SHIFT +
+ F2FS_I(inode)->i_log_cluster_size;
+ size_t cluster_mask = (1 << cluster_shift) - 1;
+
+ free_from = from >> cluster_shift;
+ if (from & cluster_mask)
+ free_from++;
+ free_from <<= cluster_shift;
+ }
+
+ return do_truncate_blocks(inode, free_from, lock);
+}
+
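The rounding in f2fs_truncate_blocks() above always moves the free point up to the next cluster boundary, so a partially truncated cluster is left intact for the compression path to handle. A worked example of the arithmetic, assuming PAGE_SHIFT = 12 and i_log_cluster_size = 2 (16 KiB clusters):

#include <stdio.h>

int main(void)
{
	unsigned long long from = 5000;		/* truncate target, in bytes */
	unsigned int cluster_shift = 12 + 2;	/* PAGE_SHIFT + log cluster size */
	unsigned long long mask = (1ULL << cluster_shift) - 1;
	unsigned long long free_from = from >> cluster_shift;

	if (from & mask)			/* partial cluster: keep it */
		free_from++;
	free_from <<= cluster_shift;
	printf("%llu\n", free_from);		/* 16384: next cluster boundary */
	return 0;
}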
int f2fs_truncate(struct inode *inode)
{
int err;
@@ -754,18 +829,12 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
inode->i_uid = attr->ia_uid;
if (ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if (ia_valid & ATTR_ATIME) {
- inode->i_atime = timestamp_truncate(attr->ia_atime,
- inode);
- }
- if (ia_valid & ATTR_MTIME) {
- inode->i_mtime = timestamp_truncate(attr->ia_mtime,
- inode);
- }
- if (ia_valid & ATTR_CTIME) {
- inode->i_ctime = timestamp_truncate(attr->ia_ctime,
- inode);
- }
+ if (ia_valid & ATTR_ATIME)
+ inode->i_atime = attr->ia_atime;
+ if (ia_valid & ATTR_MTIME)
+ inode->i_mtime = attr->ia_mtime;
+ if (ia_valid & ATTR_CTIME)
+ inode->i_ctime = attr->ia_ctime;
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
@@ -786,6 +855,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
return -EIO;
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ !f2fs_is_compress_backend_ready(inode))
+ return -EOPNOTSUPP;
+
err = setattr_prepare(dentry, attr);
if (err)
return err;
@@ -1026,8 +1099,8 @@ next_dnode:
} else if (ret == -ENOENT) {
if (dn.max_level == 0)
return -ENOENT;
- done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node,
- len);
+ done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
+ dn.ofs_in_node, len);
blkaddr += done;
do_replace += done;
goto next;
@@ -1190,13 +1263,13 @@ static int __exchange_data_block(struct inode *src_inode,
src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
array_size(olen, sizeof(block_t)),
- GFP_KERNEL);
+ GFP_NOFS);
if (!src_blkaddr)
return -ENOMEM;
do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
array_size(olen, sizeof(int)),
- GFP_KERNEL);
+ GFP_NOFS);
if (!do_replace) {
kvfree(src_blkaddr);
return -ENOMEM;
@@ -1563,7 +1636,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
next_alloc:
if (has_not_enough_free_secs(sbi, 0,
GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
err = f2fs_gc(sbi, true, false, NULL_SEGNO);
if (err && err != -ENODATA && err != -EAGAIN)
goto out_err;
@@ -1621,6 +1694,8 @@ static long f2fs_fallocate(struct file *file, int mode,
return -EIO;
if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
return -ENOSPC;
+ if (!f2fs_is_compress_backend_ready(inode))
+ return -EOPNOTSUPP;
/* f2fs only support ->fallocate for regular file */
if (!S_ISREG(inode->i_mode))
@@ -1630,6 +1705,11 @@ static long f2fs_fallocate(struct file *file, int mode,
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
+ if (f2fs_compressed_file(inode) &&
+ (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
+ FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
+ return -EOPNOTSUPP;
+
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
FALLOC_FL_INSERT_RANGE))
@@ -1719,7 +1799,40 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
return -ENOTEMPTY;
}
+ if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ return -EOPNOTSUPP;
+ if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
+ return -EINVAL;
+ }
+
+ if ((iflags ^ fi->i_flags) & F2FS_COMPR_FL) {
+ if (S_ISREG(inode->i_mode) &&
+ (fi->i_flags & F2FS_COMPR_FL || i_size_read(inode) ||
+ F2FS_HAS_BLOCKS(inode)))
+ return -EINVAL;
+ if (iflags & F2FS_NOCOMP_FL)
+ return -EINVAL;
+ if (iflags & F2FS_COMPR_FL) {
+ int err = f2fs_convert_inline_inode(inode);
+
+ if (err)
+ return err;
+
+ if (!f2fs_may_compress(inode))
+ return -EINVAL;
+
+ set_compress_context(inode);
+ }
+ }
+ if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
+ if (fi->i_flags & F2FS_COMPR_FL)
+ return -EINVAL;
+ }
+
fi->i_flags = iflags | (fi->i_flags & ~mask);
+ f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
+ (fi->i_flags & F2FS_NOCOMP_FL));
if (fi->i_flags & F2FS_PROJINHERIT_FL)
set_inode_flag(inode, FI_PROJ_INHERIT);
@@ -1745,11 +1858,13 @@ static const struct {
u32 iflag;
u32 fsflag;
} f2fs_fsflags_map[] = {
+ { F2FS_COMPR_FL, FS_COMPR_FL },
{ F2FS_SYNC_FL, FS_SYNC_FL },
{ F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
{ F2FS_APPEND_FL, FS_APPEND_FL },
{ F2FS_NODUMP_FL, FS_NODUMP_FL },
{ F2FS_NOATIME_FL, FS_NOATIME_FL },
+ { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
{ F2FS_INDEX_FL, FS_INDEX_FL },
{ F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
{ F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
@@ -1757,11 +1872,13 @@ static const struct {
};
#define F2FS_GETTABLE_FS_FL ( \
+ FS_COMPR_FL | \
FS_SYNC_FL | \
FS_IMMUTABLE_FL | \
FS_APPEND_FL | \
FS_NODUMP_FL | \
FS_NOATIME_FL | \
+ FS_NOCOMP_FL | \
FS_INDEX_FL | \
FS_DIRSYNC_FL | \
FS_PROJINHERIT_FL | \
@@ -1772,11 +1889,13 @@ static const struct {
FS_CASEFOLD_FL)
#define F2FS_SETTABLE_FS_FL ( \
+ FS_COMPR_FL | \
FS_SYNC_FL | \
FS_IMMUTABLE_FL | \
FS_APPEND_FL | \
FS_NODUMP_FL | \
FS_NOATIME_FL | \
+ FS_NOCOMP_FL | \
FS_DIRSYNC_FL | \
FS_PROJINHERIT_FL | \
FS_CASEFOLD_FL)
@@ -1897,6 +2016,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
inode_lock(inode);
+ f2fs_disable_compressed_file(inode);
+
if (f2fs_is_atomic_file(inode)) {
if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
ret = -EINVAL;
@@ -1935,7 +2056,6 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
F2FS_I(inode)->inmem_task = current;
- stat_inc_atomic_write(inode);
stat_update_max_atomic_write(inode);
out:
inode_unlock(inode);
@@ -2324,12 +2444,12 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
return ret;
if (!sync) {
- if (!mutex_trylock(&sbi->gc_mutex)) {
+ if (!down_write_trylock(&sbi->gc_lock)) {
ret = -EBUSY;
goto out;
}
} else {
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
}
ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
@@ -2367,12 +2487,12 @@ static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
do_more:
if (!range.sync) {
- if (!mutex_trylock(&sbi->gc_mutex)) {
+ if (!down_write_trylock(&sbi->gc_lock)) {
ret = -EBUSY;
goto out;
}
} else {
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
}
ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
@@ -2803,7 +2923,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
end_segno = min(start_segno + range.segments, dev_end_segno);
while (start_segno < end_segno) {
- if (!mutex_trylock(&sbi->gc_mutex)) {
+ if (!down_write_trylock(&sbi->gc_lock)) {
ret = -EBUSY;
goto out;
}
@@ -3098,10 +3218,16 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
ret = -EAGAIN;
goto out;
}
+
ret = f2fs_convert_inline_inode(inode);
if (ret)
goto out;
+ if (f2fs_disable_compressed_file(inode)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
set_inode_flag(inode, FI_PIN_FILE);
ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
@@ -3350,6 +3476,17 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
+static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+
+ if (!f2fs_is_compress_backend_ready(inode))
+ return -EOPNOTSUPP;
+
+ return generic_file_read_iter(iocb, iter);
+}
+
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
@@ -3361,6 +3498,9 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
+ if (!f2fs_is_compress_backend_ready(inode))
+ return -EOPNOTSUPP;
+
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock(inode)) {
ret = -EAGAIN;
@@ -3389,18 +3529,41 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
ret = -EAGAIN;
goto out;
}
- } else {
- preallocated = true;
- target_size = iocb->ki_pos + iov_iter_count(from);
+ goto write;
+ }
- err = f2fs_preallocate_blocks(iocb, from);
- if (err) {
- clear_inode_flag(inode, FI_NO_PREALLOC);
- inode_unlock(inode);
- ret = err;
- goto out;
- }
+ if (is_inode_flag_set(inode, FI_NO_PREALLOC))
+ goto write;
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ /*
+ * Convert inline data for Direct I/O before entering
+ * f2fs_direct_IO().
+ */
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ goto out_err;
+ /*
+ * If f2fs_force_buffered_io() is true, we have to allocate
+ * blocks all the time, since f2fs_direct_IO will fall
+ * back to buffered IO.
+ */
+ if (!f2fs_force_buffered_io(inode, iocb, from) &&
+ allow_outplace_dio(inode, iocb, from))
+ goto write;
+ }
+ preallocated = true;
+ target_size = iocb->ki_pos + iov_iter_count(from);
+
+ err = f2fs_preallocate_blocks(iocb, from);
+ if (err) {
+out_err:
+ clear_inode_flag(inode, FI_NO_PREALLOC);
+ inode_unlock(inode);
+ ret = err;
+ goto out;
}
+write:
ret = __generic_file_write_iter(iocb, from);
clear_inode_flag(inode, FI_NO_PREALLOC);
@@ -3475,7 +3638,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
const struct file_operations f2fs_file_operations = {
.llseek = f2fs_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = f2fs_file_read_iter,
.write_iter = f2fs_file_write_iter,
.open = f2fs_file_open,
.release = f2fs_release_file,
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index b3d399623290..db8725d473b5 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -78,18 +78,18 @@ static int gc_thread_func(void *data)
*/
if (sbi->gc_mode == GC_URGENT) {
wait_ms = gc_th->urgent_sleep_time;
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
goto do_gc;
}
- if (!mutex_trylock(&sbi->gc_mutex)) {
+ if (!down_write_trylock(&sbi->gc_lock)) {
stat_other_skip_bggc_count(sbi);
goto next;
}
if (!is_idle(sbi, GC_TIME)) {
increase_sleep_time(gc_th, &wait_ms);
- mutex_unlock(&sbi->gc_mutex);
+ up_write(&sbi->gc_lock);
stat_io_skip_bggc_count(sbi);
goto next;
}
@@ -99,7 +99,7 @@ static int gc_thread_func(void *data)
else
increase_sleep_time(gc_th, &wait_ms);
do_gc:
- stat_inc_bggc_count(sbi);
+ stat_inc_bggc_count(sbi->stat_info);
/* if return value is not zero, no victim was selected */
if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
@@ -1049,8 +1049,10 @@ next_step:
if (phase == 3) {
inode = f2fs_iget(sb, dni.ino);
- if (IS_ERR(inode) || is_bad_inode(inode))
+ if (IS_ERR(inode) || is_bad_inode(inode)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
continue;
+ }
if (!down_write_trylock(
&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
@@ -1368,7 +1370,7 @@ stop:
reserved_segments(sbi),
prefree_segments(sbi));
- mutex_unlock(&sbi->gc_mutex);
+ up_write(&sbi->gc_lock);
put_gc_inode(&gc_list);
@@ -1407,9 +1409,9 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
do_garbage_collect(sbi, segno, &gc_list, FG_GC);
- mutex_unlock(&sbi->gc_mutex);
+ up_write(&sbi->gc_lock);
put_gc_inode(&gc_list);
if (get_valid_blocks(sbi, segno, true))
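The gc_mutex to gc_lock conversion running through this file swaps the mutex for an rw_semaphore taken in write mode, so GC, checkpoint, and resize exclude one another through a single lock. Note the asymmetric discipline visible above: callers take the lock, but f2fs_gc() releases it itself at its stop: label. A background-GC shaped sketch of the pattern:

	if (!down_write_trylock(&sbi->gc_lock)) {
		stat_other_skip_bggc_count(sbi);	/* busy: skip this round */
		goto next;
	}
	/* f2fs_gc() drops gc_lock internally before returning */
	f2fs_gc(sbi, false, true, NULL_SEGNO);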
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 896db0416f0e..4167e5408151 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -368,7 +368,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
struct f2fs_dentry_ptr src, dst;
int err;
- page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
+ page = f2fs_grab_cache_page(dir->i_mapping, 0, true);
if (!page) {
f2fs_put_page(ipage, 1);
return -ENOMEM;
@@ -530,7 +530,7 @@ recover:
return err;
}
-static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+static int do_convert_inline_dir(struct inode *dir, struct page *ipage,
void *inline_dentry)
{
if (!F2FS_I(dir)->i_dir_level)
@@ -539,6 +539,44 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
}
+int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+ struct page *ipage;
+ struct fscrypt_name fname;
+ void *inline_dentry = NULL;
+ int err = 0;
+
+ if (!f2fs_has_inline_dentry(dir))
+ return 0;
+
+ f2fs_lock_op(sbi);
+
+ err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
+ if (err)
+ goto out;
+
+ ipage = f2fs_get_node_page(sbi, dir->i_ino);
+ if (IS_ERR(ipage)) {
+ err = PTR_ERR(ipage);
+ goto out;
+ }
+
+ if (f2fs_has_enough_room(dir, ipage, &fname)) {
+ f2fs_put_page(ipage, 1);
+ goto out;
+ }
+
+ inline_dentry = inline_data_addr(dir, ipage);
+
+ err = do_convert_inline_dir(dir, ipage, inline_dentry);
+ if (!err)
+ f2fs_put_page(ipage, 1);
+out:
+ f2fs_unlock_op(sbi);
+ return err;
+}
+
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode)
@@ -562,7 +600,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
if (bit_pos >= d.max) {
- err = f2fs_convert_inline_dir(dir, ipage, inline_dentry);
+ err = do_convert_inline_dir(dir, ipage, inline_dentry);
if (err)
return err;
err = -EAGAIN;
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 502bd491336a..78c3f1d70f1d 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -200,6 +200,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_inode *ri = F2FS_INODE(node_page);
unsigned long long iblocks;
iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
@@ -286,6 +287,19 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
return false;
}
+ if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
+ fi->i_flags & F2FS_COMPR_FL &&
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+ i_log_cluster_size)) {
+ if (ri->i_compress_algorithm >= COMPRESS_MAX)
+ return false;
+ if (le64_to_cpu(ri->i_compr_blocks) > inode->i_blocks)
+ return false;
+ if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+ ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE)
+ return false;
+ }
+
return true;
}
@@ -407,6 +421,18 @@ static int do_read_inode(struct inode *inode)
fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
}
+ if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
+ (fi->i_flags & F2FS_COMPR_FL)) {
+ if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
+ i_log_cluster_size)) {
+ fi->i_compr_blocks = le64_to_cpu(ri->i_compr_blocks);
+ fi->i_compress_algorithm = ri->i_compress_algorithm;
+ fi->i_log_cluster_size = ri->i_log_cluster_size;
+ fi->i_cluster_size = 1 << fi->i_log_cluster_size;
+ set_inode_flag(inode, FI_COMPRESSED_FILE);
+ }
+ }
+
F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
@@ -416,6 +442,8 @@ static int do_read_inode(struct inode *inode)
stat_inc_inline_xattr(inode);
stat_inc_inline_inode(inode);
stat_inc_inline_dir(inode);
+ stat_inc_compr_inode(inode);
+ stat_add_compr_blocks(inode, F2FS_I(inode)->i_compr_blocks);
return 0;
}
@@ -569,6 +597,17 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
ri->i_crtime_nsec =
cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
}
+
+ if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
+ F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
+ i_log_cluster_size)) {
+ ri->i_compr_blocks =
+ cpu_to_le64(F2FS_I(inode)->i_compr_blocks);
+ ri->i_compress_algorithm =
+ F2FS_I(inode)->i_compress_algorithm;
+ ri->i_log_cluster_size =
+ F2FS_I(inode)->i_log_cluster_size;
+ }
}
__set_inode_rdev(inode, ri);
@@ -711,6 +750,8 @@ no_delete:
stat_dec_inline_xattr(inode);
stat_dec_inline_dir(inode);
stat_dec_inline_inode(inode);
+ stat_dec_compr_inode(inode);
+ stat_sub_compr_blocks(inode, F2FS_I(inode)->i_compr_blocks);
if (likely(!f2fs_cp_error(sbi) &&
!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index a1c507b0b4ac..2aa035422c0f 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -119,6 +119,13 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
set_inode_flag(inode, FI_PROJ_INHERIT);
+ if (f2fs_sb_has_compression(sbi)) {
+ /* Inherit the compression flag in directory */
+ if ((F2FS_I(dir)->i_flags & F2FS_COMPR_FL) &&
+ f2fs_may_compress(inode))
+ set_compress_context(inode);
+ }
+
f2fs_set_inode_flags(inode);
trace_f2fs_new_inode(inode, 0);
@@ -149,6 +156,9 @@ static inline int is_extension_exist(const unsigned char *s, const char *sub)
size_t sublen = strlen(sub);
int i;
+ if (sublen == 1 && *sub == '*')
+ return 1;
+
/*
* filename format of multimedia file should be defined as:
* "filename + '.' + extension + (optional: '.' + temp extension)".
@@ -262,6 +272,45 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
return 0;
}
+static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
+ const unsigned char *name)
+{
+ __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
+ unsigned char (*ext)[F2FS_EXTENSION_LEN];
+ unsigned int ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+ int i, cold_count, hot_count;
+
+ if (!f2fs_sb_has_compression(sbi) ||
+ is_inode_flag_set(inode, FI_COMPRESSED_FILE) ||
+ F2FS_I(inode)->i_flags & F2FS_NOCOMP_FL ||
+ !f2fs_may_compress(inode))
+ return;
+
+ down_read(&sbi->sb_lock);
+
+ cold_count = le32_to_cpu(sbi->raw_super->extension_count);
+ hot_count = sbi->raw_super->hot_ext_count;
+
+ for (i = cold_count; i < cold_count + hot_count; i++) {
+ if (is_extension_exist(name, extlist[i])) {
+ up_read(&sbi->sb_lock);
+ return;
+ }
+ }
+
+ up_read(&sbi->sb_lock);
+
+ ext = F2FS_OPTION(sbi).extensions;
+
+ for (i = 0; i < ext_cnt; i++) {
+ if (!is_extension_exist(name, ext[i]))
+ continue;
+
+ set_compress_context(inode);
+ return;
+ }
+}
+
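
set_compress_inode() above first rejects names that match a hot extension from the superblock list, then enables compression when one of the mount-time extensions matches. A standalone sketch of the per-extension test (simplified: it checks only the final extension, whereas is_extension_exist() also tolerates an optional trailing temp extension; the '*' wildcard is the one added earlier in this patch):

	#include <stdbool.h>
	#include <string.h>
	#include <strings.h>

	static bool ext_matches(const char *name, const char *ext)
	{
		size_t len = strlen(name), extlen = strlen(ext);

		if (extlen == 1 && ext[0] == '*')	/* wildcard matches everything */
			return true;
		if (extlen + 1 >= len)			/* need at least "x.<ext>" */
			return false;
		return name[len - extlen - 1] == '.' &&
		       strncasecmp(name + len - extlen, ext, extlen) == 0;
	}
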
static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
@@ -286,6 +335,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (!test_opt(sbi, DISABLE_EXT_IDENTIFY))
set_file_temperature(sbi, inode, dentry->d_name.name);
+ set_compress_inode(sbi, inode, dentry->d_name.name);
+
inode->i_op = &f2fs_file_inode_operations;
inode->i_fop = &f2fs_file_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
@@ -797,6 +848,7 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
if (whiteout) {
f2fs_i_links_write(inode, false);
+ inode->i_state |= I_LINKABLE;
*whiteout = inode;
} else {
d_tmpfile(dentry, inode);
@@ -849,12 +901,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
struct inode *whiteout = NULL;
- struct page *old_dir_page;
+ struct page *old_dir_page = NULL;
struct page *old_page, *new_page = NULL;
struct f2fs_dir_entry *old_dir_entry = NULL;
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
- bool is_old_inline = f2fs_has_inline_dentry(old_dir);
int err;
if (unlikely(f2fs_cp_error(sbi)))
@@ -867,6 +918,26 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
F2FS_I(old_dentry->d_inode)->i_projid)))
return -EXDEV;
+ /*
+ * If new_inode is null, the below renaming flow will
+	 * add a link in old_dir which can convert the inline_dir.
+	 * After that, if we fail to get the entry due to other
+	 * reasons like ENOMEM, we have to remove the new entry.
+	 * Instead of adding such an error handling routine, let's
+	 * simply convert it first here.
+ */
+ if (old_dir == new_dir && !new_inode) {
+ err = f2fs_try_convert_inline_dir(old_dir, new_dentry);
+ if (err)
+ return err;
+ }
+
+ if (flags & RENAME_WHITEOUT) {
+ err = f2fs_create_whiteout(old_dir, &whiteout);
+ if (err)
+ return err;
+ }
+
err = dquot_initialize(old_dir);
if (err)
goto out;
@@ -898,17 +969,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
- if (flags & RENAME_WHITEOUT) {
- err = f2fs_create_whiteout(old_dir, &whiteout);
- if (err)
- goto out_dir;
- }
-
if (new_inode) {
err = -ENOTEMPTY;
if (old_dir_entry && !f2fs_empty_dir(new_inode))
- goto out_whiteout;
+ goto out_dir;
err = -ENOENT;
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
@@ -916,7 +981,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!new_entry) {
if (IS_ERR(new_page))
err = PTR_ERR(new_page);
- goto out_whiteout;
+ goto out_dir;
}
f2fs_balance_fs(sbi, true);
@@ -928,6 +993,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto put_out_dir;
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
+ new_page = NULL;
new_inode->i_ctime = current_time(new_inode);
down_write(&F2FS_I(new_inode)->i_sem);
@@ -948,33 +1014,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
err = f2fs_add_link(new_dentry, old_inode);
if (err) {
f2fs_unlock_op(sbi);
- goto out_whiteout;
+ goto out_dir;
}
if (old_dir_entry)
f2fs_i_links_write(new_dir, true);
-
- /*
- * old entry and new entry can locate in the same inline
- * dentry in inode, when attaching new entry in inline dentry,
- * it could force inline dentry conversion, after that,
- * old_entry and old_page will point to wrong address, in
- * order to avoid this, let's do the check and update here.
- */
- if (is_old_inline && !f2fs_has_inline_dentry(old_dir)) {
- f2fs_put_page(old_page, 0);
- old_page = NULL;
-
- old_entry = f2fs_find_entry(old_dir,
- &old_dentry->d_name, &old_page);
- if (!old_entry) {
- err = -ENOENT;
- if (IS_ERR(old_page))
- err = PTR_ERR(old_page);
- f2fs_unlock_op(sbi);
- goto out_whiteout;
- }
- }
}
down_write(&F2FS_I(old_inode)->i_sem);
@@ -989,9 +1033,9 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_mark_inode_dirty_sync(old_inode, false);
f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
+ old_page = NULL;
if (whiteout) {
- whiteout->i_state |= I_LINKABLE;
set_inode_flag(whiteout, FI_INC_LINK);
err = f2fs_add_link(old_dentry, whiteout);
if (err)
@@ -1025,17 +1069,15 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
put_out_dir:
f2fs_unlock_op(sbi);
- if (new_page)
- f2fs_put_page(new_page, 0);
-out_whiteout:
- if (whiteout)
- iput(whiteout);
+ f2fs_put_page(new_page, 0);
out_dir:
if (old_dir_entry)
f2fs_put_page(old_dir_page, 0);
out_old:
f2fs_put_page(old_page, 0);
out:
+ if (whiteout)
+ iput(whiteout);
return err;
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3314a0f3405e..9d02cdcdbb07 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -875,7 +875,7 @@ static int truncate_dnode(struct dnode_of_data *dn)
/* get direct node */
page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
- if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
+ if (PTR_ERR(page) == -ENOENT)
return 1;
else if (IS_ERR(page))
return PTR_ERR(page);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 76477f71d4ee..763d5c0951d1 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -723,6 +723,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
int ret = 0;
unsigned long s_flags = sbi->sb->s_flags;
bool need_writecp = false;
+ bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
int quota_enabled;
#endif
@@ -774,6 +775,8 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
sbi->sb->s_flags = s_flags;
}
skip:
+ fix_curseg_write_pointer = !check_only || list_empty(&inode_list);
+
destroy_fsync_dnodes(&inode_list, err);
destroy_fsync_dnodes(&tmp_inode_list, err);
@@ -784,9 +787,22 @@ skip:
if (err) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
truncate_inode_pages_final(META_MAPPING(sbi));
- } else {
- clear_sbi_flag(sbi, SBI_POR_DOING);
}
+
+ /*
+	 * If fsync data recovery succeeds, or there is no fsync data to
+	 * recover, and the filesystem is not read-only, check and fix
+	 * zoned block devices' write pointer consistency.
+ */
+ if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
+ f2fs_sb_has_blkzoned(sbi)) {
+ err = f2fs_fix_curseg_write_pointer(sbi);
+ ret = err;
+ }
+
+ if (!err)
+ clear_sbi_flag(sbi, SBI_POR_DOING);
+
mutex_unlock(&sbi->cp_mutex);
/* let's drop all the directory inodes for clean checkpoint */
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 56e81447e2f3..cf0eb002cfd4 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -334,7 +334,6 @@ void f2fs_drop_inmem_pages(struct inode *inode)
}
fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
- stat_dec_atomic_write(inode);
spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
if (!list_empty(&fi->inmem_ilist))
@@ -505,7 +504,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
* dir/node pages without enough free segments.
*/
if (has_not_enough_free_secs(sbi, 0, 0)) {
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
f2fs_gc(sbi, false, false, NULL_SEGNO);
}
}
@@ -2225,7 +2224,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
struct sit_info *sit_i = SIT_I(sbi);
f2fs_bug_on(sbi, addr == NULL_ADDR);
- if (addr == NEW_ADDR)
+ if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
return;
invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
@@ -2861,9 +2860,9 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
if (sbi->discard_blks == 0)
goto out;
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
err = f2fs_write_checkpoint(sbi, &cpc);
- mutex_unlock(&sbi->gc_mutex);
+ up_write(&sbi->gc_lock);
if (err)
goto out;
@@ -3036,7 +3035,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
if (fio->type == DATA) {
struct inode *inode = fio->page->mapping->host;
- if (is_cold_data(fio->page) || file_is_cold(inode))
+ if (is_cold_data(fio->page) || file_is_cold(inode) ||
+ f2fs_compressed_file(inode))
return CURSEG_COLD_DATA;
if (file_is_hot(inode) ||
is_inode_flag_set(inode, FI_HOT_DATA) ||
@@ -3289,7 +3289,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
stat_inc_inplace_blocks(fio->sbi);
- if (fio->bio)
+ if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
err = f2fs_merge_page_bio(fio);
else
err = f2fs_submit_page_bio(fio);
@@ -4368,6 +4368,263 @@ out:
return 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+
+static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
+ struct f2fs_dev_info *fdev,
+ struct blk_zone *zone)
+{
+ unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
+ block_t zone_block, wp_block, last_valid_block;
+ unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
+ int i, s, b, ret;
+ struct seg_entry *se;
+
+ if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
+ return 0;
+
+ wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
+ wp_segno = GET_SEGNO(sbi, wp_block);
+ wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
+ zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
+ zone_segno = GET_SEGNO(sbi, zone_block);
+ zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
+
+ if (zone_segno >= MAIN_SEGS(sbi))
+ return 0;
+
+ /*
+	 * Skip the check for zones that cursegs point to, since
+	 * fix_curseg_write_pointer() checks them.
+ */
+ for (i = 0; i < NO_CHECK_TYPE; i++)
+ if (zone_secno == GET_SEC_FROM_SEG(sbi,
+ CURSEG_I(sbi, i)->segno))
+ return 0;
+
+ /*
+ * Get last valid block of the zone.
+ */
+ last_valid_block = zone_block - 1;
+ for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
+ segno = zone_segno + s;
+ se = get_seg_entry(sbi, segno);
+ for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
+ if (f2fs_test_bit(b, se->cur_valid_map)) {
+ last_valid_block = START_BLOCK(sbi, segno) + b;
+ break;
+ }
+ if (last_valid_block >= zone_block)
+ break;
+ }
+
+ /*
+	 * If the last valid block is beyond the write pointer, report the
+	 * inconsistency. This inconsistency does not cause a write error
+	 * because the zone will not be selected for writes until it gets
+	 * discarded. Just report it.
+ */
+ if (last_valid_block >= wp_block) {
+ f2fs_notice(sbi, "Valid block beyond write pointer: "
+ "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
+ GET_SEGNO(sbi, last_valid_block),
+ GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
+ wp_segno, wp_blkoff);
+ return 0;
+ }
+
+ /*
+	 * If there is no valid block in the zone and the write pointer is
+	 * not at the zone start, reset the write pointer.
+ */
+ if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
+ f2fs_notice(sbi,
+ "Zone without valid block has non-zero write "
+ "pointer. Reset the write pointer: wp[0x%x,0x%x]",
+ wp_segno, wp_blkoff);
+ ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
+ zone->len >> log_sectors_per_block);
+ if (ret) {
+ f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
+ fdev->path, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
+ block_t zone_blkaddr)
+{
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ if (!bdev_is_zoned(FDEV(i).bdev))
+ continue;
+ if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
+ zone_blkaddr <= FDEV(i).end_blk))
+ return &FDEV(i);
+ }
+
+ return NULL;
+}
+
+static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
+			void *data)
+{
+ memcpy(data, zone, sizeof(struct blk_zone));
+ return 0;
+}
+
+static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
+{
+ struct curseg_info *cs = CURSEG_I(sbi, type);
+ struct f2fs_dev_info *zbd;
+ struct blk_zone zone;
+ unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
+ block_t cs_zone_block, wp_block;
+ unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
+ sector_t zone_sector;
+ int err;
+
+ cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
+ cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
+
+ zbd = get_target_zoned_dev(sbi, cs_zone_block);
+ if (!zbd)
+ return 0;
+
+ /* report zone for the sector the curseg points to */
+ zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
+ << log_sectors_per_block;
+ err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
+ report_one_zone_cb, &zone);
+ if (err != 1) {
+ f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
+ zbd->path, err);
+ return err;
+ }
+
+ if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
+ return 0;
+
+ wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
+ wp_segno = GET_SEGNO(sbi, wp_block);
+ wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
+ wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
+
+ if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
+ wp_sector_off == 0)
+ return 0;
+
+ f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
+ "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
+ type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
+
+ f2fs_notice(sbi, "Assign new section to curseg[%d]: "
+ "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
+ allocate_segment_by_default(sbi, type, true);
+
+ /* check consistency of the zone curseg pointed to */
+ if (check_zone_write_pointer(sbi, zbd, &zone))
+ return -EIO;
+
+ /* check newly assigned zone */
+ cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
+ cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
+
+ zbd = get_target_zoned_dev(sbi, cs_zone_block);
+ if (!zbd)
+ return 0;
+
+ zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
+ << log_sectors_per_block;
+ err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
+ report_one_zone_cb, &zone);
+ if (err != 1) {
+ f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
+ zbd->path, err);
+ return err;
+ }
+
+ if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
+ return 0;
+
+ if (zone.wp != zone.start) {
+ f2fs_notice(sbi,
+ "New zone for curseg[%d] is not yet discarded. "
+ "Reset the zone: curseg[0x%x,0x%x]",
+ type, cs->segno, cs->next_blkoff);
+ err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
+ zone_sector >> log_sectors_per_block,
+ zone.len >> log_sectors_per_block);
+ if (err) {
+ f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
+ zbd->path, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
+{
+ int i, ret;
+
+ for (i = 0; i < NO_CHECK_TYPE; i++) {
+ ret = fix_curseg_write_pointer(sbi, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct check_zone_write_pointer_args {
+ struct f2fs_sb_info *sbi;
+ struct f2fs_dev_info *fdev;
+};
+
+static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
+				void *data)
+{
+ struct check_zone_write_pointer_args *args;
+ args = (struct check_zone_write_pointer_args *)data;
+
+ return check_zone_write_pointer(args->sbi, args->fdev, zone);
+}
+
+int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
+{
+ int i, ret;
+ struct check_zone_write_pointer_args args;
+
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ if (!bdev_is_zoned(FDEV(i).bdev))
+ continue;
+
+ args.sbi = sbi;
+ args.fdev = &FDEV(i);
+ ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
+ check_zone_write_pointer_cb, &args);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+#else
+int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
+{
+ return 0;
+}
+
+int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
+{
+ return 0;
+}
+#endif
+
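
Throughout the zoned-device code above, zone offsets reported by the block layer in 512-byte sectors are converted to filesystem blocks by shifting with log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT. A worked sketch of that mapping, assuming 4 KiB blocks (so the shift is 12 - 9 = 3):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t sector_to_block(uint64_t start_blk, uint64_t sector)
	{
		const unsigned int log_sectors_per_block = 12 - 9;

		return start_blk + (sector >> log_sectors_per_block);
	}

	int main(void)
	{
		/* A write pointer at sector 0x8000 lies 0x1000 blocks into the device. */
		printf("0x%llx\n", (unsigned long long)sector_to_block(0, 0x8000));
		return 0;
	}
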
/*
* Update min, max modified time for cost-benefit GC algorithm
*/
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index a95467b202ea..459dc3901a57 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -200,18 +200,6 @@ struct segment_allocation {
void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};
-/*
- * this value is set in page as a private data which indicate that
- * the page is atomically written, and it is in inmem_pages list.
- */
-#define ATOMIC_WRITTEN_PAGE ((unsigned long)-1)
-#define DUMMY_WRITTEN_PAGE ((unsigned long)-2)
-
-#define IS_ATOMIC_WRITTEN_PAGE(page) \
- (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
-#define IS_DUMMY_WRITTEN_PAGE(page) \
- (page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
-
#define MAX_SKIP_GC_COUNT 16
struct inmem_pages {
@@ -619,8 +607,10 @@ static inline int utilization(struct f2fs_sb_info *sbi)
 * threshold,
* F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
* storages. IPU will be triggered only if the # of dirty
- * pages over min_fsync_blocks.
- * F2FS_IPUT_DISABLE - disable IPU. (=default option)
+ * pages over min_fsync_blocks. (=default option)
+ * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
+ * F2FS_IPU_NOCACHE - disable IPU bio cache.
+ * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
*/
#define DEF_MIN_IPU_UTIL 70
#define DEF_MIN_FSYNC_BLOCKS 8
@@ -635,6 +625,7 @@ enum {
F2FS_IPU_SSR_UTIL,
F2FS_IPU_FSYNC,
F2FS_IPU_ASYNC,
+ F2FS_IPU_NOCACHE,
};
static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 5111e1ffe58a..65a7a432dfee 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -141,6 +141,9 @@ enum {
Opt_checkpoint_disable_cap,
Opt_checkpoint_disable_cap_perc,
Opt_checkpoint_enable,
+ Opt_compress_algorithm,
+ Opt_compress_log_size,
+ Opt_compress_extension,
Opt_err,
};
@@ -203,6 +206,9 @@ static match_table_t f2fs_tokens = {
{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
{Opt_checkpoint_enable, "checkpoint=enable"},
+ {Opt_compress_algorithm, "compress_algorithm=%s"},
+ {Opt_compress_log_size, "compress_log_size=%u"},
+ {Opt_compress_extension, "compress_extension=%s"},
{Opt_err, NULL},
};
@@ -391,8 +397,9 @@ static int parse_options(struct super_block *sb, char *options)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
substring_t args[MAX_OPT_ARGS];
+ unsigned char (*ext)[F2FS_EXTENSION_LEN];
char *p, *name;
- int arg = 0;
+ int arg = 0, ext_cnt;
kuid_t uid;
kgid_t gid;
#ifdef CONFIG_QUOTA
@@ -810,6 +817,66 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_checkpoint_enable:
clear_opt(sbi, DISABLE_CHECKPOINT);
break;
+ case Opt_compress_algorithm:
+ if (!f2fs_sb_has_compression(sbi)) {
+ f2fs_err(sbi, "Compression feature if off");
+ return -EINVAL;
+ }
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+			if (!strcmp(name, "lzo")) {
+				F2FS_OPTION(sbi).compress_algorithm =
+						COMPRESS_LZO;
+			} else if (!strcmp(name, "lz4")) {
+ F2FS_OPTION(sbi).compress_algorithm =
+ COMPRESS_LZ4;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
+ case Opt_compress_log_size:
+ if (!f2fs_sb_has_compression(sbi)) {
+ f2fs_err(sbi, "Compression feature is off");
+ return -EINVAL;
+ }
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ if (arg < MIN_COMPRESS_LOG_SIZE ||
+ arg > MAX_COMPRESS_LOG_SIZE) {
+ f2fs_err(sbi,
+ "Compress cluster log size is out of range");
+ return -EINVAL;
+ }
+ F2FS_OPTION(sbi).compress_log_size = arg;
+ break;
+ case Opt_compress_extension:
+ if (!f2fs_sb_has_compression(sbi)) {
+ f2fs_err(sbi, "Compression feature is off");
+ return -EINVAL;
+ }
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+
+ ext = F2FS_OPTION(sbi).extensions;
+ ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+
+ if (strlen(name) >= F2FS_EXTENSION_LEN ||
+ ext_cnt >= COMPRESS_EXT_NUM) {
+ f2fs_err(sbi,
+ "invalid extension length/number");
+ kfree(name);
+ return -EINVAL;
+ }
+
+ strcpy(ext[ext_cnt], name);
+ F2FS_OPTION(sbi).compress_ext_cnt++;
+ kfree(name);
+ break;
default:
f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
p);
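
Taken together, the three options would be supplied at mount time along the lines of mount -t f2fs -o compress_algorithm=lz4,compress_log_size=2,compress_extension=mp4 /dev/sdb1 /mnt (an illustrative invocation; the device and mount point are placeholders). As the checks above show, each option is rejected with -EINVAL unless the on-disk compression feature is enabled.
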
@@ -1125,6 +1192,8 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_node_manager(sbi);
f2fs_destroy_segment_manager(sbi);
+ f2fs_destroy_post_read_wq(sbi);
+
kvfree(sbi->ckpt);
f2fs_unregister_sysfs(sbi);
@@ -1169,9 +1238,9 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
cpc.reason = __get_cp_reason(sbi);
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
err = f2fs_write_checkpoint(sbi, &cpc);
- mutex_unlock(&sbi->gc_mutex);
+ up_write(&sbi->gc_lock);
}
f2fs_trace_ios(NULL, 1);
@@ -1213,12 +1282,10 @@ static int f2fs_statfs_project(struct super_block *sb,
return PTR_ERR(dquot);
spin_lock(&dquot->dq_dqb_lock);
- limit = 0;
- if (dquot->dq_dqb.dqb_bsoftlimit)
- limit = dquot->dq_dqb.dqb_bsoftlimit;
- if (dquot->dq_dqb.dqb_bhardlimit &&
- (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
- limit = dquot->dq_dqb.dqb_bhardlimit;
+ limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
+ dquot->dq_dqb.dqb_bhardlimit);
+ if (limit)
+ limit >>= sb->s_blocksize_bits;
if (limit && buf->f_blocks > limit) {
curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
@@ -1228,12 +1295,8 @@ static int f2fs_statfs_project(struct super_block *sb,
(buf->f_blocks - curblock) : 0;
}
- limit = 0;
- if (dquot->dq_dqb.dqb_isoftlimit)
- limit = dquot->dq_dqb.dqb_isoftlimit;
- if (dquot->dq_dqb.dqb_ihardlimit &&
- (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
- limit = dquot->dq_dqb.dqb_ihardlimit;
+ limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
+ dquot->dq_dqb.dqb_ihardlimit);
if (limit && buf->f_files > limit) {
buf->f_files = limit;
@@ -1340,6 +1403,35 @@ static inline void f2fs_show_quota_options(struct seq_file *seq,
#endif
}
+static inline void f2fs_show_compress_options(struct seq_file *seq,
+ struct super_block *sb)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ char *algtype = "";
+ int i;
+
+ if (!f2fs_sb_has_compression(sbi))
+ return;
+
+ switch (F2FS_OPTION(sbi).compress_algorithm) {
+ case COMPRESS_LZO:
+ algtype = "lzo";
+ break;
+ case COMPRESS_LZ4:
+ algtype = "lz4";
+ break;
+ }
+ seq_printf(seq, ",compress_algorithm=%s", algtype);
+
+ seq_printf(seq, ",compress_log_size=%u",
+ F2FS_OPTION(sbi).compress_log_size);
+
+ for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
+ seq_printf(seq, ",compress_extension=%s",
+ F2FS_OPTION(sbi).extensions[i]);
+ }
+}
+
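
For a filesystem mounted with the defaults set in default_options() below (LZO and the minimum cluster log size) plus one configured extension, this helper would append something like ,compress_algorithm=lzo,compress_log_size=2,compress_extension=mp4 to the /proc/mounts line — an illustrative rendering that assumes MIN_COMPRESS_LOG_SIZE is 2, a value this diff does not show.
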
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
@@ -1462,6 +1554,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_printf(seq, ",fsync_mode=%s", "strict");
else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
seq_printf(seq, ",fsync_mode=%s", "nobarrier");
+
+ f2fs_show_compress_options(seq, sbi->sb);
return 0;
}
@@ -1476,6 +1570,9 @@ static void default_options(struct f2fs_sb_info *sbi)
F2FS_OPTION(sbi).test_dummy_encryption = false;
F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
+ F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZO;
+ F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
+ F2FS_OPTION(sbi).compress_ext_cnt = 0;
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_XATTR);
@@ -1524,7 +1621,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
f2fs_update_time(sbi, DISABLE_TIME);
while (!f2fs_time_over(sbi, DISABLE_TIME)) {
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
err = f2fs_gc(sbi, true, false, NULL_SEGNO);
if (err == -ENODATA) {
err = 0;
@@ -1546,7 +1643,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
goto restore_flag;
}
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
cpc.reason = CP_PAUSE;
set_sbi_flag(sbi, SBI_CP_DISABLED);
err = f2fs_write_checkpoint(sbi, &cpc);
@@ -1558,7 +1655,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
spin_unlock(&sbi->stat_lock);
out_unlock:
- mutex_unlock(&sbi->gc_mutex);
+ up_write(&sbi->gc_lock);
restore_flag:
sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
return err;
@@ -1566,12 +1663,12 @@ restore_flag:
static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
- mutex_lock(&sbi->gc_mutex);
+ down_write(&sbi->gc_lock);
f2fs_dirty_to_prefree(sbi);
clear_sbi_flag(sbi, SBI_CP_DISABLED);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- mutex_unlock(&sbi->gc_mutex);
+ up_write(&sbi->gc_lock);
f2fs_sync_fs(sbi->sb, 1);
}
@@ -2158,7 +2255,7 @@ static int f2fs_dquot_commit(struct dquot *dquot)
struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
int ret;
- down_read(&sbi->quota_sem);
+ down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
ret = dquot_commit(dquot);
if (ret < 0)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
@@ -2182,13 +2279,10 @@ static int f2fs_dquot_acquire(struct dquot *dquot)
static int f2fs_dquot_release(struct dquot *dquot)
{
struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
- int ret;
+ int ret = dquot_release(dquot);
- down_read(&sbi->quota_sem);
- ret = dquot_release(dquot);
if (ret < 0)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
- up_read(&sbi->quota_sem);
return ret;
}
@@ -2196,29 +2290,22 @@ static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
{
struct super_block *sb = dquot->dq_sb;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
- int ret;
-
- down_read(&sbi->quota_sem);
- ret = dquot_mark_dquot_dirty(dquot);
+ int ret = dquot_mark_dquot_dirty(dquot);
/* if we are using journalled quota */
if (is_journalled_quota(sbi))
set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
- up_read(&sbi->quota_sem);
return ret;
}
static int f2fs_dquot_commit_info(struct super_block *sb, int type)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
- int ret;
+ int ret = dquot_commit_info(sb, type);
- down_read(&sbi->quota_sem);
- ret = dquot_commit_info(sb, type);
if (ret < 0)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
- up_read(&sbi->quota_sem);
return ret;
}
@@ -3311,7 +3398,7 @@ try_onemore:
/* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block;
- mutex_init(&sbi->gc_mutex);
+ init_rwsem(&sbi->gc_lock);
mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
mutex_init(&sbi->resize_mutex);
@@ -3400,6 +3487,12 @@ try_onemore:
goto free_devices;
}
+ err = f2fs_init_post_read_wq(sbi);
+ if (err) {
+ f2fs_err(sbi, "Failed to initialize post read workqueue");
+ goto free_devices;
+ }
+
sbi->total_valid_node_count =
le32_to_cpu(sbi->ckpt->valid_node_count);
percpu_counter_set(&sbi->total_valid_inode_count,
@@ -3544,6 +3637,17 @@ try_onemore:
goto free_meta;
}
}
+
+ /*
+	 * If the filesystem is not read-only and fsync data recovery succeeds,
+ * check zoned block devices' write pointer consistency.
+ */
+ if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
+ err = f2fs_check_write_pointer(sbi);
+ if (err)
+ goto free_meta;
+ }
+
reset_checkpoint:
/* f2fs_recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
@@ -3621,6 +3725,7 @@ free_nm:
f2fs_destroy_node_manager(sbi);
free_sm:
f2fs_destroy_segment_manager(sbi);
+ f2fs_destroy_post_read_wq(sbi);
free_devices:
destroy_device_list(sbi);
kvfree(sbi->ckpt);
@@ -3762,8 +3867,12 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_bio_entry_cache();
if (err)
goto free_post_read;
+ err = f2fs_init_bioset();
+ if (err)
+		goto free_bio_entry_cache;
return 0;
-
+free_bio_entry_cache:
+ f2fs_destroy_bio_entry_cache();
free_post_read:
f2fs_destroy_post_read_processing();
free_root_stats:
@@ -3789,6 +3898,7 @@ fail:
static void __exit exit_f2fs_fs(void)
{
+ f2fs_destroy_bioset();
f2fs_destroy_bio_entry_cache();
f2fs_destroy_post_read_processing();
f2fs_destroy_root_stats();
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 70945ceb9c0c..91d649790b1b 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -25,6 +25,9 @@ enum {
DCC_INFO, /* struct discard_cmd_control */
NM_INFO, /* struct f2fs_nm_info */
F2FS_SBI, /* struct f2fs_sb_info */
+#ifdef CONFIG_F2FS_STAT_FS
+ STAT_INFO, /* struct f2fs_stat_info */
+#endif
#ifdef CONFIG_F2FS_FAULT_INJECTION
FAULT_INFO_RATE, /* struct f2fs_fault_info */
FAULT_INFO_TYPE, /* struct f2fs_fault_info */
@@ -42,6 +45,9 @@ struct f2fs_attr {
int id;
};
+static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf);
+
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
if (struct_type == GC_THREAD)
@@ -59,41 +65,25 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
struct_type == FAULT_INFO_TYPE)
return (unsigned char *)&F2FS_OPTION(sbi).fault_info;
#endif
+#ifdef CONFIG_F2FS_STAT_FS
+ else if (struct_type == STAT_INFO)
+ return (unsigned char *)F2FS_STAT(sbi);
+#endif
return NULL;
}
static ssize_t dirty_segments_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- (unsigned long long)(dirty_segments(sbi)));
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)(dirty_segments(sbi)));
}
-static ssize_t unusable_show(struct f2fs_attr *a,
+static ssize_t free_segments_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- block_t unusable;
-
- if (test_opt(sbi, DISABLE_CHECKPOINT))
- unusable = sbi->unusable_block_count;
- else
- unusable = f2fs_get_unusable_blocks(sbi);
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- (unsigned long long)unusable);
-}
-
-static ssize_t encoding_show(struct f2fs_attr *a,
- struct f2fs_sb_info *sbi, char *buf)
-{
-#ifdef CONFIG_UNICODE
- if (f2fs_sb_has_casefold(sbi))
- return snprintf(buf, PAGE_SIZE, "%s (%d.%d.%d)\n",
- sbi->s_encoding->charset,
- (sbi->s_encoding->version >> 16) & 0xff,
- (sbi->s_encoding->version >> 8) & 0xff,
- sbi->s_encoding->version & 0xff);
-#endif
- return snprintf(buf, PAGE_SIZE, "(none)");
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)(free_segments(sbi)));
}
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
@@ -102,10 +92,10 @@ static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
struct super_block *sb = sbi->sb;
if (!sb->s_bdev->bd_part)
- return snprintf(buf, PAGE_SIZE, "0\n");
+ return sprintf(buf, "0\n");
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- (unsigned long long)(sbi->kbytes_written +
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)(sbi->kbytes_written +
BD_PART_WRITTEN(sbi)));
}
@@ -116,7 +106,7 @@ static ssize_t features_show(struct f2fs_attr *a,
int len = 0;
if (!sb->s_bdev->bd_part)
- return snprintf(buf, PAGE_SIZE, "0\n");
+ return sprintf(buf, "0\n");
if (f2fs_sb_has_encrypt(sbi))
len += snprintf(buf, PAGE_SIZE - len, "%s",
@@ -154,6 +144,9 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_casefold(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "casefold");
+ if (f2fs_sb_has_compression(sbi))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "compression");
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "pin_file");
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -163,9 +156,66 @@ static ssize_t features_show(struct f2fs_attr *a,
static ssize_t current_reserved_blocks_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%u\n", sbi->current_reserved_blocks);
+ return sprintf(buf, "%u\n", sbi->current_reserved_blocks);
+}
+
+static ssize_t unusable_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ block_t unusable;
+
+ if (test_opt(sbi, DISABLE_CHECKPOINT))
+ unusable = sbi->unusable_block_count;
+ else
+ unusable = f2fs_get_unusable_blocks(sbi);
+ return sprintf(buf, "%llu\n", (unsigned long long)unusable);
+}
+
+static ssize_t encoding_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+#ifdef CONFIG_UNICODE
+ if (f2fs_sb_has_casefold(sbi))
+ return snprintf(buf, PAGE_SIZE, "%s (%d.%d.%d)\n",
+ sbi->s_encoding->charset,
+ (sbi->s_encoding->version >> 16) & 0xff,
+ (sbi->s_encoding->version >> 8) & 0xff,
+ sbi->s_encoding->version & 0xff);
+#endif
+ return sprintf(buf, "(none)");
}
+#ifdef CONFIG_F2FS_STAT_FS
+static ssize_t moved_blocks_foreground_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ struct f2fs_stat_info *si = F2FS_STAT(sbi);
+
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)(si->tot_blks -
+ (si->bg_data_blks + si->bg_node_blks)));
+}
+
+static ssize_t moved_blocks_background_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ struct f2fs_stat_info *si = F2FS_STAT(sbi);
+
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)(si->bg_data_blks + si->bg_node_blks));
+}
+
+static ssize_t avg_vblocks_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ struct f2fs_stat_info *si = F2FS_STAT(sbi);
+
+ si->dirty_count = dirty_segments(sbi);
+ f2fs_update_sit_info(sbi);
+ return sprintf(buf, "%llu\n", (unsigned long long)(si->avg_vblocks));
+}
+#endif
+
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -199,7 +249,7 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
ui = (unsigned int *)(ptr + a->offset);
- return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
+ return sprintf(buf, "%u\n", *ui);
}
static ssize_t __sbi_store(struct f2fs_attr *a,
@@ -389,6 +439,7 @@ enum feat_id {
FEAT_VERITY,
FEAT_SB_CHECKSUM,
FEAT_CASEFOLD,
+ FEAT_COMPRESSION,
};
static ssize_t f2fs_feature_show(struct f2fs_attr *a,
@@ -408,7 +459,8 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
case FEAT_VERITY:
case FEAT_SB_CHECKSUM:
case FEAT_CASEFOLD:
- return snprintf(buf, PAGE_SIZE, "supported\n");
+ case FEAT_COMPRESSION:
+ return sprintf(buf, "supported\n");
}
return 0;
}
@@ -437,6 +489,14 @@ static struct f2fs_attr f2fs_attr_##_name = { \
.id = _id, \
}
+#define F2FS_STAT_ATTR(_struct_type, _struct_name, _name, _elname) \
+static struct f2fs_attr f2fs_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = 0444 }, \
+ .show = f2fs_sbi_show, \
+ .struct_type = _struct_type, \
+ .offset = offsetof(struct _struct_name, _elname), \
+}
+
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_urgent_sleep_time,
urgent_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
@@ -478,11 +538,21 @@ F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
F2FS_GENERAL_RO_ATTR(dirty_segments);
+F2FS_GENERAL_RO_ATTR(free_segments);
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
F2FS_GENERAL_RO_ATTR(features);
F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
F2FS_GENERAL_RO_ATTR(unusable);
F2FS_GENERAL_RO_ATTR(encoding);
+#ifdef CONFIG_F2FS_STAT_FS
+F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_foreground_calls, cp_count);
+F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_background_calls, bg_cp_count);
+F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, gc_foreground_calls, call_count);
+F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, gc_background_calls, bg_gc);
+F2FS_GENERAL_RO_ATTR(moved_blocks_background);
+F2FS_GENERAL_RO_ATTR(moved_blocks_foreground);
+F2FS_GENERAL_RO_ATTR(avg_vblocks);
+#endif
#ifdef CONFIG_FS_ENCRYPTION
F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
@@ -503,6 +573,7 @@ F2FS_FEATURE_RO_ATTR(verity, FEAT_VERITY);
#endif
F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD);
+F2FS_FEATURE_RO_ATTR(compression, FEAT_COMPRESSION);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -543,12 +614,22 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(inject_type),
#endif
ATTR_LIST(dirty_segments),
+ ATTR_LIST(free_segments),
ATTR_LIST(unusable),
ATTR_LIST(lifetime_write_kbytes),
ATTR_LIST(features),
ATTR_LIST(reserved_blocks),
ATTR_LIST(current_reserved_blocks),
ATTR_LIST(encoding),
+#ifdef CONFIG_F2FS_STAT_FS
+ ATTR_LIST(cp_foreground_calls),
+ ATTR_LIST(cp_background_calls),
+ ATTR_LIST(gc_foreground_calls),
+ ATTR_LIST(gc_background_calls),
+ ATTR_LIST(moved_blocks_foreground),
+ ATTR_LIST(moved_blocks_background),
+ ATTR_LIST(avg_vblocks),
+#endif
NULL,
};
ATTRIBUTE_GROUPS(f2fs);
@@ -573,6 +654,7 @@ static struct attribute *f2fs_feat_attrs[] = {
#endif
ATTR_LIST(sb_checksum),
ATTR_LIST(casefold),
+ ATTR_LIST(compression),
NULL,
};
ATTRIBUTE_GROUPS(f2fs_feat);
@@ -733,10 +815,12 @@ int __init f2fs_init_sysfs(void)
ret = kobject_init_and_add(&f2fs_feat, &f2fs_feat_ktype,
NULL, "features");
- if (ret)
+ if (ret) {
+ kobject_put(&f2fs_feat);
kset_unregister(&f2fs_kset);
- else
+ } else {
f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ }
return ret;
}
@@ -757,8 +841,11 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
init_completion(&sbi->s_kobj_unregister);
err = kobject_init_and_add(&sbi->s_kobj, &f2fs_sb_ktype, NULL,
"%s", sb->s_id);
- if (err)
+ if (err) {
+ kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
return err;
+ }
if (f2fs_proc_root)
sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
@@ -786,4 +873,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
}
kobject_del(&sbi->s_kobj);
+ kobject_put(&sbi->s_kobj);
}
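
The new stat attributes are ordinary read-only sysfs files, so once a device is mounted they can be sampled with plain reads, e.g. /sys/fs/f2fs/sda1/gc_foreground_calls or /sys/fs/f2fs/sda1/avg_vblocks (illustrative paths; the per-device directory is named after sb->s_id, as registered above). Note that avg_vblocks_show() recomputes dirty_count and refreshes SIT info on every read, so it is somewhat more expensive than the simple counters.
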
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index a401ef72bc82..d7d430a6f130 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -222,12 +222,55 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
return size;
}
+/*
+ * Prefetch some pages from the file's Merkle tree.
+ *
+ * This is basically a stripped-down version of __do_page_cache_readahead()
+ * which works on pages past i_size.
+ */
+static void f2fs_merkle_tree_readahead(struct address_space *mapping,
+ pgoff_t start_index, unsigned long count)
+{
+ LIST_HEAD(pages);
+ unsigned int nr_pages = 0;
+ struct page *page;
+ pgoff_t index;
+ struct blk_plug plug;
+
+ for (index = start_index; index < start_index + count; index++) {
+ page = xa_load(&mapping->i_pages, index);
+ if (!page || xa_is_value(page)) {
+ page = __page_cache_alloc(readahead_gfp_mask(mapping));
+ if (!page)
+ break;
+ page->index = index;
+ list_add(&page->lru, &pages);
+ nr_pages++;
+ }
+ }
+ blk_start_plug(&plug);
+ f2fs_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
+ blk_finish_plug(&plug);
+}
+
static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
- pgoff_t index)
+ pgoff_t index,
+ unsigned long num_ra_pages)
{
+ struct page *page;
+
index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
- return read_mapping_page(inode->i_mapping, index, NULL);
+ page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
+ if (!page || !PageUptodate(page)) {
+ if (page)
+ put_page(page);
+ else if (num_ra_pages > 1)
+ f2fs_merkle_tree_readahead(inode->i_mapping, index,
+ num_ra_pages);
+ page = read_mapping_page(inode->i_mapping, index, NULL);
+ }
+ return page;
}
static int f2fs_write_merkle_tree_block(struct inode *inode, const void *buf,
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 5f04c5c810fb..594b05ae16c9 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -21,6 +21,7 @@
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <asm/unaligned.h>
+#include <linux/random.h>
#include <linux/iversion.h>
#include "fat.h"
@@ -521,7 +522,7 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
inode_inc_iversion(inode);
- inode->i_generation = get_seconds();
+ inode->i_generation = prandom_u32();
if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
inode->i_generation &= ~1;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 1e08bd54c5fb..f1b2a1fc2a6a 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -271,6 +271,14 @@ static inline struct timespec64 fat_timespec64_trunc_2secs(struct timespec64 ts)
{
return (struct timespec64){ ts.tv_sec & ~1ULL, 0 };
}
+
+static inline struct timespec64 fat_timespec64_trunc_10ms(struct timespec64 ts)
+{
+ if (ts.tv_nsec)
+ ts.tv_nsec -= ts.tv_nsec % 10000000UL;
+ return ts;
+}
+
/*
* truncate the various times with appropriate granularity:
* root inode:
@@ -308,7 +316,7 @@ int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
}
if (flags & S_CTIME) {
if (sbi->options.isvfat)
- inode->i_ctime = timespec64_trunc(*now, 10000000);
+ inode->i_ctime = fat_timespec64_trunc_10ms(*now);
else
inode->i_ctime = fat_timespec64_trunc_2secs(*now);
}
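
The new helper replaces the removed timespec64_trunc(*now, 10000000) call with an open-coded rounding down to VFAT's 10 ms ctime granularity. A standalone sketch of the same rounding:

	#include <stdint.h>
	#include <assert.h>

	static uint64_t trunc_10ms(uint64_t tv_nsec)
	{
		return tv_nsec - tv_nsec % 10000000ull;
	}

	int main(void)
	{
		assert(trunc_10ms(123456789) == 120000000);	/* rounded down to 10 ms */
		assert(trunc_10ms(120000000) == 120000000);	/* already aligned */
		return 0;
	}
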
diff --git a/fs/file.c b/fs/file.c
index 3da91a112bab..a364e1a9b7e8 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -642,7 +642,9 @@ out_unlock:
EXPORT_SYMBOL(__close_fd); /* for ksys_close() */
/*
- * variant of __close_fd that gets a ref on the file for later fput
+ * variant of __close_fd that gets a ref on the file for later fput.
+ * The caller must ensure that filp_close() is called on the file,
+ * followed by an fput().
*/
int __close_fd_get_file(unsigned int fd, struct file **res)
{
@@ -662,7 +664,7 @@ int __close_fd_get_file(unsigned int fd, struct file **res)
spin_unlock(&files->file_lock);
get_file(file);
*res = file;
- return filp_close(file, files);
+ return 0;
out_unlock:
spin_unlock(&files->file_lock);
@@ -706,9 +708,9 @@ void do_close_on_exec(struct files_struct *files)
spin_unlock(&files->file_lock);
}
-static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
+static struct file *__fget_files(struct files_struct *files, unsigned int fd,
+ fmode_t mask, unsigned int refs)
{
- struct files_struct *files = current->files;
struct file *file;
rcu_read_lock();
@@ -729,6 +731,12 @@ loop:
return file;
}
+static inline struct file *__fget(unsigned int fd, fmode_t mask,
+ unsigned int refs)
+{
+ return __fget_files(current->files, fd, mask, refs);
+}
+
struct file *fget_many(unsigned int fd, unsigned int refs)
{
return __fget(fd, FMODE_PATH, refs);
@@ -746,6 +754,18 @@ struct file *fget_raw(unsigned int fd)
}
EXPORT_SYMBOL(fget_raw);
+struct file *fget_task(struct task_struct *task, unsigned int fd)
+{
+ struct file *file = NULL;
+
+ task_lock(task);
+ if (task->files)
+ file = __fget_files(task->files, fd, 0, 1);
+ task_unlock(task);
+
+ return file;
+}
+
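
fget_task() looks up a file in another task's descriptor table: task_lock() keeps task->files stable across the lookup, and the returned struct file carries a reference taken by __fget_files(). A minimal sketch of a hypothetical caller (error handling elided):

	struct file *file = fget_task(task, 0);	/* fd 0 of some other task */

	if (file) {
		/* ... examine file->f_mode, file->f_path, ... */
		fput(file);
	}
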
/*
* Lightweight file lookup - no refcnt increment if fd table isn't shared.
*
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 9135646e41ac..77bf5f95362d 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -74,7 +74,8 @@ int register_filesystem(struct file_system_type * fs)
int res = 0;
struct file_system_type ** p;
- if (fs->parameters && !fs_validate_description(fs->parameters))
+ if (fs->parameters &&
+ !fs_validate_description(fs->name, fs->parameters))
return -EINVAL;
BUG_ON(strchr(fs->name, '.'));
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 335607b8c5c0..76ac9c7d32ec 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2063,7 +2063,7 @@ void wb_workfn(struct work_struct *work)
struct bdi_writeback, dwork);
long pages_written;
- set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
+ set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
current->flags |= PF_SWAPWRITE;
if (likely(!current_is_workqueue_rescuer() ||
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 138b5b4d621d..fc9f6ef93b55 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -45,6 +45,7 @@ static const struct constant_table common_set_sb_flag[] = {
{ "posixacl", SB_POSIXACL },
{ "ro", SB_RDONLY },
{ "sync", SB_SYNCHRONOUS },
+ { },
};
static const struct constant_table common_clear_sb_flag[] = {
@@ -53,6 +54,7 @@ static const struct constant_table common_clear_sb_flag[] = {
{ "nomand", SB_MANDLOCK },
{ "rw", SB_RDONLY },
{ "silent", SB_SILENT },
+ { },
};
static const char *const forbidden_sb_flag[] = {
@@ -175,14 +177,15 @@ int vfs_parse_fs_string(struct fs_context *fc, const char *key,
struct fs_parameter param = {
.key = key,
- .type = fs_value_is_string,
+ .type = fs_value_is_flag,
.size = v_size,
};
- if (v_size > 0) {
+ if (value) {
param.string = kmemdup_nul(value, v_size, GFP_KERNEL);
if (!param.string)
return -ENOMEM;
+ param.type = fs_value_is_string;
}
ret = vfs_parse_fs_param(fc, &param);
@@ -268,6 +271,7 @@ static struct fs_context *alloc_fs_context(struct file_system_type *fs_type,
fc->fs_type = get_filesystem(fs_type);
fc->cred = get_current_cred();
fc->net_ns = get_net(current->nsproxy->net_ns);
+ fc->log.prefix = fs_type->name;
mutex_init(&fc->uapi_mutex);
@@ -361,8 +365,8 @@ struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc)
get_net(fc->net_ns);
get_user_ns(fc->user_ns);
get_cred(fc->cred);
- if (fc->log)
- refcount_inc(&fc->log->usage);
+ if (fc->log.log)
+ refcount_inc(&fc->log.log->usage);
/* Can't call put until we've called ->dup */
ret = fc->ops->dup(fc, src_fc);
@@ -385,64 +389,33 @@ EXPORT_SYMBOL(vfs_dup_fs_context);
* @fc: The filesystem context to log to.
* @fmt: The format of the buffer.
*/
-void logfc(struct fs_context *fc, const char *fmt, ...)
+void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...)
{
- static const char store_failure[] = "OOM: Can't store error string";
- struct fc_log *log = fc ? fc->log : NULL;
- const char *p;
va_list va;
- char *q;
- u8 freeable;
+ struct va_format vaf = {.fmt = fmt, .va = &va};
va_start(va, fmt);
- if (!strchr(fmt, '%')) {
- p = fmt;
- goto unformatted_string;
- }
- if (strcmp(fmt, "%s") == 0) {
- p = va_arg(va, const char *);
- goto unformatted_string;
- }
-
- q = kvasprintf(GFP_KERNEL, fmt, va);
-copied_string:
- if (!q)
- goto store_failure;
- freeable = 1;
- goto store_string;
-
-unformatted_string:
- if ((unsigned long)p >= (unsigned long)__start_rodata &&
- (unsigned long)p < (unsigned long)__end_rodata)
- goto const_string;
- if (log && within_module_core((unsigned long)p, log->owner))
- goto const_string;
- q = kstrdup(p, GFP_KERNEL);
- goto copied_string;
-
-store_failure:
- p = store_failure;
-const_string:
- q = (char *)p;
- freeable = 0;
-store_string:
if (!log) {
- switch (fmt[0]) {
+ switch (level) {
case 'w':
- printk(KERN_WARNING "%s\n", q + 2);
+ printk(KERN_WARNING "%s%s%pV\n", prefix ? prefix : "",
+ prefix ? ": " : "", &vaf);
break;
case 'e':
- printk(KERN_ERR "%s\n", q + 2);
+ printk(KERN_ERR "%s%s%pV\n", prefix ? prefix : "",
+ prefix ? ": " : "", &vaf);
break;
default:
- printk(KERN_NOTICE "%s\n", q + 2);
+ printk(KERN_NOTICE "%s%s%pV\n", prefix ? prefix : "",
+ prefix ? ": " : "", &vaf);
break;
}
- if (freeable)
- kfree(q);
} else {
unsigned int logsize = ARRAY_SIZE(log->buffer);
u8 index;
+ char *q = kasprintf(GFP_KERNEL, "%c %s%s%pV\n", level,
+ prefix ? prefix : "",
+ prefix ? ": " : "", &vaf);
index = log->head & (logsize - 1);
BUILD_BUG_ON(sizeof(log->head) != sizeof(u8) ||
@@ -454,9 +427,11 @@ store_string:
log->tail++;
}
- log->buffer[index] = q;
- log->need_free &= ~(1 << index);
- log->need_free |= freeable << index;
+ log->buffer[index] = q ? q : "OOM: Can't store error string";
+ if (q)
+ log->need_free |= 1 << index;
+ else
+ log->need_free &= ~(1 << index);
log->head++;
}
va_end(va);
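
With the new calling convention, the log level is no longer smuggled in as the first byte of the format string; wrapper macros pass it explicitly. The fs_parser.c changes later in this diff show the expected call sites, e.g.:

	/* from __fs_parse() below; the prefix and ring buffer come from the p_log */
	warn_plog(log, "Deprecated parameter '%s'", param->key);
	inval_plog(log, "Bad value for '%s'", param->key);

(the wrapper definitions live in the headers, which this diff does not include).
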
@@ -468,12 +443,12 @@ EXPORT_SYMBOL(logfc);
*/
static void put_fc_log(struct fs_context *fc)
{
- struct fc_log *log = fc->log;
+ struct fc_log *log = fc->log.log;
int i;
if (log) {
if (refcount_dec_and_test(&log->usage)) {
- fc->log = NULL;
+ fc->log.log = NULL;
for (i = 0; i <= 7; i++)
if (log->need_free & (1 << i))
kfree(log->buffer[i]);
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index d1930adce68d..7e6fb43f9541 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -20,42 +20,66 @@ static const struct constant_table bool_names[] = {
{ "no", false },
{ "true", true },
{ "yes", true },
+ { },
};
+static const struct constant_table *
+__lookup_constant(const struct constant_table *tbl, const char *name)
+{
+ for ( ; tbl->name; tbl++)
+ if (strcmp(name, tbl->name) == 0)
+ return tbl;
+ return NULL;
+}
+
/**
* lookup_constant - Look up a constant by name in an ordered table
* @tbl: The table of constants to search.
- * @tbl_size: The size of the table.
* @name: The name to look up.
* @not_found: The value to return if the name is not found.
*/
-int __lookup_constant(const struct constant_table *tbl, size_t tbl_size,
- const char *name, int not_found)
+int lookup_constant(const struct constant_table *tbl, const char *name, int not_found)
{
- unsigned int i;
+ const struct constant_table *p = __lookup_constant(tbl, name);
- for (i = 0; i < tbl_size; i++)
- if (strcmp(name, tbl[i].name) == 0)
- return tbl[i].value;
+ return p ? p->value : not_found;
+}
+EXPORT_SYMBOL(lookup_constant);
- return not_found;
+static inline bool is_flag(const struct fs_parameter_spec *p)
+{
+ return p->type == NULL;
}
-EXPORT_SYMBOL(__lookup_constant);
static const struct fs_parameter_spec *fs_lookup_key(
- const struct fs_parameter_description *desc,
- const char *name)
+ const struct fs_parameter_spec *desc,
+ struct fs_parameter *param, bool *negated)
{
- const struct fs_parameter_spec *p;
-
- if (!desc->specs)
- return NULL;
-
- for (p = desc->specs; p->name; p++)
- if (strcmp(p->name, name) == 0)
+ const struct fs_parameter_spec *p, *other = NULL;
+ const char *name = param->key;
+ bool want_flag = param->type == fs_value_is_flag;
+
+ *negated = false;
+ for (p = desc; p->name; p++) {
+ if (strcmp(p->name, name) != 0)
+ continue;
+ if (likely(is_flag(p) == want_flag))
return p;
-
- return NULL;
+ other = p;
+ }
+ if (want_flag) {
+ if (name[0] == 'n' && name[1] == 'o' && name[2]) {
+ for (p = desc; p->name; p++) {
+ if (strcmp(p->name, name + 2) != 0)
+ continue;
+ if (!(p->flags & fs_param_neg_with_no))
+ continue;
+ *negated = true;
+ return p;
+ }
+ }
+ }
+ return other;
}
/*
@@ -76,172 +100,38 @@ static const struct fs_parameter_spec *fs_lookup_key(
* unknown parameters are okay and -EINVAL if there was a conversion issue or
* the parameter wasn't recognised and unknowns aren't okay.
*/
-int fs_parse(struct fs_context *fc,
- const struct fs_parameter_description *desc,
+int __fs_parse(struct p_log *log,
+ const struct fs_parameter_spec *desc,
struct fs_parameter *param,
struct fs_parse_result *result)
{
const struct fs_parameter_spec *p;
- const struct fs_parameter_enum *e;
- int ret = -ENOPARAM, b;
- result->has_value = !!param->string;
- result->negated = false;
result->uint_64 = 0;
- p = fs_lookup_key(desc, param->key);
- if (!p) {
- /* If we didn't find something that looks like "noxxx", see if
- * "xxx" takes the "no"-form negative - but only if there
- * wasn't an value.
- */
- if (result->has_value)
- goto unknown_parameter;
- if (param->key[0] != 'n' || param->key[1] != 'o' || !param->key[2])
- goto unknown_parameter;
-
- p = fs_lookup_key(desc, param->key + 2);
- if (!p)
- goto unknown_parameter;
- if (!(p->flags & fs_param_neg_with_no))
- goto unknown_parameter;
- result->boolean = false;
- result->negated = true;
- }
+ p = fs_lookup_key(desc, param, &result->negated);
+ if (!p)
+ return -ENOPARAM;
if (p->flags & fs_param_deprecated)
- warnf(fc, "%s: Deprecated parameter '%s'",
- desc->name, param->key);
-
- if (result->negated)
- goto okay;
-
- /* Certain parameter types only take a string and convert it. */
- switch (p->type) {
- case __fs_param_wasnt_defined:
- return -EINVAL;
- case fs_param_is_u32:
- case fs_param_is_u32_octal:
- case fs_param_is_u32_hex:
- case fs_param_is_s32:
- case fs_param_is_u64:
- case fs_param_is_enum:
- case fs_param_is_string:
- if (param->type != fs_value_is_string)
- goto bad_value;
- if (!result->has_value) {
- if (p->flags & fs_param_v_optional)
- goto okay;
- goto bad_value;
- }
- /* Fall through */
- default:
- break;
- }
+ warn_plog(log, "Deprecated parameter '%s'", param->key);
/* Try to turn the type we were given into the type desired by the
* parameter and give an error if we can't.
*/
- switch (p->type) {
- case fs_param_is_flag:
- if (param->type != fs_value_is_flag &&
- (param->type != fs_value_is_string || result->has_value))
- return invalf(fc, "%s: Unexpected value for '%s'",
- desc->name, param->key);
- result->boolean = true;
- goto okay;
-
- case fs_param_is_bool:
- switch (param->type) {
- case fs_value_is_flag:
- result->boolean = true;
- goto okay;
- case fs_value_is_string:
- if (param->size == 0) {
- result->boolean = true;
- goto okay;
- }
- b = lookup_constant(bool_names, param->string, -1);
- if (b == -1)
- goto bad_value;
- result->boolean = b;
- goto okay;
- default:
- goto bad_value;
- }
-
- case fs_param_is_u32:
- ret = kstrtouint(param->string, 0, &result->uint_32);
- goto maybe_okay;
- case fs_param_is_u32_octal:
- ret = kstrtouint(param->string, 8, &result->uint_32);
- goto maybe_okay;
- case fs_param_is_u32_hex:
- ret = kstrtouint(param->string, 16, &result->uint_32);
- goto maybe_okay;
- case fs_param_is_s32:
- ret = kstrtoint(param->string, 0, &result->int_32);
- goto maybe_okay;
- case fs_param_is_u64:
- ret = kstrtoull(param->string, 0, &result->uint_64);
- goto maybe_okay;
-
- case fs_param_is_enum:
- for (e = desc->enums; e->name[0]; e++) {
- if (e->opt == p->opt &&
- strcmp(e->name, param->string) == 0) {
- result->uint_32 = e->value;
- goto okay;
- }
- }
- goto bad_value;
-
- case fs_param_is_string:
- goto okay;
- case fs_param_is_blob:
- if (param->type != fs_value_is_blob)
- goto bad_value;
- goto okay;
-
- case fs_param_is_fd: {
- switch (param->type) {
- case fs_value_is_string:
- if (!result->has_value)
- goto bad_value;
-
- ret = kstrtouint(param->string, 0, &result->uint_32);
- break;
- case fs_value_is_file:
- result->uint_32 = param->dirfd;
- ret = 0;
- default:
- goto bad_value;
- }
-
- if (result->uint_32 > INT_MAX)
- goto bad_value;
- goto maybe_okay;
- }
-
- case fs_param_is_blockdev:
- case fs_param_is_path:
- goto okay;
- default:
- BUG();
+ if (is_flag(p)) {
+ if (param->type != fs_value_is_flag)
+ return inval_plog(log, "Unexpected value for '%s'",
+ param->key);
+ result->boolean = !result->negated;
+ } else {
+ int ret = p->type(log, p, param, result);
+ if (ret)
+ return ret;
}
-
-maybe_okay:
- if (ret < 0)
- goto bad_value;
-okay:
return p->opt;
-
-bad_value:
- return invalf(fc, "%s: Bad value for '%s'", desc->name, param->key);
-unknown_parameter:
- return -ENOPARAM;
}
-EXPORT_SYMBOL(fs_parse);
+EXPORT_SYMBOL(__fs_parse);
/**
* fs_lookup_param - Look up a path referred to by a parameter
@@ -267,9 +157,6 @@ int fs_lookup_param(struct fs_context *fc,
return PTR_ERR(f);
put_f = true;
break;
- case fs_value_is_filename_empty:
- flags = LOOKUP_EMPTY;
- /* Fall through */
case fs_value_is_filename:
f = param->name;
put_f = false;
@@ -302,6 +189,124 @@ out:
}
EXPORT_SYMBOL(fs_lookup_param);
+int fs_param_bad_value(struct p_log *log, struct fs_parameter *param)
+{
+ return inval_plog(log, "Bad value for '%s'", param->key);
+}
+
+int fs_param_is_bool(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ int b;
+ if (param->type != fs_value_is_string)
+ return fs_param_bad_value(log, param);
+ b = lookup_constant(bool_names, param->string, -1);
+ if (b == -1)
+ return fs_param_bad_value(log, param);
+ result->boolean = b;
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_bool);
+
+int fs_param_is_u32(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ int base = (unsigned long)p->data;
+ if (param->type != fs_value_is_string ||
+ kstrtouint(param->string, base, &result->uint_32) < 0)
+ return fs_param_bad_value(log, param);
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_u32);
+
+int fs_param_is_s32(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ if (param->type != fs_value_is_string ||
+ kstrtoint(param->string, 0, &result->int_32) < 0)
+ return fs_param_bad_value(log, param);
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_s32);
+
+int fs_param_is_u64(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ if (param->type != fs_value_is_string ||
+ kstrtoull(param->string, 0, &result->uint_64) < 0)
+ return fs_param_bad_value(log, param);
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_u64);
+
+int fs_param_is_enum(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ const struct constant_table *c;
+ if (param->type != fs_value_is_string)
+ return fs_param_bad_value(log, param);
+ c = __lookup_constant(p->data, param->string);
+ if (!c)
+ return fs_param_bad_value(log, param);
+ result->uint_32 = c->value;
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_enum);
+
+int fs_param_is_string(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ if (param->type != fs_value_is_string || !*param->string)
+ return fs_param_bad_value(log, param);
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_string);
+
+int fs_param_is_blob(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ if (param->type != fs_value_is_blob)
+ return fs_param_bad_value(log, param);
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_blob);
+
+int fs_param_is_fd(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ switch (param->type) {
+ case fs_value_is_string:
+ if (kstrtouint(param->string, 0, &result->uint_32) < 0)
+ break;
+ if (result->uint_32 <= INT_MAX)
+ return 0;
+ break;
+ case fs_value_is_file:
+ result->uint_32 = param->dirfd;
+ if (result->uint_32 <= INT_MAX)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ return fs_param_bad_value(log, param);
+}
+EXPORT_SYMBOL(fs_param_is_fd);
+
+int fs_param_is_blockdev(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_blockdev);
+
+int fs_param_is_path(struct p_log *log, const struct fs_parameter_spec *p,
+ struct fs_parameter *param, struct fs_parse_result *result)
+{
+ return 0;
+}
+EXPORT_SYMBOL(fs_param_is_path);
+
#ifdef CONFIG_VALIDATE_FS_PARSER
/**
* validate_constant_table - Validate a constant table
@@ -357,102 +362,26 @@ bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
* fs_validate_description - Validate a parameter description
* @desc: The parameter description to validate.
*/
-bool fs_validate_description(const struct fs_parameter_description *desc)
+bool fs_validate_description(const char *name,
+ const struct fs_parameter_spec *desc)
{
const struct fs_parameter_spec *param, *p2;
- const struct fs_parameter_enum *e;
- const char *name = desc->name;
- unsigned int nr_params = 0;
- bool good = true, enums = false;
+ bool good = true;
pr_notice("*** VALIDATE %s ***\n", name);
- if (!name[0]) {
- pr_err("VALIDATE Parser: No name\n");
- name = "Unknown";
- good = false;
- }
-
- if (desc->specs) {
- for (param = desc->specs; param->name; param++) {
- enum fs_parameter_type t = param->type;
-
- /* Check that the type is in range */
- if (t == __fs_param_wasnt_defined ||
- t >= nr__fs_parameter_type) {
- pr_err("VALIDATE %s: PARAM[%s] Bad type %u\n",
- name, param->name, t);
- good = false;
- } else if (t == fs_param_is_enum) {
- enums = true;
- }
-
- /* Check for duplicate parameter names */
- for (p2 = desc->specs; p2 < param; p2++) {
- if (strcmp(param->name, p2->name) == 0) {
- pr_err("VALIDATE %s: PARAM[%s]: Duplicate\n",
- name, param->name);
- good = false;
- }
- }
- }
-
- nr_params = param - desc->specs;
- }
-
- if (desc->enums) {
- if (!nr_params) {
- pr_err("VALIDATE %s: Enum table but no parameters\n",
- name);
- good = false;
- goto no_enums;
- }
- if (!enums) {
- pr_err("VALIDATE %s: Enum table but no enum-type values\n",
- name);
- good = false;
- goto no_enums;
- }
-
- for (e = desc->enums; e->name[0]; e++) {
- /* Check that all entries in the enum table have at
- * least one parameter that uses them.
- */
- for (param = desc->specs; param->name; param++) {
- if (param->opt == e->opt &&
- param->type != fs_param_is_enum) {
- pr_err("VALIDATE %s: e[%tu] enum val for %s\n",
- name, e - desc->enums, param->name);
- good = false;
- }
- }
- }
-
- /* Check that all enum-type parameters have at least one enum
- * value in the enum table.
- */
- for (param = desc->specs; param->name; param++) {
- if (param->type != fs_param_is_enum)
- continue;
- for (e = desc->enums; e->name[0]; e++)
- if (e->opt == param->opt)
- break;
- if (!e->name[0]) {
- pr_err("VALIDATE %s: PARAM[%s] enum with no values\n",
+ for (param = desc; param->name; param++) {
+ /* Check for duplicate parameter names */
+ for (p2 = desc; p2 < param; p2++) {
+ if (strcmp(param->name, p2->name) == 0) {
+ if (is_flag(param) != is_flag(p2))
+ continue;
+ pr_err("VALIDATE %s: PARAM[%s]: Duplicate\n",
name, param->name);
good = false;
}
}
- } else {
- if (enums) {
- pr_err("VALIDATE %s: enum-type values, but no enum table\n",
- name);
- good = false;
- goto no_enums;
- }
}
-
-no_enums:
return good;
}
#endif /* CONFIG_VALIDATE_FS_PARSER */
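
The conversions running through the filesystems below all follow the pattern established here: the fs_parameter_description wrapper is gone, a parameter table is now a bare array of fs_parameter_spec, enum options carry their constant_table directly, and flag results land in result->boolean. A minimal sketch of a converted parser with hypothetical option names (the fuse, gfs2 and hugetlbfs hunks further down are the real instances):

    enum { Opt_mode, Opt_debug };

    static const struct constant_table example_modes[] = {
    	{ "fast", 0 },
    	{ "safe", 1 },
    	{}
    };

    static const struct fs_parameter_spec example_fs_parameters[] = {
    	fsparam_enum   ("mode",  Opt_mode, example_modes),
    	fsparam_flag_no("debug", Opt_debug),
    	{}
    };

    static int example_parse_param(struct fs_context *fc,
    				   struct fs_parameter *param)
    {
    	struct fs_parse_result result;
    	int opt = fs_parse(fc, example_fs_parameters, param, &result);

    	if (opt < 0)
    		return opt;
    	switch (opt) {
    	case Opt_mode:
    		/* result.uint_32 holds the constant_table value */
    		break;
    	case Opt_debug:
    		/* result.boolean distinguishes "debug" from "nodebug" */
    		break;
    	}
    	return 0;
    }
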
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 9616af3768e1..08e91efbce53 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -111,7 +111,7 @@ extern void fscache_enqueue_object(struct fscache_object *);
* object-list.c
*/
#ifdef CONFIG_FSCACHE_OBJECT_LIST
-extern const struct file_operations fscache_objlist_fops;
+extern const struct proc_ops fscache_objlist_proc_ops;
extern void fscache_objlist_add(struct fscache_object *);
extern void fscache_objlist_remove(struct fscache_object *);
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index 72ebfe578f40..e106a1a1600d 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -7,6 +7,7 @@
#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
+#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/key.h>
@@ -405,9 +406,9 @@ static int fscache_objlist_release(struct inode *inode, struct file *file)
return seq_release(inode, file);
}
-const struct file_operations fscache_objlist_fops = {
- .open = fscache_objlist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = fscache_objlist_release,
+const struct proc_ops fscache_objlist_proc_ops = {
+ .proc_open = fscache_objlist_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = fscache_objlist_release,
};
diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c
index 5523446e2952..90a7bc22f7e1 100644
--- a/fs/fscache/proc.c
+++ b/fs/fscache/proc.c
@@ -35,7 +35,7 @@ int __init fscache_proc_init(void)
#ifdef CONFIG_FSCACHE_OBJECT_LIST
if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
- &fscache_objlist_fops))
+ &fscache_objlist_proc_ops))
goto error_objects;
#endif
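
The fscache hunks above are one instance of the tree-wide move of /proc entries from file_operations to the dedicated proc_ops, whose members carry a proc_ prefix. A minimal sketch of the same shape for a seq_file-backed entry (names hypothetical):

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
    	seq_puts(m, "hello\n");
    	return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
    	return single_open(file, example_show, NULL);
    }

    static const struct proc_ops example_proc_ops = {
    	.proc_open	= example_open,
    	.proc_read	= seq_read,
    	.proc_lseek	= seq_lseek,
    	.proc_release	= single_release,
    };

    /* registration: proc_create("example", 0444, NULL, &example_proc_ops); */
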
diff --git a/fs/fsopen.c b/fs/fsopen.c
index 043ffa8dc263..2fa3f241b762 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -25,7 +25,7 @@ static ssize_t fscontext_read(struct file *file,
char __user *_buf, size_t len, loff_t *pos)
{
struct fs_context *fc = file->private_data;
- struct fc_log *log = fc->log;
+ struct fc_log *log = fc->log.log;
unsigned int logsize = ARRAY_SIZE(log->buffer);
ssize_t ret;
char *p;
@@ -97,11 +97,11 @@ static int fscontext_create_fd(struct fs_context *fc, unsigned int o_flags)
static int fscontext_alloc_log(struct fs_context *fc)
{
- fc->log = kzalloc(sizeof(*fc->log), GFP_KERNEL);
- if (!fc->log)
+ fc->log.log = kzalloc(sizeof(*fc->log.log), GFP_KERNEL);
+ if (!fc->log.log)
return -ENOMEM;
- refcount_set(&fc->log->usage, 1);
- fc->log->owner = fc->fs_type->owner;
+ refcount_set(&fc->log.log->usage, 1);
+ fc->log.log->owner = fc->fs_type->owner;
return 0;
}
@@ -321,6 +321,7 @@ SYSCALL_DEFINE5(fsconfig,
struct fs_context *fc;
struct fd f;
int ret;
+ int lookup_flags = 0;
struct fs_parameter param = {
.type = fs_value_is_undefined,
@@ -409,19 +410,12 @@ SYSCALL_DEFINE5(fsconfig,
goto out_key;
}
break;
+ case FSCONFIG_SET_PATH_EMPTY:
+ lookup_flags = LOOKUP_EMPTY;
+ /* fallthru */
case FSCONFIG_SET_PATH:
param.type = fs_value_is_filename;
- param.name = getname_flags(_value, 0, NULL);
- if (IS_ERR(param.name)) {
- ret = PTR_ERR(param.name);
- goto out_key;
- }
- param.dirfd = aux;
- param.size = strlen(param.name->name);
- break;
- case FSCONFIG_SET_PATH_EMPTY:
- param.type = fs_value_is_filename_empty;
- param.name = getname_flags(_value, LOOKUP_EMPTY, NULL);
+ param.name = getname_flags(_value, lookup_flags, NULL);
if (IS_ERR(param.name)) {
ret = PTR_ERR(param.name);
goto out_key;
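
With fs_value_is_filename_empty gone, FSCONFIG_SET_PATH_EMPTY is now plain FSCONFIG_SET_PATH resolved with LOOKUP_EMPTY, so the userspace contract is unchanged: an empty path string plus a descriptor in aux resolves the descriptor itself, as with AT_EMPTY_PATH. A hedged userspace sketch (raw syscall; the command constant comes from the fsconfig_command enum in <linux/mount.h> and should be taken from your headers where available):

    #include <sys/syscall.h>
    #include <unistd.h>

    #define FSCONFIG_SET_PATH_EMPTY	4	/* from <linux/mount.h> */

    static int set_source_by_fd(int fsfd, int dirfd)
    {
    	/* key "source", empty value, fd in aux: resolves dirfd itself */
    	return syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_PATH_EMPTY,
    		       "source", "", dirfd);
    }
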
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 00015d851382..030f094910c3 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -451,8 +451,8 @@ static int cuse_send_init(struct cuse_conn *cc)
ap->args.out_args[0].size = sizeof(ia->out);
ap->args.out_args[0].value = &ia->out;
ap->args.out_args[1].size = CUSE_INIT_INFO_MAX;
- ap->args.out_argvar = 1;
- ap->args.out_pages = 1;
+ ap->args.out_argvar = true;
+ ap->args.out_pages = true;
ap->num_pages = 1;
ap->pages = &ia->page;
ap->descs = &ia->desc;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index ee190119f45c..de1e2fde60bd 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -818,7 +818,7 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
struct fuse_conn *fc = get_fuse_conn(olddir);
int err;
- if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
if (flags) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ce715380143c..9d67b830fb7a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -803,6 +803,10 @@ static int fuse_do_readpage(struct file *file, struct page *page)
attr_ver = fuse_get_attr_version(fc);
+ /* Don't overflow end offset */
+ if (pos + (desc.length - 1) == LLONG_MAX)
+ desc.length--;
+
fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
res = fuse_simple_request(fc, &ia.ap.args);
if (res < 0)
@@ -888,6 +892,14 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
ap->args.out_pages = true;
ap->args.page_zeroing = true;
ap->args.page_replace = true;
+
+ /* Don't overflow end offset */
+ if (pos + (count - 1) == LLONG_MAX) {
+ count--;
+ ap->descs[ap->num_pages - 1].length--;
+ }
+ WARN_ON((loff_t) (pos + count) < 0);
+
fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
ia->read.attr_ver = fuse_get_attr_version(fc);
if (fc->async_read) {
@@ -1397,9 +1409,9 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
}
if (write)
- ap->args.in_pages = 1;
+ ap->args.in_pages = true;
else
- ap->args.out_pages = 1;
+ ap->args.out_pages = true;
*nbytesp = nbytes;
@@ -1465,6 +1477,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
}
ia = NULL;
if (nres < 0) {
+ iov_iter_revert(iter, nbytes);
err = nres;
break;
}
@@ -1473,8 +1486,10 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
count -= nres;
res += nres;
pos += nres;
- if (nres != nbytes)
+ if (nres != nbytes) {
+ iov_iter_revert(iter, nbytes - nres);
break;
+ }
if (count) {
max_pages = iov_iter_npages(iter, fc->max_pages);
ia = fuse_io_alloc(io, max_pages);
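
Both fuse hunks above guard the same boundary: a read whose last byte would land exactly at offset LLONG_MAX, where the end offset is representable but pos + count is not. A worked instance of the arithmetic (values hypothetical):

    /*
     * pos   = LLONG_MAX - 4095	(page-sized read at the very top)
     * count = 4096
     * pos + (count - 1) == LLONG_MAX, so count is trimmed to 4095;
     * afterwards pos + count == LLONG_MAX and the signed sum cannot
     * wrap, which is what WARN_ON((loff_t)(pos + count) < 0) asserts.
     */
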
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 16aec32f7f3d..95d712d44ca1 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -448,7 +448,7 @@ enum {
OPT_ERR
};
-static const struct fs_parameter_spec fuse_param_specs[] = {
+static const struct fs_parameter_spec fuse_fs_parameters[] = {
fsparam_string ("source", OPT_SOURCE),
fsparam_u32 ("fd", OPT_FD),
fsparam_u32oct ("rootmode", OPT_ROOTMODE),
@@ -462,68 +462,63 @@ static const struct fs_parameter_spec fuse_param_specs[] = {
{}
};
-static const struct fs_parameter_description fuse_fs_parameters = {
- .name = "fuse",
- .specs = fuse_param_specs,
-};
-
static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct fs_parse_result result;
struct fuse_fs_context *ctx = fc->fs_private;
int opt;
- opt = fs_parse(fc, &fuse_fs_parameters, param, &result);
+ opt = fs_parse(fc, fuse_fs_parameters, param, &result);
if (opt < 0)
return opt;
switch (opt) {
case OPT_SOURCE:
if (fc->source)
- return invalf(fc, "fuse: Multiple sources specified");
+ return invalfc(fc, "Multiple sources specified");
fc->source = param->string;
param->string = NULL;
break;
case OPT_SUBTYPE:
if (ctx->subtype)
- return invalf(fc, "fuse: Multiple subtypes specified");
+ return invalfc(fc, "Multiple subtypes specified");
ctx->subtype = param->string;
param->string = NULL;
return 0;
case OPT_FD:
ctx->fd = result.uint_32;
- ctx->fd_present = 1;
+ ctx->fd_present = true;
break;
case OPT_ROOTMODE:
if (!fuse_valid_type(result.uint_32))
- return invalf(fc, "fuse: Invalid rootmode");
+ return invalfc(fc, "Invalid rootmode");
ctx->rootmode = result.uint_32;
- ctx->rootmode_present = 1;
+ ctx->rootmode_present = true;
break;
case OPT_USER_ID:
ctx->user_id = make_kuid(fc->user_ns, result.uint_32);
if (!uid_valid(ctx->user_id))
- return invalf(fc, "fuse: Invalid user_id");
- ctx->user_id_present = 1;
+ return invalfc(fc, "Invalid user_id");
+ ctx->user_id_present = true;
break;
case OPT_GROUP_ID:
ctx->group_id = make_kgid(fc->user_ns, result.uint_32);
if (!gid_valid(ctx->group_id))
- return invalf(fc, "fuse: Invalid group_id");
- ctx->group_id_present = 1;
+ return invalfc(fc, "Invalid group_id");
+ ctx->group_id_present = true;
break;
case OPT_DEFAULT_PERMISSIONS:
- ctx->default_permissions = 1;
+ ctx->default_permissions = true;
break;
case OPT_ALLOW_OTHER:
- ctx->allow_other = 1;
+ ctx->allow_other = true;
break;
case OPT_MAX_READ:
@@ -532,7 +527,7 @@ static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param)
case OPT_BLKSIZE:
if (!ctx->is_bdev)
- return invalf(fc, "fuse: blksize only supported for fuseblk");
+ return invalfc(fc, "blksize only supported for fuseblk");
ctx->blksize = result.uint_32;
break;
@@ -997,7 +992,7 @@ void fuse_send_init(struct fuse_conn *fc)
/* Variable length argument used for backward compatibility
with interface version < 7.5. Rest of init_out is zeroed
by do_get_request(), so a short reply is not a problem */
- ia->args.out_argvar = 1;
+ ia->args.out_argvar = true;
ia->args.out_args[0].size = sizeof(ia->out);
ia->args.out_args[0].value = &ia->out;
ia->args.force = true;
@@ -1347,7 +1342,7 @@ static struct file_system_type fuse_fs_type = {
.name = "fuse",
.fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
.init_fs_context = fuse_init_fs_context,
- .parameters = &fuse_fs_parameters,
+ .parameters = fuse_fs_parameters,
.kill_sb = fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
@@ -1363,7 +1358,7 @@ static struct file_system_type fuseblk_fs_type = {
.owner = THIS_MODULE,
.name = "fuseblk",
.init_fs_context = fuse_init_fs_context,
- .parameters = &fuse_fs_parameters,
+ .parameters = fuse_fs_parameters,
.kill_sb = fuse_kill_sb_blk,
.fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index 6a40f75a0d25..90e3f01bd796 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -332,7 +332,7 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx)
return -ENOMEM;
plus = fuse_use_readdirplus(inode, ctx);
- ap->args.out_pages = 1;
+ ap->args.out_pages = true;
ap->num_pages = 1;
ap->pages = &page;
ap->descs = &desc;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 9c6df721321a..ba83b49ce18c 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -183,14 +183,12 @@ static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc
struct inode *inode = page->mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- int ret;
if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
goto out;
if (PageChecked(page) || current->journal_info)
goto out_ignore;
- ret = __gfs2_jdata_writepage(page, wbc);
- return ret;
+ return __gfs2_jdata_writepage(page, wbc);
out_ignore:
redirty_page_for_writepage(wbc, page);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index eb9c0578978f..c8b62577e2f2 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -73,9 +73,6 @@
#include "bmap.h"
#include "util.h"
-#define IS_LEAF 1 /* Hashed (leaf) directory */
-#define IS_DINODE 2 /* Linear (stuffed dinode block) directory */
-
#define MAX_RA_BLOCKS 32 /* max read-ahead blocks */
#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 9d58295ccf7a..cb26be6f4351 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -847,7 +847,7 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct gfs2_inode *ip = GFS2_I(inode);
- ssize_t written = 0, ret;
+ ssize_t ret;
ret = gfs2_rsqa_alloc(ip);
if (ret)
@@ -867,68 +867,58 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
inode_lock(inode);
ret = generic_write_checks(iocb, from);
if (ret <= 0)
- goto out;
-
- /* We can write back this queue in page reclaim */
- current->backing_dev_info = inode_to_bdi(inode);
+ goto out_unlock;
ret = file_remove_privs(file);
if (ret)
- goto out2;
+ goto out_unlock;
ret = file_update_time(file);
if (ret)
- goto out2;
+ goto out_unlock;
if (iocb->ki_flags & IOCB_DIRECT) {
struct address_space *mapping = file->f_mapping;
- loff_t pos, endbyte;
- ssize_t buffered;
+ ssize_t buffered, ret2;
- written = gfs2_file_direct_write(iocb, from);
- if (written < 0 || !iov_iter_count(from))
- goto out2;
+ ret = gfs2_file_direct_write(iocb, from);
+ if (ret < 0 || !iov_iter_count(from))
+ goto out_unlock;
- ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
- if (unlikely(ret < 0))
- goto out2;
- buffered = ret;
+ iocb->ki_flags |= IOCB_DSYNC;
+ current->backing_dev_info = inode_to_bdi(inode);
+ buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ current->backing_dev_info = NULL;
+ if (unlikely(buffered <= 0))
+ goto out_unlock;
/*
* We need to ensure that the page cache pages are written to
* disk and invalidated to preserve the expected O_DIRECT
- * semantics.
+ * semantics. If the writeback or invalidate fails, only report
+ * the direct I/O range as we don't know if the buffered pages
+ * made it to disk.
*/
- pos = iocb->ki_pos;
- endbyte = pos + buffered - 1;
- ret = filemap_write_and_wait_range(mapping, pos, endbyte);
- if (!ret) {
- iocb->ki_pos += buffered;
- written += buffered;
- invalidate_mapping_pages(mapping,
- pos >> PAGE_SHIFT,
- endbyte >> PAGE_SHIFT);
- } else {
- /*
- * We don't know how much we wrote, so just return
- * the number of bytes which were direct-written
- */
- }
+ iocb->ki_pos += buffered;
+ ret2 = generic_write_sync(iocb, buffered);
+ invalidate_mapping_pages(mapping,
+ (iocb->ki_pos - buffered) >> PAGE_SHIFT,
+ (iocb->ki_pos - 1) >> PAGE_SHIFT);
+ if (!ret || ret2 > 0)
+ ret += ret2;
} else {
+ current->backing_dev_info = inode_to_bdi(inode);
ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
- if (likely(ret > 0))
+ current->backing_dev_info = NULL;
+ if (likely(ret > 0)) {
iocb->ki_pos += ret;
+ ret = generic_write_sync(iocb, ret);
+ }
}
-out2:
- current->backing_dev_info = NULL;
-out:
+out_unlock:
inode_unlock(inode);
- if (likely(ret > 0)) {
- /* Handle various SYNC-type writes */
- ret = generic_write_sync(iocb, ret);
- }
- return written ? written : ret;
+ return ret;
}
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
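
A worked pass through the rewritten return logic in gfs2_file_write_iter() (numbers hypothetical): suppose gfs2_file_direct_write() returns 8192 with data still left in the iterator. The tail goes through the buffered path with IOCB_DSYNC set; if generic_write_sync() then returns 4096, the final result is 12288 because ret2 > 0. If the sync fails after a successful direct write, (!ret || ret2 > 0) is false and only the 8192 direct bytes are reported, matching the comment about not knowing whether the buffered pages reached disk.
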
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b7123de7c180..d0eceaff3cea 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -826,7 +826,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
if (glops->go_flags & GLOF_LVB) {
- gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
+ gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
if (!gl->gl_lksb.sb_lvbptr) {
kmem_cache_free(cachep, gl);
return -ENOMEM;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 4ede1f18de85..061d22e1ceb6 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -95,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
/* A shortened, inline version of gfs2_trans_begin()
* tr->alloced is not set since the transaction structure is
* on the stack */
- tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
+ tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
tr.tr_ip = _RET_IP_;
if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
return;
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 5f89c515f5bb..9fd88ed18807 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -387,8 +387,6 @@ struct gfs2_glock {
struct rhash_head gl_node;
};
-#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
-
enum {
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
@@ -505,6 +503,7 @@ struct gfs2_trans {
unsigned int tr_num_buf_rm;
unsigned int tr_num_databuf_rm;
unsigned int tr_num_revoke;
+ unsigned int tr_num_revoke_rm;
struct list_head tr_list;
struct list_head tr_databuf;
@@ -703,6 +702,7 @@ struct gfs2_sbd {
u32 sd_fsb2bb_shift;
u32 sd_diptrs; /* Number of pointers in a dinode */
u32 sd_inptrs; /* Number of pointers in a indirect block */
+ u32 sd_ldptrs; /* Number of pointers in a log descriptor block */
u32 sd_jbsize; /* Size of a journaled data block */
u32 sd_hash_bsize; /* sizeof(exhash block) */
u32 sd_hash_bsize_shift;
@@ -803,7 +803,7 @@ struct gfs2_sbd {
struct gfs2_trans *sd_log_tr;
unsigned int sd_log_blks_reserved;
- int sd_log_commited_revoke;
+ int sd_log_committed_revoke;
atomic_t sd_log_pinned;
unsigned int sd_log_num_revoke;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index dafef10b91f1..2716d56ed0a0 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -136,7 +136,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (inode->i_state & I_NEW) {
struct gfs2_sbd *sdp = GFS2_SB(inode);
- ip->i_no_formal_ino = no_formal_ino;
error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
if (unlikely(error))
@@ -175,21 +174,22 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
gfs2_glock_put(io_gl);
io_gl = NULL;
+ /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
+ inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
+ inode->i_atime.tv_nsec = 0;
+
if (type == DT_UNKNOWN) {
/* Inode glock must be locked already */
error = gfs2_inode_refresh(GFS2_I(inode));
if (error)
goto fail_refresh;
} else {
+ ip->i_no_formal_ino = no_formal_ino;
inode->i_mode = DT2IF(type);
}
gfs2_set_iop(inode);
- /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
- inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
- inode->i_atime.tv_nsec = 0;
-
unlock_new_inode(inode);
}
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index eb3f2e7b8085..00a2e721a374 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -37,7 +37,6 @@ static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
* gfs2_struct2blk - compute stuff
* @sdp: the filesystem
* @nstruct: the number of structures
- * @ssize: the size of the structures
*
* Compute the number of log descriptor blocks needed to hold a certain number
* of structures of a certain size.
@@ -45,18 +44,16 @@ static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
* Returns: the number of blocks needed (minimum is always 1)
*/
-unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
- unsigned int ssize)
+unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
{
unsigned int blks;
unsigned int first, second;
blks = 1;
- first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
+ first = sdp->sd_ldptrs;
if (nstruct > first) {
- second = (sdp->sd_sb.sb_bsize -
- sizeof(struct gfs2_meta_header)) / ssize;
+ second = sdp->sd_inptrs;
blks += DIV_ROUND_UP(nstruct - first, second);
}
@@ -472,9 +469,8 @@ static unsigned int calc_reserved(struct gfs2_sbd *sdp)
reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
}
- if (sdp->sd_log_commited_revoke > 0)
- reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
- sizeof(u64));
+ if (sdp->sd_log_committed_revoke > 0)
+ reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
/* One for the overall header */
if (reserved)
reserved++;
@@ -829,7 +825,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
if (unlikely(state == SFS_FROZEN))
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
gfs2_assert_withdraw(sdp,
- sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
+ sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke);
gfs2_ordered_write(sdp);
lops_before_commit(sdp, tr);
@@ -848,7 +844,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
gfs2_log_lock(sdp);
sdp->sd_log_head = sdp->sd_log_flush_head;
sdp->sd_log_blks_reserved = 0;
- sdp->sd_log_commited_revoke = 0;
+ sdp->sd_log_committed_revoke = 0;
spin_lock(&sdp->sd_ail_lock);
if (tr && !list_empty(&tr->tr_ail1_list)) {
@@ -899,6 +895,7 @@ static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
old->tr_num_buf_rm += new->tr_num_buf_rm;
old->tr_num_databuf_rm += new->tr_num_databuf_rm;
old->tr_num_revoke += new->tr_num_revoke;
+ old->tr_num_revoke_rm += new->tr_num_revoke_rm;
list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
list_splice_tail_init(&new->tr_buf, &old->tr_buf);
@@ -920,7 +917,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
set_bit(TR_ATTACHED, &tr->tr_flags);
}
- sdp->sd_log_commited_revoke += tr->tr_num_revoke;
+ sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
reserved = calc_reserved(sdp);
maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
gfs2_assert_withdraw(sdp, maxres >= reserved);
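
With sd_ldptrs and sd_inptrs precomputed at mount time (see the ops_fstype.c hunk below), gfs2_struct2blk() reduces to blks = 1 + DIV_ROUND_UP(nstruct - first, second) once nstruct exceeds the first block's capacity. A worked instance with hypothetical capacities (the real values depend on on-disk header sizes this diff does not show):

    /*
     * 4 KiB blocks, u64 revoke entries:
     *   first  = sd_ldptrs = 503	entries after a log descriptor
     *   second = sd_inptrs = 509	entries after a meta header
     * nstruct = 1000:
     *   blks = 1 + DIV_ROUND_UP(1000 - 503, 509) = 1 + 1 = 2
     */
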
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 2ff163a8dce1..c0a65e5a126b 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -60,9 +60,9 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
spin_unlock(&sdp->sd_ordered_lock);
}
}
+
extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
-extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
- unsigned int ssize);
+extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 55fed7daf2b1..c090d5ad3f22 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -259,7 +259,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
struct super_block *sb = sdp->sd_vfs;
struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
- bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
+ bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9);
bio_set_dev(bio, sb->s_bdev);
bio->bi_end_io = end_io;
bio->bi_private = sdp;
@@ -422,7 +422,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
- if (lh.lh_sequence > head->lh_sequence)
+ if (lh.lh_sequence >= head->lh_sequence)
*head = lh;
else {
ret = true;
@@ -472,6 +472,20 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
put_page(page); /* Once more for find_or_create_page */
}
+static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
+{
+ struct bio *new;
+
+ new = bio_alloc(GFP_NOIO, nr_iovecs);
+ bio_copy_dev(new, prev);
+ new->bi_iter.bi_sector = bio_end_sector(prev);
+ new->bi_opf = prev->bi_opf;
+ new->bi_write_hint = prev->bi_write_hint;
+ bio_chain(new, prev);
+ submit_bio(prev);
+ return new;
+}
+
/**
* gfs2_find_jhead - find the head of a log
* @jd: The journal descriptor
@@ -488,15 +502,15 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct address_space *mapping = jd->jd_inode->i_mapping;
unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
- unsigned int bsize = sdp->sd_sb.sb_bsize;
+ unsigned int bsize = sdp->sd_sb.sb_bsize, off;
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
unsigned int shift = PAGE_SHIFT - bsize_shift;
- unsigned int readhead_blocks = BIO_MAX_PAGES << shift;
+ unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
struct gfs2_journal_extent *je;
int sz, ret = 0;
struct bio *bio = NULL;
struct page *page = NULL;
- bool done = false;
+ bool bio_chained = false, done = false;
errseq_t since;
memset(head, 0, sizeof(*head));
@@ -505,9 +519,9 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
since = filemap_sample_wb_err(mapping);
list_for_each_entry(je, &jd->extent_list, list) {
- for (; block < je->lblock + je->blocks; block++) {
- u64 dblock;
+ u64 dblock = je->dblock;
+ for (; block < je->lblock + je->blocks; block++, dblock++) {
if (!page) {
page = find_or_create_page(mapping,
block >> shift, GFP_NOFS);
@@ -516,35 +530,41 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
done = true;
goto out;
}
+ off = 0;
}
- if (bio) {
- unsigned int off;
-
- off = (block << bsize_shift) & ~PAGE_MASK;
+ if (!bio || (bio_chained && !off)) {
+ /* start new bio */
+ } else {
sz = bio_add_page(bio, page, bsize, off);
- if (sz == bsize) { /* block added */
- if (off + bsize == PAGE_SIZE) {
- page = NULL;
- goto page_added;
- }
- continue;
+ if (sz == bsize)
+ goto block_added;
+ if (off) {
+ unsigned int blocks =
+ (PAGE_SIZE - off) >> bsize_shift;
+
+ bio = gfs2_chain_bio(bio, blocks);
+ bio_chained = true;
+ goto add_block_to_new_bio;
}
+ }
+
+ if (bio) {
blocks_submitted = block + 1;
submit_bio(bio);
- bio = NULL;
}
- dblock = je->dblock + (block - je->lblock);
bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
bio->bi_opf = REQ_OP_READ;
- sz = bio_add_page(bio, page, bsize, 0);
- gfs2_assert_warn(sdp, sz == bsize);
- if (bsize == PAGE_SIZE)
+ bio_chained = false;
+add_block_to_new_bio:
+ sz = bio_add_page(bio, page, bsize, off);
+ BUG_ON(sz != bsize);
+block_added:
+ off += bsize;
+ if (off == PAGE_SIZE)
page = NULL;
-
-page_added:
- if (blocks_submitted < blocks_read + readhead_blocks) {
+ if (blocks_submitted < blocks_read + readahead_blocks) {
/* Keep at least one bio in flight */
continue;
}
@@ -846,7 +866,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
if (!sdp->sd_log_num_revoke)
return;
- length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
+ length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
offset = sizeof(struct gfs2_log_descriptor);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index e8b7b0ce8404..a1a8ef7ed3fd 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -298,6 +298,8 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
sizeof(struct gfs2_dinode)) / sizeof(u64);
sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_meta_header)) / sizeof(u64);
+ sdp->sd_ldptrs = (sdp->sd_sb.sb_bsize -
+ sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
@@ -1250,6 +1252,7 @@ enum gfs2_param {
Opt_upgrade,
Opt_acl,
Opt_quota,
+ Opt_quota_flag,
Opt_suiddir,
Opt_data,
Opt_meta,
@@ -1264,17 +1267,11 @@ enum gfs2_param {
Opt_loccookie,
};
-enum opt_quota {
- Opt_quota_unset = 0,
- Opt_quota_off,
- Opt_quota_account,
- Opt_quota_on,
-};
-
-static const unsigned int opt_quota_values[] = {
- [Opt_quota_off] = GFS2_QUOTA_OFF,
- [Opt_quota_account] = GFS2_QUOTA_ACCOUNT,
- [Opt_quota_on] = GFS2_QUOTA_ON,
+static const struct constant_table gfs2_param_quota[] = {
+ {"off", GFS2_QUOTA_OFF},
+ {"account", GFS2_QUOTA_ACCOUNT},
+ {"on", GFS2_QUOTA_ON},
+ {}
};
enum opt_data {
@@ -1282,12 +1279,24 @@ enum opt_data {
Opt_data_ordered = GFS2_DATA_ORDERED,
};
+static const struct constant_table gfs2_param_data[] = {
+ {"writeback", Opt_data_writeback },
+ {"ordered", Opt_data_ordered },
+ {}
+};
+
enum opt_errors {
Opt_errors_withdraw = GFS2_ERRORS_WITHDRAW,
Opt_errors_panic = GFS2_ERRORS_PANIC,
};
-static const struct fs_parameter_spec gfs2_param_specs[] = {
+static const struct constant_table gfs2_param_errors[] = {
+ {"withdraw", Opt_errors_withdraw },
+ {"panic", Opt_errors_panic },
+ {}
+};
+
+static const struct fs_parameter_spec gfs2_fs_parameters[] = {
fsparam_string ("lockproto", Opt_lockproto),
fsparam_string ("locktable", Opt_locktable),
fsparam_string ("hostdata", Opt_hostdata),
@@ -1300,11 +1309,11 @@ static const struct fs_parameter_spec gfs2_param_specs[] = {
fsparam_flag ("upgrade", Opt_upgrade),
fsparam_flag_no("acl", Opt_acl),
fsparam_flag_no("suiddir", Opt_suiddir),
- fsparam_enum ("data", Opt_data),
+ fsparam_enum ("data", Opt_data, gfs2_param_data),
fsparam_flag ("meta", Opt_meta),
fsparam_flag_no("discard", Opt_discard),
fsparam_s32 ("commit", Opt_commit),
- fsparam_enum ("errors", Opt_errors),
+ fsparam_enum ("errors", Opt_errors, gfs2_param_errors),
fsparam_s32 ("statfs_quantum", Opt_statfs_quantum),
fsparam_s32 ("statfs_percent", Opt_statfs_percent),
fsparam_s32 ("quota_quantum", Opt_quota_quantum),
@@ -1312,27 +1321,11 @@ static const struct fs_parameter_spec gfs2_param_specs[] = {
fsparam_flag_no("rgrplvb", Opt_rgrplvb),
fsparam_flag_no("loccookie", Opt_loccookie),
/* quota can be a flag or an enum so it gets special treatment */
- __fsparam(fs_param_is_enum, "quota", Opt_quota, fs_param_neg_with_no|fs_param_v_optional),
- {}
-};
-
-static const struct fs_parameter_enum gfs2_param_enums[] = {
- { Opt_quota, "off", Opt_quota_off },
- { Opt_quota, "account", Opt_quota_account },
- { Opt_quota, "on", Opt_quota_on },
- { Opt_data, "writeback", Opt_data_writeback },
- { Opt_data, "ordered", Opt_data_ordered },
- { Opt_errors, "withdraw", Opt_errors_withdraw },
- { Opt_errors, "panic", Opt_errors_panic },
+ fsparam_flag_no("quota", Opt_quota_flag),
+ fsparam_enum("quota", Opt_quota, gfs2_param_quota),
{}
};
-static const struct fs_parameter_description gfs2_fs_parameters = {
- .name = "gfs2",
- .specs = gfs2_param_specs,
- .enums = gfs2_param_enums,
-};
-
/* Parse a single mount parameter */
static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
@@ -1340,7 +1333,7 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
struct fs_parse_result result;
int o;
- o = fs_parse(fc, &gfs2_fs_parameters, param, &result);
+ o = fs_parse(fc, gfs2_fs_parameters, param, &result);
if (o < 0)
return o;
@@ -1368,7 +1361,7 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_debug:
if (result.boolean && args->ar_errors == GFS2_ERRORS_PANIC)
- return invalf(fc, "gfs2: -o debug and -o errors=panic are mutually exclusive");
+ return invalfc(fc, "-o debug and -o errors=panic are mutually exclusive");
args->ar_debug = result.boolean;
break;
case Opt_upgrade:
@@ -1377,17 +1370,11 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_acl:
args->ar_posix_acl = result.boolean;
break;
+ case Opt_quota_flag:
+ args->ar_quota = result.negated ? GFS2_QUOTA_OFF : GFS2_QUOTA_ON;
+ break;
case Opt_quota:
- /* The quota option can be a flag or an enum. A non-zero int_32
- result means that we have an enum index. Otherwise we have
- to rely on the 'negated' flag to tell us whether 'quota' or
- 'noquota' was specified. */
- if (result.negated)
- args->ar_quota = GFS2_QUOTA_OFF;
- else if (result.int_32 > 0)
- args->ar_quota = opt_quota_values[result.int_32];
- else
- args->ar_quota = GFS2_QUOTA_ON;
+ args->ar_quota = result.int_32;
break;
case Opt_suiddir:
args->ar_suiddir = result.boolean;
@@ -1404,27 +1391,27 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_commit:
if (result.int_32 <= 0)
- return invalf(fc, "gfs2: commit mount option requires a positive numeric argument");
+ return invalfc(fc, "commit mount option requires a positive numeric argument");
args->ar_commit = result.int_32;
break;
case Opt_statfs_quantum:
if (result.int_32 < 0)
- return invalf(fc, "gfs2: statfs_quantum mount option requires a non-negative numeric argument");
+ return invalfc(fc, "statfs_quantum mount option requires a non-negative numeric argument");
args->ar_statfs_quantum = result.int_32;
break;
case Opt_quota_quantum:
if (result.int_32 <= 0)
- return invalf(fc, "gfs2: quota_quantum mount option requires a positive numeric argument");
+ return invalfc(fc, "quota_quantum mount option requires a positive numeric argument");
args->ar_quota_quantum = result.int_32;
break;
case Opt_statfs_percent:
if (result.int_32 < 0 || result.int_32 > 100)
- return invalf(fc, "gfs2: statfs_percent mount option requires a numeric argument between 0 and 100");
+ return invalfc(fc, "statfs_percent mount option requires a numeric argument between 0 and 100");
args->ar_statfs_percent = result.int_32;
break;
case Opt_errors:
if (args->ar_debug && result.uint_32 == GFS2_ERRORS_PANIC)
- return invalf(fc, "gfs2: -o debug and -o errors=panic are mutually exclusive");
+ return invalfc(fc, "-o debug and -o errors=panic are mutually exclusive");
args->ar_errors = result.uint_32;
break;
case Opt_barrier:
@@ -1437,7 +1424,7 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
args->ar_loccookie = result.boolean;
break;
default:
- return invalf(fc, "gfs2: invalid mount option: %s", param->key);
+ return invalfc(fc, "invalid mount option: %s", param->key);
}
return 0;
}
@@ -1463,27 +1450,27 @@ static int gfs2_reconfigure(struct fs_context *fc)
spin_unlock(&gt->gt_spin);
if (strcmp(newargs->ar_lockproto, oldargs->ar_lockproto)) {
- errorf(fc, "gfs2: reconfiguration of locking protocol not allowed");
+ errorfc(fc, "reconfiguration of locking protocol not allowed");
return -EINVAL;
}
if (strcmp(newargs->ar_locktable, oldargs->ar_locktable)) {
- errorf(fc, "gfs2: reconfiguration of lock table not allowed");
+ errorfc(fc, "reconfiguration of lock table not allowed");
return -EINVAL;
}
if (strcmp(newargs->ar_hostdata, oldargs->ar_hostdata)) {
- errorf(fc, "gfs2: reconfiguration of host data not allowed");
+ errorfc(fc, "reconfiguration of host data not allowed");
return -EINVAL;
}
if (newargs->ar_spectator != oldargs->ar_spectator) {
- errorf(fc, "gfs2: reconfiguration of spectator mode not allowed");
+ errorfc(fc, "reconfiguration of spectator mode not allowed");
return -EINVAL;
}
if (newargs->ar_localflocks != oldargs->ar_localflocks) {
- errorf(fc, "gfs2: reconfiguration of localflocks not allowed");
+ errorfc(fc, "reconfiguration of localflocks not allowed");
return -EINVAL;
}
if (newargs->ar_meta != oldargs->ar_meta) {
- errorf(fc, "gfs2: switching between gfs2 and gfs2meta not allowed");
+ errorfc(fc, "switching between gfs2 and gfs2meta not allowed");
return -EINVAL;
}
if (oldargs->ar_spectator)
@@ -1493,11 +1480,11 @@ static int gfs2_reconfigure(struct fs_context *fc)
if (fc->sb_flags & SB_RDONLY) {
error = gfs2_make_fs_ro(sdp);
if (error)
- errorf(fc, "gfs2: unable to remount read-only");
+ errorfc(fc, "unable to remount read-only");
} else {
error = gfs2_make_fs_rw(sdp);
if (error)
- errorf(fc, "gfs2: unable to remount read-write");
+ errorfc(fc, "unable to remount read-write");
}
}
sdp->sd_args = *newargs;
@@ -1642,7 +1629,7 @@ struct file_system_type gfs2_fs_type = {
.name = "gfs2",
.fs_flags = FS_REQUIRES_DEV,
.init_fs_context = gfs2_init_fs_context,
- .parameters = &gfs2_fs_parameters,
+ .parameters = gfs2_fs_parameters,
.kill_sb = gfs2_kill_sb,
.owner = THIS_MODULE,
};
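
The "quota" option above is exactly the case the reworked fs_validate_description() earlier in this diff tolerates: the same name registered twice, once via fsparam_flag_no() and once via fsparam_enum(), with the duplicate check skipping pairs whose is_flag() differs. Bare "quota"/"noquota" therefore arrives as Opt_quota_flag with result.negated set, while "quota=on|off|account" arrives as Opt_quota with the GFS2_QUOTA_* constant from gfs2_param_quota already stored in the result union (read back as result.int_32 in gfs2_parse_param()), so both -o quota and -o quota=account parse cleanly.
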
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 2466bb44a23c..e7bf91ec231c 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -36,16 +36,6 @@
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)
-#if BITS_PER_LONG == 32
-#define LBITMASK (0x55555555UL)
-#define LBITSKIP55 (0x55555555UL)
-#define LBITSKIP00 (0x00000000UL)
-#else
-#define LBITMASK (0x5555555555555555UL)
-#define LBITSKIP55 (0x5555555555555555UL)
-#define LBITSKIP00 (0x0000000000000000UL)
-#endif
-
/*
* These routines are used by the resource group routines (rgrp.c)
* to keep track of block allocation. Each block is represented by two
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 9d4227330de4..a685637a5b55 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -49,8 +49,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
if (blocks)
tr->tr_reserved += 6 + blocks;
if (revokes)
- tr->tr_reserved += gfs2_struct2blk(sdp, revokes,
- sizeof(u64));
+ tr->tr_reserved += gfs2_struct2blk(sdp, revokes);
INIT_LIST_HEAD(&tr->tr_databuf);
INIT_LIST_HEAD(&tr->tr_buf);
@@ -77,10 +76,10 @@ static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
test_bit(TR_TOUCHED, &tr->tr_flags));
- fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n",
+ fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
tr->tr_num_buf_new, tr->tr_num_buf_rm,
tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
- tr->tr_num_revoke);
+ tr->tr_num_revoke, tr->tr_num_revoke_rm);
}
void gfs2_trans_end(struct gfs2_sbd *sdp)
@@ -265,7 +264,7 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
if (bd->bd_gl)
gfs2_glock_remove_revoke(bd->bd_gl);
kmem_cache_free(gfs2_bufdata_cachep, bd);
- tr->tr_num_revoke--;
+ tr->tr_num_revoke_rm++;
if (--n == 0)
break;
}
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 6d0783e2e276..f71c384064c8 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -242,19 +242,35 @@ extern void hfs_mark_mdb_dirty(struct super_block *sb);
/*
* There are two time systems. Both are based on seconds since
* a particular time/date.
- * Unix: unsigned lil-endian since 00:00 GMT, Jan. 1, 1970
+ * Unix: signed little-endian since 00:00 GMT, Jan. 1, 1970
* mac: unsigned big-endian since 00:00 GMT, Jan. 1, 1904
*
+ * HFS implementations are highly inconsistent; this one matches the
+ * traditional behavior of 64-bit Linux, giving the most useful
+ * time range between 1970 and 2106, by treating any on-disk timestamp
+ * under HFS_UTC_OFFSET (Jan 1 1970) as a time between 2040 and 2106.
*/
-#define __hfs_u_to_mtime(sec) cpu_to_be32(sec + 2082844800U - sys_tz.tz_minuteswest * 60)
-#define __hfs_m_to_utime(sec) (be32_to_cpu(sec) - 2082844800U + sys_tz.tz_minuteswest * 60)
+#define HFS_UTC_OFFSET 2082844800U
+static inline time64_t __hfs_m_to_utime(__be32 mt)
+{
+ time64_t ut = (u32)(be32_to_cpu(mt) - HFS_UTC_OFFSET);
+
+ return ut + sys_tz.tz_minuteswest * 60;
+}
+
+static inline __be32 __hfs_u_to_mtime(time64_t ut)
+{
+ ut -= sys_tz.tz_minuteswest * 60;
+
+ return cpu_to_be32(lower_32_bits(ut) + HFS_UTC_OFFSET);
+}
#define HFS_I(inode) (container_of(inode, struct hfs_inode_info, vfs_inode))
#define HFS_SB(sb) ((struct hfs_sb_info *)(sb)->s_fs_info)
-#define hfs_m_to_utime(time) (struct timespec){ .tv_sec = __hfs_m_to_utime(time) }
-#define hfs_u_to_mtime(time) __hfs_u_to_mtime((time).tv_sec)
-#define hfs_mtime() __hfs_u_to_mtime(get_seconds())
+#define hfs_m_to_utime(time) (struct timespec64){ .tv_sec = __hfs_m_to_utime(time) }
+#define hfs_u_to_mtime(time) __hfs_u_to_mtime((time).tv_sec)
+#define hfs_mtime() __hfs_u_to_mtime(ktime_get_real_seconds())
static inline const char *hfs_mdb_name(struct super_block *sb)
{
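
The helpers above deliberately let the u32 subtraction wrap so the 1904-based on-disk field covers 1970-2106 rather than 1904-2040. A worked instance (raw value hypothetical):

    u32 mt = 1000000000;	/* on-disk seconds, below HFS_UTC_OFFSET */
    time64_t ut = (u32)(mt - HFS_UTC_OFFSET);	/* wraps to 3212122496 */
    /* 3212122496 s after the Unix epoch falls in late 2071, rather than
     * the pre-1970 date a signed interpretation would produce. */
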
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index da243c84e93b..2f224b98ee94 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -351,7 +351,7 @@ static int hfs_read_inode(struct inode *inode, void *data)
inode->i_mode &= ~hsb->s_file_umask;
inode->i_mode |= S_IFREG;
inode->i_ctime = inode->i_atime = inode->i_mtime =
- timespec_to_timespec64(hfs_m_to_utime(rec->file.MdDat));
+ hfs_m_to_utime(rec->file.MdDat);
inode->i_op = &hfs_file_inode_operations;
inode->i_fop = &hfs_file_operations;
inode->i_mapping->a_ops = &hfs_aops;
@@ -362,7 +362,7 @@ static int hfs_read_inode(struct inode *inode, void *data)
HFS_I(inode)->fs_blocks = 0;
inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
inode->i_ctime = inode->i_atime = inode->i_mtime =
- timespec_to_timespec64(hfs_m_to_utime(rec->dir.MdDat));
+ hfs_m_to_utime(rec->dir.MdDat);
inode->i_op = &hfs_dir_inode_operations;
inode->i_fop = &hfs_dir_operations;
break;
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index b8471bf05def..3b03fff68543 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -533,13 +533,31 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf,
void **data, int op, int op_flags);
int hfsplus_read_wrapper(struct super_block *sb);
-/* time macros */
-#define __hfsp_mt2ut(t) (be32_to_cpu(t) - 2082844800U)
-#define __hfsp_ut2mt(t) (cpu_to_be32(t + 2082844800U))
+/*
+ * time helpers: convert between 1904-base and 1970-base timestamps
+ *
+ * HFS+ implementations are highly inconsistent; this one matches the
+ * traditional behavior of 64-bit Linux, giving the most useful
+ * time range between 1970 and 2106, by treating any on-disk timestamp
+ * under HFSPLUS_UTC_OFFSET (Jan 1 1970) as a time between 2040 and 2106.
+ */
+#define HFSPLUS_UTC_OFFSET 2082844800U
+
+static inline time64_t __hfsp_mt2ut(__be32 mt)
+{
+ time64_t ut = (u32)(be32_to_cpu(mt) - HFSPLUS_UTC_OFFSET);
+
+ return ut;
+}
+
+static inline __be32 __hfsp_ut2mt(time64_t ut)
+{
+ return cpu_to_be32(lower_32_bits(ut) + HFSPLUS_UTC_OFFSET);
+}
/* compatibility */
-#define hfsp_mt2ut(t) (struct timespec){ .tv_sec = __hfsp_mt2ut(t) }
+#define hfsp_mt2ut(t) (struct timespec64){ .tv_sec = __hfsp_mt2ut(t) }
#define hfsp_ut2mt(t) __hfsp_ut2mt((t).tv_sec)
-#define hfsp_now2mt() __hfsp_ut2mt(get_seconds())
+#define hfsp_now2mt() __hfsp_ut2mt(ktime_get_real_seconds())
#endif
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index d131c8ea7eb6..94bd83b36644 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -504,9 +504,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
hfsplus_get_perms(inode, &folder->permissions, 1);
set_nlink(inode, 1);
inode->i_size = 2 + be32_to_cpu(folder->valence);
- inode->i_atime = timespec_to_timespec64(hfsp_mt2ut(folder->access_date));
- inode->i_mtime = timespec_to_timespec64(hfsp_mt2ut(folder->content_mod_date));
- inode->i_ctime = timespec_to_timespec64(hfsp_mt2ut(folder->attribute_mod_date));
+ inode->i_atime = hfsp_mt2ut(folder->access_date);
+ inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
+ inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
HFSPLUS_I(inode)->create_date = folder->create_date;
HFSPLUS_I(inode)->fs_blocks = 0;
if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
@@ -542,9 +542,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
init_special_inode(inode, inode->i_mode,
be32_to_cpu(file->permissions.dev));
}
- inode->i_atime = timespec_to_timespec64(hfsp_mt2ut(file->access_date));
- inode->i_mtime = timespec_to_timespec64(hfsp_mt2ut(file->content_mod_date));
- inode->i_ctime = timespec_to_timespec64(hfsp_mt2ut(file->attribute_mod_date));
+ inode->i_atime = hfsp_mt2ut(file->access_date);
+ inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
+ inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
HFSPLUS_I(inode)->create_date = file->create_date;
} else {
pr_err("bad catalog entry used to create inode\n");
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index f4295aa19350..69cb796f6270 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -37,16 +37,20 @@
* is on, and remove the appropriate bits from attr->ia_mode (attr is a
* "struct iattr *"). -BlaisorBlade
*/
+struct hostfs_timespec {
+ long long tv_sec;
+ long long tv_nsec;
+};
struct hostfs_iattr {
- unsigned int ia_valid;
- unsigned short ia_mode;
- uid_t ia_uid;
- gid_t ia_gid;
- loff_t ia_size;
- struct timespec ia_atime;
- struct timespec ia_mtime;
- struct timespec ia_ctime;
+ unsigned int ia_valid;
+ unsigned short ia_mode;
+ uid_t ia_uid;
+ gid_t ia_gid;
+ loff_t ia_size;
+ struct hostfs_timespec ia_atime;
+ struct hostfs_timespec ia_mtime;
+ struct hostfs_timespec ia_ctime;
};
struct hostfs_stat {
@@ -56,7 +60,7 @@ struct hostfs_stat {
unsigned int uid;
unsigned int gid;
unsigned long long size;
- struct timespec atime, mtime, ctime;
+ struct hostfs_timespec atime, mtime, ctime;
unsigned int blksize;
unsigned long long blocks;
unsigned int maj;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 5a7eb0c79839..e6b8c49076bb 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -549,9 +549,9 @@ static int read_name(struct inode *ino, char *name)
set_nlink(ino, st.nlink);
i_uid_write(ino, st.uid);
i_gid_write(ino, st.gid);
- ino->i_atime = timespec_to_timespec64(st.atime);
- ino->i_mtime = timespec_to_timespec64(st.mtime);
- ino->i_ctime = timespec_to_timespec64(st.ctime);
+ ino->i_atime = (struct timespec64){ st.atime.tv_sec, st.atime.tv_nsec };
+ ino->i_mtime = (struct timespec64){ st.mtime.tv_sec, st.mtime.tv_nsec };
+ ino->i_ctime = (struct timespec64){ st.ctime.tv_sec, st.ctime.tv_nsec };
ino->i_size = st.size;
ino->i_blocks = st.blocks;
return 0;
@@ -820,15 +820,18 @@ static int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_ATIME) {
attrs.ia_valid |= HOSTFS_ATTR_ATIME;
- attrs.ia_atime = timespec64_to_timespec(attr->ia_atime);
+ attrs.ia_atime = (struct hostfs_timespec)
+ { attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec };
}
if (attr->ia_valid & ATTR_MTIME) {
attrs.ia_valid |= HOSTFS_ATTR_MTIME;
- attrs.ia_mtime = timespec64_to_timespec(attr->ia_mtime);
+ attrs.ia_mtime = (struct hostfs_timespec)
+ { attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec };
}
if (attr->ia_valid & ATTR_CTIME) {
attrs.ia_valid |= HOSTFS_ATTR_CTIME;
- attrs.ia_ctime = timespec64_to_timespec(attr->ia_ctime);
+ attrs.ia_ctime = (struct hostfs_timespec)
+ { attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec };
}
if (attr->ia_valid & ATTR_ATIME_SET) {
attrs.ia_valid |= HOSTFS_ATTR_ATIME_SET;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a66e425884d1..aff8642f0c2e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -73,7 +73,7 @@ enum hugetlb_param {
Opt_uid,
};
-static const struct fs_parameter_spec hugetlb_param_specs[] = {
+static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
fsparam_u32 ("gid", Opt_gid),
fsparam_string("min_size", Opt_min_size),
fsparam_u32 ("mode", Opt_mode),
@@ -84,11 +84,6 @@ static const struct fs_parameter_spec hugetlb_param_specs[] = {
{}
};
-static const struct fs_parameter_description hugetlb_fs_parameters = {
- .name = "hugetlbfs",
- .specs = hugetlb_param_specs,
-};
-
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
struct inode *inode, pgoff_t index)
@@ -1171,7 +1166,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
unsigned long ps;
int opt;
- opt = fs_parse(fc, &hugetlb_fs_parameters, param, &result);
+ opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
if (opt < 0)
return opt;
@@ -1233,7 +1228,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
}
bad_val:
- return invalf(fc, "hugetlbfs: Bad value '%s' for mount option '%s'\n",
+ return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
param->string, param->key);
}
@@ -1358,7 +1353,7 @@ static int hugetlbfs_init_fs_context(struct fs_context *fc)
static struct file_system_type hugetlbfs_fs_type = {
.name = "hugetlbfs",
.init_fs_context = hugetlbfs_init_fs_context,
- .parameters = &hugetlb_fs_parameters,
+ .parameters = hugetlb_fs_parameters,
.kill_sb = kill_litter_super,
};
diff --git a/fs/inode.c b/fs/inode.c
index 96d62d97694e..7d57068b6b7a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -12,6 +12,7 @@
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/memblock.h>
+#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
@@ -1598,25 +1599,31 @@ retry:
}
EXPORT_SYMBOL(iput);
+#ifdef CONFIG_BLOCK
/**
* bmap - find a block number in a file
- * @inode: inode of file
- * @block: block to find
- *
- * Returns the block number on the device holding the inode that
- * is the disk block number for the block of the file requested.
- * That is, asked for block 4 of inode 1 the function will return the
- * disk block relative to the disk start that holds that block of the
- * file.
+ * @inode: inode owning the block number being requested
+ * @block: pointer containing the block to find
+ *
+ * Replaces the value in *block with the block number on the device holding
+ * the inode that corresponds to the requested block number in the file.
+ * That is, asked for block 4 of inode 1, the function will replace the
+ * 4 in *block with the disk block, relative to the start of the disk, that
+ * holds that block of the file.
+ *
+ * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
+ * hole, returns 0 and *block is also set to 0.
*/
-sector_t bmap(struct inode *inode, sector_t block)
+int bmap(struct inode *inode, sector_t *block)
{
- sector_t res = 0;
- if (inode->i_mapping->a_ops->bmap)
- res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
- return res;
+ if (!inode->i_mapping->a_ops->bmap)
+ return -EINVAL;
+
+ *block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
+ return 0;
}
EXPORT_SYMBOL(bmap);
+#endif
/*
* With relative atime, only update atime if the previous atime is
@@ -1682,12 +1689,9 @@ EXPORT_SYMBOL(generic_update_time);
*/
static int update_time(struct inode *inode, struct timespec64 *time, int flags)
{
- int (*update_time)(struct inode *, struct timespec64 *, int);
-
- update_time = inode->i_op->update_time ? inode->i_op->update_time :
- generic_update_time;
-
- return update_time(inode, time, flags);
+ if (inode->i_op->update_time)
+ return inode->i_op->update_time(inode, time, flags);
+ return generic_update_time(inode, time, flags);
}
/**
@@ -2153,30 +2157,6 @@ void inode_nohighmem(struct inode *inode)
EXPORT_SYMBOL(inode_nohighmem);
/**
- * timespec64_trunc - Truncate timespec64 to a granularity
- * @t: Timespec64
- * @gran: Granularity in ns.
- *
- * Truncate a timespec64 to a granularity. Always rounds down. gran must
- * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
- */
-struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran)
-{
- /* Avoid division in the common cases 1 ns and 1 s. */
- if (gran == 1) {
- /* nothing */
- } else if (gran == NSEC_PER_SEC) {
- t.tv_nsec = 0;
- } else if (gran > 1 && gran < NSEC_PER_SEC) {
- t.tv_nsec -= t.tv_nsec % gran;
- } else {
- WARN(1, "illegal file time granularity: %u", gran);
- }
- return t;
-}
-EXPORT_SYMBOL(timespec64_trunc);
-
-/**
* timestamp_truncate - Truncate timespec to a granularity
* @t: Timespec
* @inode: inode being updated
@@ -2252,7 +2232,7 @@ int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
!capable(CAP_LINUX_IMMUTABLE))
return -EPERM;
- return 0;
+ return fscrypt_prepare_setflags(inode, oldflags, flags);
}
EXPORT_SYMBOL(vfs_ioc_setflags_prepare);
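
Callers elsewhere in the tree have to adapt to the new bmap() calling convention above. A minimal sketch of an updated caller (hypothetical helper):

    static sector_t example_first_disk_block(struct inode *inode)
    {
    	sector_t block = 0;		/* logical block 0 of the file */

    	if (bmap(inode, &block))	/* -EINVAL: no ->bmap method */
    		return 0;
    	return block;			/* 0 if logical block 0 is a hole */
    }
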
diff --git a/fs/internal.h b/fs/internal.h
index e3fa69544b66..f3f280b952a3 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -124,6 +124,8 @@ extern struct file *do_filp_open(int dfd, struct filename *pathname,
const struct open_flags *op);
extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
const char *, const struct open_flags *);
+extern struct open_how build_open_how(int flags, umode_t mode);
+extern int build_open_flags(const struct open_how *how, struct open_flags *op);
long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
long do_faccessat(int dfd, const char __user *filename, int mode);
@@ -180,11 +182,11 @@ extern void mnt_pin_kill(struct mount *m);
*/
extern const struct dentry_operations ns_dentry_operations;
-/*
- * fs/ioctl.c
- */
-extern int do_vfs_ioctl(struct file *file, unsigned int fd, unsigned int cmd,
- unsigned long arg);
-
/* direct-io.c: */
int sb_init_dio_done_wq(struct super_block *sb);
+
+/*
+ * fs/stat.c:
+ */
+unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags);
+int cp_statx(const struct kstat *stat, struct statx __user *buffer);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 5147d2213b01..0a5ab1a8f69a 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
+#include <linux/fs_struct.h>
#include "io-wq.h"
@@ -56,8 +57,10 @@ struct io_worker {
struct rcu_head rcu;
struct mm_struct *mm;
- const struct cred *creds;
+ const struct cred *cur_creds;
+ const struct cred *saved_creds;
struct files_struct *restore_files;
+ struct fs_struct *restore_fs;
};
#if BITS_PER_LONG == 64
@@ -109,10 +112,10 @@ struct io_wq {
struct task_struct *manager;
struct user_struct *user;
- const struct cred *creds;
- struct mm_struct *mm;
refcount_t refs;
struct completion done;
+
+ refcount_t use_refs;
};
static bool io_worker_get(struct io_worker *worker)
@@ -135,9 +138,9 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
bool dropped_lock = false;
- if (worker->creds) {
- revert_creds(worker->creds);
- worker->creds = NULL;
+ if (worker->saved_creds) {
+ revert_creds(worker->saved_creds);
+ worker->cur_creds = worker->saved_creds = NULL;
}
if (current->files != worker->restore_files) {
@@ -150,6 +153,9 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
task_unlock(current);
}
+ if (current->fs != worker->restore_fs)
+ current->fs = worker->restore_fs;
+
/*
* If we have an active mm, we need to drop the wq lock before unusing
* it. If we do, return true and let the caller retry the idle loop.
@@ -310,6 +316,7 @@ static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
worker->restore_files = current->files;
+ worker->restore_fs = current->fs;
io_wqe_inc_running(wqe, worker);
}
@@ -396,6 +403,43 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
return NULL;
}
+static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
+{
+ if (worker->mm) {
+ unuse_mm(worker->mm);
+ mmput(worker->mm);
+ worker->mm = NULL;
+ }
+ if (!work->mm) {
+ set_fs(KERNEL_DS);
+ return;
+ }
+ if (mmget_not_zero(work->mm)) {
+ use_mm(work->mm);
+ if (!worker->mm)
+ set_fs(USER_DS);
+ worker->mm = work->mm;
+ /* hang on to this mm */
+ work->mm = NULL;
+ return;
+ }
+
+ /* failed grabbing mm, ensure work gets cancelled */
+ work->flags |= IO_WQ_WORK_CANCEL;
+}
+
+static void io_wq_switch_creds(struct io_worker *worker,
+ struct io_wq_work *work)
+{
+ const struct cred *old_creds = override_creds(work->creds);
+
+ worker->cur_creds = work->creds;
+ if (worker->saved_creds)
+ put_cred(old_creds); /* creds set by previous switch */
+ else
+ worker->saved_creds = old_creds;
+}
+
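A note on the creds bookkeeping above: override_creds() returns the credentials that were active before the switch. The first switch a worker performs stashes that value in saved_creds so __io_worker_unuse() can revert to the worker's own identity; every later switch only has to drop the creds installed by the previous one. An illustrative sequence (not code from this patch):

	old = override_creds(work_a->creds);	/* old = worker's own creds */
	worker->saved_creds = old;		/* kept for the final revert */

	old = override_creds(work_b->creds);	/* old = work_a->creds */
	put_cred(old);				/* drop the intermediate creds */

	revert_creds(worker->saved_creds);	/* back to the worker's own creds */
	worker->cur_creds = worker->saved_creds = NULL;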
static void io_worker_handle_work(struct io_worker *worker)
__releases(wqe->lock)
{
@@ -438,24 +482,21 @@ next:
if (work->flags & IO_WQ_WORK_CB)
work->func(&work);
- if ((work->flags & IO_WQ_WORK_NEEDS_FILES) &&
- current->files != work->files) {
+ if (work->files && current->files != work->files) {
task_lock(current);
current->files = work->files;
task_unlock(current);
}
- if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
- wq->mm) {
- if (mmget_not_zero(wq->mm)) {
- use_mm(wq->mm);
- set_fs(USER_DS);
- worker->mm = wq->mm;
- } else {
- work->flags |= IO_WQ_WORK_CANCEL;
- }
- }
- if (!worker->creds)
- worker->creds = override_creds(wq->creds);
+ if (work->fs && current->fs != work->fs)
+ current->fs = work->fs;
+ if (work->mm != worker->mm)
+ io_wq_switch_mm(worker, work);
+ if (worker->cur_creds != work->creds)
+ io_wq_switch_creds(worker, work);
+ /*
+ * OK to set IO_WQ_WORK_CANCEL even for uncancellable work;
+ * the worker function will do the right thing.
+ */
if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
work->flags |= IO_WQ_WORK_CANCEL;
if (worker->mm)
@@ -658,11 +699,16 @@ static int io_wq_manager(void *data)
/* create fixed workers */
refcount_set(&wq->refs, workers_to_create);
for_each_node(node) {
+ if (!node_online(node))
+ continue;
if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
goto err;
workers_to_create--;
}
+ while (workers_to_create--)
+ refcount_dec(&wq->refs);
+
complete(&wq->done);
while (!kthread_should_stop()) {
@@ -670,6 +716,9 @@ static int io_wq_manager(void *data)
struct io_wqe *wqe = wq->wqes[node];
bool fork_worker[2] = { false, false };
+ if (!node_online(node))
+ continue;
+
spin_lock_irq(&wqe->lock);
if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
fork_worker[IO_WQ_ACCT_BOUND] = true;
@@ -720,6 +769,7 @@ static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
+ int work_flags;
unsigned long flags;
/*
@@ -734,12 +784,14 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
return;
}
+ work_flags = work->flags;
spin_lock_irqsave(&wqe->lock, flags);
wq_list_add_tail(&work->list, &wqe->work_list);
wqe->flags &= ~IO_WQE_FLAG_STALLED;
spin_unlock_irqrestore(&wqe->lock, flags);
- if (!atomic_read(&acct->nr_running))
+ if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
+ !atomic_read(&acct->nr_running))
io_wqe_wake_worker(wqe, acct);
}
@@ -785,7 +837,9 @@ static bool io_wq_for_each_worker(struct io_wqe *wqe,
list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
if (io_worker_get(worker)) {
- ret = func(worker, data);
+ /* no task if node is/was offline */
+ if (worker->task)
+ ret = func(worker, data);
io_worker_release(worker);
if (ret)
break;
@@ -828,6 +882,7 @@ static bool io_work_cancel(struct io_worker *worker, void *cancel_data)
*/
spin_lock_irqsave(&worker->lock, flags);
if (worker->cur_work &&
+ !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
data->cancel(worker->cur_work, data->caller_data)) {
send_sig(SIGINT, worker->task, 1);
ret = true;
@@ -892,17 +947,20 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
return ret;
}
+struct work_match {
+ bool (*fn)(struct io_wq_work *, void *data);
+ void *data;
+};
+
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
- struct io_wq_work *work = data;
+ struct work_match *match = data;
unsigned long flags;
bool ret = false;
- if (worker->cur_work != work)
- return false;
-
spin_lock_irqsave(&worker->lock, flags);
- if (worker->cur_work == work) {
+ if (match->fn(worker->cur_work, match->data) &&
+ !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL)) {
send_sig(SIGINT, worker->task, 1);
ret = true;
}
@@ -912,15 +970,13 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
}
static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
- struct io_wq_work *cwork)
+ struct work_match *match)
{
struct io_wq_work_node *node, *prev;
struct io_wq_work *work;
unsigned long flags;
bool found = false;
- cwork->flags |= IO_WQ_WORK_CANCEL;
-
/*
* First check pending list, if we're lucky we can just remove it
* from there. CANCEL_OK means that the work is returned as-new,
@@ -930,7 +986,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
wq_list_for_each(node, prev, &wqe->work_list) {
work = container_of(node, struct io_wq_work, list);
- if (work == cwork) {
+ if (match->fn(work, match->data)) {
wq_node_del(&wqe->work_list, node, prev);
found = true;
break;
@@ -951,20 +1007,60 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
* completion will run normally in this case.
*/
rcu_read_lock();
- found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, cwork);
+ found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
rcu_read_unlock();
return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
}
+static bool io_wq_work_match(struct io_wq_work *work, void *data)
+{
+ return work == data;
+}
+
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
+ struct work_match match = {
+ .fn = io_wq_work_match,
+ .data = cwork
+ };
+ enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+ int node;
+
+ cwork->flags |= IO_WQ_WORK_CANCEL;
+
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
+
+ ret = io_wqe_cancel_work(wqe, &match);
+ if (ret != IO_WQ_CANCEL_NOTFOUND)
+ break;
+ }
+
+ return ret;
+}
+
+static bool io_wq_pid_match(struct io_wq_work *work, void *data)
+{
+ pid_t pid = (pid_t) (unsigned long) data;
+
+ if (work)
+ return work->task_pid == pid;
+ return false;
+}
+
+enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
+{
+ struct work_match match = {
+ .fn = io_wq_pid_match,
+ .data = (void *) (unsigned long) pid
+ };
enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
int node;
for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
- ret = io_wqe_cancel_work(wqe, cwork);
+ ret = io_wqe_cancel_work(wqe, &match);
if (ret != IO_WQ_CANCEL_NOTFOUND)
break;
}
@@ -998,6 +1094,8 @@ void io_wq_flush(struct io_wq *wq)
for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
+ if (!node_online(node))
+ continue;
init_completion(&data.done);
INIT_IO_WORK(&data.work, io_wq_flush_func);
data.work.flags |= IO_WQ_WORK_INTERNAL;
@@ -1026,16 +1124,18 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
/* caller must already hold a reference to this */
wq->user = data->user;
- wq->creds = data->creds;
for_each_node(node) {
struct io_wqe *wqe;
+ int alloc_node = node;
- wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, node);
+ if (!node_online(alloc_node))
+ alloc_node = NUMA_NO_NODE;
+ wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
if (!wqe)
goto err;
wq->wqes[node] = wqe;
- wqe->node = node;
+ wqe->node = alloc_node;
wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
if (wq->user) {
@@ -1043,7 +1143,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
task_rlimit(current, RLIMIT_NPROC);
}
atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
- wqe->node = node;
wqe->wq = wq;
spin_lock_init(&wqe->lock);
INIT_WQ_LIST(&wqe->work_list);
@@ -1053,9 +1152,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
init_completion(&wq->done);
- /* caller must have already done mmgrab() on this mm */
- wq->mm = data->mm;
-
wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
if (!IS_ERR(wq->manager)) {
wake_up_process(wq->manager);
@@ -1064,6 +1160,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
ret = -ENOMEM;
goto err;
}
+ refcount_set(&wq->use_refs, 1);
reinit_completion(&wq->done);
return wq;
}
@@ -1078,13 +1175,21 @@ err:
return ERR_PTR(ret);
}
+bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
+{
+ if (data->get_work != wq->get_work || data->put_work != wq->put_work)
+ return false;
+
+ return refcount_inc_not_zero(&wq->use_refs);
+}
+
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
wake_up_process(worker->task);
return false;
}
-void io_wq_destroy(struct io_wq *wq)
+static void __io_wq_destroy(struct io_wq *wq)
{
int node;
@@ -1104,3 +1209,9 @@ void io_wq_destroy(struct io_wq *wq)
kfree(wq->wqes);
kfree(wq);
}
+
+void io_wq_destroy(struct io_wq *wq)
+{
+ if (refcount_dec_and_test(&wq->use_refs))
+ __io_wq_destroy(wq);
+}
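The use_refs counter effectively turns io_wq_destroy() into a put: the backend is torn down only when the last sharer drops its reference, and io_wq_get() refuses to share a wq whose get_work/put_work callbacks differ. A rough caller-side sketch (in this series the sharing decision is driven by ring setup, e.g. an attach-style flag; the variable names here are hypothetical):

	if (attach_wq && io_wq_get(attach_wq, &data))
		ctx->io_wq = attach_wq;		/* shared; use_refs bumped */
	else
		ctx->io_wq = io_wq_create(concurrency, &data);

	/* ... and on teardown: */
	io_wq_destroy(ctx->io_wq);	/* frees only on the final put */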
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 3f5e356de980..ccc7d84af57d 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -7,11 +7,11 @@ enum {
IO_WQ_WORK_CANCEL = 1,
IO_WQ_WORK_HAS_MM = 2,
IO_WQ_WORK_HASHED = 4,
- IO_WQ_WORK_NEEDS_USER = 8,
- IO_WQ_WORK_NEEDS_FILES = 16,
IO_WQ_WORK_UNBOUND = 32,
IO_WQ_WORK_INTERNAL = 64,
IO_WQ_WORK_CB = 128,
+ IO_WQ_WORK_NO_CANCEL = 256,
+ IO_WQ_WORK_CONCURRENT = 512,
IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */
};
@@ -72,30 +72,36 @@ struct io_wq_work {
};
void (*func)(struct io_wq_work **);
struct files_struct *files;
+ struct mm_struct *mm;
+ const struct cred *creds;
+ struct fs_struct *fs;
unsigned flags;
+ pid_t task_pid;
};
#define INIT_IO_WORK(work, _func) \
do { \
(work)->list.next = NULL; \
(work)->func = _func; \
- (work)->flags = 0; \
(work)->files = NULL; \
+ (work)->mm = NULL; \
+ (work)->creds = NULL; \
+ (work)->fs = NULL; \
+ (work)->flags = 0; \
} while (0) \
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);
struct io_wq_data {
- struct mm_struct *mm;
struct user_struct *user;
- const struct cred *creds;
get_work_fn *get_work;
put_work_fn *put_work;
};
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
+bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
@@ -104,6 +110,7 @@ void io_wq_flush(struct io_wq *wq);
void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
+enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
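Taken together, the header changes move execution context from the wq (the old io_wq_data mm/creds) into each work item. A minimal sketch of a queueing site under the new model, with my_work_fn as a hypothetical callback:

static void my_work_fn(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;

	/* runs on a worker thread with work->mm/creds/fs installed */
}

static void queue_it(struct io_wq *wq, struct io_wq_work *work)
{
	INIT_IO_WORK(work, my_work_fn);
	work->creds = get_current_cred();	/* worker switches to these */
	work->task_pid = task_pid_vnr(current);	/* enables io_wq_cancel_pid() */
	io_wq_enqueue(wq, work);
}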
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 187dd94fd6b1..5a826017ebb8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -46,6 +46,7 @@
#include <linux/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
+#include <linux/bits.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
@@ -70,6 +71,11 @@
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
+#include <linux/namei.h>
+#include <linux/fsnotify.h>
+#include <linux/fadvise.h>
+#include <linux/eventpoll.h>
+#include <linux/fs_struct.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
@@ -177,6 +183,21 @@ struct fixed_file_table {
struct file **files;
};
+enum {
+ FFD_F_ATOMIC,
+};
+
+struct fixed_file_data {
+ struct fixed_file_table *table;
+ struct io_ring_ctx *ctx;
+
+ struct percpu_ref refs;
+ struct llist_head put_llist;
+ unsigned long state;
+ struct work_struct ref_work;
+ struct completion done;
+};
+
struct io_ring_ctx {
struct {
struct percpu_ref refs;
@@ -184,10 +205,11 @@ struct io_ring_ctx {
struct {
unsigned int flags;
- bool compat;
- bool account_mem;
- bool cq_overflow_flushed;
- bool drain_next;
+ unsigned int compat: 1;
+ unsigned int account_mem: 1;
+ unsigned int cq_overflow_flushed: 1;
+ unsigned int drain_next: 1;
+ unsigned int eventfd_async: 1;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -207,13 +229,14 @@ struct io_ring_ctx {
unsigned sq_thread_idle;
unsigned cached_sq_dropped;
atomic_t cached_cq_overflow;
- struct io_uring_sqe *sq_sqes;
+ unsigned long sq_check_overflow;
struct list_head defer_list;
struct list_head timeout_list;
struct list_head cq_overflow_list;
wait_queue_head_t inflight_wait;
+ struct io_uring_sqe *sq_sqes;
} ____cacheline_aligned_in_smp;
struct io_rings *rings;
@@ -229,8 +252,10 @@ struct io_ring_ctx {
* readers must ensure that ->refs is alive as long as the file* is
* used. Only updated through io_uring_register(2).
*/
- struct fixed_file_table *file_table;
+ struct fixed_file_data *file_data;
unsigned nr_user_files;
+ int ring_fd;
+ struct file *ring_file;
/* if used, fixed mapped user buffers */
unsigned nr_user_bufs;
@@ -250,11 +275,14 @@ struct io_ring_ctx {
struct socket *ring_sock;
#endif
+ struct idr personality_idr;
+
struct {
unsigned cached_cq_tail;
unsigned cq_entries;
unsigned cq_mask;
atomic_t cq_timeouts;
+ unsigned long cq_check_overflow;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
@@ -267,7 +295,8 @@ struct io_ring_ctx {
struct {
spinlock_t completion_lock;
- bool poll_multi_file;
+ struct llist_head poll_llist;
+
/*
* ->poll_list is protected by the ctx->uring_lock for
* io_uring instances that don't use IORING_SETUP_SQPOLL.
@@ -277,6 +306,7 @@ struct io_ring_ctx {
struct list_head poll_list;
struct hlist_head *cancel_hash;
unsigned cancel_hash_bits;
+ bool poll_multi_file;
spinlock_t inflight_lock;
struct list_head inflight_list;
@@ -299,6 +329,12 @@ struct io_poll_iocb {
struct wait_queue_entry wait;
};
+struct io_close {
+ struct file *file;
+ struct file *put_file;
+ int fd;
+};
+
struct io_timeout_data {
struct io_kiocb *req;
struct hrtimer timer;
@@ -319,6 +355,7 @@ struct io_sync {
loff_t len;
loff_t off;
int flags;
+ int mode;
};
struct io_cancel {
@@ -348,8 +385,52 @@ struct io_connect {
struct io_sr_msg {
struct file *file;
- struct user_msghdr __user *msg;
+ union {
+ struct user_msghdr __user *msg;
+ void __user *buf;
+ };
int msg_flags;
+ size_t len;
+};
+
+struct io_open {
+ struct file *file;
+ int dfd;
+ union {
+ unsigned mask;
+ };
+ struct filename *filename;
+ struct statx __user *buffer;
+ struct open_how how;
+};
+
+struct io_files_update {
+ struct file *file;
+ u64 arg;
+ u32 nr_args;
+ u32 offset;
+};
+
+struct io_fadvise {
+ struct file *file;
+ u64 offset;
+ u32 len;
+ u32 advice;
+};
+
+struct io_madvise {
+ struct file *file;
+ u64 addr;
+ u32 len;
+ u32 advice;
+};
+
+struct io_epoll {
+ struct file *file;
+ int epfd;
+ int op;
+ int fd;
+ struct epoll_event event;
};
struct io_async_connect {
@@ -361,6 +442,7 @@ struct io_async_msghdr {
struct iovec *iov;
struct sockaddr __user *uaddr;
struct msghdr msg;
+ struct sockaddr_storage addr;
};
struct io_async_rw {
@@ -379,6 +461,71 @@ struct io_async_ctx {
};
};
+enum {
+ REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
+ REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
+ REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
+ REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
+ REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
+
+ REQ_F_LINK_NEXT_BIT,
+ REQ_F_FAIL_LINK_BIT,
+ REQ_F_INFLIGHT_BIT,
+ REQ_F_CUR_POS_BIT,
+ REQ_F_NOWAIT_BIT,
+ REQ_F_IOPOLL_COMPLETED_BIT,
+ REQ_F_LINK_TIMEOUT_BIT,
+ REQ_F_TIMEOUT_BIT,
+ REQ_F_ISREG_BIT,
+ REQ_F_MUST_PUNT_BIT,
+ REQ_F_TIMEOUT_NOSEQ_BIT,
+ REQ_F_COMP_LOCKED_BIT,
+ REQ_F_NEED_CLEANUP_BIT,
+ REQ_F_OVERFLOW_BIT,
+};
+
+enum {
+ /* ctx owns file */
+ REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
+ /* drain existing IO first */
+ REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
+ /* linked sqes */
+ REQ_F_LINK = BIT(REQ_F_LINK_BIT),
+ /* doesn't sever on completion < 0 */
+ REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
+ /* IOSQE_ASYNC */
+ REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
+
+ /* already grabbed next link */
+ REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
+ /* fail rest of links */
+ REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
+ /* on inflight list */
+ REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
+ /* read/write uses file position */
+ REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
+ /* must not punt to workers */
+ REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
+ /* polled IO has completed */
+ REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT),
+ /* has linked timeout */
+ REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
+ /* timeout request */
+ REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT),
+ /* regular file */
+ REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
+ /* must be punted even for NONBLOCK */
+ REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT),
+ /* no timeout sequence */
+ REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
+ /* completion under lock */
+ REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
+ /* needs cleanup */
+ REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
+ /* in overflow list */
+ REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT),
+};
+
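The first five REQ_F_*_BIT values are defined from the IOSQE_* bit numbers, so the per-sqe flags land on the same positions in req->flags. Assuming that aliasing (it is what the enum above encodes), validated submission flags can be merged with one mask instead of per-flag tests:

	/* BIT(n) is 1UL << n, so e.g. REQ_F_LINK == IOSQE_IO_LINK */
	req->flags |= sqe_flags & (IOSQE_FIXED_FILE | IOSQE_IO_DRAIN |
				   IOSQE_IO_LINK | IOSQE_IO_HARDLINK |
				   IOSQE_ASYNC);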
/*
* NOTE! Each of the iocb union members has the file pointer
* as the first entry in their struct definition. So you can
@@ -396,12 +543,19 @@ struct io_kiocb {
struct io_timeout timeout;
struct io_connect connect;
struct io_sr_msg sr_msg;
+ struct io_open open;
+ struct io_close close;
+ struct io_files_update files_update;
+ struct io_fadvise fadvise;
+ struct io_madvise madvise;
+ struct io_epoll epoll;
};
struct io_async_ctx *io;
- struct file *ring_file;
- int ring_fd;
- bool has_user;
+ /*
+ * llist_node is only used for poll deferred completions
+ */
+ struct llist_node llist_node;
bool in_async;
bool needs_fixed_file;
u8 opcode;
@@ -414,23 +568,6 @@ struct io_kiocb {
struct list_head link_list;
unsigned int flags;
refcount_t refs;
-#define REQ_F_NOWAIT 1 /* must not punt to workers */
-#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
-#define REQ_F_FIXED_FILE 4 /* ctx owns file */
-#define REQ_F_LINK_NEXT 8 /* already grabbed next link */
-#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
-#define REQ_F_IO_DRAINED 32 /* drain done */
-#define REQ_F_LINK 64 /* linked sqes */
-#define REQ_F_LINK_TIMEOUT 128 /* has linked timeout */
-#define REQ_F_FAIL_LINK 256 /* fail rest of links */
-#define REQ_F_DRAIN_LINK 512 /* link should be fully drained */
-#define REQ_F_TIMEOUT 1024 /* timeout request */
-#define REQ_F_ISREG 2048 /* regular file */
-#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
-#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
-#define REQ_F_INFLIGHT 16384 /* on inflight list */
-#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
-#define REQ_F_HARDLINK 65536 /* doesn't sever on completion < 0 */
u64 user_data;
u32 result;
u32 sequence;
@@ -450,8 +587,7 @@ struct io_submit_state {
* io_kiocb alloc cache
*/
void *reqs[IO_IOPOLL_BATCH];
- unsigned int free_reqs;
- unsigned int cur_req;
+ unsigned int free_reqs;
/*
* File reference cache
@@ -463,14 +599,171 @@ struct io_submit_state {
unsigned int ios_left;
};
+struct io_op_def {
+ /* needs req->io allocated for deferral/async */
+ unsigned async_ctx : 1;
+ /* needs current->mm setup, does mm access */
+ unsigned needs_mm : 1;
+ /* needs req->file assigned */
+ unsigned needs_file : 1;
+ /* needs req->file assigned IFF fd is >= 0 */
+ unsigned fd_non_neg : 1;
+ /* hash wq insertion if file is a regular file */
+ unsigned hash_reg_file : 1;
+ /* unbound wq insertion if file is a non-regular file */
+ unsigned unbound_nonreg_file : 1;
+ /* opcode is not supported by this kernel */
+ unsigned not_supported : 1;
+ /* needs file table */
+ unsigned file_table : 1;
+ /* needs ->fs */
+ unsigned needs_fs : 1;
+};
+
+static const struct io_op_def io_op_defs[] = {
+ [IORING_OP_NOP] = {},
+ [IORING_OP_READV] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_WRITEV] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ .needs_file = 1,
+ .hash_reg_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_FSYNC] = {
+ .needs_file = 1,
+ },
+ [IORING_OP_READ_FIXED] = {
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_WRITE_FIXED] = {
+ .needs_file = 1,
+ .hash_reg_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_POLL_ADD] = {
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_POLL_REMOVE] = {},
+ [IORING_OP_SYNC_FILE_RANGE] = {
+ .needs_file = 1,
+ },
+ [IORING_OP_SENDMSG] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ .needs_fs = 1,
+ },
+ [IORING_OP_RECVMSG] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ .needs_fs = 1,
+ },
+ [IORING_OP_TIMEOUT] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ },
+ [IORING_OP_TIMEOUT_REMOVE] = {},
+ [IORING_OP_ACCEPT] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ .file_table = 1,
+ },
+ [IORING_OP_ASYNC_CANCEL] = {},
+ [IORING_OP_LINK_TIMEOUT] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ },
+ [IORING_OP_CONNECT] = {
+ .async_ctx = 1,
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_FALLOCATE] = {
+ .needs_file = 1,
+ },
+ [IORING_OP_OPENAT] = {
+ .needs_file = 1,
+ .fd_non_neg = 1,
+ .file_table = 1,
+ .needs_fs = 1,
+ },
+ [IORING_OP_CLOSE] = {
+ .needs_file = 1,
+ .file_table = 1,
+ },
+ [IORING_OP_FILES_UPDATE] = {
+ .needs_mm = 1,
+ .file_table = 1,
+ },
+ [IORING_OP_STATX] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .fd_non_neg = 1,
+ .needs_fs = 1,
+ },
+ [IORING_OP_READ] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_WRITE] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_FADVISE] = {
+ .needs_file = 1,
+ },
+ [IORING_OP_MADVISE] = {
+ .needs_mm = 1,
+ },
+ [IORING_OP_SEND] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_RECV] = {
+ .needs_mm = 1,
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+ [IORING_OP_OPENAT2] = {
+ .needs_file = 1,
+ .fd_non_neg = 1,
+ .file_table = 1,
+ .needs_fs = 1,
+ },
+ [IORING_OP_EPOLL_CTL] = {
+ .unbound_nonreg_file = 1,
+ .file_table = 1,
+ },
+};
+
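This table replaces the per-opcode switch statements that used to be scattered through the submission path; call sites simply index it by req->opcode. A sketch of the kind of table-driven check that results (compare io_prep_async_work() further down):

	const struct io_op_def *def = &io_op_defs[req->opcode];

	if (def->not_supported)
		return -EOPNOTSUPP;		/* opcode not built in */
	if (def->needs_file && !req->file)
		return -EBADF;
	if (def->async_ctx && io_alloc_async_ctx(req))
		return -ENOMEM;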
static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
-static void __io_free_req(struct io_kiocb *req);
static void io_put_req(struct io_kiocb *req);
-static void io_double_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
+static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+ struct io_uring_files_update *ip,
+ unsigned nr_args);
+static int io_grab_files(struct io_kiocb *req);
+static void io_ring_file_ref_flush(struct fixed_file_data *data);
+static void io_cleanup_req(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -537,9 +830,11 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->cq_overflow_list);
init_completion(&ctx->completions[0]);
init_completion(&ctx->completions[1]);
+ idr_init(&ctx->personality_idr);
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait);
spin_lock_init(&ctx->completion_lock);
+ init_llist_head(&ctx->poll_llist);
INIT_LIST_HEAD(&ctx->poll_list);
INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list);
@@ -566,7 +861,7 @@ static inline bool __req_need_defer(struct io_kiocb *req)
static inline bool req_need_defer(struct io_kiocb *req)
{
- if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
+ if (unlikely(req->flags & REQ_F_IO_DRAIN))
return __req_need_defer(req);
return false;
@@ -606,53 +901,75 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
- if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
- /* order cqe stores with ring update */
- smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
+ /* order cqe stores with ring update */
+ smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
+
+ if (wq_has_sleeper(&ctx->cq_wait)) {
+ wake_up_interruptible(&ctx->cq_wait);
+ kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+ }
+}
- if (wq_has_sleeper(&ctx->cq_wait)) {
- wake_up_interruptible(&ctx->cq_wait);
- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+static inline void io_req_work_grab_env(struct io_kiocb *req,
+ const struct io_op_def *def)
+{
+ if (!req->work.mm && def->needs_mm) {
+ mmgrab(current->mm);
+ req->work.mm = current->mm;
+ }
+ if (!req->work.creds)
+ req->work.creds = get_current_cred();
+ if (!req->work.fs && def->needs_fs) {
+ spin_lock(&current->fs->lock);
+ if (!current->fs->in_exec) {
+ req->work.fs = current->fs;
+ req->work.fs->users++;
+ } else {
+ req->work.flags |= IO_WQ_WORK_CANCEL;
}
+ spin_unlock(&current->fs->lock);
}
+ if (!req->work.task_pid)
+ req->work.task_pid = task_pid_vnr(current);
}
-static inline bool io_req_needs_user(struct io_kiocb *req)
+static inline void io_req_work_drop_env(struct io_kiocb *req)
{
- return !(req->opcode == IORING_OP_READ_FIXED ||
- req->opcode == IORING_OP_WRITE_FIXED);
+ if (req->work.mm) {
+ mmdrop(req->work.mm);
+ req->work.mm = NULL;
+ }
+ if (req->work.creds) {
+ put_cred(req->work.creds);
+ req->work.creds = NULL;
+ }
+ if (req->work.fs) {
+ struct fs_struct *fs = req->work.fs;
+
+ spin_lock(&req->work.fs->lock);
+ if (--fs->users)
+ fs = NULL;
+ spin_unlock(&req->work.fs->lock);
+ if (fs)
+ free_fs_struct(fs);
+ }
}
static inline bool io_prep_async_work(struct io_kiocb *req,
struct io_kiocb **link)
{
+ const struct io_op_def *def = &io_op_defs[req->opcode];
bool do_hashed = false;
- switch (req->opcode) {
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- /* only regular files should be hashed for writes */
- if (req->flags & REQ_F_ISREG)
+ if (req->flags & REQ_F_ISREG) {
+ if (def->hash_reg_file)
do_hashed = true;
- /* fall-through */
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- case IORING_OP_SENDMSG:
- case IORING_OP_RECVMSG:
- case IORING_OP_ACCEPT:
- case IORING_OP_POLL_ADD:
- case IORING_OP_CONNECT:
- /*
- * We know REQ_F_ISREG is not set on some of these
- * opcodes, but this enables us to keep the check in
- * just one place.
- */
- if (!(req->flags & REQ_F_ISREG))
+ } else {
+ if (def->unbound_nonreg_file)
req->work.flags |= IO_WQ_WORK_UNBOUND;
- break;
}
- if (io_req_needs_user(req))
- req->work.flags |= IO_WQ_WORK_NEEDS_USER;
+
+ io_req_work_grab_env(req, def);
*link = io_prep_linked_timeout(req);
return do_hashed;
@@ -711,10 +1028,8 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
__io_commit_cqring(ctx);
- while ((req = io_get_deferred_req(ctx)) != NULL) {
- req->flags |= REQ_F_IO_DRAINED;
+ while ((req = io_get_deferred_req(ctx)) != NULL)
io_queue_async_work(req);
- }
}
static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
@@ -735,16 +1050,30 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
return &rings->cqes[tail & ctx->cq_mask];
}
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
+{
+ if (!ctx->cq_ev_fd)
+ return false;
+ if (!ctx->eventfd_async)
+ return true;
+ return io_wq_current_is_worker() || in_interrupt();
+}
+
+static void __io_cqring_ev_posted(struct io_ring_ctx *ctx, bool trigger_ev)
{
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
if (waitqueue_active(&ctx->sqo_wait))
wake_up(&ctx->sqo_wait);
- if (ctx->cq_ev_fd)
+ if (trigger_ev)
eventfd_signal(ctx->cq_ev_fd, 1);
}
+static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+{
+ __io_cqring_ev_posted(ctx, io_should_trigger_evfd(ctx));
+}
+
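The new eventfd_async bit changes when a registered eventfd fires. The behaviour implied by io_should_trigger_evfd() can be summarized as:

	/*
	 * no eventfd registered        -> never signal
	 * eventfd, eventfd_async == 0  -> signal on every posted CQE
	 * eventfd, eventfd_async == 1  -> signal only for CQEs posted from
	 *                                 an io-wq worker or interrupt
	 *                                 context, i.e. not for completions
	 *                                 that happen inline in the
	 *                                 submitting task
	 */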
/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
@@ -766,7 +1095,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
/* if force is set, the ring is going away. always drop after that */
if (force)
- ctx->cq_overflow_flushed = true;
+ ctx->cq_overflow_flushed = 1;
cqe = NULL;
while (!list_empty(&ctx->cq_overflow_list)) {
@@ -777,6 +1106,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
list);
list_move(&req->list, &list);
+ req->flags &= ~REQ_F_OVERFLOW;
if (cqe) {
WRITE_ONCE(cqe->user_data, req->user_data);
WRITE_ONCE(cqe->res, req->result);
@@ -788,6 +1118,10 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
}
io_commit_cqring(ctx);
+ if (cqe) {
+ clear_bit(0, &ctx->sq_check_overflow);
+ clear_bit(0, &ctx->cq_check_overflow);
+ }
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
@@ -821,6 +1155,11 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
WRITE_ONCE(ctx->rings->cq_overflow,
atomic_inc_return(&ctx->cached_cq_overflow));
} else {
+ if (list_empty(&ctx->cq_overflow_list)) {
+ set_bit(0, &ctx->sq_check_overflow);
+ set_bit(0, &ctx->cq_check_overflow);
+ }
+ req->flags |= REQ_F_OVERFLOW;
refcount_inc(&req->refs);
req->result = res;
list_add_tail(&req->list, &ctx->cq_overflow_list);
@@ -863,9 +1202,6 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct io_kiocb *req;
- if (!percpu_ref_tryget(&ctx->refs))
- return NULL;
-
if (!state) {
req = kmem_cache_alloc(req_cachep, gfp);
if (unlikely(!req))
@@ -888,17 +1224,14 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
ret = 1;
}
state->free_reqs = ret - 1;
- state->cur_req = 1;
- req = state->reqs[0];
+ req = state->reqs[ret - 1];
} else {
- req = state->reqs[state->cur_req];
state->free_reqs--;
- state->cur_req++;
+ req = state->reqs[state->free_reqs];
}
got_it:
req->io = NULL;
- req->ring_file = NULL;
req->file = NULL;
req->ctx = ctx;
req->flags = 0;
@@ -915,24 +1248,38 @@ fallback:
return NULL;
}
-static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
+static void __io_req_do_free(struct io_kiocb *req)
+{
+ if (likely(!io_is_fallback_req(req)))
+ kmem_cache_free(req_cachep, req);
+ else
+ clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
+}
+
+static void __io_req_aux_free(struct io_kiocb *req)
{
- if (*nr) {
- kmem_cache_free_bulk(req_cachep, *nr, reqs);
- percpu_ref_put_many(&ctx->refs, *nr);
- *nr = 0;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ kfree(req->io);
+ if (req->file) {
+ if (req->flags & REQ_F_FIXED_FILE)
+ percpu_ref_put(&ctx->file_data->refs);
+ else
+ fput(req->file);
}
+
+ io_req_work_drop_env(req);
}
static void __io_free_req(struct io_kiocb *req)
{
- struct io_ring_ctx *ctx = req->ctx;
+ __io_req_aux_free(req);
+
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ io_cleanup_req(req);
- if (req->io)
- kfree(req->io);
- if (req->file && !(req->flags & REQ_F_FIXED_FILE))
- fput(req->file);
if (req->flags & REQ_F_INFLIGHT) {
+ struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->inflight_lock, flags);
@@ -941,11 +1288,63 @@ static void __io_free_req(struct io_kiocb *req)
wake_up(&ctx->inflight_wait);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
}
- percpu_ref_put(&ctx->refs);
- if (likely(!io_is_fallback_req(req)))
- kmem_cache_free(req_cachep, req);
- else
- clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
+
+ percpu_ref_put(&req->ctx->refs);
+ __io_req_do_free(req);
+}
+
+struct req_batch {
+ void *reqs[IO_IOPOLL_BATCH];
+ int to_free;
+ int need_iter;
+};
+
+static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
+{
+ int fixed_refs = rb->to_free;
+
+ if (!rb->to_free)
+ return;
+ if (rb->need_iter) {
+ int i, inflight = 0;
+ unsigned long flags;
+
+ fixed_refs = 0;
+ for (i = 0; i < rb->to_free; i++) {
+ struct io_kiocb *req = rb->reqs[i];
+
+ if (req->flags & REQ_F_FIXED_FILE) {
+ req->file = NULL;
+ fixed_refs++;
+ }
+ if (req->flags & REQ_F_INFLIGHT)
+ inflight++;
+ __io_req_aux_free(req);
+ }
+ if (!inflight)
+ goto do_free;
+
+ spin_lock_irqsave(&ctx->inflight_lock, flags);
+ for (i = 0; i < rb->to_free; i++) {
+ struct io_kiocb *req = rb->reqs[i];
+
+ if (req->flags & REQ_F_INFLIGHT) {
+ list_del(&req->inflight_entry);
+ if (!--inflight)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+
+ if (waitqueue_active(&ctx->inflight_wait))
+ wake_up(&ctx->inflight_wait);
+ }
+do_free:
+ kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
+ if (fixed_refs)
+ percpu_ref_put_many(&ctx->file_data->refs, fixed_refs);
+ percpu_ref_put_many(&ctx->refs, rb->to_free);
+ rb->to_free = rb->need_iter = 0;
}
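req_batch is a small stack-local free list: callers feed it completed requests and flush once, so the slab frees and percpu reference drops are amortized via kmem_cache_free_bulk() and percpu_ref_put_many(). The expected pattern, as io_iopoll_complete() below uses it:

	struct io_kiocb *req;
	struct req_batch rb;

	rb.to_free = rb.need_iter = 0;
	while (!list_empty(done)) {
		req = list_first_entry(done, struct io_kiocb, list);
		list_del(&req->list);
		if (refcount_dec_and_test(&req->refs) &&
		    !io_req_multi_free(&rb, req))
			io_free_req(req);	/* linked/fallback reqs take the slow path */
	}
	io_free_req_many(ctx, &rb);		/* flush whatever was batched */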
static bool io_link_cancel_timeout(struct io_kiocb *req)
@@ -1118,19 +1517,21 @@ static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
struct io_rings *rings = ctx->rings;
- /*
- * noflush == true is from the waitqueue handler, just ensure we wake
- * up the task, and the next invocation will flush the entries. We
- * cannot safely do it from here.
- */
- if (noflush && !list_empty(&ctx->cq_overflow_list))
- return -1U;
+ if (test_bit(0, &ctx->cq_check_overflow)) {
+ /*
+ * noflush == true is from the waitqueue handler; just ensure
+ * we wake up the task, and the next invocation will flush the
+ * entries. We cannot safely do it from here.
+ */
+ if (noflush && !list_empty(&ctx->cq_overflow_list))
+ return -1U;
- io_cqring_overflow_flush(ctx, false);
+ io_cqring_overflow_flush(ctx, false);
+ }
/* See comment at the top of this file */
smp_rmb();
- return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
+ return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
}
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
@@ -1141,17 +1542,30 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
+static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
+{
+ if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
+ return false;
+
+ if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
+ rb->need_iter++;
+
+ rb->reqs[rb->to_free++] = req;
+ if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
+ io_free_req_many(req->ctx, rb);
+ return true;
+}
+
/*
* Find and free completed poll iocbs
*/
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
struct list_head *done)
{
- void *reqs[IO_IOPOLL_BATCH];
+ struct req_batch rb;
struct io_kiocb *req;
- int to_free;
- to_free = 0;
+ rb.to_free = rb.need_iter = 0;
while (!list_empty(done)) {
req = list_first_entry(done, struct io_kiocb, list);
list_del(&req->list);
@@ -1159,26 +1573,13 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
io_cqring_fill_event(req, req->result);
(*nr_events)++;
- if (refcount_dec_and_test(&req->refs)) {
- /* If we're not using fixed files, we have to pair the
- * completion part with the file put. Use regular
- * completions for those, only batch free for fixed
- * file and non-linked commands.
- */
- if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
- REQ_F_FIXED_FILE) && !io_is_fallback_req(req) &&
- !req->io) {
- reqs[to_free++] = req;
- if (to_free == ARRAY_SIZE(reqs))
- io_free_req_many(ctx, reqs, &to_free);
- } else {
- io_free_req(req);
- }
- }
+ if (refcount_dec_and_test(&req->refs) &&
+ !io_req_multi_free(&rb, req))
+ io_free_req(req);
}
io_commit_cqring(ctx);
- io_free_req_many(ctx, reqs, &to_free);
+ io_free_req_many(ctx, &rb);
}
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
@@ -1496,15 +1897,19 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
unsigned ioprio;
int ret;
- if (!req->file)
- return -EBADF;
-
if (S_ISREG(file_inode(req->file)->i_mode))
req->flags |= REQ_F_ISREG;
kiocb->ki_pos = READ_ONCE(sqe->off);
- kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
+ if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
+ req->flags |= REQ_F_CUR_POS;
+ kiocb->ki_pos = req->file->f_pos;
+ }
kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
+ kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
+ ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
+ if (unlikely(ret))
+ return ret;
ioprio = READ_ONCE(sqe->ioprio);
if (ioprio) {
@@ -1516,10 +1921,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
} else
kiocb->ki_ioprio = get_current_ioprio();
- ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
- if (unlikely(ret))
- return ret;
-
/* don't allow async punt if RWF_NOWAIT was requested */
if ((kiocb->ki_flags & IOCB_NOWAIT) ||
(req->file->f_flags & O_NONBLOCK))
@@ -1574,6 +1975,10 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
bool in_async)
{
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
+
+ if (req->flags & REQ_F_CUR_POS)
+ req->file->f_pos = kiocb->ki_pos;
if (in_async && ret >= 0 && kiocb->ki_complete == io_complete_rw)
*nxt = __io_complete_rw(kiocb, ret);
else
@@ -1671,6 +2076,13 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
if (req->rw.kiocb.private)
return -EINVAL;
+ if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
+ ssize_t ret;
+ ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
+ *iovec = NULL;
+ return ret;
+ }
+
if (req->io) {
struct io_async_rw *iorw = &req->io->rw;
@@ -1681,9 +2093,6 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
return iorw->size;
}
- if (!req->has_user)
- return -EFAULT;
-
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
@@ -1762,38 +2171,31 @@ static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
req->io->rw.iov = req->io->rw.fast_iov;
memcpy(req->io->rw.iov, fast_iov,
sizeof(struct iovec) * iter->nr_segs);
+ } else {
+ req->flags |= REQ_F_NEED_CLEANUP;
}
}
static int io_alloc_async_ctx(struct io_kiocb *req)
{
+ if (!io_op_defs[req->opcode].async_ctx)
+ return 0;
req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
return req->io == NULL;
}
-static void io_rw_async(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct iovec *iov = NULL;
-
- if (req->io->rw.iov != req->io->rw.fast_iov)
- iov = req->io->rw.iov;
- io_wq_submit_work(workptr);
- kfree(iov);
-}
-
static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
struct iovec *iovec, struct iovec *fast_iov,
struct iov_iter *iter)
{
- if (req->opcode == IORING_OP_READ_FIXED ||
- req->opcode == IORING_OP_WRITE_FIXED)
+ if (!io_op_defs[req->opcode].async_ctx)
return 0;
- if (!req->io && io_alloc_async_ctx(req))
- return -ENOMEM;
+ if (!req->io) {
+ if (io_alloc_async_ctx(req))
+ return -ENOMEM;
- io_req_map_rw(req, io_size, iovec, fast_iov, iter);
- req->work.func = io_rw_async;
+ io_req_map_rw(req, io_size, iovec, fast_iov, iter);
+ }
return 0;
}
@@ -1811,7 +2213,8 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (unlikely(!(req->file->f_mode & FMODE_READ)))
return -EBADF;
- if (!req->io)
+ /* either don't need iovec imported or already have it */
+ if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
return 0;
io = req->io;
@@ -1880,8 +2283,8 @@ copy_iov:
}
}
out_free:
- if (!io_wq_current_is_worker())
- kfree(iovec);
+ kfree(iovec);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
return ret;
}
@@ -1899,7 +2302,8 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
return -EBADF;
- if (!req->io)
+ /* either don't need iovec imported or already have it */
+ if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
return 0;
io = req->io;
@@ -1974,6 +2378,12 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
ret2 = call_write_iter(req->file, kiocb, &iter);
else
ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
+ /*
+ * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
+ * retry them without IOCB_NOWAIT.
+ */
+ if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
+ ret2 = -EAGAIN;
if (!force_nonblock || ret2 != -EAGAIN) {
kiocb_done(kiocb, ret2, nxt, req->in_async);
} else {
@@ -1986,8 +2396,8 @@ copy_iov:
}
}
out_free:
- if (!io_wq_current_is_worker())
- kfree(iovec);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ kfree(iovec);
return ret;
}
@@ -2101,6 +2511,435 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
return 0;
}
+static void io_fallocate_finish(struct io_wq_work **workptr)
+{
+ struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *nxt = NULL;
+ int ret;
+
+ ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
+ req->sync.len);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, &nxt);
+ if (nxt)
+ io_wq_assign_next(workptr, nxt);
+}
+
+static int io_fallocate_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
+ return -EINVAL;
+
+ req->sync.off = READ_ONCE(sqe->off);
+ req->sync.len = READ_ONCE(sqe->addr);
+ req->sync.mode = READ_ONCE(sqe->len);
+ return 0;
+}
+
+static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ struct io_wq_work *work, *old_work;
+
+ /* fallocate always requires a blocking context */
+ if (force_nonblock) {
+ io_put_req(req);
+ req->work.func = io_fallocate_finish;
+ return -EAGAIN;
+ }
+
+ work = old_work = &req->work;
+ io_fallocate_finish(&work);
+ if (work && work != old_work)
+ *nxt = container_of(work, struct io_kiocb, work);
+
+ return 0;
+}
+
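io_fallocate() is the template for handlers of unconditionally-blocking operations: on the nonblocking submission pass the request is given a worker callback and bounced with -EAGAIN so the core punts it to io-wq; in worker context the finish helper runs inline and may hand a follow-up request back through the work pointer. Reduced to its skeleton (illustrative only; the callback name is hypothetical):

	if (force_nonblock) {
		req->work.func = my_blocking_finish;	/* hypothetical */
		return -EAGAIN;		/* core queues the request async */
	}
	/* already on a worker: blocking is fine, run the op inline */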
+static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ const char __user *fname;
+ int ret;
+
+ if (sqe->ioprio || sqe->buf_index)
+ return -EINVAL;
+ if (sqe->flags & IOSQE_FIXED_FILE)
+ return -EBADF;
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ return 0;
+
+ req->open.dfd = READ_ONCE(sqe->fd);
+ req->open.how.mode = READ_ONCE(sqe->len);
+ fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ req->open.how.flags = READ_ONCE(sqe->open_flags);
+
+ req->open.filename = getname(fname);
+ if (IS_ERR(req->open.filename)) {
+ ret = PTR_ERR(req->open.filename);
+ req->open.filename = NULL;
+ return ret;
+ }
+
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return 0;
+}
+
+static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct open_how __user *how;
+ const char __user *fname;
+ size_t len;
+ int ret;
+
+ if (sqe->ioprio || sqe->buf_index)
+ return -EINVAL;
+ if (sqe->flags & IOSQE_FIXED_FILE)
+ return -EBADF;
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ return 0;
+
+ req->open.dfd = READ_ONCE(sqe->fd);
+ fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ len = READ_ONCE(sqe->len);
+
+ if (len < OPEN_HOW_SIZE_VER0)
+ return -EINVAL;
+
+ ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
+ len);
+ if (ret)
+ return ret;
+
+ if (!(req->open.how.flags & O_PATH) && force_o_largefile())
+ req->open.how.flags |= O_LARGEFILE;
+
+ req->open.filename = getname(fname);
+ if (IS_ERR(req->open.filename)) {
+ ret = PTR_ERR(req->open.filename);
+ req->open.filename = NULL;
+ return ret;
+ }
+
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return 0;
+}
+
+static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ struct open_flags op;
+ struct file *file;
+ int ret;
+
+ if (force_nonblock)
+ return -EAGAIN;
+
+ ret = build_open_flags(&req->open.how, &op);
+ if (ret)
+ goto err;
+
+ ret = get_unused_fd_flags(req->open.how.flags);
+ if (ret < 0)
+ goto err;
+
+ file = do_filp_open(req->open.dfd, req->open.filename, &op);
+ if (IS_ERR(file)) {
+ put_unused_fd(ret);
+ ret = PTR_ERR(file);
+ } else {
+ fsnotify_open(file);
+ fd_install(ret, file);
+ }
+err:
+ putname(req->open.filename);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+}
+
+static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
+ return io_openat2(req, nxt, force_nonblock);
+}
+
+static int io_epoll_ctl_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+#if defined(CONFIG_EPOLL)
+ if (sqe->ioprio || sqe->buf_index)
+ return -EINVAL;
+
+ req->epoll.epfd = READ_ONCE(sqe->fd);
+ req->epoll.op = READ_ONCE(sqe->len);
+ req->epoll.fd = READ_ONCE(sqe->off);
+
+ if (ep_op_has_event(req->epoll.op)) {
+ struct epoll_event __user *ev;
+
+ ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
+ return -EFAULT;
+ }
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+#if defined(CONFIG_EPOLL)
+ struct io_epoll *ie = &req->epoll;
+ int ret;
+
+ ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
+ if (force_nonblock && ret == -EAGAIN)
+ return -EAGAIN;
+
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
+ if (sqe->ioprio || sqe->buf_index || sqe->off)
+ return -EINVAL;
+
+ req->madvise.addr = READ_ONCE(sqe->addr);
+ req->madvise.len = READ_ONCE(sqe->len);
+ req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
+ struct io_madvise *ma = &req->madvise;
+ int ret;
+
+ if (force_nonblock)
+ return -EAGAIN;
+
+ ret = do_madvise(ma->addr, ma->len, ma->advice);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ if (sqe->ioprio || sqe->buf_index || sqe->addr)
+ return -EINVAL;
+
+ req->fadvise.offset = READ_ONCE(sqe->off);
+ req->fadvise.len = READ_ONCE(sqe->len);
+ req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
+ return 0;
+}
+
+static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ struct io_fadvise *fa = &req->fadvise;
+ int ret;
+
+ if (force_nonblock) {
+ switch (fa->advice) {
+ case POSIX_FADV_NORMAL:
+ case POSIX_FADV_RANDOM:
+ case POSIX_FADV_SEQUENTIAL:
+ break;
+ default:
+ return -EAGAIN;
+ }
+ }
+
+ ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+}
+
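The advice whitelist above is why fadvise can sometimes complete inline: generic_fadvise() implements the three listed values purely by updating the file's readahead state, while the rest can block, e.g. POSIX_FADV_WILLNEED starts readahead and POSIX_FADV_DONTNEED may write back and drop page cache. As a comment-form summary:

	/*
	 * Non-blocking safe (only update file->f_ra):
	 *   POSIX_FADV_NORMAL, POSIX_FADV_RANDOM, POSIX_FADV_SEQUENTIAL
	 * May block (hence punted with -EAGAIN):
	 *   POSIX_FADV_WILLNEED  - kicks off readahead
	 *   POSIX_FADV_DONTNEED  - may write back and drop page cache
	 */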
+static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ const char __user *fname;
+ unsigned lookup_flags;
+ int ret;
+
+ if (sqe->ioprio || sqe->buf_index)
+ return -EINVAL;
+ if (sqe->flags & IOSQE_FIXED_FILE)
+ return -EBADF;
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ return 0;
+
+ req->open.dfd = READ_ONCE(sqe->fd);
+ req->open.mask = READ_ONCE(sqe->len);
+ fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ req->open.how.flags = READ_ONCE(sqe->statx_flags);
+
+ if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags))
+ return -EINVAL;
+
+ req->open.filename = getname_flags(fname, lookup_flags, NULL);
+ if (IS_ERR(req->open.filename)) {
+ ret = PTR_ERR(req->open.filename);
+ req->open.filename = NULL;
+ return ret;
+ }
+
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return 0;
+}
+
+static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ struct io_open *ctx = &req->open;
+ unsigned lookup_flags;
+ struct path path;
+ struct kstat stat;
+ int ret;
+
+ if (force_nonblock)
+ return -EAGAIN;
+
+ if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
+ return -EINVAL;
+
+retry:
+ /* filename_lookup() drops it, keep a reference */
+ ctx->filename->refcnt++;
+
+ ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
+ NULL);
+ if (ret)
+ goto err;
+
+ ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags);
+ path_put(&path);
+ if (retry_estale(ret, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
+ if (!ret)
+ ret = cp_statx(&stat, ctx->buffer);
+err:
+ putname(ctx->filename);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+}
+
+static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ /*
+ * If we queue this for async, it must not be cancellable. That would
+ * leave the 'file' in an indeterminate state.
+ */
+ req->work.flags |= IO_WQ_WORK_NO_CANCEL;
+
+ if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
+ sqe->rw_flags || sqe->buf_index)
+ return -EINVAL;
+ if (sqe->flags & IOSQE_FIXED_FILE)
+ return -EBADF;
+
+ req->close.fd = READ_ONCE(sqe->fd);
+ if (req->file->f_op == &io_uring_fops ||
+ req->close.fd == req->ctx->ring_fd)
+ return -EBADF;
+
+ return 0;
+}
+
+/* only called when __close_fd_get_file() is done */
+static void __io_close_finish(struct io_kiocb *req, struct io_kiocb **nxt)
+{
+ int ret;
+
+ ret = filp_close(req->close.put_file, req->work.files);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ fput(req->close.put_file);
+ io_put_req_find_next(req, nxt);
+}
+
+static void io_close_finish(struct io_wq_work **workptr)
+{
+ struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *nxt = NULL;
+
+ __io_close_finish(req, &nxt);
+ if (nxt)
+ io_wq_assign_next(workptr, nxt);
+}
+
+static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ int ret;
+
+ req->close.put_file = NULL;
+ ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
+ if (ret < 0)
+ return ret;
+
+ /* if the file has a flush method, be safe and punt to async */
+ if (req->close.put_file->f_op->flush && !io_wq_current_is_worker())
+ goto eagain;
+
+ /*
+ * No ->flush(), safely close from here and just punt the
+ * fput() to async context.
+ */
+ __io_close_finish(req, nxt);
+ return 0;
+eagain:
+ req->work.func = io_close_finish;
+ /*
+ * Queue the work manually here to avoid grabbing files - we don't
+ * need the files, and doing so would make io_close_finish() close
+ * the file again and post a double CQE entry for this request.
+ */
+ io_queue_async_work(req);
+ return 0;
+}
+
static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -2157,34 +2996,29 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
return 0;
}
-#if defined(CONFIG_NET)
-static void io_sendrecv_async(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct iovec *iov = NULL;
-
- if (req->io->rw.iov != req->io->rw.fast_iov)
- iov = req->io->msg.iov;
- io_wq_submit_work(workptr);
- kfree(iov);
-}
-#endif
-
static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
struct io_sr_msg *sr = &req->sr_msg;
struct io_async_ctx *io = req->io;
+ int ret;
sr->msg_flags = READ_ONCE(sqe->msg_flags);
sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ sr->len = READ_ONCE(sqe->len);
- if (!io)
+ if (!io || req->opcode == IORING_OP_SEND)
+ return 0;
+ /* iovec is already imported */
+ if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
io->msg.iov = io->msg.fast_iov;
- return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+ ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
&io->msg.iov);
+ if (!ret)
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return ret;
#else
return -EOPNOTSUPP;
#endif
@@ -2204,12 +3038,11 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
sock = sock_from_file(req->file, &ret);
if (sock) {
struct io_async_ctx io;
- struct sockaddr_storage addr;
unsigned flags;
if (req->io) {
kmsg = &req->io->msg;
- kmsg->msg.msg_name = &addr;
+ kmsg->msg.msg_name = &req->io->msg.addr;
/* if iov is set, it's allocated already */
if (!kmsg->iov)
kmsg->iov = kmsg->fast_iov;
@@ -2218,7 +3051,7 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
struct io_sr_msg *sr = &req->sr_msg;
kmsg = &io.msg;
- kmsg->msg.msg_name = &addr;
+ kmsg->msg.msg_name = &io.msg.addr;
io.msg.iov = io.msg.fast_iov;
ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
@@ -2237,18 +3070,73 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
if (force_nonblock && ret == -EAGAIN) {
if (req->io)
return -EAGAIN;
- if (io_alloc_async_ctx(req))
+ if (io_alloc_async_ctx(req)) {
+ if (kmsg && kmsg->iov != kmsg->fast_iov)
+ kfree(kmsg->iov);
return -ENOMEM;
+ }
+ req->flags |= REQ_F_NEED_CLEANUP;
memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
- req->work.func = io_sendrecv_async;
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
}
- if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
+ if (kmsg && kmsg->iov != kmsg->fast_iov)
kfree(kmsg->iov);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ io_cqring_add_event(req, ret);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_put_req_find_next(req, nxt);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+#if defined(CONFIG_NET)
+ struct socket *sock;
+ int ret;
+
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+
+ sock = sock_from_file(req->file, &ret);
+ if (sock) {
+ struct io_sr_msg *sr = &req->sr_msg;
+ struct msghdr msg;
+ struct iovec iov;
+ unsigned flags;
+
+ ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
+ &msg.msg_iter);
+ if (ret)
+ return ret;
+
+ msg.msg_name = NULL;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_namelen = 0;
+
+ flags = req->sr_msg.msg_flags;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
+ msg.msg_flags = flags;
+ ret = sock_sendmsg(sock, &msg);
+ if (force_nonblock && ret == -EAGAIN)
+ return -EAGAIN;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ }
+
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
@@ -2265,16 +3153,24 @@ static int io_recvmsg_prep(struct io_kiocb *req,
#if defined(CONFIG_NET)
struct io_sr_msg *sr = &req->sr_msg;
struct io_async_ctx *io = req->io;
+ int ret;
sr->msg_flags = READ_ONCE(sqe->msg_flags);
sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ sr->len = READ_ONCE(sqe->len);
- if (!io)
+ if (!io || req->opcode == IORING_OP_RECV)
+ return 0;
+ /* iovec is already imported */
+ if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
io->msg.iov = io->msg.fast_iov;
- return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+ ret = recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
&io->msg.uaddr, &io->msg.iov);
+ if (!ret)
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return ret;
#else
return -EOPNOTSUPP;
#endif
@@ -2294,12 +3190,11 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
sock = sock_from_file(req->file, &ret);
if (sock) {
struct io_async_ctx io;
- struct sockaddr_storage addr;
unsigned flags;
if (req->io) {
kmsg = &req->io->msg;
- kmsg->msg.msg_name = &addr;
+ kmsg->msg.msg_name = &req->io->msg.addr;
/* if iov is set, it's allocated already */
if (!kmsg->iov)
kmsg->iov = kmsg->fast_iov;
@@ -2308,7 +3203,7 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
struct io_sr_msg *sr = &req->sr_msg;
kmsg = &io.msg;
- kmsg->msg.msg_name = &addr;
+ kmsg->msg.msg_name = &io.msg.addr;
io.msg.iov = io.msg.fast_iov;
ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
@@ -2329,18 +3224,22 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
if (force_nonblock && ret == -EAGAIN) {
if (req->io)
return -EAGAIN;
- if (io_alloc_async_ctx(req))
+ if (io_alloc_async_ctx(req)) {
+ if (kmsg && kmsg->iov != kmsg->fast_iov)
+ kfree(kmsg->iov);
return -ENOMEM;
+ }
memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
- req->work.func = io_sendrecv_async;
+ req->flags |= REQ_F_NEED_CLEANUP;
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
}
- if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
+ if (kmsg && kmsg->iov != kmsg->fast_iov)
kfree(kmsg->iov);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
@@ -2351,6 +3250,59 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
#endif
}
+static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+#if defined(CONFIG_NET)
+ struct socket *sock;
+ int ret;
+
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+
+ sock = sock_from_file(req->file, &ret);
+ if (sock) {
+ struct io_sr_msg *sr = &req->sr_msg;
+ struct msghdr msg;
+ struct iovec iov;
+ unsigned flags;
+
+ ret = import_single_range(READ, sr->buf, sr->len, &iov,
+ &msg.msg_iter);
+ if (ret)
+ return ret;
+
+ msg.msg_name = NULL;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_namelen = 0;
+ msg.msg_iocb = NULL;
+ msg.msg_flags = 0;
+
+ flags = req->sr_msg.msg_flags;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
+ ret = sock_recvmsg(sock, &msg, flags);
+ if (force_nonblock && ret == -EAGAIN)
+ return -EAGAIN;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ }
+
+ io_cqring_add_event(req, ret);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_put_req_find_next(req, nxt);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
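For orientation, a minimal userspace sketch of driving the new IORING_OP_SEND path, assuming a liburing build recent enough to provide io_uring_prep_send() (io_uring_prep_recv() is symmetric); sockfd is a connected socket set up by the caller:

/*
 * Hedged sketch, not the kernel code above: submit one plain-buffer
 * send and reap its completion. No msghdr/iovec setup is needed,
 * which is the point of IORING_OP_SEND vs IORING_OP_SENDMSG.
 */
#include <liburing.h>

static int send_buf(struct io_uring *ring, int sockfd,
		    const char *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_send(sqe, sockfd, buf, len, 0);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* bytes sent, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}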
static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
@@ -2414,7 +3366,6 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
ret = __io_accept(req, nxt, force_nonblock);
if (ret == -EAGAIN && force_nonblock) {
req->work.func = io_accept_finish;
- req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
io_put_req(req);
return -EAGAIN;
}
@@ -2635,6 +3586,47 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
io_wq_assign_next(workptr, nxt);
}
+static void __io_poll_flush(struct io_ring_ctx *ctx, struct llist_node *nodes)
+{
+ struct io_kiocb *req, *tmp;
+ struct req_batch rb;
+
+ rb.to_free = rb.need_iter = 0;
+ spin_lock_irq(&ctx->completion_lock);
+ llist_for_each_entry_safe(req, tmp, nodes, llist_node) {
+ hash_del(&req->hash_node);
+ io_poll_complete(req, req->result, 0);
+
+ if (refcount_dec_and_test(&req->refs) &&
+ !io_req_multi_free(&rb, req)) {
+ req->flags |= REQ_F_COMP_LOCKED;
+ io_free_req(req);
+ }
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ io_cqring_ev_posted(ctx);
+ io_free_req_many(ctx, &rb);
+}
+
+static void io_poll_flush(struct io_wq_work **workptr)
+{
+ struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct llist_node *nodes;
+
+ nodes = llist_del_all(&req->ctx->poll_llist);
+ if (nodes)
+ __io_poll_flush(req->ctx, nodes);
+}
+
+static void io_poll_trigger_evfd(struct io_wq_work **workptr)
+{
+ struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+
+ eventfd_signal(req->ctx->cq_ev_fd, 1);
+ io_put_req(req);
+}
+
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
void *key)
{
@@ -2642,7 +3634,6 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
struct io_ring_ctx *ctx = req->ctx;
__poll_t mask = key_to_poll(key);
- unsigned long flags;
/* for instances that support it check for an event match first: */
if (mask && !(mask & poll->events))
@@ -2656,17 +3647,39 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
* If we have a link timeout we're going to need the completion_lock
* for finalizing the request, mark us as having grabbed that already.
*/
- if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
- hash_del(&req->hash_node);
- io_poll_complete(req, mask, 0);
- req->flags |= REQ_F_COMP_LOCKED;
- io_put_req(req);
- spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ if (mask) {
+ unsigned long flags;
- io_cqring_ev_posted(ctx);
- } else {
- io_queue_async_work(req);
+ if (llist_empty(&ctx->poll_llist) &&
+ spin_trylock_irqsave(&ctx->completion_lock, flags)) {
+ bool trigger_ev;
+
+ hash_del(&req->hash_node);
+ io_poll_complete(req, mask, 0);
+
+ trigger_ev = io_should_trigger_evfd(ctx);
+ if (trigger_ev && eventfd_signal_count()) {
+ trigger_ev = false;
+ req->work.func = io_poll_trigger_evfd;
+ } else {
+ req->flags |= REQ_F_COMP_LOCKED;
+ io_put_req(req);
+ req = NULL;
+ }
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ __io_cqring_ev_posted(ctx, trigger_ev);
+ } else {
+ req->result = mask;
+ req->llist_node.next = NULL;
+ /* if the list wasn't empty, we're done */
+ if (!llist_add(&req->llist_node, &ctx->poll_llist))
+ req = NULL;
+ else
+ req->work.func = io_poll_flush;
+ }
}
+ if (req)
+ io_queue_async_work(req);
return 1;
}
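The wake path above leans on a lock-free llist: the first waker to find the list empty schedules io_poll_flush(), later wakers merely link their request in. A self-contained C11 sketch of that "first adder schedules the flush" pattern (the names node/push_first are illustrative, not the kernel's llist API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int result;
};

static _Atomic(struct node *) head;

/* push; returns true if the list was empty, i.e. caller must flush */
static bool push_first(struct node *n)
{
	struct node *old = atomic_load(&head);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&head, &old, n));
	return old == NULL;
}

/* drain everything in LIFO order, like llist_del_all() */
static void flush(void)
{
	struct node *n = atomic_exchange(&head, NULL);

	while (n) {
		printf("completing result %d\n", n->result);
		n = n->next;
	}
}

int main(void)
{
	struct node a = { .result = 1 }, b = { .result = 2 };

	if (push_first(&a)) {
		/* first in: this is where io_poll_flush gets queued */
	}
	push_first(&b);	/* list non-empty: a flush is already pending */
	flush();	/* what __io_poll_flush() does under the lock */
	return 0;
}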
@@ -3071,20 +4084,67 @@ static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
return 0;
}
+static int io_files_update_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ if (sqe->flags || sqe->ioprio || sqe->rw_flags)
+ return -EINVAL;
+
+ req->files_update.offset = READ_ONCE(sqe->off);
+ req->files_update.nr_args = READ_ONCE(sqe->len);
+ if (!req->files_update.nr_args)
+ return -EINVAL;
+ req->files_update.arg = READ_ONCE(sqe->addr);
+ return 0;
+}
+
+static int io_files_update(struct io_kiocb *req, bool force_nonblock)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_uring_files_update up;
+ int ret;
+
+ if (force_nonblock)
+ return -EAGAIN;
+
+ up.offset = req->files_update.offset;
+ up.fds = req->files_update.arg;
+
+ mutex_lock(&ctx->uring_lock);
+ ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
+ mutex_unlock(&ctx->uring_lock);
+
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req(req);
+ return 0;
+}
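Both the new IORING_OP_FILES_UPDATE request and the existing IORING_REGISTER_FILES_UPDATE opcode funnel into __io_sqe_files_update(). A hedged sketch of the register-syscall form, assuming a uapi header that defines struct io_uring_files_update; slot 3 is an arbitrary example:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* replace one slot in an already-registered file table */
static int update_slot(int ring_fd, int new_fd)
{
	__s32 fds[1] = { new_fd };	/* -1 here would clear the slot */
	struct io_uring_files_update up = {
		.offset = 3,			/* slot to replace */
		.fds    = (unsigned long) fds,	/* user array of fds */
	};

	/* returns the number of slots updated, or -1 with errno set */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES_UPDATE, &up, 1);
}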
+
static int io_req_defer_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
ssize_t ret = 0;
+ if (io_op_defs[req->opcode].file_table) {
+ ret = io_grab_files(req);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ io_req_work_grab_env(req, &io_op_defs[req->opcode]);
+
switch (req->opcode) {
case IORING_OP_NOP:
break;
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
+ case IORING_OP_READ:
ret = io_read_prep(req, sqe, true);
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
+ case IORING_OP_WRITE:
ret = io_write_prep(req, sqe, true);
break;
case IORING_OP_POLL_ADD:
@@ -3100,9 +4160,11 @@ static int io_req_defer_prep(struct io_kiocb *req,
ret = io_prep_sfr(req, sqe);
break;
case IORING_OP_SENDMSG:
+ case IORING_OP_SEND:
ret = io_sendmsg_prep(req, sqe);
break;
case IORING_OP_RECVMSG:
+ case IORING_OP_RECV:
ret = io_recvmsg_prep(req, sqe);
break;
case IORING_OP_CONNECT:
@@ -3123,6 +4185,33 @@ static int io_req_defer_prep(struct io_kiocb *req,
case IORING_OP_ACCEPT:
ret = io_accept_prep(req, sqe);
break;
+ case IORING_OP_FALLOCATE:
+ ret = io_fallocate_prep(req, sqe);
+ break;
+ case IORING_OP_OPENAT:
+ ret = io_openat_prep(req, sqe);
+ break;
+ case IORING_OP_CLOSE:
+ ret = io_close_prep(req, sqe);
+ break;
+ case IORING_OP_FILES_UPDATE:
+ ret = io_files_update_prep(req, sqe);
+ break;
+ case IORING_OP_STATX:
+ ret = io_statx_prep(req, sqe);
+ break;
+ case IORING_OP_FADVISE:
+ ret = io_fadvise_prep(req, sqe);
+ break;
+ case IORING_OP_MADVISE:
+ ret = io_madvise_prep(req, sqe);
+ break;
+ case IORING_OP_OPENAT2:
+ ret = io_openat2_prep(req, sqe);
+ break;
+ case IORING_OP_EPOLL_CTL:
+ ret = io_epoll_ctl_prep(req, sqe);
+ break;
default:
printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
req->opcode);
@@ -3161,6 +4250,35 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EIOCBQUEUED;
}
+static void io_cleanup_req(struct io_kiocb *req)
+{
+ struct io_async_ctx *io = req->io;
+
+ switch (req->opcode) {
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_READ:
+ case IORING_OP_WRITEV:
+ case IORING_OP_WRITE_FIXED:
+ case IORING_OP_WRITE:
+ if (io->rw.iov != io->rw.fast_iov)
+ kfree(io->rw.iov);
+ break;
+ case IORING_OP_SENDMSG:
+ case IORING_OP_RECVMSG:
+ if (io->msg.iov != io->msg.fast_iov)
+ kfree(io->msg.iov);
+ break;
+ case IORING_OP_OPENAT:
+ case IORING_OP_OPENAT2:
+ case IORING_OP_STATX:
+ putname(req->open.filename);
+ break;
+ }
+
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+}
+
static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_kiocb **nxt, bool force_nonblock)
{
@@ -3173,6 +4291,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
break;
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
+ case IORING_OP_READ:
if (sqe) {
ret = io_read_prep(req, sqe, force_nonblock);
if (ret < 0)
@@ -3182,6 +4301,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
+ case IORING_OP_WRITE:
if (sqe) {
ret = io_write_prep(req, sqe, force_nonblock);
if (ret < 0)
@@ -3222,20 +4342,28 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ret = io_sync_file_range(req, nxt, force_nonblock);
break;
case IORING_OP_SENDMSG:
+ case IORING_OP_SEND:
if (sqe) {
ret = io_sendmsg_prep(req, sqe);
if (ret < 0)
break;
}
- ret = io_sendmsg(req, nxt, force_nonblock);
+ if (req->opcode == IORING_OP_SENDMSG)
+ ret = io_sendmsg(req, nxt, force_nonblock);
+ else
+ ret = io_send(req, nxt, force_nonblock);
break;
case IORING_OP_RECVMSG:
+ case IORING_OP_RECV:
if (sqe) {
ret = io_recvmsg_prep(req, sqe);
if (ret)
break;
}
- ret = io_recvmsg(req, nxt, force_nonblock);
+ if (req->opcode == IORING_OP_RECVMSG)
+ ret = io_recvmsg(req, nxt, force_nonblock);
+ else
+ ret = io_recv(req, nxt, force_nonblock);
break;
case IORING_OP_TIMEOUT:
if (sqe) {
@@ -3277,6 +4405,78 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
}
ret = io_async_cancel(req, nxt);
break;
+ case IORING_OP_FALLOCATE:
+ if (sqe) {
+ ret = io_fallocate_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_fallocate(req, nxt, force_nonblock);
+ break;
+ case IORING_OP_OPENAT:
+ if (sqe) {
+ ret = io_openat_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_openat(req, nxt, force_nonblock);
+ break;
+ case IORING_OP_CLOSE:
+ if (sqe) {
+ ret = io_close_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_close(req, nxt, force_nonblock);
+ break;
+ case IORING_OP_FILES_UPDATE:
+ if (sqe) {
+ ret = io_files_update_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_files_update(req, force_nonblock);
+ break;
+ case IORING_OP_STATX:
+ if (sqe) {
+ ret = io_statx_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_statx(req, nxt, force_nonblock);
+ break;
+ case IORING_OP_FADVISE:
+ if (sqe) {
+ ret = io_fadvise_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_fadvise(req, nxt, force_nonblock);
+ break;
+ case IORING_OP_MADVISE:
+ if (sqe) {
+ ret = io_madvise_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_madvise(req, nxt, force_nonblock);
+ break;
+ case IORING_OP_OPENAT2:
+ if (sqe) {
+ ret = io_openat2_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_openat2(req, nxt, force_nonblock);
+ break;
+ case IORING_OP_EPOLL_CTL:
+ if (sqe) {
+ ret = io_epoll_ctl_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_epoll_ctl(req, nxt, force_nonblock);
+ break;
default:
ret = -EINVAL;
break;
@@ -3311,11 +4511,13 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
struct io_kiocb *nxt = NULL;
int ret = 0;
- if (work->flags & IO_WQ_WORK_CANCEL)
+ /* if NO_CANCEL is set, we must still run the work */
+ if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
+ IO_WQ_WORK_CANCEL) {
ret = -ECANCELED;
+ }
if (!ret) {
- req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
req->in_async = true;
do {
ret = io_issue_sqe(req, NULL, &nxt, false);
@@ -3344,26 +4546,13 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
io_wq_assign_next(workptr, nxt);
}
-static bool io_req_op_valid(int op)
+static int io_req_needs_file(struct io_kiocb *req, int fd)
{
- return op >= IORING_OP_NOP && op < IORING_OP_LAST;
-}
-
-static int io_req_needs_file(struct io_kiocb *req)
-{
- switch (req->opcode) {
- case IORING_OP_NOP:
- case IORING_OP_POLL_REMOVE:
- case IORING_OP_TIMEOUT:
- case IORING_OP_TIMEOUT_REMOVE:
- case IORING_OP_ASYNC_CANCEL:
- case IORING_OP_LINK_TIMEOUT:
+ if (!io_op_defs[req->opcode].needs_file)
return 0;
- default:
- if (io_req_op_valid(req->opcode))
- return 1;
- return -EINVAL;
- }
+ if ((fd == -1 || fd == AT_FDCWD) && io_op_defs[req->opcode].fd_non_neg)
+ return 0;
+ return 1;
}
static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
@@ -3371,8 +4560,8 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
{
struct fixed_file_table *table;
- table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
- return table->files[index & IORING_FILE_TABLE_MASK];
+ table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
+	return table->files[index & IORING_FILE_TABLE_MASK];
}
static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
@@ -3380,20 +4569,16 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
{
struct io_ring_ctx *ctx = req->ctx;
unsigned flags;
- int fd, ret;
+ int fd;
flags = READ_ONCE(sqe->flags);
fd = READ_ONCE(sqe->fd);
- if (flags & IOSQE_IO_DRAIN)
- req->flags |= REQ_F_IO_DRAIN;
-
- ret = io_req_needs_file(req);
- if (ret <= 0)
- return ret;
+ if (!io_req_needs_file(req, fd))
+ return 0;
if (flags & IOSQE_FIXED_FILE) {
- if (unlikely(!ctx->file_table ||
+ if (unlikely(!ctx->file_data ||
(unsigned) fd >= ctx->nr_user_files))
return -EBADF;
fd = array_index_nospec(fd, ctx->nr_user_files);
@@ -3401,6 +4586,7 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
if (!req->file)
return -EBADF;
req->flags |= REQ_F_FIXED_FILE;
+ percpu_ref_get(&ctx->file_data->refs);
} else {
if (req->needs_fixed_file)
return -EBADF;
@@ -3418,6 +4604,11 @@ static int io_grab_files(struct io_kiocb *req)
int ret = -EBADF;
struct io_ring_ctx *ctx = req->ctx;
+ if (req->work.files)
+ return 0;
+ if (!ctx->ring_file)
+ return -EBADF;
+
rcu_read_lock();
spin_lock_irq(&ctx->inflight_lock);
/*
@@ -3426,7 +4617,7 @@ static int io_grab_files(struct io_kiocb *req)
* the fd has changed since we started down this path, and disallow
* this operation if it has.
*/
- if (fcheck(req->ring_fd) == req->ring_file) {
+ if (fcheck(ctx->ring_fd) == ctx->ring_file) {
list_add(&req->inflight_entry, &ctx->inflight_list);
req->flags |= REQ_F_INFLIGHT;
req->work.files = current->files;
@@ -3532,7 +4723,8 @@ again:
*/
if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
(req->flags & REQ_F_MUST_PUNT))) {
- if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
+punt:
+ if (io_op_defs[req->opcode].file_table) {
ret = io_grab_files(req);
if (ret)
goto err;
@@ -3567,6 +4759,9 @@ done_req:
if (nxt) {
req = nxt;
nxt = NULL;
+
+ if (req->flags & REQ_F_FORCE_ASYNC)
+ goto punt;
goto again;
}
}
@@ -3575,21 +4770,27 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
int ret;
- if (unlikely(req->ctx->drain_next)) {
- req->flags |= REQ_F_IO_DRAIN;
- req->ctx->drain_next = false;
- }
- req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK);
-
ret = io_req_defer(req, sqe);
if (ret) {
if (ret != -EIOCBQUEUED) {
+fail_req:
io_cqring_add_event(req, ret);
req_set_fail_links(req);
io_double_put_req(req);
}
- } else
+ } else if (req->flags & REQ_F_FORCE_ASYNC) {
+ ret = io_req_defer_prep(req, sqe);
+ if (unlikely(ret < 0))
+ goto fail_req;
+ /*
+		 * Never try inline submit if IOSQE_ASYNC is set, go straight
+ * to async execution.
+ */
+ req->work.flags |= IO_WQ_WORK_CONCURRENT;
+ io_queue_async_work(req);
+ } else {
__io_queue_sqe(req, sqe);
+ }
}
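From userspace, the REQ_F_FORCE_ASYNC branch above is reached by setting the new IOSQE_ASYNC sqe flag. A tiny liburing fragment, assuming headers that define IOSQE_ASYNC and a caller that has verified the sqe is non-NULL:

#include <liburing.h>

/* force a request onto the async workers, skipping inline submit */
static void prep_forced_async_nop(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_nop(sqe);
	sqe->flags |= IOSQE_ASYNC;	/* same bit value as REQ_F_FORCE_ASYNC */
}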
static inline void io_queue_link_head(struct io_kiocb *req)
@@ -3602,25 +4803,47 @@ static inline void io_queue_link_head(struct io_kiocb *req)
}
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
- IOSQE_IO_HARDLINK)
+ IOSQE_IO_HARDLINK | IOSQE_ASYNC)
static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_submit_state *state, struct io_kiocb **link)
{
+ const struct cred *old_creds = NULL;
struct io_ring_ctx *ctx = req->ctx;
- int ret;
+ unsigned int sqe_flags;
+ int ret, id;
+
+ sqe_flags = READ_ONCE(sqe->flags);
/* enforce forwards compatibility on users */
- if (unlikely(sqe->flags & ~SQE_VALID_FLAGS)) {
+ if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
ret = -EINVAL;
goto err_req;
}
+ id = READ_ONCE(sqe->personality);
+ if (id) {
+ const struct cred *personality_creds;
+
+ personality_creds = idr_find(&ctx->personality_idr, id);
+ if (unlikely(!personality_creds)) {
+ ret = -EINVAL;
+ goto err_req;
+ }
+ old_creds = override_creds(personality_creds);
+ }
+
+	/* same numerical values as the corresponding REQ_F_*, safe to copy */
+ req->flags |= sqe_flags & (IOSQE_IO_DRAIN|IOSQE_IO_HARDLINK|
+ IOSQE_ASYNC);
+
ret = io_req_set_file(state, req, sqe);
if (unlikely(ret)) {
err_req:
io_cqring_add_event(req, ret);
io_double_put_req(req);
+ if (old_creds)
+ revert_creds(old_creds);
return false;
}
@@ -3632,14 +4855,19 @@ err_req:
* conditions are true (normal request), then just queue it.
*/
if (*link) {
- struct io_kiocb *prev = *link;
-
- if (sqe->flags & IOSQE_IO_DRAIN)
- (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
-
- if (sqe->flags & IOSQE_IO_HARDLINK)
- req->flags |= REQ_F_HARDLINK;
+ struct io_kiocb *head = *link;
+ /*
+		 * Given the sequential execution of a link, draining both sides
+		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
+ * requests in the link. So, it drains the head and the
+ * next after the link request. The last one is done via
+ * drain_next flag to persist the effect across calls.
+ */
+ if (sqe_flags & IOSQE_IO_DRAIN) {
+ head->flags |= REQ_F_IO_DRAIN;
+ ctx->drain_next = 1;
+ }
if (io_alloc_async_ctx(req)) {
ret = -EAGAIN;
goto err_req;
@@ -3648,25 +4876,36 @@ err_req:
ret = io_req_defer_prep(req, sqe);
if (ret) {
/* fail even hard links since we don't submit */
- prev->flags |= REQ_F_FAIL_LINK;
+ head->flags |= REQ_F_FAIL_LINK;
goto err_req;
}
- trace_io_uring_link(ctx, req, prev);
- list_add_tail(&req->link_list, &prev->link_list);
- } else if (sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
- req->flags |= REQ_F_LINK;
- if (sqe->flags & IOSQE_IO_HARDLINK)
- req->flags |= REQ_F_HARDLINK;
-
- INIT_LIST_HEAD(&req->link_list);
- ret = io_req_defer_prep(req, sqe);
- if (ret)
- req->flags |= REQ_F_FAIL_LINK;
- *link = req;
+ trace_io_uring_link(ctx, req, head);
+ list_add_tail(&req->link_list, &head->link_list);
+
+ /* last request of a link, enqueue the link */
+ if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
+ io_queue_link_head(head);
+ *link = NULL;
+ }
} else {
- io_queue_sqe(req, sqe);
+ if (unlikely(ctx->drain_next)) {
+ req->flags |= REQ_F_IO_DRAIN;
+ req->ctx->drain_next = 0;
+ }
+ if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
+ req->flags |= REQ_F_LINK;
+ INIT_LIST_HEAD(&req->link_list);
+ ret = io_req_defer_prep(req, sqe);
+ if (ret)
+ req->flags |= REQ_F_FAIL_LINK;
+ *link = req;
+ } else {
+ io_queue_sqe(req, sqe);
+ }
}
+ if (old_creds)
+ revert_creds(old_creds);
return true;
}
@@ -3678,8 +4917,7 @@ static void io_submit_state_end(struct io_submit_state *state)
blk_finish_plug(&state->plug);
io_file_put(state);
if (state->free_reqs)
- kmem_cache_free_bulk(req_cachep, state->free_reqs,
- &state->reqs[state->cur_req]);
+ kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
}
/*
@@ -3698,14 +4936,12 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
- if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
- /*
- * Ensure any loads from the SQEs are done at this point,
- * since once we write the new head, the application could
- * write new data to them.
- */
- smp_store_release(&rings->sq.head, ctx->cached_sq_head);
- }
+ /*
+ * Ensure any loads from the SQEs are done at this point,
+ * since once we write the new head, the application could
+ * write new data to them.
+ */
+ smp_store_release(&rings->sq.head, ctx->cached_sq_head);
}
/*
@@ -3719,7 +4955,6 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe **sqe_ptr)
{
- struct io_rings *rings = ctx->rings;
u32 *sq_array = ctx->sq_array;
unsigned head;
@@ -3731,12 +4966,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
* 2) allows the kernel side to track the head on its own, even
* though the application is the one updating it.
*/
- head = ctx->cached_sq_head;
- /* make sure SQ entry isn't read before tail */
- if (unlikely(head == smp_load_acquire(&rings->sq.tail)))
- return false;
-
- head = READ_ONCE(sq_array[head & ctx->sq_mask]);
+ head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
if (likely(head < ctx->sq_entries)) {
/*
		 * All io needs to record the previous position, if LINK vs DRAIN,
@@ -3754,7 +4984,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
/* drop invalid entries */
ctx->cached_sq_head++;
ctx->cached_sq_dropped++;
- WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
+ WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
return false;
}
@@ -3768,19 +4998,30 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
bool mm_fault = false;
/* if we have a backlog and couldn't flush it all, return BUSY */
- if (!list_empty(&ctx->cq_overflow_list) &&
- !io_cqring_overflow_flush(ctx, false))
- return -EBUSY;
+ if (test_bit(0, &ctx->sq_check_overflow)) {
+ if (!list_empty(&ctx->cq_overflow_list) &&
+ !io_cqring_overflow_flush(ctx, false))
+ return -EBUSY;
+ }
+
+ /* make sure SQ entry isn't read before tail */
+ nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
+
+ if (!percpu_ref_tryget_many(&ctx->refs, nr))
+ return -EAGAIN;
if (nr > IO_PLUG_THRESHOLD) {
io_submit_state_start(&state, nr);
statep = &state;
}
+ ctx->ring_fd = ring_fd;
+ ctx->ring_file = ring_file;
+
for (i = 0; i < nr; i++) {
const struct io_uring_sqe *sqe;
struct io_kiocb *req;
- unsigned int sqe_flags;
+ int err;
req = io_get_req(ctx, statep);
if (unlikely(!req)) {
@@ -3789,39 +5030,44 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
break;
}
if (!io_get_sqring(ctx, req, &sqe)) {
- __io_free_req(req);
+ __io_req_do_free(req);
+ break;
+ }
+
+ /* will complete beyond this point, count as submitted */
+ submitted++;
+
+ if (unlikely(req->opcode >= IORING_OP_LAST)) {
+ err = -EINVAL;
+fail_req:
+ io_cqring_add_event(req, err);
+ io_double_put_req(req);
break;
}
- if (io_req_needs_user(req) && !*mm) {
+ if (io_op_defs[req->opcode].needs_mm && !*mm) {
mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
- if (!mm_fault) {
- use_mm(ctx->sqo_mm);
- *mm = ctx->sqo_mm;
+ if (unlikely(mm_fault)) {
+ err = -EFAULT;
+ goto fail_req;
}
+ use_mm(ctx->sqo_mm);
+ *mm = ctx->sqo_mm;
}
- submitted++;
- sqe_flags = sqe->flags;
-
- req->ring_file = ring_file;
- req->ring_fd = ring_fd;
- req->has_user = *mm != NULL;
req->in_async = async;
req->needs_fixed_file = async;
- trace_io_uring_submit_sqe(ctx, req->user_data, true, async);
+ trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
+ true, async);
if (!io_submit_sqe(req, sqe, statep, &link))
break;
- /*
- * If previous wasn't linked and we have a linked command,
- * that's the end of the chain. Submit the previous link.
- */
- if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) && link) {
- io_queue_link_head(link);
- link = NULL;
- }
}
+ if (unlikely(submitted != nr)) {
+ int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
+
+ percpu_ref_put_many(&ctx->refs, nr - ref_used);
+ }
if (link)
io_queue_link_head(link);
if (statep)
@@ -3900,7 +5146,8 @@ static int io_sq_thread(void *data)
* reap events and wake us up.
*/
if (inflight ||
- (!time_after(jiffies, timeout) && ret != -EBUSY)) {
+ (!time_after(jiffies, timeout) && ret != -EBUSY &&
+ !percpu_ref_is_dying(&ctx->refs))) {
cond_resched();
continue;
}
@@ -3944,7 +5191,6 @@ static int io_sq_thread(void *data)
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
}
- to_submit = min(to_submit, ctx->sq_entries);
mutex_lock(&ctx->uring_lock);
ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
mutex_unlock(&ctx->uring_lock);
@@ -4075,19 +5321,35 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
#endif
}
+static void io_file_ref_kill(struct percpu_ref *ref)
+{
+ struct fixed_file_data *data;
+
+ data = container_of(ref, struct fixed_file_data, refs);
+ complete(&data->done);
+}
+
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
+ struct fixed_file_data *data = ctx->file_data;
unsigned nr_tables, i;
- if (!ctx->file_table)
+ if (!data)
return -ENXIO;
+ percpu_ref_kill_and_confirm(&data->refs, io_file_ref_kill);
+ flush_work(&data->ref_work);
+ wait_for_completion(&data->done);
+ io_ring_file_ref_flush(data);
+ percpu_ref_exit(&data->refs);
+
__io_sqe_files_unregister(ctx);
nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
for (i = 0; i < nr_tables; i++)
- kfree(ctx->file_table[i].files);
- kfree(ctx->file_table);
- ctx->file_table = NULL;
+ kfree(data->table[i].files);
+ kfree(data->table);
+ kfree(data);
+ ctx->file_data = NULL;
ctx->nr_user_files = 0;
return 0;
}
@@ -4118,16 +5380,6 @@ static void io_finish_async(struct io_ring_ctx *ctx)
}
#if defined(CONFIG_UNIX)
-static void io_destruct_skb(struct sk_buff *skb)
-{
- struct io_ring_ctx *ctx = skb->sk->sk_user_data;
-
- if (ctx->io_wq)
- io_wq_flush(ctx->io_wq);
-
- unix_destruct_scm(skb);
-}
-
/*
* Ensure the UNIX gc is aware of our file set, so we are certain that
* the io_uring can be safely unregistered on process exit, even if we have
@@ -4175,7 +5427,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
fpl->max = SCM_MAX_FD;
fpl->count = nr_files;
UNIXCB(skb).fp = fpl;
- skb->destructor = io_destruct_skb;
+ skb->destructor = unix_destruct_scm;
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
skb_queue_head(&sk->sk_receive_queue, skb);
@@ -4237,7 +5489,7 @@ static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
int i;
for (i = 0; i < nr_tables; i++) {
- struct fixed_file_table *table = &ctx->file_table[i];
+ struct fixed_file_table *table = &ctx->file_data->table[i];
unsigned this_files;
this_files = min(nr_files, IORING_MAX_FILES_TABLE);
@@ -4252,36 +5504,169 @@ static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
return 0;
for (i = 0; i < nr_tables; i++) {
- struct fixed_file_table *table = &ctx->file_table[i];
+ struct fixed_file_table *table = &ctx->file_data->table[i];
kfree(table->files);
}
return 1;
}
+static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
+{
+#if defined(CONFIG_UNIX)
+ struct sock *sock = ctx->ring_sock->sk;
+ struct sk_buff_head list, *head = &sock->sk_receive_queue;
+ struct sk_buff *skb;
+ int i;
+
+ __skb_queue_head_init(&list);
+
+ /*
+ * Find the skb that holds this file in its SCM_RIGHTS. When found,
+ * remove this entry and rearrange the file array.
+ */
+ skb = skb_dequeue(head);
+ while (skb) {
+ struct scm_fp_list *fp;
+
+ fp = UNIXCB(skb).fp;
+ for (i = 0; i < fp->count; i++) {
+ int left;
+
+ if (fp->fp[i] != file)
+ continue;
+
+ unix_notinflight(fp->user, fp->fp[i]);
+ left = fp->count - 1 - i;
+ if (left) {
+ memmove(&fp->fp[i], &fp->fp[i + 1],
+ left * sizeof(struct file *));
+ }
+ fp->count--;
+ if (!fp->count) {
+ kfree_skb(skb);
+ skb = NULL;
+ } else {
+ __skb_queue_tail(&list, skb);
+ }
+ fput(file);
+ file = NULL;
+ break;
+ }
+
+ if (!file)
+ break;
+
+ __skb_queue_tail(&list, skb);
+
+ skb = skb_dequeue(head);
+ }
+
+ if (skb_peek(&list)) {
+ spin_lock_irq(&head->lock);
+ while ((skb = __skb_dequeue(&list)) != NULL)
+ __skb_queue_tail(head, skb);
+ spin_unlock_irq(&head->lock);
+ }
+#else
+ fput(file);
+#endif
+}
+
+struct io_file_put {
+ struct llist_node llist;
+ struct file *file;
+ struct completion *done;
+};
+
+static void io_ring_file_ref_flush(struct fixed_file_data *data)
+{
+ struct io_file_put *pfile, *tmp;
+ struct llist_node *node;
+
+ while ((node = llist_del_all(&data->put_llist)) != NULL) {
+ llist_for_each_entry_safe(pfile, tmp, node, llist) {
+ io_ring_file_put(data->ctx, pfile->file);
+ if (pfile->done)
+ complete(pfile->done);
+ else
+ kfree(pfile);
+ }
+ }
+}
+
+static void io_ring_file_ref_switch(struct work_struct *work)
+{
+ struct fixed_file_data *data;
+
+ data = container_of(work, struct fixed_file_data, ref_work);
+ io_ring_file_ref_flush(data);
+ percpu_ref_get(&data->refs);
+ percpu_ref_switch_to_percpu(&data->refs);
+}
+
+static void io_file_data_ref_zero(struct percpu_ref *ref)
+{
+ struct fixed_file_data *data;
+
+ data = container_of(ref, struct fixed_file_data, refs);
+
+ /*
+ * We can't safely switch from inside this context, punt to wq. If
+ * the table ref is going away, the table is being unregistered.
+ * Don't queue up the async work for that case, the caller will
+ * handle it.
+ */
+ if (!percpu_ref_is_dying(&data->refs))
+ queue_work(system_wq, &data->ref_work);
+}
+
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args)
{
__s32 __user *fds = (__s32 __user *) arg;
unsigned nr_tables;
+ struct file *file;
int fd, ret = 0;
unsigned i;
- if (ctx->file_table)
+ if (ctx->file_data)
return -EBUSY;
if (!nr_args)
return -EINVAL;
if (nr_args > IORING_MAX_FIXED_FILES)
return -EMFILE;
+ ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
+ if (!ctx->file_data)
+ return -ENOMEM;
+ ctx->file_data->ctx = ctx;
+ init_completion(&ctx->file_data->done);
+
nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
- ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table),
+ ctx->file_data->table = kcalloc(nr_tables,
+ sizeof(struct fixed_file_table),
GFP_KERNEL);
- if (!ctx->file_table)
+ if (!ctx->file_data->table) {
+ kfree(ctx->file_data);
+ ctx->file_data = NULL;
return -ENOMEM;
+ }
+
+ if (percpu_ref_init(&ctx->file_data->refs, io_file_data_ref_zero,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
+ kfree(ctx->file_data->table);
+ kfree(ctx->file_data);
+ ctx->file_data = NULL;
+ return -ENOMEM;
+ }
+ ctx->file_data->put_llist.first = NULL;
+ INIT_WORK(&ctx->file_data->ref_work, io_ring_file_ref_switch);
if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
- kfree(ctx->file_table);
- ctx->file_table = NULL;
+ percpu_ref_exit(&ctx->file_data->refs);
+ kfree(ctx->file_data->table);
+ kfree(ctx->file_data);
+ ctx->file_data = NULL;
return -ENOMEM;
}
@@ -4298,13 +5683,14 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
continue;
}
- table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
+ table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
index = i & IORING_FILE_TABLE_MASK;
- table->files[index] = fget(fd);
+ file = fget(fd);
ret = -EBADF;
- if (!table->files[index])
+ if (!file)
break;
+
/*
* Don't allow io_uring instances to be registered. If UNIX
* isn't enabled, then this causes a reference cycle and this
@@ -4312,26 +5698,26 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
* handle it just fine, but there's still no point in allowing
* a ring fd as it doesn't support regular read/write anyway.
*/
- if (table->files[index]->f_op == &io_uring_fops) {
- fput(table->files[index]);
+ if (file->f_op == &io_uring_fops) {
+ fput(file);
break;
}
ret = 0;
+ table->files[index] = file;
}
if (ret) {
for (i = 0; i < ctx->nr_user_files; i++) {
- struct file *file;
-
file = io_file_from_index(ctx, i);
if (file)
fput(file);
}
for (i = 0; i < nr_tables; i++)
- kfree(ctx->file_table[i].files);
+ kfree(ctx->file_data->table[i].files);
- kfree(ctx->file_table);
- ctx->file_table = NULL;
+ kfree(ctx->file_data->table);
+ kfree(ctx->file_data);
+ ctx->file_data = NULL;
ctx->nr_user_files = 0;
return ret;
}
@@ -4343,69 +5729,6 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return ret;
}
-static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
-{
-#if defined(CONFIG_UNIX)
- struct file *file = io_file_from_index(ctx, index);
- struct sock *sock = ctx->ring_sock->sk;
- struct sk_buff_head list, *head = &sock->sk_receive_queue;
- struct sk_buff *skb;
- int i;
-
- __skb_queue_head_init(&list);
-
- /*
- * Find the skb that holds this file in its SCM_RIGHTS. When found,
- * remove this entry and rearrange the file array.
- */
- skb = skb_dequeue(head);
- while (skb) {
- struct scm_fp_list *fp;
-
- fp = UNIXCB(skb).fp;
- for (i = 0; i < fp->count; i++) {
- int left;
-
- if (fp->fp[i] != file)
- continue;
-
- unix_notinflight(fp->user, fp->fp[i]);
- left = fp->count - 1 - i;
- if (left) {
- memmove(&fp->fp[i], &fp->fp[i + 1],
- left * sizeof(struct file *));
- }
- fp->count--;
- if (!fp->count) {
- kfree_skb(skb);
- skb = NULL;
- } else {
- __skb_queue_tail(&list, skb);
- }
- fput(file);
- file = NULL;
- break;
- }
-
- if (!file)
- break;
-
- __skb_queue_tail(&list, skb);
-
- skb = skb_dequeue(head);
- }
-
- if (skb_peek(&list)) {
- spin_lock_irq(&head->lock);
- while ((skb = __skb_dequeue(&list)) != NULL)
- __skb_queue_tail(head, skb);
- spin_unlock_irq(&head->lock);
- }
-#else
- fput(io_file_from_index(ctx, index));
-#endif
-}
-
static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
int index)
{
@@ -4449,27 +5772,65 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
#endif
}
-static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
- unsigned nr_args)
+static void io_atomic_switch(struct percpu_ref *ref)
{
- struct io_uring_files_update up;
+ struct fixed_file_data *data;
+
+ data = container_of(ref, struct fixed_file_data, refs);
+ clear_bit(FFD_F_ATOMIC, &data->state);
+}
+
+static bool io_queue_file_removal(struct fixed_file_data *data,
+ struct file *file)
+{
+ struct io_file_put *pfile, pfile_stack;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ /*
+	 * If we fail allocating the struct we need for doing async removal
+ * of this file, just punt to sync and wait for it.
+ */
+ pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
+ if (!pfile) {
+ pfile = &pfile_stack;
+ pfile->done = &done;
+ }
+
+ pfile->file = file;
+ llist_add(&pfile->llist, &data->put_llist);
+
+ if (pfile == &pfile_stack) {
+ if (!test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
+ percpu_ref_put(&data->refs);
+ percpu_ref_switch_to_atomic(&data->refs,
+ io_atomic_switch);
+ }
+ wait_for_completion(&done);
+ flush_work(&data->ref_work);
+ return false;
+ }
+
+ return true;
+}
+
+static int __io_sqe_files_update(struct io_ring_ctx *ctx,
+ struct io_uring_files_update *up,
+ unsigned nr_args)
+{
+ struct fixed_file_data *data = ctx->file_data;
+ bool ref_switch = false;
+ struct file *file;
__s32 __user *fds;
int fd, i, err;
__u32 done;
- if (!ctx->file_table)
- return -ENXIO;
- if (!nr_args)
- return -EINVAL;
- if (copy_from_user(&up, arg, sizeof(up)))
- return -EFAULT;
- if (check_add_overflow(up.offset, nr_args, &done))
+ if (check_add_overflow(up->offset, nr_args, &done))
return -EOVERFLOW;
if (done > ctx->nr_user_files)
return -EINVAL;
done = 0;
- fds = (__s32 __user *) up.fds;
+ fds = u64_to_user_ptr(up->fds);
while (nr_args) {
struct fixed_file_table *table;
unsigned index;
@@ -4479,16 +5840,16 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
err = -EFAULT;
break;
}
- i = array_index_nospec(up.offset, ctx->nr_user_files);
- table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
+ i = array_index_nospec(up->offset, ctx->nr_user_files);
+ table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
index = i & IORING_FILE_TABLE_MASK;
if (table->files[index]) {
- io_sqe_file_unregister(ctx, i);
+ file = io_file_from_index(ctx, index);
table->files[index] = NULL;
+ if (io_queue_file_removal(data, file))
+ ref_switch = true;
}
if (fd != -1) {
- struct file *file;
-
file = fget(fd);
if (!file) {
err = -EBADF;
@@ -4514,11 +5875,32 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
}
nr_args--;
done++;
- up.offset++;
+ up->offset++;
+ }
+
+ if (ref_switch && !test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
+ percpu_ref_put(&data->refs);
+ percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
}
return done ? done : err;
}
+static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned nr_args)
+{
+ struct io_uring_files_update up;
+
+ if (!ctx->file_data)
+ return -ENXIO;
+ if (!nr_args)
+ return -EINVAL;
+ if (copy_from_user(&up, arg, sizeof(up)))
+ return -EFAULT;
+ if (up.resv)
+ return -EINVAL;
+
+ return __io_sqe_files_update(ctx, &up, nr_args);
+}
static void io_put_work(struct io_wq_work *work)
{
@@ -4534,11 +5916,56 @@ static void io_get_work(struct io_wq_work *work)
refcount_inc(&req->refs);
}
+static int io_init_wq_offload(struct io_ring_ctx *ctx,
+ struct io_uring_params *p)
+{
+ struct io_wq_data data;
+ struct fd f;
+ struct io_ring_ctx *ctx_attach;
+ unsigned int concurrency;
+ int ret = 0;
+
+ data.user = ctx->user;
+ data.get_work = io_get_work;
+ data.put_work = io_put_work;
+
+ if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
+ /* Do QD, or 4 * CPUS, whatever is smallest */
+ concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
+
+ ctx->io_wq = io_wq_create(concurrency, &data);
+ if (IS_ERR(ctx->io_wq)) {
+ ret = PTR_ERR(ctx->io_wq);
+ ctx->io_wq = NULL;
+ }
+ return ret;
+ }
+
+ f = fdget(p->wq_fd);
+ if (!f.file)
+ return -EBADF;
+
+ if (f.file->f_op != &io_uring_fops) {
+ ret = -EINVAL;
+ goto out_fput;
+ }
+
+ ctx_attach = f.file->private_data;
+ /* @io_wq is protected by holding the fd */
+ if (!io_wq_get(ctx_attach->io_wq, &data)) {
+ ret = -EINVAL;
+ goto out_fput;
+ }
+
+ ctx->io_wq = ctx_attach->io_wq;
+out_fput:
+ fdput(f);
+ return ret;
+}
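A userspace sketch of IORING_SETUP_ATTACH_WQ: the second ring reuses the io-wq of the first instead of spawning its own worker pool. Raw syscall form with error handling trimmed; assumes a uapi io_uring_params new enough to carry the wq_fd field:

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_shared_rings(int *fd1, int *fd2)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	*fd1 = syscall(__NR_io_uring_setup, 64, &p);
	if (*fd1 < 0)
		return -1;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_ATTACH_WQ;
	p.wq_fd = *fd1;			/* must itself be an io_uring fd */
	*fd2 = syscall(__NR_io_uring_setup, 64, &p);
	return *fd2 < 0 ? -1 : 0;
}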
+
static int io_sq_offload_start(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
- struct io_wq_data data;
- unsigned concurrency;
int ret;
init_waitqueue_head(&ctx->sqo_wait);
@@ -4582,20 +6009,9 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
goto err;
}
- data.mm = ctx->sqo_mm;
- data.user = ctx->user;
- data.creds = ctx->creds;
- data.get_work = io_get_work;
- data.put_work = io_put_work;
-
- /* Do QD, or 4 * CPUS, whatever is smallest */
- concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
- ctx->io_wq = io_wq_create(concurrency, &data);
- if (IS_ERR(ctx->io_wq)) {
- ret = PTR_ERR(ctx->io_wq);
- ctx->io_wq = NULL;
+ ret = io_init_wq_offload(ctx, p);
+ if (ret)
goto err;
- }
return 0;
err:
@@ -4700,7 +6116,7 @@ static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
for (j = 0; j < imu->nr_bvecs; j++)
- put_user_page(imu->bvec[j].bv_page);
+ unpin_user_page(imu->bvec[j].bv_page);
if (ctx->account_mem)
io_unaccount_mem(ctx->user, imu->nr_bvecs);
@@ -4821,7 +6237,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
ret = 0;
down_read(&current->mm->mmap_sem);
- pret = get_user_pages(ubuf, nr_pages,
+ pret = pin_user_pages(ubuf, nr_pages,
FOLL_WRITE | FOLL_LONGTERM,
pages, vmas);
if (pret == nr_pages) {
@@ -4845,7 +6261,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
* release any pages we did get
*/
if (pret > 0)
- put_user_pages(pages, pret);
+ unpin_user_pages(pages, pret);
if (ctx->account_mem)
io_unaccount_mem(ctx->user, nr_pages);
kvfree(imu->bvec);
@@ -4960,7 +6376,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
ctx->rings->sq_ring_entries)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
+ if (io_cqring_events(ctx, false))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
@@ -4973,12 +6389,33 @@ static int io_uring_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &ctx->cq_fasync);
}
+static int io_remove_personalities(int id, void *p, void *data)
+{
+ struct io_ring_ctx *ctx = data;
+ const struct cred *cred;
+
+ cred = idr_remove(&ctx->personality_idr, id);
+ if (cred)
+ put_cred(cred);
+ return 0;
+}
+
static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
mutex_lock(&ctx->uring_lock);
percpu_ref_kill(&ctx->refs);
mutex_unlock(&ctx->uring_lock);
+ /*
+ * Wait for sq thread to idle, if we have one. It won't spin on new
+ * work after we've killed the ctx ref above. This is important to do
+ * before we cancel existing commands, as the thread could otherwise
+	 * be queueing new work after that. If that's work we need to cancel,
+ * it could cause shutdown to hang.
+ */
+ while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
+ cpu_relax();
+
io_kill_timeouts(ctx);
io_poll_remove_all(ctx);
@@ -4989,6 +6426,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
/* if we failed setting up the ctx, we might not have any rings */
if (ctx->rings)
io_cqring_overflow_flush(ctx, true);
+ idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
wait_for_completion(&ctx->completions[0]);
io_ring_ctx_free(ctx);
}
@@ -5030,6 +6468,29 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
if (!cancel_req)
break;
+ if (cancel_req->flags & REQ_F_OVERFLOW) {
+ spin_lock_irq(&ctx->completion_lock);
+ list_del(&cancel_req->list);
+ cancel_req->flags &= ~REQ_F_OVERFLOW;
+ if (list_empty(&ctx->cq_overflow_list)) {
+ clear_bit(0, &ctx->sq_check_overflow);
+ clear_bit(0, &ctx->cq_check_overflow);
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ WRITE_ONCE(ctx->rings->cq_overflow,
+ atomic_inc_return(&ctx->cached_cq_overflow));
+
+ /*
+ * Put inflight ref and overflow ref. If that's
+ * all we had, then we're done with this request.
+ */
+ if (refcount_sub_and_test(2, &cancel_req->refs)) {
+ io_put_req(cancel_req);
+ continue;
+ }
+ }
+
io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
io_put_req(cancel_req);
schedule();
@@ -5042,10 +6503,13 @@ static int io_uring_flush(struct file *file, void *data)
struct io_ring_ctx *ctx = file->private_data;
io_uring_cancel_files(ctx, data);
- if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
- io_cqring_overflow_flush(ctx, true);
- io_wq_cancel_all(ctx->io_wq);
- }
+
+ /*
+ * If the task is going away, cancel work it may have pending
+ */
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+ io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
+
return 0;
}
@@ -5159,13 +6623,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
} else if (to_submit) {
struct mm_struct *cur_mm;
- if (current->mm != ctx->sqo_mm ||
- current_cred() != ctx->creds) {
- ret = -EPERM;
- goto out;
- }
-
- to_submit = min(to_submit, ctx->sq_entries);
mutex_lock(&ctx->uring_lock);
/* already have mm, so io_submit_sqes() won't try to grab it */
cur_mm = ctx->sqo_mm;
@@ -5195,6 +6652,80 @@ out_fput:
return submitted ? submitted : ret;
}
+static int io_uring_show_cred(int id, void *p, void *data)
+{
+ const struct cred *cred = p;
+ struct seq_file *m = data;
+ struct user_namespace *uns = seq_user_ns(m);
+ struct group_info *gi;
+ kernel_cap_t cap;
+ unsigned __capi;
+ int g;
+
+ seq_printf(m, "%5d\n", id);
+ seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
+ seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
+ seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
+ seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
+ seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
+ seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
+ seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
+ seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
+ seq_puts(m, "\n\tGroups:\t");
+ gi = cred->group_info;
+ for (g = 0; g < gi->ngroups; g++) {
+ seq_put_decimal_ull(m, g ? " " : "",
+ from_kgid_munged(uns, gi->gid[g]));
+ }
+ seq_puts(m, "\n\tCapEff:\t");
+ cap = cred->cap_effective;
+ CAP_FOR_EACH_U32(__capi)
+ seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
+{
+ int i;
+
+ mutex_lock(&ctx->uring_lock);
+ seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
+ for (i = 0; i < ctx->nr_user_files; i++) {
+ struct fixed_file_table *table;
+ struct file *f;
+
+ table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
+ f = table->files[i & IORING_FILE_TABLE_MASK];
+ if (f)
+ seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
+ else
+ seq_printf(m, "%5u: <none>\n", i);
+ }
+ seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
+ for (i = 0; i < ctx->nr_user_bufs; i++) {
+ struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
+
+ seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
+ (unsigned int) buf->len);
+ }
+ if (!idr_is_empty(&ctx->personality_idr)) {
+ seq_printf(m, "Personalities:\n");
+ idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
+ }
+ mutex_unlock(&ctx->uring_lock);
+}
+
+static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct io_ring_ctx *ctx = f->private_data;
+
+ if (percpu_ref_tryget(&ctx->refs)) {
+ __io_uring_show_fdinfo(ctx, m);
+ percpu_ref_put(&ctx->refs);
+ }
+}
+
static const struct file_operations io_uring_fops = {
.release = io_uring_release,
.flush = io_uring_flush,
@@ -5205,6 +6736,7 @@ static const struct file_operations io_uring_fops = {
#endif
.poll = io_uring_poll,
.fasync = io_uring_fasync,
+ .show_fdinfo = io_uring_show_fdinfo,
};
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
@@ -5281,7 +6813,6 @@ static int io_uring_get_fd(struct io_ring_ctx *ctx)
#if defined(CONFIG_UNIX)
ctx->ring_sock->file = file;
- ctx->ring_sock->sk->sk_user_data = ctx;
#endif
fd_install(ret, file);
return ret;
@@ -5300,8 +6831,13 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
bool account_mem;
int ret;
- if (!entries || entries > IORING_MAX_ENTRIES)
+ if (!entries)
return -EINVAL;
+ if (entries > IORING_MAX_ENTRIES) {
+ if (!(p->flags & IORING_SETUP_CLAMP))
+ return -EINVAL;
+ entries = IORING_MAX_ENTRIES;
+ }
/*
* Use twice as many entries for the CQ ring. It's possible for the
@@ -5318,8 +6854,13 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
* to a power-of-two, if it isn't already. We do NOT impose
* any cq vs sq ring sizing.
*/
- if (p->cq_entries < p->sq_entries || p->cq_entries > IORING_MAX_CQ_ENTRIES)
+ if (p->cq_entries < p->sq_entries)
return -EINVAL;
+ if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
+ if (!(p->flags & IORING_SETUP_CLAMP))
+ return -EINVAL;
+ p->cq_entries = IORING_MAX_CQ_ENTRIES;
+ }
p->cq_entries = roundup_pow_of_two(p->cq_entries);
} else {
p->cq_entries = 2 * p->sq_entries;
@@ -5384,7 +6925,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
goto err;
p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
- IORING_FEAT_SUBMIT_STABLE;
+ IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
+ IORING_FEAT_CUR_PERSONALITY;
trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
@@ -5411,7 +6953,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
}
if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
- IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE))
+ IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
+ IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
return -EINVAL;
ret = io_uring_create(entries, &p);
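A hedged example of the clamping behaviour added above: with IORING_SETUP_CLAMP, oversized ring sizes are reduced to the maximum instead of failing with -EINVAL, and the new feature bits come back in params.features:

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_clamped(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_CLAMP;
	/* absurd size: the kernel clamps it to IORING_MAX_ENTRIES */
	fd = syscall(__NR_io_uring_setup, 1 << 30, &p);
	if (fd >= 0 && (p.features & IORING_FEAT_CUR_PERSONALITY)) {
		/* personality registration (see below) is available */
	}
	return fd;
}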
@@ -5430,6 +6973,84 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries,
return io_uring_setup(entries, params);
}
+static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
+{
+ struct io_uring_probe *p;
+ size_t size;
+ int i, ret;
+
+ size = struct_size(p, ops, nr_args);
+ if (size == SIZE_MAX)
+ return -EOVERFLOW;
+ p = kzalloc(size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(p, arg, size))
+ goto out;
+ ret = -EINVAL;
+ if (memchr_inv(p, 0, size))
+ goto out;
+
+ p->last_op = IORING_OP_LAST - 1;
+ if (nr_args > IORING_OP_LAST)
+ nr_args = IORING_OP_LAST;
+
+ for (i = 0; i < nr_args; i++) {
+ p->ops[i].op = i;
+ if (!io_op_defs[i].not_supported)
+ p->ops[i].flags = IO_URING_OP_SUPPORTED;
+ }
+ p->ops_len = i;
+
+ ret = 0;
+ if (copy_to_user(arg, p, size))
+ ret = -EFAULT;
+out:
+ kfree(p);
+ return ret;
+}
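The probe opcode's counterpart in userspace allocates the variable-sized struct and inspects the returned flags. A sketch using the raw register syscall against an existing ring fd; 256 matches the kernel's nr_args cap:

#include <linux/io_uring.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static void probe_ops(int ring_fd)
{
	size_t len = sizeof(struct io_uring_probe) +
		     256 * sizeof(struct io_uring_probe_op);
	struct io_uring_probe *p = calloc(1, len);
	int i;

	if (!p)
		return;
	if (syscall(__NR_io_uring_register, ring_fd,
		    IORING_REGISTER_PROBE, p, 256) == 0) {
		for (i = 0; i < p->ops_len; i++)
			if (p->ops[i].flags & IO_URING_OP_SUPPORTED)
				printf("opcode %u supported\n", p->ops[i].op);
	}
	free(p);
}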
+
+static int io_register_personality(struct io_ring_ctx *ctx)
+{
+ const struct cred *creds = get_current_cred();
+ int id;
+
+ id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
+ USHRT_MAX, GFP_KERNEL);
+ if (id < 0)
+ put_cred(creds);
+ return id;
+}
+
+static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
+{
+ const struct cred *old_creds;
+
+ old_creds = idr_remove(&ctx->personality_idr, id);
+ if (old_creds) {
+ put_cred(old_creds);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
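From userspace, a registered personality is just an id captured from the task's current creds and later stamped into the sqe. A hedged raw-syscall sketch (liburing grew equivalent helpers); a personality of 0 keeps the submitter's own creds:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* returns the new personality id (>= 1), or -1 with errno set */
static int register_current_creds(int ring_fd)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_PERSONALITY, NULL, 0);
}

/* run this request with the creds captured at registration time */
static void use_personality(struct io_uring_sqe *sqe, int id)
{
	sqe->personality = id;
}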
+static bool io_register_op_must_quiesce(int op)
+{
+ switch (op) {
+ case IORING_UNREGISTER_FILES:
+ case IORING_REGISTER_FILES_UPDATE:
+ case IORING_REGISTER_PROBE:
+ case IORING_REGISTER_PERSONALITY:
+ case IORING_UNREGISTER_PERSONALITY:
+ return false;
+ default:
+ return true;
+ }
+}
+
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
void __user *arg, unsigned nr_args)
__releases(ctx->uring_lock)
@@ -5445,18 +7066,26 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
if (percpu_ref_is_dying(&ctx->refs))
return -ENXIO;
- percpu_ref_kill(&ctx->refs);
+ if (io_register_op_must_quiesce(opcode)) {
+ percpu_ref_kill(&ctx->refs);
- /*
- * Drop uring mutex before waiting for references to exit. If another
- * thread is currently inside io_uring_enter() it might need to grab
- * the uring_lock to make progress. If we hold it here across the drain
- * wait, then we can deadlock. It's safe to drop the mutex here, since
- * no new references will come in after we've killed the percpu ref.
- */
- mutex_unlock(&ctx->uring_lock);
- wait_for_completion(&ctx->completions[0]);
- mutex_lock(&ctx->uring_lock);
+ /*
+ * Drop uring mutex before waiting for references to exit. If
+ * another thread is currently inside io_uring_enter() it might
+ * need to grab the uring_lock to make progress. If we hold it
+ * here across the drain wait, then we can deadlock. It's safe
+ * to drop the mutex here, since no new references will come in
+ * after we've killed the percpu ref.
+ */
+ mutex_unlock(&ctx->uring_lock);
+ ret = wait_for_completion_interruptible(&ctx->completions[0]);
+ mutex_lock(&ctx->uring_lock);
+ if (ret) {
+ percpu_ref_resurrect(&ctx->refs);
+ ret = -EINTR;
+ goto out;
+ }
+ }
switch (opcode) {
case IORING_REGISTER_BUFFERS:
@@ -5481,10 +7110,17 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
ret = io_sqe_files_update(ctx, arg, nr_args);
break;
case IORING_REGISTER_EVENTFD:
+ case IORING_REGISTER_EVENTFD_ASYNC:
ret = -EINVAL;
if (nr_args != 1)
break;
ret = io_eventfd_register(ctx, arg);
+ if (ret)
+ break;
+ if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
+ ctx->eventfd_async = 1;
+ else
+ ctx->eventfd_async = 0;
break;
case IORING_UNREGISTER_EVENTFD:
ret = -EINVAL;
@@ -5492,14 +7128,35 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_eventfd_unregister(ctx);
break;
+ case IORING_REGISTER_PROBE:
+ ret = -EINVAL;
+ if (!arg || nr_args > 256)
+ break;
+ ret = io_probe(ctx, arg, nr_args);
+ break;
+ case IORING_REGISTER_PERSONALITY:
+ ret = -EINVAL;
+ if (arg || nr_args)
+ break;
+ ret = io_register_personality(ctx);
+ break;
+ case IORING_UNREGISTER_PERSONALITY:
+ ret = -EINVAL;
+ if (arg)
+ break;
+ ret = io_unregister_personality(ctx, nr_args);
+ break;
default:
ret = -EINVAL;
break;
}
- /* bring the ctx back to life */
- reinit_completion(&ctx->completions[0]);
- percpu_ref_reinit(&ctx->refs);
+ if (io_register_op_must_quiesce(opcode)) {
+ /* bring the ctx back to life */
+ percpu_ref_reinit(&ctx->refs);
+out:
+ reinit_completion(&ctx->completions[0]);
+ }
return ret;
}
@@ -5532,6 +7189,40 @@ out_fput:
static int __init io_uring_init(void)
{
+#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
+ BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
+ BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
+} while (0)
+
+#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
+ __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
+ BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
+ BUILD_BUG_SQE_ELEM(0, __u8, opcode);
+ BUILD_BUG_SQE_ELEM(1, __u8, flags);
+ BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
+ BUILD_BUG_SQE_ELEM(4, __s32, fd);
+ BUILD_BUG_SQE_ELEM(8, __u64, off);
+ BUILD_BUG_SQE_ELEM(8, __u64, addr2);
+ BUILD_BUG_SQE_ELEM(16, __u64, addr);
+ BUILD_BUG_SQE_ELEM(24, __u32, len);
+ BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
+ BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
+ BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
+ BUILD_BUG_SQE_ELEM(28, __u16, poll_events);
+ BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
+ BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
+ BUILD_BUG_SQE_ELEM(32, __u64, user_data);
+ BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
+ BUILD_BUG_SQE_ELEM(42, __u16, personality);
+
+ BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
return 0;
};
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 2f5e4e5b97e1..282d45be6f45 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -54,19 +54,32 @@ EXPORT_SYMBOL(vfs_ioctl);
static int ioctl_fibmap(struct file *filp, int __user *p)
{
- struct address_space *mapping = filp->f_mapping;
- int res, block;
+ struct inode *inode = file_inode(filp);
+ int error, ur_block;
+ sector_t block;
- /* do we support this mess? */
- if (!mapping->a_ops->bmap)
- return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- res = get_user(block, p);
- if (res)
- return res;
- res = mapping->a_ops->bmap(mapping, block);
- return put_user(res, p);
+
+ error = get_user(ur_block, p);
+ if (error)
+ return error;
+
+ if (ur_block < 0)
+ return -EINVAL;
+
+ block = ur_block;
+ error = bmap(inode, &block);
+
+ if (error)
+ ur_block = 0;
+ else
+ ur_block = block;
+
+ if (put_user(ur_block, p))
+ error = -EFAULT;
+
+ return error;
}
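The FIBMAP ABI is unchanged by the switch to bmap(): an int logical block in, an int physical block out, CAP_SYS_RAWIO required. A minimal caller, assuming the file's filesystem actually implements block mapping:

#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>

static void print_block0(int fd)
{
	int blk = 0;			/* logical block to resolve */

	if (ioctl(fd, FIBMAP, &blk) == 0)
		printf("block 0 maps to physical block %d\n", blk);
}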
/**
@@ -467,7 +480,7 @@ EXPORT_SYMBOL(generic_block_fiemap);
* Only the l_start, l_len and l_whence fields of the 'struct space_resv'
* are used here, rest are ignored.
*/
-int ioctl_preallocate(struct file *filp, int mode, void __user *argp)
+static int ioctl_preallocate(struct file *filp, int mode, void __user *argp)
{
struct inode *inode = file_inode(filp);
struct space_resv sr;
@@ -495,8 +508,8 @@ int ioctl_preallocate(struct file *filp, int mode, void __user *argp)
/* on ia32 l_start is on a 32-bit boundary */
#if defined CONFIG_COMPAT && defined(CONFIG_X86_64)
/* just account for different alignment */
-int compat_ioctl_preallocate(struct file *file, int mode,
- struct space_resv_32 __user *argp)
+static int compat_ioctl_preallocate(struct file *file, int mode,
+ struct space_resv_32 __user *argp)
{
struct inode *inode = file_inode(file);
struct space_resv_32 sr;
@@ -521,17 +534,11 @@ int compat_ioctl_preallocate(struct file *file, int mode,
}
#endif
-static int file_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+static int file_ioctl(struct file *filp, unsigned int cmd, int __user *p)
{
- struct inode *inode = file_inode(filp);
- int __user *p = (int __user *)arg;
-
switch (cmd) {
case FIBMAP:
return ioctl_fibmap(filp, p);
- case FIONREAD:
- return put_user(i_size_read(inode) - filp->f_pos, p);
case FS_IOC_RESVSP:
case FS_IOC_RESVSP64:
return ioctl_preallocate(filp, 0, p);
@@ -542,7 +549,7 @@ static int file_ioctl(struct file *filp, unsigned int cmd,
return ioctl_preallocate(filp, FALLOC_FL_ZERO_RANGE, p);
}
- return vfs_ioctl(filp, cmd, arg);
+ return -ENOIOCTLCMD;
}
static int ioctl_fionbio(struct file *filp, int __user *argp)
@@ -661,53 +668,48 @@ out:
}
/*
- * When you add any new common ioctls to the switches above and below
- * please update compat_sys_ioctl() too.
- *
* do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
* It's just a simple helper for sys_ioctl and compat_sys_ioctl.
+ *
+ * When you add any new common ioctls to the switches above and below,
+ * please ensure they have compatible arguments in compat mode.
*/
-int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
- unsigned long arg)
+static int do_vfs_ioctl(struct file *filp, unsigned int fd,
+ unsigned int cmd, unsigned long arg)
{
- int error = 0;
void __user *argp = (void __user *)arg;
struct inode *inode = file_inode(filp);
switch (cmd) {
case FIOCLEX:
set_close_on_exec(fd, 1);
- break;
+ return 0;
case FIONCLEX:
set_close_on_exec(fd, 0);
- break;
+ return 0;
case FIONBIO:
- error = ioctl_fionbio(filp, argp);
- break;
+ return ioctl_fionbio(filp, argp);
case FIOASYNC:
- error = ioctl_fioasync(fd, filp, argp);
- break;
+ return ioctl_fioasync(fd, filp, argp);
case FIOQSIZE:
if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
S_ISLNK(inode->i_mode)) {
loff_t res = inode_get_bytes(inode);
- error = copy_to_user(argp, &res, sizeof(res)) ?
- -EFAULT : 0;
- } else
- error = -ENOTTY;
- break;
+ return copy_to_user(argp, &res, sizeof(res)) ?
+ -EFAULT : 0;
+ }
+
+ return -ENOTTY;
case FIFREEZE:
- error = ioctl_fsfreeze(filp);
- break;
+ return ioctl_fsfreeze(filp);
case FITHAW:
- error = ioctl_fsthaw(filp);
- break;
+ return ioctl_fsthaw(filp);
case FS_IOC_FIEMAP:
return ioctl_fiemap(filp, argp);
@@ -716,6 +718,7 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
/* anon_bdev filesystems may not have a block size */
if (!inode->i_sb->s_blocksize)
return -EINVAL;
+
return put_user(inode->i_sb->s_blocksize, (int __user *)argp);
case FICLONE:
@@ -727,26 +730,39 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
case FIDEDUPERANGE:
return ioctl_file_dedupe_range(filp, argp);
+ case FIONREAD:
+ if (!S_ISREG(inode->i_mode))
+ return vfs_ioctl(filp, cmd, arg);
+
+ return put_user(i_size_read(inode) - filp->f_pos,
+ (int __user *)argp);
+
default:
if (S_ISREG(inode->i_mode))
- error = file_ioctl(filp, cmd, arg);
- else
- error = vfs_ioctl(filp, cmd, arg);
+ return file_ioctl(filp, cmd, argp);
break;
}
- return error;
+
+ return -ENOIOCTLCMD;
}
int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- int error;
struct fd f = fdget(fd);
+ int error;
if (!f.file)
return -EBADF;
+
error = security_file_ioctl(f.file, cmd, arg);
- if (!error)
- error = do_vfs_ioctl(f.file, fd, cmd, arg);
+ if (error)
+ goto out;
+
+ error = do_vfs_ioctl(f.file, fd, cmd, arg);
+ if (error == -ENOIOCTLCMD)
+ error = vfs_ioctl(f.file, cmd, arg);
+
+out:
fdput(f);
return error;
}
@@ -788,4 +804,65 @@ long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
EXPORT_SYMBOL(compat_ptr_ioctl);
+
+COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
+ compat_ulong_t, arg)
+{
+ struct fd f = fdget(fd);
+ int error;
+
+ if (!f.file)
+ return -EBADF;
+
+ /* RED-PEN how should LSM module know it's handling 32bit? */
+ error = security_file_ioctl(f.file, cmd, arg);
+ if (error)
+ goto out;
+
+ switch (cmd) {
+ /* FICLONE takes an int argument, so don't use compat_ptr() */
+ case FICLONE:
+ error = ioctl_file_clone(f.file, arg, 0, 0, 0);
+ break;
+
+#if defined(CONFIG_X86_64)
+ /* these get messy on amd64 due to alignment differences */
+ case FS_IOC_RESVSP_32:
+ case FS_IOC_RESVSP64_32:
+ error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
+ break;
+ case FS_IOC_UNRESVSP_32:
+ case FS_IOC_UNRESVSP64_32:
+ error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
+ compat_ptr(arg));
+ break;
+ case FS_IOC_ZERO_RANGE_32:
+ error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
+ compat_ptr(arg));
+ break;
+#endif
+
+ /*
+ * everything else in do_vfs_ioctl() takes either a compatible
+ * pointer argument or no argument -- call it with a modified
+ * argument.
+ */
+ default:
+ error = do_vfs_ioctl(f.file, fd, cmd,
+ (unsigned long)compat_ptr(arg));
+ if (error != -ENOIOCTLCMD)
+ break;
+
+ if (f.file->f_op->compat_ioctl)
+ error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
+ if (error == -ENOIOCTLCMD)
+ error = -ENOTTY;
+ break;
+ }
+
+ out:
+ fdput(f);
+
+ return error;
+}
#endif
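
For reference, both the ioctl_fibmap() hunk above and the jbd2 changes below switch to the reworked bmap() helper, which reports errors instead of overloading a zero return. A minimal caller sketch of that convention (the int bmap(struct inode *, sector_t *) prototype comes from the same series; the function name here is illustrative):

static int example_map_block(struct inode *inode, sector_t lblock,
			     sector_t *pblock)
{
	sector_t block = lblock;
	int error;

	/* Returns -EINVAL when the mapping has no ->bmap method. */
	error = bmap(inode, &block);
	if (error)
		return error;

	/* A zero result now unambiguously means "unmapped"/hole. */
	*pblock = block;
	return 0;
}
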
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 828444e14d09..7c84c4c027c4 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1077,24 +1077,16 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
unsigned long length;
- loff_t offset, size;
+ loff_t offset;
ssize_t ret;
lock_page(page);
- size = i_size_read(inode);
- offset = page_offset(page);
- if (page->mapping != inode->i_mapping || offset > size) {
- /* We overload EFAULT to mean page got truncated */
- ret = -EFAULT;
+ ret = page_mkwrite_check_truncate(page, inode);
+ if (ret < 0)
goto out_unlock;
- }
-
- /* page is wholly or partially inside EOF */
- if (offset > size - PAGE_SIZE)
- length = offset_in_page(size);
- else
- length = PAGE_SIZE;
+ length = ret;
+ offset = page_offset(page);
while (length > 0) {
ret = iomap_apply(inode, offset, length,
IOMAP_WRITE | IOMAP_FAULT, ops, page,
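
The rewritten iomap_page_mkwrite() above delegates its truncate check to the new page_mkwrite_check_truncate() helper. A rough sketch of that helper's semantics (assuming the pagemap.h helper added in the same series; this is a reading aid, not a verbatim copy):

/* With the page locked: return how many bytes of it lie inside
 * i_size, or -EFAULT if the page was truncated away meanwhile. */
static int sketch_page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	unsigned int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;
	if (page->index < index)
		return PAGE_SIZE;	/* page wholly inside EOF */
	if (page->index > index || !offset)
		return -EFAULT;		/* page wholly beyond EOF */
	return offset;			/* page straddles EOF */
}
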
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 8fff6677a5da..96bf33986d03 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -164,7 +164,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
"journal space in %s\n", __func__,
journal->j_devname);
WARN_ON(1);
- jbd2_journal_abort(journal, 0);
+ jbd2_journal_abort(journal, -EIO);
}
write_lock(&journal->j_state_lock);
} else {
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 7f0b362b3842..27373f5792a4 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -782,7 +782,7 @@ start_journal_io:
err = journal_submit_commit_record(journal, commit_transaction,
&cbh, crc32_sum);
if (err)
- __jbd2_journal_abort_hard(journal);
+ jbd2_journal_abort(journal, err);
}
blk_finish_plug(&plug);
@@ -875,7 +875,7 @@ start_journal_io:
err = journal_submit_commit_record(journal, commit_transaction,
&cbh, crc32_sum);
if (err)
- __jbd2_journal_abort_hard(journal);
+ jbd2_journal_abort(journal, err);
}
if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
@@ -976,29 +976,33 @@ restart_loop:
* it. */
/*
- * A buffer which has been freed while still being journaled by
- * a previous transaction.
- */
- if (buffer_freed(bh)) {
+	 * If a buffer has been freed while still being journaled by a
+	 * previous transaction, refile the buffer to BJ_Forget of the
+	 * running transaction. If the just committed transaction
+	 * contains the "add to orphan" operation, we can completely
+	 * invalidate the buffer now. We are rather thorough in that,
+	 * since the buffer may still be accessible when blocksize <
+	 * pagesize and it is attached to the last partial page.
+	 */
+ if (buffer_freed(bh) && !jh->b_next_transaction) {
+ struct address_space *mapping;
+
+ clear_buffer_freed(bh);
+ clear_buffer_jbddirty(bh);
+
/*
- * If the running transaction is the one containing
- * "add to orphan" operation (b_next_transaction !=
- * NULL), we have to wait for that transaction to
- * commit before we can really get rid of the buffer.
- * So just clear b_modified to not confuse transaction
- * credit accounting and refile the buffer to
- * BJ_Forget of the running transaction. If the just
- * committed transaction contains "add to orphan"
- * operation, we can completely invalidate the buffer
- * now. We are rather through in that since the
- * buffer may be still accessible when blocksize <
- * pagesize and it is attached to the last partial
- * page.
+			 * Block device buffers need to stay mapped all the
+			 * time, so it is enough to clear buffer_jbddirty and
+			 * buffer_freed bits. For the file mapping buffers (i.e.
+			 * journalled data) we need to unmap the buffer and clear
+			 * more bits. We also need to be careful about the check
+			 * because the data page mapping can get cleared under
+			 * our hands; in that case we need not clear more bits
+			 * because the page and buffers will be freed and can
+			 * never be reused once we are done with them.
*/
- jh->b_modified = 0;
- if (!jh->b_next_transaction) {
- clear_buffer_freed(bh);
- clear_buffer_jbddirty(bh);
+ mapping = READ_ONCE(bh->b_page->mapping);
+ if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
clear_buffer_mapped(bh);
clear_buffer_new(bh);
clear_buffer_req(bh);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 5e408ee24a1a..a49d0e670ddf 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -96,7 +96,6 @@ EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
EXPORT_SYMBOL(jbd2_inode_cache);
-static void __journal_abort_soft (journal_t *journal, int errno);
static int jbd2_journal_create_slab(size_t slab_size);
#ifdef CONFIG_JBD2_DEBUG
@@ -795,18 +794,22 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
{
int err = 0;
unsigned long long ret;
+ sector_t block = 0;
if (journal->j_inode) {
- ret = bmap(journal->j_inode, blocknr);
- if (ret)
- *retp = ret;
- else {
+ block = blocknr;
+ ret = bmap(journal->j_inode, &block);
+
+ if (ret || !block) {
printk(KERN_ALERT "%s: journal block not found "
"at offset %lu on %s\n",
__func__, blocknr, journal->j_devname);
err = -EIO;
- __journal_abort_soft(journal, err);
+ jbd2_journal_abort(journal, err);
+ } else {
+ *retp = block;
}
+
} else {
*retp = blocknr; /* +journal->j_blk_offset */
}
@@ -982,6 +985,7 @@ static void *jbd2_seq_info_start(struct seq_file *seq, loff_t *pos)
static void *jbd2_seq_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
+ (*pos)++;
return NULL;
}
@@ -1074,12 +1078,11 @@ static int jbd2_seq_info_release(struct inode *inode, struct file *file)
return seq_release(inode, file);
}
-static const struct file_operations jbd2_seq_info_fops = {
- .owner = THIS_MODULE,
- .open = jbd2_seq_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = jbd2_seq_info_release,
+static const struct proc_ops jbd2_info_proc_ops = {
+ .proc_open = jbd2_seq_info_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = jbd2_seq_info_release,
};
static struct proc_dir_entry *proc_jbd2_stats;
@@ -1089,7 +1092,7 @@ static void jbd2_stats_proc_init(journal_t *journal)
journal->j_proc_entry = proc_mkdir(journal->j_devname, proc_jbd2_stats);
if (journal->j_proc_entry) {
proc_create_data("info", S_IRUGO, journal->j_proc_entry,
- &jbd2_seq_info_fops, journal);
+ &jbd2_info_proc_ops, journal);
}
}
@@ -1244,11 +1247,14 @@ journal_t *jbd2_journal_init_dev(struct block_device *bdev,
journal_t *jbd2_journal_init_inode(struct inode *inode)
{
journal_t *journal;
+ sector_t blocknr;
char *p;
- unsigned long long blocknr;
+ int err = 0;
+
+ blocknr = 0;
+ err = bmap(inode, &blocknr);
- blocknr = bmap(inode, 0);
- if (!blocknr) {
+ if (err || !blocknr) {
pr_err("%s: Cannot locate journal superblock\n",
__func__);
return NULL;
@@ -1710,6 +1716,11 @@ int jbd2_journal_load(journal_t *journal)
journal->j_devname);
return -EFSCORRUPTED;
}
+ /*
+	 * Clear the JBD2_ABORT flag initialized in journal_init_common
+	 * here, so that the log tail information is updated with the
+	 * newest sequence.
+ */
+ journal->j_flags &= ~JBD2_ABORT;
/* OK, we've finished with the dynamic journal bits:
* reinitialise the dynamic contents of the superblock in memory
@@ -1717,7 +1728,6 @@ int jbd2_journal_load(journal_t *journal)
if (journal_reset(journal))
goto recovery_error;
- journal->j_flags &= ~JBD2_ABORT;
journal->j_flags |= JBD2_LOADED;
return 0;
@@ -2098,67 +2108,6 @@ int jbd2_journal_wipe(journal_t *journal, int write)
return err;
}
-/*
- * Journal abort has very specific semantics, which we describe
- * for journal abort.
- *
- * Two internal functions, which provide abort to the jbd layer
- * itself are here.
- */
-
-/*
- * Quick version for internal journal use (doesn't lock the journal).
- * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
- * and don't attempt to make any other journal updates.
- */
-void __jbd2_journal_abort_hard(journal_t *journal)
-{
- transaction_t *transaction;
-
- if (journal->j_flags & JBD2_ABORT)
- return;
-
- printk(KERN_ERR "Aborting journal on device %s.\n",
- journal->j_devname);
-
- write_lock(&journal->j_state_lock);
- journal->j_flags |= JBD2_ABORT;
- transaction = journal->j_running_transaction;
- if (transaction)
- __jbd2_log_start_commit(journal, transaction->t_tid);
- write_unlock(&journal->j_state_lock);
-}
-
-/* Soft abort: record the abort error status in the journal superblock,
- * but don't do any other IO. */
-static void __journal_abort_soft (journal_t *journal, int errno)
-{
- int old_errno;
-
- write_lock(&journal->j_state_lock);
- old_errno = journal->j_errno;
- if (!journal->j_errno || errno == -ESHUTDOWN)
- journal->j_errno = errno;
-
- if (journal->j_flags & JBD2_ABORT) {
- write_unlock(&journal->j_state_lock);
- if (!old_errno && old_errno != -ESHUTDOWN &&
- errno == -ESHUTDOWN)
- jbd2_journal_update_sb_errno(journal);
- return;
- }
- write_unlock(&journal->j_state_lock);
-
- __jbd2_journal_abort_hard(journal);
-
- if (errno) {
- jbd2_journal_update_sb_errno(journal);
- write_lock(&journal->j_state_lock);
- journal->j_flags |= JBD2_REC_ERR;
- write_unlock(&journal->j_state_lock);
- }
-}
-
/**
* void jbd2_journal_abort () - Shutdown the journal immediately.
* @journal: the journal to shutdown.
@@ -2198,16 +2147,51 @@ static void __journal_abort_soft (journal_t *journal, int errno)
* failure to disk. ext3_error, for example, now uses this
* functionality.
*
- * Errors which originate from within the journaling layer will NOT
- * supply an errno; a null errno implies that absolutely no further
- * writes are done to the journal (unless there are any already in
- * progress).
- *
*/
void jbd2_journal_abort(journal_t *journal, int errno)
{
- __journal_abort_soft(journal, errno);
+ transaction_t *transaction;
+
+ /*
+	 * ESHUTDOWN always takes precedence because a file system check
+	 * caused by any other journal abort error is not required once
+	 * a shutdown has been triggered.
+ */
+ write_lock(&journal->j_state_lock);
+ if (journal->j_flags & JBD2_ABORT) {
+ int old_errno = journal->j_errno;
+
+ write_unlock(&journal->j_state_lock);
+ if (old_errno != -ESHUTDOWN && errno == -ESHUTDOWN) {
+ journal->j_errno = errno;
+ jbd2_journal_update_sb_errno(journal);
+ }
+ return;
+ }
+
+ /*
+	 * Mark the abort as having occurred and start the current running
+	 * transaction to release all journaled buffers.
+ */
+ pr_err("Aborting journal on device %s.\n", journal->j_devname);
+
+ journal->j_flags |= JBD2_ABORT;
+ journal->j_errno = errno;
+ transaction = journal->j_running_transaction;
+ if (transaction)
+ __jbd2_log_start_commit(journal, transaction->t_tid);
+ write_unlock(&journal->j_state_lock);
+
+ /*
+	 * Record errno in the journal superblock, so that fsck and the
+	 * jbd2 layer can realise that a filesystem check is needed.
+ */
+ jbd2_journal_update_sb_errno(journal);
+
+ write_lock(&journal->j_state_lock);
+ journal->j_flags |= JBD2_REC_ERR;
+ write_unlock(&journal->j_state_lock);
}
/**
@@ -2556,7 +2540,6 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
{
struct journal_head *jh = bh2jh(bh);
- J_ASSERT_JH(jh, jh->b_jcount >= 0);
J_ASSERT_JH(jh, jh->b_transaction == NULL);
J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
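
Taken together, the journal.c hunks above fold the old soft/hard abort pair into one entry point. A caller-side sketch of the resulting contract (illustrative, mirroring the checkpoint.c and commit.c hunks earlier in this patch):

static void example_handle_commit_error(journal_t *journal, int err)
{
	if (!err)
		return;
	/*
	 * First call: sets JBD2_ABORT, records err in j_errno and the
	 * on-disk superblock, kicks the running transaction, and sets
	 * JBD2_REC_ERR. Later calls are no-ops, except that -ESHUTDOWN
	 * upgrades a previously recorded errno.
	 */
	jbd2_journal_abort(journal, err);
}
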
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 27b9f9dee434..2dd848a743ed 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(jbd2__journal_start);
* modified buffers in the log. We block until the log can guarantee
* that much space. Additionally, if rsv_blocks > 0, we also create another
* handle with rsv_blocks reserved blocks in the journal. This handle is
- * is stored in h_rsv_handle. It is not attached to any particular transaction
+ * stored in h_rsv_handle. It is not attached to any particular transaction
* and thus doesn't block transaction commit. If the caller uses this reserved
* handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
* on the parent handle will dispose the reserved one. Reserved handle has to
@@ -1595,7 +1595,7 @@ out:
* Allow this call even if the handle has aborted --- it may be part of
* the caller's cleanup after an abort.
*/
-int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
+int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal;
@@ -2329,14 +2329,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
return -EBUSY;
}
/*
- * OK, buffer won't be reachable after truncate. We just set
- * j_next_transaction to the running transaction (if there is
- * one) and mark buffer as freed so that commit code knows it
- * should clear dirty bits when it is done with the buffer.
+ * OK, buffer won't be reachable after truncate. We just clear
+ * b_modified to not confuse transaction credit accounting, and
+ * set j_next_transaction to the running transaction (if there
+ * is one) and mark buffer as freed so that commit code knows
+ * it should clear dirty bits when it is done with the buffer.
*/
set_buffer_freed(bh);
if (journal->j_running_transaction && buffer_jbddirty(bh))
jh->b_next_transaction = journal->j_running_transaction;
+ jh->b_modified = 0;
spin_unlock(&journal->j_list_lock);
spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 0e6406c4f362..05d7878dfad1 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -167,27 +167,21 @@ enum {
Opt_rp_size,
};
-static const struct fs_parameter_spec jffs2_param_specs[] = {
- fsparam_enum ("compr", Opt_override_compr),
- fsparam_u32 ("rp_size", Opt_rp_size),
- {}
-};
-
-static const struct fs_parameter_enum jffs2_param_enums[] = {
- { Opt_override_compr, "none", JFFS2_COMPR_MODE_NONE },
+static const struct constant_table jffs2_param_compr[] = {
+ {"none", JFFS2_COMPR_MODE_NONE },
#ifdef CONFIG_JFFS2_LZO
- { Opt_override_compr, "lzo", JFFS2_COMPR_MODE_FORCELZO },
+ {"lzo", JFFS2_COMPR_MODE_FORCELZO },
#endif
#ifdef CONFIG_JFFS2_ZLIB
- { Opt_override_compr, "zlib", JFFS2_COMPR_MODE_FORCEZLIB },
+ {"zlib", JFFS2_COMPR_MODE_FORCEZLIB },
#endif
{}
};
-const struct fs_parameter_description jffs2_fs_parameters = {
- .name = "jffs2",
- .specs = jffs2_param_specs,
- .enums = jffs2_param_enums,
+static const struct fs_parameter_spec jffs2_fs_parameters[] = {
+ fsparam_enum ("compr", Opt_override_compr, jffs2_param_compr),
+ fsparam_u32 ("rp_size", Opt_rp_size),
+ {}
};
static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
@@ -196,7 +190,7 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
struct jffs2_sb_info *c = fc->s_fs_info;
int opt;
- opt = fs_parse(fc, &jffs2_fs_parameters, param, &result);
+ opt = fs_parse(fc, jffs2_fs_parameters, param, &result);
if (opt < 0)
return opt;
@@ -339,7 +333,7 @@ static struct file_system_type jffs2_fs_type = {
.owner = THIS_MODULE,
.name = "jffs2",
.init_fs_context = jffs2_init_fs_context,
- .parameters = &jffs2_fs_parameters,
+ .parameters = jffs2_fs_parameters,
.kill_sb = jffs2_kill_sb,
};
MODULE_ALIAS_FS("jffs2");
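
The jffs2 conversion above tracks the reworked mount API: enum values move into a constant_table handed straight to fsparam_enum(), and file_system_type.parameters points at the spec array itself rather than a wrapper description. A minimal sketch of the pattern (names are illustrative):

enum { Opt_mode, Opt_size };

static const struct constant_table example_modes[] = {
	{ "fast",	0 },
	{ "small",	1 },
	{}
};

static const struct fs_parameter_spec example_fs_parameters[] = {
	fsparam_enum("mode", Opt_mode, example_modes),
	fsparam_u32 ("size", Opt_size),
	{}
};

/* In the fs_context parse_param hook:
 *	opt = fs_parse(fc, example_fs_parameters, param, &result);
 * where result.uint_32 holds the matched constant for enum options.
 */
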
diff --git a/fs/jfs/jfs_debug.c b/fs/jfs/jfs_debug.c
index 888cdd685a1e..44b62b3c322e 100644
--- a/fs/jfs/jfs_debug.c
+++ b/fs/jfs/jfs_debug.c
@@ -43,12 +43,12 @@ static ssize_t jfs_loglevel_proc_write(struct file *file,
return count;
}
-static const struct file_operations jfs_loglevel_proc_fops = {
- .open = jfs_loglevel_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = jfs_loglevel_proc_write,
+static const struct proc_ops jfs_loglevel_proc_ops = {
+ .proc_open = jfs_loglevel_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = jfs_loglevel_proc_write,
};
#endif
@@ -68,7 +68,7 @@ void jfs_proc_init(void)
#endif
#ifdef CONFIG_JFS_DEBUG
proc_create_single("TxAnchor", 0, base, jfs_txanchor_proc_show);
- proc_create("loglevel", 0, base, &jfs_loglevel_proc_fops);
+ proc_create("loglevel", 0, base, &jfs_loglevel_proc_ops);
#endif
}
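
The jbd2, jfs and lockd hunks in this series all perform the same file_operations-to-proc_ops conversion for procfs entries. A minimal sketch of the target shape (illustrative names):

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "example\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

/* Note: no .owner field -- struct proc_ops drops it by design. */
static const struct proc_ops example_proc_ops = {
	.proc_open	= example_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
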
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index caade185e568..7dfcab2a2da6 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -4027,7 +4027,6 @@ static int dbGetL2AGSize(s64 nblocks)
*/
#define MAXL0PAGES (1 + LPERCTL)
#define MAXL1PAGES (1 + LPERCTL * MAXL0PAGES)
-#define MAXL2PAGES (1 + LPERCTL * MAXL1PAGES)
/*
* convert number of map pages to the zero origin top dmapctl level
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 9d96e6871e1a..9aec80b9d7c6 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -1266,7 +1266,7 @@ void kernfs_activate(struct kernfs_node *kn)
pos = NULL;
while ((pos = kernfs_next_descendant_post(pos, kn))) {
- if (!pos || (pos->flags & KERNFS_ACTIVATED))
+ if (pos->flags & KERNFS_ACTIVATED)
continue;
WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index eac277c63d42..d0f7a5abd9a9 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -160,9 +160,9 @@ static inline void set_inode_attr(struct inode *inode,
{
inode->i_uid = attrs->ia_uid;
inode->i_gid = attrs->ia_gid;
- inode->i_atime = timestamp_truncate(attrs->ia_atime, inode);
- inode->i_mtime = timestamp_truncate(attrs->ia_mtime, inode);
- inode->i_ctime = timestamp_truncate(attrs->ia_ctime, inode);
+ inode->i_atime = attrs->ia_atime;
+ inode->i_mtime = attrs->ia_mtime;
+ inode->i_ctime = attrs->ia_ctime;
}
static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
diff --git a/fs/libfs.c b/fs/libfs.c
index 1463b038ffc4..c686bd9caac6 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -19,6 +19,7 @@
#include <linux/buffer_head.h> /* sync_mapping_buffers */
#include <linux/fs_context.h>
#include <linux/pseudo_fs.h>
+#include <linux/fsnotify.h>
#include <linux/uaccess.h>
@@ -239,6 +240,75 @@ const struct inode_operations simple_dir_inode_operations = {
};
EXPORT_SYMBOL(simple_dir_inode_operations);
+static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
+{
+ struct dentry *child = NULL;
+ struct list_head *p = prev ? &prev->d_child : &parent->d_subdirs;
+
+ spin_lock(&parent->d_lock);
+ while ((p = p->next) != &parent->d_subdirs) {
+ struct dentry *d = container_of(p, struct dentry, d_child);
+ if (simple_positive(d)) {
+ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+ if (simple_positive(d))
+ child = dget_dlock(d);
+ spin_unlock(&d->d_lock);
+ if (likely(child))
+ break;
+ }
+ }
+ spin_unlock(&parent->d_lock);
+ dput(prev);
+ return child;
+}
+
+void simple_recursive_removal(struct dentry *dentry,
+ void (*callback)(struct dentry *))
+{
+ struct dentry *this = dget(dentry);
+ while (true) {
+ struct dentry *victim = NULL, *child;
+ struct inode *inode = this->d_inode;
+
+ inode_lock(inode);
+ if (d_is_dir(this))
+ inode->i_flags |= S_DEAD;
+ while ((child = find_next_child(this, victim)) == NULL) {
+ // kill and ascend
+ // update metadata while it's still locked
+ inode->i_ctime = current_time(inode);
+ clear_nlink(inode);
+ inode_unlock(inode);
+ victim = this;
+ this = this->d_parent;
+ inode = this->d_inode;
+ inode_lock(inode);
+ if (simple_positive(victim)) {
+ d_invalidate(victim); // avoid lost mounts
+ if (d_is_dir(victim))
+ fsnotify_rmdir(inode, victim);
+ else
+ fsnotify_unlink(inode, victim);
+ if (callback)
+ callback(victim);
+ dput(victim); // unpin it
+ }
+ if (victim == dentry) {
+ inode->i_ctime = inode->i_mtime =
+ current_time(inode);
+ if (d_is_dir(dentry))
+ drop_nlink(inode);
+ inode_unlock(inode);
+ dput(dentry);
+ return;
+ }
+ }
+ inode_unlock(inode);
+ this = child;
+ }
+}
+EXPORT_SYMBOL(simple_recursive_removal);
+
static const struct super_operations simple_super_operations = {
.statfs = simple_statfs,
};
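
A usage sketch for the helper added above (hedged -- the caller and callback here are illustrative): the callback runs once per victim dentry while the victim's parent directory is still locked, which makes it suitable for per-node bookkeeping such as dropping extra pins.

static void example_cleanup(struct dentry *victim)
{
	/* e.g. release a private reference taken at creation time */
}

static void example_remove_subtree(struct dentry *root)
{
	/* Pass NULL instead of example_cleanup if no per-node work. */
	simple_recursive_removal(root, example_cleanup);
}
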
diff --git a/fs/lockd/procfs.c b/fs/lockd/procfs.c
index ca9228a56d65..a01f08c8c2f3 100644
--- a/fs/lockd/procfs.c
+++ b/fs/lockd/procfs.c
@@ -60,11 +60,11 @@ nlm_end_grace_read(struct file *file, char __user *buf, size_t size,
return simple_read_from_buffer(buf, size, pos, resp, sizeof(resp));
}
-static const struct file_operations lockd_end_grace_operations = {
- .write = nlm_end_grace_write,
- .read = nlm_end_grace_read,
- .llseek = default_llseek,
- .release = simple_transaction_release,
+static const struct proc_ops lockd_end_grace_proc_ops = {
+ .proc_write = nlm_end_grace_write,
+ .proc_read = nlm_end_grace_read,
+ .proc_lseek = default_llseek,
+ .proc_release = simple_transaction_release,
};
int __init
@@ -76,7 +76,7 @@ lockd_create_procfs(void)
if (!entry)
return -ENOMEM;
entry = proc_create("nlm_end_grace", S_IRUGO|S_IWUSR, entry,
- &lockd_end_grace_operations);
+ &lockd_end_grace_proc_ops);
if (!entry) {
remove_proc_entry("fs/lockd", NULL);
return -ENOMEM;
diff --git a/fs/namei.c b/fs/namei.c
index d2720dc71d0e..db6565c99825 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -491,7 +491,7 @@ struct nameidata {
struct path root;
struct inode *inode; /* path.dentry.d_inode */
unsigned int flags;
- unsigned seq, m_seq;
+ unsigned seq, m_seq, r_seq;
int last_type;
unsigned depth;
int total_link_count;
@@ -641,6 +641,14 @@ static bool legitimize_links(struct nameidata *nd)
static bool legitimize_root(struct nameidata *nd)
{
+ /*
+ * For scoped-lookups (where nd->root has been zeroed), we need to
+ * restart the whole lookup from scratch -- because set_root() is wrong
+ * for these lookups (nd->dfd is the root, not the filesystem root).
+ */
+ if (!nd->root.mnt && (nd->flags & LOOKUP_IS_SCOPED))
+ return false;
+ /* Nothing to do if nd->root is zero or is managed by the VFS user. */
if (!nd->root.mnt || (nd->flags & LOOKUP_ROOT))
return true;
nd->flags |= LOOKUP_ROOT_GRABBED;
@@ -776,12 +784,37 @@ static int complete_walk(struct nameidata *nd)
int status;
if (nd->flags & LOOKUP_RCU) {
- if (!(nd->flags & LOOKUP_ROOT))
+ /*
+ * We don't want to zero nd->root for scoped-lookups or
+ * externally-managed nd->root.
+ */
+ if (!(nd->flags & (LOOKUP_ROOT | LOOKUP_IS_SCOPED)))
nd->root.mnt = NULL;
if (unlikely(unlazy_walk(nd)))
return -ECHILD;
}
+ if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
+ /*
+ * While the guarantee of LOOKUP_IS_SCOPED is (roughly) "don't
+ * ever step outside the root during lookup" and should already
+ * be guaranteed by the rest of namei, we want to avoid a namei
+ * BUG resulting in userspace being given a path that was not
+ * scoped within the root at some point during the lookup.
+ *
+ * So, do a final sanity-check to make sure that in the
+ * worst-case scenario (a complete bypass of LOOKUP_IS_SCOPED)
+ * we won't silently return an fd completely outside of the
+ * requested root to userspace.
+ *
+ * Userspace could move the path outside the root after this
+ * check, but as discussed elsewhere this is not a concern (the
+ * resolved file was inside the root at some point).
+ */
+ if (!path_is_under(&nd->path, &nd->root))
+ return -EXDEV;
+ }
+
if (likely(!(nd->flags & LOOKUP_JUMPED)))
return 0;
@@ -798,10 +831,18 @@ static int complete_walk(struct nameidata *nd)
return status;
}
-static void set_root(struct nameidata *nd)
+static int set_root(struct nameidata *nd)
{
struct fs_struct *fs = current->fs;
+ /*
+ * Jumping to the real root in a scoped-lookup is a BUG in namei, but we
+ * still have to ensure it doesn't happen because it will cause a breakout
+ * from the dirfd.
+ */
+ if (WARN_ON(nd->flags & LOOKUP_IS_SCOPED))
+ return -ENOTRECOVERABLE;
+
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
@@ -814,6 +855,7 @@ static void set_root(struct nameidata *nd)
get_fs_root(fs, &nd->root);
nd->flags |= LOOKUP_ROOT_GRABBED;
}
+ return 0;
}
static void path_put_conditional(struct path *path, struct nameidata *nd)
@@ -837,6 +879,18 @@ static inline void path_to_nameidata(const struct path *path,
static int nd_jump_root(struct nameidata *nd)
{
+ if (unlikely(nd->flags & LOOKUP_BENEATH))
+ return -EXDEV;
+ if (unlikely(nd->flags & LOOKUP_NO_XDEV)) {
+ /* Absolute path arguments to path_init() are allowed. */
+ if (nd->path.mnt != NULL && nd->path.mnt != nd->root.mnt)
+ return -EXDEV;
+ }
+ if (!nd->root.mnt) {
+ int error = set_root(nd);
+ if (error)
+ return error;
+ }
if (nd->flags & LOOKUP_RCU) {
struct dentry *d;
nd->path = nd->root;
@@ -859,14 +913,32 @@ static int nd_jump_root(struct nameidata *nd)
* Helper to directly jump to a known parsed path from ->get_link,
* caller must have taken a reference to path beforehand.
*/
-void nd_jump_link(struct path *path)
+int nd_jump_link(struct path *path)
{
+ int error = -ELOOP;
struct nameidata *nd = current->nameidata;
- path_put(&nd->path);
+ if (unlikely(nd->flags & LOOKUP_NO_MAGICLINKS))
+ goto err;
+
+ error = -EXDEV;
+ if (unlikely(nd->flags & LOOKUP_NO_XDEV)) {
+ if (nd->path.mnt != path->mnt)
+ goto err;
+ }
+ /* Not currently safe for scoped-lookups. */
+ if (unlikely(nd->flags & LOOKUP_IS_SCOPED))
+ goto err;
+
+ path_put(&nd->path);
nd->path = *path;
nd->inode = nd->path.dentry->d_inode;
nd->flags |= LOOKUP_JUMPED;
+ return 0;
+
+err:
+ path_put(path);
+ return error;
}
static inline void put_link(struct nameidata *nd)
@@ -1001,7 +1073,8 @@ static int may_linkat(struct path *link)
* may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
* should be allowed, or not, on files that already
* exist.
- * @dir: the sticky parent directory
+ * @dir_mode: mode bits of directory
+ * @dir_uid: owner of directory
* @inode: the inode of the file to open
*
* Block an O_CREAT open of a FIFO (or a regular file) when:
@@ -1017,18 +1090,18 @@ static int may_linkat(struct path *link)
*
* Returns 0 if the open is allowed, -ve on error.
*/
-static int may_create_in_sticky(struct dentry * const dir,
+static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
struct inode * const inode)
{
if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
(!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
- likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
- uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
+ likely(!(dir_mode & S_ISVTX)) ||
+ uid_eq(inode->i_uid, dir_uid) ||
uid_eq(current_fsuid(), inode->i_uid))
return 0;
- if (likely(dir->d_inode->i_mode & 0002) ||
- (dir->d_inode->i_mode & 0020 &&
+ if (likely(dir_mode & 0002) ||
+ (dir_mode & 0020 &&
((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
(sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
const char *operation = S_ISFIFO(inode->i_mode) ?
@@ -1049,6 +1122,9 @@ const char *get_link(struct nameidata *nd)
int error;
const char *res;
+ if (unlikely(nd->flags & LOOKUP_NO_SYMLINKS))
+ return ERR_PTR(-ELOOP);
+
if (!(nd->flags & LOOKUP_RCU)) {
touch_atime(&last->link);
cond_resched();
@@ -1083,10 +1159,9 @@ const char *get_link(struct nameidata *nd)
return res;
}
if (*res == '/') {
- if (!nd->root.mnt)
- set_root(nd);
- if (unlikely(nd_jump_root(nd)))
- return ERR_PTR(-ECHILD);
+ error = nd_jump_root(nd);
+ if (unlikely(error))
+ return ERR_PTR(error);
while (unlikely(*++res == '/'))
;
}
@@ -1268,10 +1343,14 @@ static int follow_managed(struct path *path, struct nameidata *nd)
break;
}
- if (need_mntput && path->mnt == mnt)
- mntput(path->mnt);
- if (need_mntput)
- nd->flags |= LOOKUP_JUMPED;
+ if (need_mntput) {
+ if (path->mnt == mnt)
+ mntput(path->mnt);
+ if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+ ret = -EXDEV;
+ else
+ nd->flags |= LOOKUP_JUMPED;
+ }
if (ret == -EISDIR || !ret)
ret = 1;
if (ret > 0 && unlikely(d_flags_negative(flags)))
@@ -1332,6 +1411,8 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
mounted = __lookup_mnt(path->mnt, path->dentry);
if (!mounted)
break;
+ if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+ return false;
path->mnt = &mounted->mnt;
path->dentry = mounted->mnt.mnt_root;
nd->flags |= LOOKUP_JUMPED;
@@ -1352,8 +1433,11 @@ static int follow_dotdot_rcu(struct nameidata *nd)
struct inode *inode = nd->inode;
while (1) {
- if (path_equal(&nd->path, &nd->root))
+ if (path_equal(&nd->path, &nd->root)) {
+ if (unlikely(nd->flags & LOOKUP_BENEATH))
+ return -ECHILD;
break;
+ }
if (nd->path.dentry != nd->path.mnt->mnt_root) {
struct dentry *old = nd->path.dentry;
struct dentry *parent = old->d_parent;
@@ -1366,7 +1450,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
nd->path.dentry = parent;
nd->seq = seq;
if (unlikely(!path_connected(&nd->path)))
- return -ENOENT;
+ return -ECHILD;
break;
} else {
struct mount *mnt = real_mount(nd->path.mnt);
@@ -1378,6 +1462,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
return -ECHILD;
if (&mparent->mnt == nd->path.mnt)
break;
+ if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+ return -ECHILD;
/* we know that mountpoint was pinned */
nd->path.dentry = mountpoint;
nd->path.mnt = &mparent->mnt;
@@ -1392,6 +1478,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
return -ECHILD;
if (!mounted)
break;
+ if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+ return -ECHILD;
nd->path.mnt = &mounted->mnt;
nd->path.dentry = mounted->mnt.mnt_root;
inode = nd->path.dentry->d_inode;
@@ -1479,9 +1567,12 @@ static int path_parent_directory(struct path *path)
static int follow_dotdot(struct nameidata *nd)
{
- while(1) {
- if (path_equal(&nd->path, &nd->root))
+ while (1) {
+ if (path_equal(&nd->path, &nd->root)) {
+ if (unlikely(nd->flags & LOOKUP_BENEATH))
+ return -EXDEV;
break;
+ }
if (nd->path.dentry != nd->path.mnt->mnt_root) {
int ret = path_parent_directory(&nd->path);
if (ret)
@@ -1490,6 +1581,8 @@ static int follow_dotdot(struct nameidata *nd)
}
if (!follow_up(&nd->path))
break;
+ if (unlikely(nd->flags & LOOKUP_NO_XDEV))
+ return -EXDEV;
}
follow_mount(&nd->path);
nd->inode = nd->path.dentry->d_inode;
@@ -1698,12 +1791,33 @@ static inline int may_lookup(struct nameidata *nd)
static inline int handle_dots(struct nameidata *nd, int type)
{
if (type == LAST_DOTDOT) {
- if (!nd->root.mnt)
- set_root(nd);
- if (nd->flags & LOOKUP_RCU) {
- return follow_dotdot_rcu(nd);
- } else
- return follow_dotdot(nd);
+ int error = 0;
+
+ if (!nd->root.mnt) {
+ error = set_root(nd);
+ if (error)
+ return error;
+ }
+ if (nd->flags & LOOKUP_RCU)
+ error = follow_dotdot_rcu(nd);
+ else
+ error = follow_dotdot(nd);
+ if (error)
+ return error;
+
+ if (unlikely(nd->flags & LOOKUP_IS_SCOPED)) {
+ /*
+ * If there was a racing rename or mount along our
+ * path, then we can't be sure that ".." hasn't jumped
+ * above nd->root (and so userspace should retry or use
+ * some fallback).
+ */
+ smp_rmb();
+ if (unlikely(__read_seqcount_retry(&mount_lock.seqcount, nd->m_seq)))
+ return -EAGAIN;
+ if (unlikely(__read_seqcount_retry(&rename_lock.seqcount, nd->r_seq)))
+ return -EAGAIN;
+ }
}
return 0;
}
@@ -2157,6 +2271,7 @@ OK:
/* must be paired with terminate_walk() */
static const char *path_init(struct nameidata *nd, unsigned flags)
{
+ int error;
const char *s = nd->name->name;
if (!*s)
@@ -2167,6 +2282,11 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
nd->depth = 0;
+
+ nd->m_seq = __read_seqcount_begin(&mount_lock.seqcount);
+ nd->r_seq = __read_seqcount_begin(&rename_lock.seqcount);
+ smp_rmb();
+
if (flags & LOOKUP_ROOT) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
@@ -2175,9 +2295,8 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->path = nd->root;
nd->inode = inode;
if (flags & LOOKUP_RCU) {
- nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+ nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
nd->root_seq = nd->seq;
- nd->m_seq = read_seqbegin(&mount_lock);
} else {
path_get(&nd->path);
}
@@ -2188,13 +2307,16 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->path.mnt = NULL;
nd->path.dentry = NULL;
- nd->m_seq = read_seqbegin(&mount_lock);
- if (*s == '/') {
- set_root(nd);
- if (likely(!nd_jump_root(nd)))
- return s;
- return ERR_PTR(-ECHILD);
- } else if (nd->dfd == AT_FDCWD) {
+ /* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
+ if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
+ error = nd_jump_root(nd);
+ if (unlikely(error))
+ return ERR_PTR(error);
+ return s;
+ }
+
+ /* Relative pathname -- get the starting-point it is relative to. */
+ if (nd->dfd == AT_FDCWD) {
if (flags & LOOKUP_RCU) {
struct fs_struct *fs = current->fs;
unsigned seq;
@@ -2209,7 +2331,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
get_fs_pwd(current->fs, &nd->path);
nd->inode = nd->path.dentry->d_inode;
}
- return s;
} else {
/* Caller must check execute permissions on the starting path component */
struct fd f = fdget_raw(nd->dfd);
@@ -2234,8 +2355,19 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->inode = nd->path.dentry->d_inode;
}
fdput(f);
- return s;
}
+
+ /* For scoped-lookups we need to set the root to the dirfd as well. */
+ if (flags & LOOKUP_IS_SCOPED) {
+ nd->root = nd->path;
+ if (flags & LOOKUP_RCU) {
+ nd->root_seq = nd->seq;
+ } else {
+ path_get(&nd->root);
+ nd->flags |= LOOKUP_ROOT_GRABBED;
+ }
+ }
+ return s;
}
static const char *trailing_symlink(struct nameidata *nd)
@@ -3201,6 +3333,8 @@ static int do_last(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
struct dentry *dir = nd->path.dentry;
+ kuid_t dir_uid = nd->inode->i_uid;
+ umode_t dir_mode = nd->inode->i_mode;
int open_flag = op->open_flag;
bool will_truncate = (open_flag & O_TRUNC) != 0;
bool got_write = false;
@@ -3331,7 +3465,7 @@ finish_open:
error = -EISDIR;
if (d_is_dir(nd->path.dentry))
goto out;
- error = may_create_in_sticky(dir,
+ error = may_create_in_sticky(dir_mode, dir_uid,
d_backing_inode(nd->path.dentry));
if (unlikely(error))
goto out;
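
The LOOKUP_BENEATH/LOOKUP_NO_XDEV/LOOKUP_IS_SCOPED plumbing above is the kernel half of the openat2(2) interface. Assuming the rest of that series (which defines struct open_how and the RESOLVE_* flags), a userspace sketch of a scoped open looks like:

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>

static int open_in_root(int dirfd, const char *path)
{
	struct open_how how = {
		.flags	 = O_RDONLY,
		/* Resolve "..", absolute paths and symlinks strictly
		 * inside dirfd; maps to LOOKUP_IN_ROOT/LOOKUP_IS_SCOPED. */
		.resolve = RESOLVE_IN_ROOT,
	};

	return syscall(SYS_openat2, dirfd, path, &how, sizeof(how));
}
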
diff --git a/fs/namespace.c b/fs/namespace.c
index 5e1bf611a9eb..85b5f7bea82e 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2979,39 +2979,10 @@ static void shrink_submounts(struct mount *mnt)
}
}
-/*
- * Some copy_from_user() implementations do not return the exact number of
- * bytes remaining to copy on a fault. But copy_mount_options() requires that.
- * Note that this function differs from copy_from_user() in that it will oops
- * on bad values of `to', rather than returning a short copy.
- */
-static long exact_copy_from_user(void *to, const void __user * from,
- unsigned long n)
-{
- char *t = to;
- const char __user *f = from;
- char c;
-
- if (!access_ok(from, n))
- return n;
-
- while (n) {
- if (__get_user(c, f)) {
- memset(t, 0, n);
- break;
- }
- *t++ = c;
- f++;
- n--;
- }
- return n;
-}
-
void *copy_mount_options(const void __user * data)
{
- int i;
- unsigned long size;
char *copy;
+ unsigned size;
if (!data)
return NULL;
@@ -3020,22 +2991,16 @@ void *copy_mount_options(const void __user * data)
if (!copy)
return ERR_PTR(-ENOMEM);
- /* We only care that *some* data at the address the user
- * gave us is valid. Just in case, we'll zero
- * the remainder of the page.
- */
- /* copy_from_user cannot cross TASK_SIZE ! */
- size = TASK_SIZE - (unsigned long)untagged_addr(data);
- if (size > PAGE_SIZE)
- size = PAGE_SIZE;
+ size = PAGE_SIZE - offset_in_page(data);
- i = size - exact_copy_from_user(copy, data, size);
- if (!i) {
+ if (copy_from_user(copy, data, size)) {
kfree(copy);
return ERR_PTR(-EFAULT);
}
- if (i != PAGE_SIZE)
- memset(copy + i, 0, PAGE_SIZE - i);
+ if (size != PAGE_SIZE) {
+ if (copy_from_user(copy + size, data + size, PAGE_SIZE - size))
+ memset(copy + size, 0, PAGE_SIZE - size);
+ }
return copy;
}
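
A worked example of the page-boundary arithmetic above, since the two-step copy is easy to misread: only the copy up to the end of the first user page must succeed; a fault on the spill into the next page just zero-fills the tail.

/* With data = 0x...f40 and PAGE_SIZE = 4096:
 *
 *	size = PAGE_SIZE - offset_in_page(data)
 *	     = 4096 - 3904
 *	     = 192
 *
 * First copy:  192 bytes, mandatory (the options start here).
 * Second copy: 3904 bytes from the following page; if that page is
 * unmapped, the remainder is zeroed instead of failing the mount.
 */
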
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 295a7a21b774..40b6c5ac46c0 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -90,7 +90,7 @@ config NFS_V4
config NFS_SWAP
bool "Provide swap over NFS support"
default n
- depends on NFS_FS
+ depends on NFS_FS && SWAP
select SUNRPC_SWAP
help
This option enables swapon to work on files located on NFS mounts.
@@ -196,3 +196,12 @@ config NFS_DEBUG
depends on NFS_FS && SUNRPC_DEBUG
select CRC32
default y
+
+config NFS_DISABLE_UDP_SUPPORT
+ bool "NFS: Disable NFS UDP protocol support"
+ depends on NFS_FS
+ default y
+ help
+ Choose Y here to disable the use of NFS over UDP. NFS over UDP
+ on modern networks (1Gb+) can lead to data corruption caused by
+ fragmentation during high loads.
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 34cdeaecccf6..2433c3e03cfa 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -9,7 +9,7 @@ CFLAGS_nfstrace.o += -I$(src)
nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
io.o direct.o pagelist.o read.o symlink.o unlink.o \
write.o namespace.o mount_clnt.o nfstrace.o \
- export.o sysfs.o
+ export.o sysfs.o fs_context.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 03a20f5716c7..79ff172eb1c8 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -18,6 +18,7 @@
#include "callback.h"
#include "internal.h"
#include "nfs4session.h"
+#include "nfs4trace.h"
#define CB_OP_TAGLEN_MAXSZ (512)
#define CB_OP_HDR_RES_MAXSZ (2 * 4) // opcode, status
@@ -946,9 +947,13 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
if (hdr_arg.minorversion == 0) {
cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
- if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
- if (cps.clp)
- nfs_put_client(cps.clp);
+ if (!cps.clp) {
+ trace_nfs_cb_no_clp(rqstp->rq_xid, hdr_arg.cb_ident);
+ goto out_invalidcred;
+ }
+ if (!check_gss_callback_principal(cps.clp, rqstp)) {
+ trace_nfs_cb_badprinc(rqstp->rq_xid, hdr_arg.cb_ident);
+ nfs_put_client(cps.clp);
goto out_invalidcred;
}
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 02110a30a49e..989c30c98511 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -474,6 +474,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
to->to_maxval = to->to_initval;
to->to_exponential = 0;
break;
+#ifndef CONFIG_NFS_DISABLE_UDP_SUPPORT
case XPRT_TRANSPORT_UDP:
if (retrans == NFS_UNSPEC_RETRANS)
to->to_retries = NFS_DEF_UDP_RETRANS;
@@ -484,6 +485,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
to->to_maxval = NFS_MAX_UDP_TIMEOUT;
to->to_exponential = 1;
break;
+#endif
default:
BUG();
}
@@ -580,8 +582,10 @@ static int nfs_start_lockd(struct nfs_server *server)
default:
nlm_init.protocol = IPPROTO_TCP;
break;
+#ifndef CONFIG_NFS_DISABLE_UDP_SUPPORT
case XPRT_TRANSPORT_UDP:
nlm_init.protocol = IPPROTO_UDP;
+#endif
}
host = nlmclnt_init(&nlm_init);
@@ -658,28 +662,28 @@ EXPORT_SYMBOL_GPL(nfs_init_client);
* Create a version 2 or 3 client
*/
static int nfs_init_server(struct nfs_server *server,
- const struct nfs_parsed_mount_data *data,
- struct nfs_subversion *nfs_mod)
+ const struct fs_context *fc)
{
+ const struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct rpc_timeout timeparms;
struct nfs_client_initdata cl_init = {
- .hostname = data->nfs_server.hostname,
- .addr = (const struct sockaddr *)&data->nfs_server.address,
- .addrlen = data->nfs_server.addrlen,
- .nfs_mod = nfs_mod,
- .proto = data->nfs_server.protocol,
- .net = data->net,
+ .hostname = ctx->nfs_server.hostname,
+ .addr = (const struct sockaddr *)&ctx->nfs_server.address,
+ .addrlen = ctx->nfs_server.addrlen,
+ .nfs_mod = ctx->nfs_mod,
+ .proto = ctx->nfs_server.protocol,
+ .net = fc->net_ns,
.timeparms = &timeparms,
.cred = server->cred,
- .nconnect = data->nfs_server.nconnect,
+ .nconnect = ctx->nfs_server.nconnect,
.init_flags = (1UL << NFS_CS_REUSEPORT),
};
struct nfs_client *clp;
int error;
- nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
- data->timeo, data->retrans);
- if (data->flags & NFS_MOUNT_NORESVPORT)
+ nfs_init_timeout_values(&timeparms, ctx->nfs_server.protocol,
+ ctx->timeo, ctx->retrans);
+ if (ctx->flags & NFS_MOUNT_NORESVPORT)
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
/* Allocate or find a client reference we can use */
@@ -690,46 +694,46 @@ static int nfs_init_server(struct nfs_server *server,
server->nfs_client = clp;
/* Initialise the client representation from the mount data */
- server->flags = data->flags;
- server->options = data->options;
+ server->flags = ctx->flags;
+ server->options = ctx->options;
server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
- if (data->rsize)
- server->rsize = nfs_block_size(data->rsize, NULL);
- if (data->wsize)
- server->wsize = nfs_block_size(data->wsize, NULL);
+ if (ctx->rsize)
+ server->rsize = nfs_block_size(ctx->rsize, NULL);
+ if (ctx->wsize)
+ server->wsize = nfs_block_size(ctx->wsize, NULL);
- server->acregmin = data->acregmin * HZ;
- server->acregmax = data->acregmax * HZ;
- server->acdirmin = data->acdirmin * HZ;
- server->acdirmax = data->acdirmax * HZ;
+ server->acregmin = ctx->acregmin * HZ;
+ server->acregmax = ctx->acregmax * HZ;
+ server->acdirmin = ctx->acdirmin * HZ;
+ server->acdirmax = ctx->acdirmax * HZ;
/* Start lockd here, before we might error out */
error = nfs_start_lockd(server);
if (error < 0)
goto error;
- server->port = data->nfs_server.port;
- server->auth_info = data->auth_info;
+ server->port = ctx->nfs_server.port;
+ server->auth_info = ctx->auth_info;
error = nfs_init_server_rpcclient(server, &timeparms,
- data->selected_flavor);
+ ctx->selected_flavor);
if (error < 0)
goto error;
/* Preserve the values of mount_server-related mount options */
- if (data->mount_server.addrlen) {
- memcpy(&server->mountd_address, &data->mount_server.address,
- data->mount_server.addrlen);
- server->mountd_addrlen = data->mount_server.addrlen;
+ if (ctx->mount_server.addrlen) {
+ memcpy(&server->mountd_address, &ctx->mount_server.address,
+ ctx->mount_server.addrlen);
+ server->mountd_addrlen = ctx->mount_server.addrlen;
}
- server->mountd_version = data->mount_server.version;
- server->mountd_port = data->mount_server.port;
- server->mountd_protocol = data->mount_server.protocol;
+ server->mountd_version = ctx->mount_server.version;
+ server->mountd_port = ctx->mount_server.port;
+ server->mountd_protocol = ctx->mount_server.protocol;
- server->namelen = data->namlen;
+ server->namelen = ctx->namlen;
return 0;
error:
@@ -951,9 +955,9 @@ EXPORT_SYMBOL_GPL(nfs_free_server);
* Create a version 2 or 3 volume record
* - keyed on server and FSID
*/
-struct nfs_server *nfs_create_server(struct nfs_mount_info *mount_info,
- struct nfs_subversion *nfs_mod)
+struct nfs_server *nfs_create_server(struct fs_context *fc)
{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct nfs_server *server;
struct nfs_fattr *fattr;
int error;
@@ -970,18 +974,18 @@ struct nfs_server *nfs_create_server(struct nfs_mount_info *mount_info,
goto error;
/* Get a client representation */
- error = nfs_init_server(server, mount_info->parsed, nfs_mod);
+ error = nfs_init_server(server, fc);
if (error < 0)
goto error;
/* Probe the root fh to retrieve its FSID */
- error = nfs_probe_fsinfo(server, mount_info->mntfh, fattr);
+ error = nfs_probe_fsinfo(server, ctx->mntfh, fattr);
if (error < 0)
goto error;
if (server->nfs_client->rpc_ops->version == 3) {
if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
server->namelen = NFS3_MAXNAMLEN;
- if (!(mount_info->parsed->flags & NFS_MOUNT_NORDIRPLUS))
+ if (!(ctx->flags & NFS_MOUNT_NORDIRPLUS))
server->caps |= NFS_CAP_READDIRPLUS;
} else {
if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
@@ -989,8 +993,8 @@ struct nfs_server *nfs_create_server(struct nfs_mount_info *mount_info,
}
if (!(fattr->valid & NFS_ATTR_FATTR)) {
- error = nfs_mod->rpc_ops->getattr(server, mount_info->mntfh,
- fattr, NULL, NULL);
+ error = ctx->nfs_mod->rpc_ops->getattr(server, ctx->mntfh,
+ fattr, NULL, NULL);
if (error < 0) {
dprintk("nfs_create_server: getattr error = %d\n", -error);
goto error;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index fe57b2b5314a..1865322de142 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -25,13 +25,46 @@
#include "internal.h"
#include "nfs4trace.h"
-static void nfs_free_delegation(struct nfs_delegation *delegation)
+#define NFS_DEFAULT_DELEGATION_WATERMARK (5000U)
+
+static atomic_long_t nfs_active_delegations;
+static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK;
+
+static void __nfs_free_delegation(struct nfs_delegation *delegation)
{
put_cred(delegation->cred);
delegation->cred = NULL;
kfree_rcu(delegation, rcu);
}
+static void nfs_mark_delegation_revoked(struct nfs_delegation *delegation)
+{
+ if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+ delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
+ atomic_long_dec(&nfs_active_delegations);
+ if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
+ nfs_clear_verifier_delegated(delegation->inode);
+ }
+}
+
+static struct nfs_delegation *nfs_get_delegation(struct nfs_delegation *delegation)
+{
+ refcount_inc(&delegation->refcount);
+ return delegation;
+}
+
+static void nfs_put_delegation(struct nfs_delegation *delegation)
+{
+ if (refcount_dec_and_test(&delegation->refcount))
+ __nfs_free_delegation(delegation);
+}
+
+static void nfs_free_delegation(struct nfs_delegation *delegation)
+{
+ nfs_mark_delegation_revoked(delegation);
+ nfs_put_delegation(delegation);
+}
+
/**
* nfs_mark_delegation_referenced - set delegation's REFERENCED flag
* @delegation: delegation to process
@@ -222,13 +255,18 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
+ const struct cred *cred;
int res = 0;
- if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
- res = nfs4_proc_delegreturn(inode,
- delegation->cred,
+ if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+ spin_lock(&delegation->lock);
+ cred = get_cred(delegation->cred);
+ spin_unlock(&delegation->lock);
+ res = nfs4_proc_delegreturn(inode, cred,
&delegation->stateid,
issync);
+ put_cred(cred);
+ }
return res;
}
@@ -254,9 +292,13 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
if (delegation == NULL)
goto out;
spin_lock(&delegation->lock);
- if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
- ret = delegation;
+ if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ /* Refcount matched in nfs_end_delegation_return() */
+ ret = nfs_get_delegation(delegation);
+ }
spin_unlock(&delegation->lock);
+ if (ret)
+ nfs_clear_verifier_delegated(&nfsi->vfs_inode);
out:
return ret;
}
@@ -343,7 +385,8 @@ nfs_update_inplace_delegation(struct nfs_delegation *delegation,
delegation->stateid.seqid = update->stateid.seqid;
smp_wmb();
delegation->type = update->type;
- clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
+ if (test_and_clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+ atomic_long_inc(&nfs_active_delegations);
}
}
@@ -373,6 +416,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
if (delegation == NULL)
return -ENOMEM;
nfs4_stateid_copy(&delegation->stateid, stateid);
+ refcount_set(&delegation->refcount, 1);
delegation->type = type;
delegation->pagemod_limit = pagemod_limit;
delegation->change_attr = inode_peek_iversion_raw(inode);
@@ -423,6 +467,8 @@ add_new:
rcu_assign_pointer(nfsi->delegation, delegation);
delegation = NULL;
+ atomic_long_inc(&nfs_active_delegations);
+
trace_nfs4_set_delegation(inode, type);
spin_lock(&inode->i_lock);
@@ -432,7 +478,7 @@ add_new:
out:
spin_unlock(&clp->cl_lock);
if (delegation != NULL)
- nfs_free_delegation(delegation);
+ __nfs_free_delegation(delegation);
if (freeme != NULL) {
nfs_do_return_delegation(inode, freeme, 0);
nfs_free_delegation(freeme);
@@ -470,6 +516,8 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
err = nfs_do_return_delegation(inode, delegation, issync);
out:
+ /* Refcount matched in nfs_start_delegation_return_locked() */
+ nfs_put_delegation(delegation);
return err;
}
@@ -479,7 +527,7 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
ret = true;
- if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
+ else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
struct inode *inode;
spin_lock(&delegation->lock);
@@ -488,6 +536,8 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
ret = true;
spin_unlock(&delegation->lock);
}
+ if (ret)
+ clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
ret = false;
@@ -607,6 +657,7 @@ void nfs_inode_evict_delegation(struct inode *inode)
delegation = nfs_inode_detach_delegation(inode);
if (delegation != NULL) {
+ set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
nfs_do_return_delegation(inode, delegation, 1);
nfs_free_delegation(delegation);
@@ -637,6 +688,43 @@ int nfs4_inode_return_delegation(struct inode *inode)
}
/**
+ * nfs4_inode_return_delegation_on_close - asynchronously return a delegation
+ * @inode: inode to process
+ *
+ * This routine is called on file close in order to determine if the
+ * inode delegation needs to be returned immediately.
+ */
+void nfs4_inode_return_delegation_on_close(struct inode *inode)
+{
+ struct nfs_delegation *delegation;
+ struct nfs_delegation *ret = NULL;
+
+ if (!inode)
+ return;
+ rcu_read_lock();
+ delegation = nfs4_get_valid_delegation(inode);
+ if (!delegation)
+ goto out;
+ if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
+ atomic_long_read(&nfs_active_delegations) >= nfs_delegation_watermark) {
+ spin_lock(&delegation->lock);
+ if (delegation->inode &&
+ list_empty(&NFS_I(inode)->open_files) &&
+ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+ /* Refcount matched in nfs_end_delegation_return() */
+ ret = nfs_get_delegation(delegation);
+ }
+ spin_unlock(&delegation->lock);
+ if (ret)
+ nfs_clear_verifier_delegated(inode);
+ }
+out:
+ rcu_read_unlock();
+ nfs_end_delegation_return(inode, ret, 0);
+}
+
+/**
* nfs4_inode_make_writeable
* @inode: pointer to inode
*
@@ -760,13 +848,6 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
rcu_read_unlock();
}
-static void nfs_mark_delegation_revoked(struct nfs_server *server,
- struct nfs_delegation *delegation)
-{
- set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
- delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
-}
-
static void nfs_revoke_delegation(struct inode *inode,
const nfs4_stateid *stateid)
{
@@ -794,7 +875,7 @@ static void nfs_revoke_delegation(struct inode *inode,
}
spin_unlock(&delegation->lock);
}
- nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
+ nfs_mark_delegation_revoked(delegation);
ret = true;
out:
rcu_read_unlock();
@@ -833,7 +914,7 @@ void nfs_delegation_mark_returned(struct inode *inode,
delegation->stateid.seqid = stateid->seqid;
}
- nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
+ nfs_mark_delegation_revoked(delegation);
out_clear_returning:
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
@@ -1036,10 +1117,11 @@ restart:
delegation = nfs_start_delegation_return_locked(NFS_I(inode));
rcu_read_unlock();
if (delegation != NULL) {
- delegation = nfs_detach_delegation(NFS_I(inode),
- delegation, server);
- if (delegation != NULL)
+ if (nfs_detach_delegation(NFS_I(inode), delegation,
+ server) != NULL)
nfs_free_delegation(delegation);
+ /* Match nfs_start_delegation_return_locked */
+ nfs_put_delegation(delegation);
}
iput(inode);
nfs_sb_deactive(server->super);
@@ -1317,3 +1399,5 @@ out:
rcu_read_unlock();
return ret;
}
+
+module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644);
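
One usage note on the watermark introduced above (inferred from standard module_param semantics rather than spelled out in the patch): with mode 0644, the limit should appear as /sys/module/nfsv4/parameters/delegation_watermark in a modular build (delegation.c is part of the nfsv4 module), so the default of 5000 active delegations can be inspected or tuned at runtime without remounting.
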
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 15d3484be028..9b00a0b7f832 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -22,6 +22,7 @@ struct nfs_delegation {
unsigned long pagemod_limit;
__u64 change_attr;
unsigned long flags;
+ refcount_t refcount;
spinlock_t lock;
struct rcu_head rcu;
};
@@ -42,6 +43,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
fmode_t type, const nfs4_stateid *stateid, unsigned long pagemod_limit);
int nfs4_inode_return_delegation(struct inode *inode);
+void nfs4_inode_return_delegation_on_close(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
void nfs_inode_evict_delegation(struct inode *inode);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e180033e35cf..193d6fb363b7 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -58,7 +58,7 @@ static void nfs_readdir_clear_array(struct page*);
const struct file_operations nfs_dir_operations = {
.llseek = nfs_llseek_dir,
.read = generic_read_dir,
- .iterate = nfs_readdir,
+ .iterate_shared = nfs_readdir,
.open = nfs_opendir,
.release = nfs_closedir,
.fsync = nfs_fsync_dir,
@@ -155,6 +155,7 @@ typedef struct {
loff_t current_index;
decode_dirent_t decode;
+ unsigned long dir_verifier;
unsigned long timestamp;
unsigned long gencount;
unsigned int cache_entry_index;
@@ -162,6 +163,17 @@ typedef struct {
bool eof;
} nfs_readdir_descriptor_t;
+static
+void nfs_readdir_init_array(struct page *page)
+{
+ struct nfs_cache_array *array;
+
+ array = kmap_atomic(page);
+ memset(array, 0, sizeof(struct nfs_cache_array));
+ array->eof_index = -1;
+ kunmap_atomic(array);
+}
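/*
 * Illustrative sketch, not part of the patch: the kmap_atomic()/
 * kunmap_atomic() pairing used by nfs_readdir_init_array() above.
 * Atomic kmaps disable preemption, so nothing between map and unmap may
 * sleep.  Function and variable names here are made up.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>

static int example_zero_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *addr;

	if (!page)
		return -ENOMEM;
	addr = kmap_atomic(page);	/* no sleeping until kunmap_atomic() */
	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr);
	__free_page(page);
	return 0;
}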
+
/*
* we are freeing strings created by nfs_add_to_readdir_array()
*/
@@ -174,6 +186,7 @@ void nfs_readdir_clear_array(struct page *page)
array = kmap_atomic(page);
for (i = 0; i < array->size; i++)
kfree(array->array[i].string.name);
+ array->size = 0;
kunmap_atomic(array);
}
@@ -186,7 +199,7 @@ static
int nfs_readdir_make_qstr(struct qstr *string, const char *name, unsigned int len)
{
string->len = len;
- string->name = kmemdup(name, len, GFP_KERNEL);
+ string->name = kmemdup_nul(name, len, GFP_KERNEL);
if (string->name == NULL)
return -ENOMEM;
/*
@@ -341,6 +354,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
again:
timestamp = jiffies;
gencount = nfs_inc_attr_generation_counter();
+ desc->dir_verifier = nfs_save_change_attribute(inode);
error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
NFS_SERVER(inode)->dtsize, desc->plus);
if (error < 0) {
@@ -437,18 +451,19 @@ void nfs_force_use_readdirplus(struct inode *dir)
if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
!list_empty(&nfsi->open_files)) {
set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
- invalidate_mapping_pages(dir->i_mapping, 0, -1);
+ invalidate_mapping_pages(dir->i_mapping,
+ nfsi->page_index + 1, -1);
}
}
static
-void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry,
+ unsigned long dir_verifier)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct dentry *dentry;
struct dentry *alias;
- struct inode *dir = d_inode(parent);
struct inode *inode;
int status;
@@ -487,7 +502,7 @@ again:
if (nfs_same_file(dentry, entry)) {
if (!entry->fh->size)
goto out;
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ nfs_set_verifier(dentry, dir_verifier);
status = nfs_refresh_inode(d_inode(dentry), entry->fattr);
if (!status)
nfs_setsecurity(d_inode(dentry), entry->fattr, entry->label);
@@ -513,7 +528,7 @@ again:
dput(dentry);
dentry = alias;
}
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ nfs_set_verifier(dentry, dir_verifier);
out:
dput(dentry);
}
@@ -551,7 +566,8 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
count++;
if (desc->plus)
- nfs_prime_dcache(file_dentry(desc->file), entry);
+ nfs_prime_dcache(file_dentry(desc->file), entry,
+ desc->dir_verifier);
status = nfs_readdir_add_to_array(entry, page);
if (status != 0)
@@ -610,6 +626,8 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
int status = -ENOMEM;
unsigned int array_size = ARRAY_SIZE(pages);
+ nfs_readdir_init_array(page);
+
entry.prev_cookie = 0;
entry.cookie = desc->last_cookie;
entry.eof = 0;
@@ -626,8 +644,6 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
}
array = kmap(page);
- memset(array, 0, sizeof(struct nfs_cache_array));
- array->eof_index = -1;
status = nfs_readdir_alloc_pages(pages, array_size);
if (status < 0)
@@ -682,6 +698,7 @@ int nfs_readdir_filler(void *data, struct page* page)
unlock_page(page);
return 0;
error:
+ nfs_readdir_clear_array(page);
unlock_page(page);
return ret;
}
@@ -689,8 +706,6 @@ int nfs_readdir_filler(void *data, struct page* page)
static
void cache_page_release(nfs_readdir_descriptor_t *desc)
{
- if (!desc->page->mapping)
- nfs_readdir_clear_array(desc->page);
put_page(desc->page);
desc->page = NULL;
}
@@ -704,19 +719,32 @@ struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
/*
* Returns 0 if desc->dir_cookie was found on page desc->page_index
+ * and locks the page to prevent removal from the page cache.
*/
static
-int find_cache_page(nfs_readdir_descriptor_t *desc)
+int find_and_lock_cache_page(nfs_readdir_descriptor_t *desc)
{
+ struct inode *inode = file_inode(desc->file);
+ struct nfs_inode *nfsi = NFS_I(inode);
int res;
desc->page = get_cache_page(desc);
if (IS_ERR(desc->page))
return PTR_ERR(desc->page);
-
- res = nfs_readdir_search_array(desc);
+ res = lock_page_killable(desc->page);
if (res != 0)
- cache_page_release(desc);
+ goto error;
+ res = -EAGAIN;
+ if (desc->page->mapping != NULL) {
+ res = nfs_readdir_search_array(desc);
+ if (res == 0) {
+ nfsi->page_index = desc->page_index;
+ return 0;
+ }
+ }
+ unlock_page(desc->page);
+error:
+ cache_page_release(desc);
return res;
}
@@ -731,7 +759,7 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
desc->last_cookie = 0;
}
do {
- res = find_cache_page(desc);
+ res = find_and_lock_cache_page(desc);
} while (res == -EAGAIN);
return res;
}
@@ -770,7 +798,6 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
desc->eof = true;
kunmap(desc->page);
- cache_page_release(desc);
dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n",
(unsigned long long)*desc->dir_cookie, res);
return res;
@@ -816,13 +843,13 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
status = nfs_do_filldir(desc);
+ out_release:
+ nfs_readdir_clear_array(desc->page);
+ cache_page_release(desc);
out:
dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
__func__, status);
return status;
- out_release:
- cache_page_release(desc);
- goto out;
}
/* The file offset position represents the dirent entry number. A
@@ -887,6 +914,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
break;
res = nfs_do_filldir(desc);
+ unlock_page(desc->page);
+ cache_page_release(desc);
if (res < 0)
break;
} while (!desc->eof);
@@ -957,14 +986,113 @@ static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
* full lookup on all child dentries of 'dir' whenever a change occurs
* on the server that might have invalidated our dcache.
*
+ * Note that we reserve bit '0' as a tag to let us know when a dentry
+ * was revalidated while holding a delegation on its inode.
+ *
* The caller should be holding dir->i_lock
*/
void nfs_force_lookup_revalidate(struct inode *dir)
{
- NFS_I(dir)->cache_change_attribute++;
+ NFS_I(dir)->cache_change_attribute += 2;
}
EXPORT_SYMBOL_GPL(nfs_force_lookup_revalidate);
+/**
+ * nfs_verify_change_attribute - Detects NFS remote directory changes
+ * @dir: pointer to parent directory inode
+ * @verf: previously saved change attribute
+ *
+ * Return "false" if the verifiers doesn't match the change attribute.
+ * This would usually indicate that the directory contents have changed on
+ * the server, and that any dentries need revalidating.
+ */
+static bool nfs_verify_change_attribute(struct inode *dir, unsigned long verf)
+{
+ return (verf & ~1UL) == nfs_save_change_attribute(dir);
+}
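/*
 * Illustrative sketch, not part of the patch: why bit 0 is free for the
 * delegation tag.  nfs_force_lookup_revalidate() now advances the change
 * attribute in steps of 2, so the counter part of a saved verifier is
 * always even, and the low bit survives as the flag that the mask above
 * strips before comparing.  The values are made up.
 */
#include <linux/bug.h>

static void example_verifier_tag(void)
{
	unsigned long attr = 42UL << 1;		/* counter: always even */
	unsigned long verf = attr | 1UL;	/* tagged "revalidated under delegation" */

	/* the tag is invisible to the change-attribute comparison */
	WARN_ON((verf & ~1UL) != attr);
}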
+
+static void nfs_set_verifier_delegated(unsigned long *verf)
+{
+ *verf |= 1UL;
+}
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+static void nfs_unset_verifier_delegated(unsigned long *verf)
+{
+ *verf &= ~1UL;
+}
+#endif /* IS_ENABLED(CONFIG_NFS_V4) */
+
+static bool nfs_test_verifier_delegated(unsigned long verf)
+{
+ return verf & 1;
+}
+
+static bool nfs_verifier_is_delegated(struct dentry *dentry)
+{
+ return nfs_test_verifier_delegated(dentry->d_time);
+}
+
+static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf)
+{
+ struct inode *inode = d_inode(dentry);
+
+ if (!nfs_verifier_is_delegated(dentry) &&
+ !nfs_verify_change_attribute(d_inode(dentry->d_parent), verf))
+ goto out;
+ if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ nfs_set_verifier_delegated(&verf);
+out:
+ dentry->d_time = verf;
+}
+
+/**
+ * nfs_set_verifier - save a parent directory verifier in the dentry
+ * @dentry: pointer to dentry
+ * @verf: verifier to save
+ *
+ * Saves the parent directory verifier in @dentry. If the inode has
+ * a delegation, we also tag the dentry as having been revalidated
+ * while holding a delegation so that we know we don't have to
+ * look it up again after a directory change.
+ */
+void nfs_set_verifier(struct dentry *dentry, unsigned long verf)
+{
+ spin_lock(&dentry->d_lock);
+ nfs_set_verifier_locked(dentry, verf);
+ spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_set_verifier);
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+/**
+ * nfs_clear_verifier_delegated - clear the dir verifier delegation tag
+ * @inode: pointer to inode
+ *
+ * Iterates through the dentries in the inode alias list and clears
+ * the tag used to indicate that the dentry has been revalidated
+ * while holding a delegation.
+ * This function is intended for use when the delegation is being
+ * returned or revoked.
+ */
+void nfs_clear_verifier_delegated(struct inode *inode)
+{
+ struct dentry *alias;
+
+ if (!inode)
+ return;
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
+ spin_lock(&alias->d_lock);
+ nfs_unset_verifier_delegated(&alias->d_time);
+ spin_unlock(&alias->d_lock);
+ }
+ spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_clear_verifier_delegated);
+#endif /* IS_ENABLED(CONFIG_NFS_V4) */
+
/*
* A check for whether or not the parent directory has changed.
* In the case it has, we assume that the dentries are untrustworthy
@@ -1133,6 +1261,7 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
struct nfs_fh *fhandle;
struct nfs_fattr *fattr;
struct nfs4_label *label;
+ unsigned long dir_verifier;
int ret;
ret = -ENOMEM;
@@ -1142,10 +1271,18 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
if (fhandle == NULL || fattr == NULL || IS_ERR(label))
goto out;
- ret = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+ dir_verifier = nfs_save_change_attribute(dir);
+ ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, label);
if (ret < 0) {
- if (ret == -ESTALE || ret == -ENOENT)
+ switch (ret) {
+ case -ESTALE:
+ case -ENOENT:
ret = 0;
+ break;
+ case -ETIMEDOUT:
+ if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
+ ret = 1;
+ }
goto out;
}
ret = 0;
@@ -1155,7 +1292,7 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
goto out;
nfs_setsecurity(inode, fattr, label);
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ nfs_set_verifier(dentry, dir_verifier);
/* set a readdirplus hint that we had a cache miss */
nfs_force_use_readdirplus(dir);
@@ -1197,7 +1334,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
goto out_bad;
}
- if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
+ if (nfs_verifier_is_delegated(dentry))
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
/* Force a full look up iff the parent directory has changed */
@@ -1382,6 +1519,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
struct nfs_fh *fhandle = NULL;
struct nfs_fattr *fattr = NULL;
struct nfs4_label *label = NULL;
+ unsigned long dir_verifier;
int error;
dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry);
@@ -1407,8 +1545,9 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
if (IS_ERR(label))
goto out;
+ dir_verifier = nfs_save_change_attribute(dir);
trace_nfs_lookup_enter(dir, dentry, flags);
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+ error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, label);
if (error == -ENOENT)
goto no_entry;
if (error < 0) {
@@ -1430,7 +1569,7 @@ no_entry:
goto out_label;
dentry = res;
}
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ nfs_set_verifier(dentry, dir_verifier);
out_label:
trace_nfs_lookup_exit(dir, dentry, flags, error);
nfs4_label_free(label);
@@ -1635,7 +1774,7 @@ nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
if (inode == NULL)
goto full_reval;
- if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
+ if (nfs_verifier_is_delegated(dentry))
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
/* NFS only supports OPEN on regular files */
@@ -1683,7 +1822,7 @@ nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
d_drop(dentry);
if (fhandle->size == 0) {
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, NULL);
+ error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, NULL);
if (error)
goto out_error;
}
@@ -2312,11 +2451,11 @@ static int nfs_access_get_cached(struct inode *inode, const struct cred *cred, s
/* Found an entry, is our attribute cache valid? */
if (!nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
break;
+ if (!retry)
+ break;
err = -ECHILD;
if (!may_block)
goto out;
- if (!retry)
- goto out_zap;
spin_unlock(&inode->i_lock);
err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (err)
@@ -2353,7 +2492,7 @@ static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cre
lh = rcu_dereference(nfsi->access_cache_entry_lru.prev);
cache = list_entry(lh, struct nfs_access_entry, lru);
if (lh == &nfsi->access_cache_entry_lru ||
- cred != cache->cred)
+ cred_fscmp(cred, cache->cred) != 0)
cache = NULL;
if (cache == NULL)
goto out;
@@ -2476,7 +2615,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
{
struct nfs_access_entry cache;
bool may_block = (mask & MAY_NOT_BLOCK) == 0;
- int cache_mask;
+ int cache_mask = -1;
int status;
trace_nfs_access_enter(inode);
@@ -2515,7 +2654,7 @@ out_cached:
if ((mask & ~cache_mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) != 0)
status = -EACCES;
out:
- trace_nfs_access_exit(inode, status);
+ trace_nfs_access_exit(inode, mask, cache_mask, status);
return status;
}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 040a50fd9bf3..b768a0b42e82 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -245,10 +245,10 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
data->ds_commit_index);
/* verifier not set so always fail */
- if (verfp->committed < 0)
+ if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE)
return 1;
- return nfs_direct_cmp_verf(verfp, &data->verf);
+ return nfs_direct_cmp_verf(verfp, data->res.verf);
}
/**
@@ -824,7 +824,8 @@ static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
/* fake unstable write to let common nfs resend pages */
hdr->verf.committed = NFS_UNSTABLE;
- hdr->good_bytes = hdr->args.count;
+ hdr->good_bytes = hdr->args.offset + hdr->args.count -
+ hdr->io_start;
}
spin_unlock(&dreq->lock);
}
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index aec769a500a1..89bd5581f317 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -93,7 +93,7 @@ static void nfs_dns_ent_init(struct cache_head *cnew,
key = container_of(ckey, struct nfs_dns_ent, h);
kfree(new->hostname);
- new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
+ new->hostname = kmemdup_nul(key->hostname, key->namelen, GFP_KERNEL);
if (new->hostname) {
new->namelen = key->namelen;
nfs_dns_ent_update(cnew, ckey);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 8eb731d9be3e..f96367a2463e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -204,44 +204,39 @@ EXPORT_SYMBOL_GPL(nfs_file_mmap);
static int
nfs_file_fsync_commit(struct file *file, int datasync)
{
- struct nfs_open_context *ctx = nfs_file_open_context(file);
struct inode *inode = file_inode(file);
- int do_resend, status;
- int ret = 0;
+ int ret;
dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
- do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
- status = nfs_commit_inode(inode, FLUSH_SYNC);
- if (status == 0)
- status = file_check_and_advance_wb_err(file);
- if (status < 0) {
- ret = status;
- goto out;
- }
- do_resend |= test_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
- if (do_resend)
- ret = -EAGAIN;
-out:
- return ret;
+ ret = nfs_commit_inode(inode, FLUSH_SYNC);
+ if (ret < 0)
+ return ret;
+ return file_check_and_advance_wb_err(file);
}
int
nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- int ret;
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
struct inode *inode = file_inode(file);
+ int ret;
trace_nfs_fsync_enter(inode);
- do {
+ for (;;) {
ret = file_write_and_wait_range(file, start, end);
if (ret != 0)
break;
ret = nfs_file_fsync_commit(file, datasync);
- if (!ret)
- ret = pnfs_sync_inode(inode, !!datasync);
+ if (ret != 0)
+ break;
+ ret = pnfs_sync_inode(inode, !!datasync);
+ if (ret != 0)
+ break;
+ if (!test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags))
+ break;
/*
* If nfs_file_fsync_commit detected a server reboot, then
* resend all dirty pages that might have been covered by
@@ -249,7 +244,7 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
*/
start = 0;
end = LLONG_MAX;
- } while (ret == -EAGAIN);
+ }
trace_nfs_fsync_exit(inode, ret);
return ret;
@@ -489,7 +484,19 @@ static int nfs_launder_page(struct page *page)
static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
sector_t *span)
{
+ unsigned long blocks;
+ long long isize;
struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
+ struct inode *inode = file->f_mapping->host;
+
+ spin_lock(&inode->i_lock);
+ blocks = inode->i_blocks;
+ isize = inode->i_size;
+ spin_unlock(&inode->i_lock);
+ if (blocks*512 < isize) {
+ pr_warn("swap activate: swapfile has holes\n");
+ return -EINVAL;
+ }
*span = sis->pages;
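/*
 * Illustrative sketch, not part of the patch: the hole check above relies
 * on i_blocks counting 512-byte sectors, so a fully allocated file always
 * satisfies blocks * 512 >= i_size.  The helper name is an assumption.
 */
#include <linux/types.h>

static bool swapfile_has_holes(unsigned long blocks, long long isize)
{
	/* fewer allocated sectors than the logical size requires */
	return (long long)blocks * 512 < isize;
}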
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 5657b7f2611f..bb9148b83166 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1266,9 +1266,10 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
int idx, u64 offset, u64 length,
- u32 status, int opnum, int error)
+ u32 *op_status, int opnum, int error)
{
struct nfs4_ff_layout_mirror *mirror;
+ u32 status = *op_status;
int err;
if (status == 0) {
@@ -1286,10 +1287,10 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
case -ENOBUFS:
case -EPIPE:
case -EPERM:
- status = NFS4ERR_NXIO;
+ *op_status = status = NFS4ERR_NXIO;
break;
case -EACCES:
- status = NFS4ERR_ACCESS;
+ *op_status = status = NFS4ERR_ACCESS;
break;
default:
return;
@@ -1321,16 +1322,19 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
int new_idx = hdr->pgio_mirror_idx;
int err;
- trace_nfs4_pnfs_read(hdr, task->tk_status);
- if (task->tk_status < 0)
+ if (task->tk_status < 0) {
ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
hdr->args.offset, hdr->args.count,
- hdr->res.op_status, OP_READ,
+ &hdr->res.op_status, OP_READ,
task->tk_status);
+ trace_ff_layout_read_error(hdr);
+ }
+
err = ff_layout_async_handle_error(task, hdr->args.context->state,
hdr->ds_clp, hdr->lseg,
hdr->pgio_mirror_idx);
+ trace_nfs4_pnfs_read(hdr, err);
clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
switch (err) {
@@ -1494,16 +1498,19 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
loff_t end_offs = 0;
int err;
- trace_nfs4_pnfs_write(hdr, task->tk_status);
- if (task->tk_status < 0)
+ if (task->tk_status < 0) {
ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
hdr->args.offset, hdr->args.count,
- hdr->res.op_status, OP_WRITE,
+ &hdr->res.op_status, OP_WRITE,
task->tk_status);
+ trace_ff_layout_write_error(hdr);
+ }
+
err = ff_layout_async_handle_error(task, hdr->args.context->state,
hdr->ds_clp, hdr->lseg,
hdr->pgio_mirror_idx);
+ trace_nfs4_pnfs_write(hdr, err);
clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
switch (err) {
@@ -1537,15 +1544,18 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
{
int err;
- trace_nfs4_pnfs_commit_ds(data, task->tk_status);
- if (task->tk_status < 0)
+ if (task->tk_status < 0) {
ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
data->args.offset, data->args.count,
- data->res.op_status, OP_COMMIT,
+ &data->res.op_status, OP_COMMIT,
task->tk_status);
+ trace_ff_layout_commit_error(data);
+ }
+
err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
data->lseg, data->ds_commit_index);
+ trace_nfs4_pnfs_commit_ds(data, err);
switch (err) {
case -NFS4ERR_RESET_TO_PNFS:
pnfs_generic_prepare_to_resend_writes(data);
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
new file mode 100644
index 000000000000..e1b938457ab9
--- /dev/null
+++ b/fs/nfs/fs_context.c
@@ -0,0 +1,1440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/fs/nfs/fs_context.c
+ *
+ * Copyright (C) 1992 Rick Sladkey
+ * Conversion to new mount api Copyright (C) David Howells
+ *
+ * NFS mount handling.
+ *
+ * Split from fs/nfs/super.c by David Howells <dhowells@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
+#include "nfs.h"
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_MOUNT
+
+#if IS_ENABLED(CONFIG_NFS_V3)
+#define NFS_DEFAULT_VERSION 3
+#else
+#define NFS_DEFAULT_VERSION 2
+#endif
+
+#define NFS_MAX_CONNECTIONS 16
+
+enum nfs_param {
+ Opt_ac,
+ Opt_acdirmax,
+ Opt_acdirmin,
+ Opt_acl,
+ Opt_acregmax,
+ Opt_acregmin,
+ Opt_actimeo,
+ Opt_addr,
+ Opt_bg,
+ Opt_bsize,
+ Opt_clientaddr,
+ Opt_cto,
+ Opt_fg,
+ Opt_fscache,
+ Opt_fscache_flag,
+ Opt_hard,
+ Opt_intr,
+ Opt_local_lock,
+ Opt_lock,
+ Opt_lookupcache,
+ Opt_migration,
+ Opt_minorversion,
+ Opt_mountaddr,
+ Opt_mounthost,
+ Opt_mountport,
+ Opt_mountproto,
+ Opt_mountvers,
+ Opt_namelen,
+ Opt_nconnect,
+ Opt_port,
+ Opt_posix,
+ Opt_proto,
+ Opt_rdirplus,
+ Opt_rdma,
+ Opt_resvport,
+ Opt_retrans,
+ Opt_retry,
+ Opt_rsize,
+ Opt_sec,
+ Opt_sharecache,
+ Opt_sloppy,
+ Opt_soft,
+ Opt_softerr,
+ Opt_softreval,
+ Opt_source,
+ Opt_tcp,
+ Opt_timeo,
+ Opt_udp,
+ Opt_v,
+ Opt_vers,
+ Opt_wsize,
+};
+
+enum {
+ Opt_local_lock_all,
+ Opt_local_lock_flock,
+ Opt_local_lock_none,
+ Opt_local_lock_posix,
+};
+
+static const struct constant_table nfs_param_enums_local_lock[] = {
+ { "all", Opt_local_lock_all },
+ { "flock", Opt_local_lock_flock },
+ { "none", Opt_local_lock_none },
+ {}
+};
+
+enum {
+ Opt_lookupcache_all,
+ Opt_lookupcache_none,
+ Opt_lookupcache_positive,
+};
+
+static const struct constant_table nfs_param_enums_lookupcache[] = {
+ { "all", Opt_lookupcache_all },
+ { "none", Opt_lookupcache_none },
+ { "pos", Opt_lookupcache_positive },
+ { "positive", Opt_lookupcache_positive },
+ {}
+};
+
+static const struct fs_parameter_spec nfs_fs_parameters[] = {
+ fsparam_flag_no("ac", Opt_ac),
+ fsparam_u32 ("acdirmax", Opt_acdirmax),
+ fsparam_u32 ("acdirmin", Opt_acdirmin),
+ fsparam_flag_no("acl", Opt_acl),
+ fsparam_u32 ("acregmax", Opt_acregmax),
+ fsparam_u32 ("acregmin", Opt_acregmin),
+ fsparam_u32 ("actimeo", Opt_actimeo),
+ fsparam_string("addr", Opt_addr),
+ fsparam_flag ("bg", Opt_bg),
+ fsparam_u32 ("bsize", Opt_bsize),
+ fsparam_string("clientaddr", Opt_clientaddr),
+ fsparam_flag_no("cto", Opt_cto),
+ fsparam_flag ("fg", Opt_fg),
+ fsparam_flag_no("fsc", Opt_fscache_flag),
+ fsparam_string("fsc", Opt_fscache),
+ fsparam_flag ("hard", Opt_hard),
+ __fsparam(NULL, "intr", Opt_intr,
+ fs_param_neg_with_no|fs_param_deprecated, NULL),
+ fsparam_enum ("local_lock", Opt_local_lock, nfs_param_enums_local_lock),
+ fsparam_flag_no("lock", Opt_lock),
+ fsparam_enum ("lookupcache", Opt_lookupcache, nfs_param_enums_lookupcache),
+ fsparam_flag_no("migration", Opt_migration),
+ fsparam_u32 ("minorversion", Opt_minorversion),
+ fsparam_string("mountaddr", Opt_mountaddr),
+ fsparam_string("mounthost", Opt_mounthost),
+ fsparam_u32 ("mountport", Opt_mountport),
+ fsparam_string("mountproto", Opt_mountproto),
+ fsparam_u32 ("mountvers", Opt_mountvers),
+ fsparam_u32 ("namlen", Opt_namelen),
+ fsparam_u32 ("nconnect", Opt_nconnect),
+ fsparam_string("nfsvers", Opt_vers),
+ fsparam_u32 ("port", Opt_port),
+ fsparam_flag_no("posix", Opt_posix),
+ fsparam_string("proto", Opt_proto),
+ fsparam_flag_no("rdirplus", Opt_rdirplus),
+ fsparam_flag ("rdma", Opt_rdma),
+ fsparam_flag_no("resvport", Opt_resvport),
+ fsparam_u32 ("retrans", Opt_retrans),
+ fsparam_string("retry", Opt_retry),
+ fsparam_u32 ("rsize", Opt_rsize),
+ fsparam_string("sec", Opt_sec),
+ fsparam_flag_no("sharecache", Opt_sharecache),
+ fsparam_flag ("sloppy", Opt_sloppy),
+ fsparam_flag ("soft", Opt_soft),
+ fsparam_flag ("softerr", Opt_softerr),
+ fsparam_flag ("softreval", Opt_softreval),
+ fsparam_string("source", Opt_source),
+ fsparam_flag ("tcp", Opt_tcp),
+ fsparam_u32 ("timeo", Opt_timeo),
+ fsparam_flag ("udp", Opt_udp),
+ fsparam_flag ("v2", Opt_v),
+ fsparam_flag ("v3", Opt_v),
+ fsparam_flag ("v4", Opt_v),
+ fsparam_flag ("v4.0", Opt_v),
+ fsparam_flag ("v4.1", Opt_v),
+ fsparam_flag ("v4.2", Opt_v),
+ fsparam_string("vers", Opt_vers),
+ fsparam_u32 ("wsize", Opt_wsize),
+ {}
+};
+
+enum {
+ Opt_vers_2,
+ Opt_vers_3,
+ Opt_vers_4,
+ Opt_vers_4_0,
+ Opt_vers_4_1,
+ Opt_vers_4_2,
+};
+
+static const struct constant_table nfs_vers_tokens[] = {
+ { "2", Opt_vers_2 },
+ { "3", Opt_vers_3 },
+ { "4", Opt_vers_4 },
+ { "4.0", Opt_vers_4_0 },
+ { "4.1", Opt_vers_4_1 },
+ { "4.2", Opt_vers_4_2 },
+};
+
+enum {
+ Opt_xprt_rdma,
+ Opt_xprt_rdma6,
+ Opt_xprt_tcp,
+ Opt_xprt_tcp6,
+ Opt_xprt_udp,
+ Opt_xprt_udp6,
+ nr__Opt_xprt
+};
+
+static const struct constant_table nfs_xprt_protocol_tokens[nr__Opt_xprt] = {
+ { "rdma", Opt_xprt_rdma },
+ { "rdma6", Opt_xprt_rdma6 },
+ { "tcp", Opt_xprt_tcp },
+ { "tcp6", Opt_xprt_tcp6 },
+ { "udp", Opt_xprt_udp },
+ { "udp6", Opt_xprt_udp6 },
+};
+
+enum {
+ Opt_sec_krb5,
+ Opt_sec_krb5i,
+ Opt_sec_krb5p,
+ Opt_sec_lkey,
+ Opt_sec_lkeyi,
+ Opt_sec_lkeyp,
+ Opt_sec_none,
+ Opt_sec_spkm,
+ Opt_sec_spkmi,
+ Opt_sec_spkmp,
+ Opt_sec_sys,
+ nr__Opt_sec
+};
+
+static const struct constant_table nfs_secflavor_tokens[] = {
+ { "krb5", Opt_sec_krb5 },
+ { "krb5i", Opt_sec_krb5i },
+ { "krb5p", Opt_sec_krb5p },
+ { "lkey", Opt_sec_lkey },
+ { "lkeyi", Opt_sec_lkeyi },
+ { "lkeyp", Opt_sec_lkeyp },
+ { "none", Opt_sec_none },
+ { "null", Opt_sec_none },
+ { "spkm3", Opt_sec_spkm },
+ { "spkm3i", Opt_sec_spkmi },
+ { "spkm3p", Opt_sec_spkmp },
+ { "sys", Opt_sec_sys },
+};
+
+/*
+ * Sanity-check a server address provided by the mount command.
+ *
+ * Address family must be initialized, and address must not be
+ * the ANY address for that family.
+ */
+static int nfs_verify_server_address(struct sockaddr *addr)
+{
+ switch (addr->sa_family) {
+ case AF_INET: {
+ struct sockaddr_in *sa = (struct sockaddr_in *)addr;
+ return sa->sin_addr.s_addr != htonl(INADDR_ANY);
+ }
+ case AF_INET6: {
+ struct in6_addr *sa = &((struct sockaddr_in6 *)addr)->sin6_addr;
+ return !ipv6_addr_any(sa);
+ }
+ }
+
+ dfprintk(MOUNT, "NFS: Invalid IP address specified\n");
+ return 0;
+}
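/*
 * Illustrative sketch, not part of the patch: the same ANY-address
 * rejection expressed as a self-contained userspace program — 0.0.0.0
 * fails the check, a concrete address passes.  Purely for demonstration.
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static int verify_server_address(const struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const void *)addr;
		return sin->sin_addr.s_addr != htonl(INADDR_ANY);
	}
	if (addr->sa_family == AF_INET6) {
		static const struct in6_addr any = IN6ADDR_ANY_INIT;
		const struct sockaddr_in6 *sin6 = (const void *)addr;
		return memcmp(&sin6->sin6_addr, &any, sizeof(any)) != 0;
	}
	return 0;
}

int main(void)
{
	struct sockaddr_in sa = { .sin_family = AF_INET };

	sa.sin_addr.s_addr = htonl(INADDR_ANY);
	printf("0.0.0.0   -> %d\n", verify_server_address((struct sockaddr *)&sa));
	inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);
	printf("192.0.2.1 -> %d\n", verify_server_address((struct sockaddr *)&sa));
	return 0;
}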
+
+/*
+ * Sanity check the NFS transport protocol.
+ */
+static void nfs_validate_transport_protocol(struct nfs_fs_context *ctx)
+{
+ switch (ctx->nfs_server.protocol) {
+ case XPRT_TRANSPORT_UDP:
+ case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_RDMA:
+ break;
+ default:
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ }
+}
+
+/*
+ * For text based NFSv2/v3 mounts, the mount protocol transport default
+ * settings should depend upon the specified NFS transport.
+ */
+static void nfs_set_mount_transport_protocol(struct nfs_fs_context *ctx)
+{
+ nfs_validate_transport_protocol(ctx);
+
+ if (ctx->mount_server.protocol == XPRT_TRANSPORT_UDP ||
+ ctx->mount_server.protocol == XPRT_TRANSPORT_TCP)
+ return;
+ switch (ctx->nfs_server.protocol) {
+ case XPRT_TRANSPORT_UDP:
+ ctx->mount_server.protocol = XPRT_TRANSPORT_UDP;
+ break;
+ case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_RDMA:
+ ctx->mount_server.protocol = XPRT_TRANSPORT_TCP;
+ }
+}
+
+/*
+ * Add 'flavor' to 'auth_info' if not already present.
+ * Returns 0 if 'flavor' ends up in the list, a negative error otherwise.
+ */
+static int nfs_auth_info_add(struct fs_context *fc,
+ struct nfs_auth_info *auth_info,
+ rpc_authflavor_t flavor)
+{
+ unsigned int i;
+ unsigned int max_flavor_len = ARRAY_SIZE(auth_info->flavors);
+
+ /* make sure this flavor isn't already in the list */
+ for (i = 0; i < auth_info->flavor_len; i++) {
+ if (flavor == auth_info->flavors[i])
+ return 0;
+ }
+
+ if (auth_info->flavor_len + 1 >= max_flavor_len)
+ return nfs_invalf(fc, "NFS: too many sec= flavors");
+
+ auth_info->flavors[auth_info->flavor_len++] = flavor;
+ return 0;
+}
+
+/*
+ * Parse the value of the 'sec=' option.
+ */
+static int nfs_parse_security_flavors(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ rpc_authflavor_t pseudoflavor;
+ char *string = param->string, *p;
+ int ret;
+
+ dfprintk(MOUNT, "NFS: parsing %s=%s option\n", param->key, param->string);
+
+ while ((p = strsep(&string, ":")) != NULL) {
+ if (!*p)
+ continue;
+ switch (lookup_constant(nfs_secflavor_tokens, p, -1)) {
+ case Opt_sec_none:
+ pseudoflavor = RPC_AUTH_NULL;
+ break;
+ case Opt_sec_sys:
+ pseudoflavor = RPC_AUTH_UNIX;
+ break;
+ case Opt_sec_krb5:
+ pseudoflavor = RPC_AUTH_GSS_KRB5;
+ break;
+ case Opt_sec_krb5i:
+ pseudoflavor = RPC_AUTH_GSS_KRB5I;
+ break;
+ case Opt_sec_krb5p:
+ pseudoflavor = RPC_AUTH_GSS_KRB5P;
+ break;
+ case Opt_sec_lkey:
+ pseudoflavor = RPC_AUTH_GSS_LKEY;
+ break;
+ case Opt_sec_lkeyi:
+ pseudoflavor = RPC_AUTH_GSS_LKEYI;
+ break;
+ case Opt_sec_lkeyp:
+ pseudoflavor = RPC_AUTH_GSS_LKEYP;
+ break;
+ case Opt_sec_spkm:
+ pseudoflavor = RPC_AUTH_GSS_SPKM;
+ break;
+ case Opt_sec_spkmi:
+ pseudoflavor = RPC_AUTH_GSS_SPKMI;
+ break;
+ case Opt_sec_spkmp:
+ pseudoflavor = RPC_AUTH_GSS_SPKMP;
+ break;
+ default:
+ return nfs_invalf(fc, "NFS: sec=%s option not recognized", p);
+ }
+
+ ret = nfs_auth_info_add(fc, &ctx->auth_info, pseudoflavor);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
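/*
 * Illustrative sketch, not part of the patch: the strsep()-based walk
 * used above, reduced to a standalone program.  Empty components in a
 * value such as "krb5i::sys" are skipped, as in the kernel code.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "krb5i::sys";	/* note the empty component */
	char *s = buf, *p;

	while ((p = strsep(&s, ":")) != NULL) {
		if (!*p)
			continue;	/* skip empty flavors */
		printf("flavor: %s\n", p);
	}
	return 0;
}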
+
+static int nfs_parse_version_string(struct fs_context *fc,
+ const char *string)
+{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+
+ ctx->flags &= ~NFS_MOUNT_VER3;
+ switch (lookup_constant(nfs_vers_tokens, string, -1)) {
+ case Opt_vers_2:
+ ctx->version = 2;
+ break;
+ case Opt_vers_3:
+ ctx->flags |= NFS_MOUNT_VER3;
+ ctx->version = 3;
+ break;
+ case Opt_vers_4:
+ /* Backward compatibility option. In future,
+ * the mount program should always supply
+ * a NFSv4 minor version number.
+ */
+ ctx->version = 4;
+ break;
+ case Opt_vers_4_0:
+ ctx->version = 4;
+ ctx->minorversion = 0;
+ break;
+ case Opt_vers_4_1:
+ ctx->version = 4;
+ ctx->minorversion = 1;
+ break;
+ case Opt_vers_4_2:
+ ctx->version = 4;
+ ctx->minorversion = 2;
+ break;
+ default:
+ return nfs_invalf(fc, "NFS: Unsupported NFS version");
+ }
+ return 0;
+}
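/*
 * Illustrative sketch, not part of the patch: the "vers=" mapping above,
 * reduced to a standalone table walk.  A minor version of -1 stands for
 * "unspecified"; that convention is this sketch's own assumption.
 */
#include <stdio.h>
#include <string.h>

struct vers_map { const char *name; int major, minor; };

static const struct vers_map vers_tokens[] = {
	{ "2", 2, -1 }, { "3", 3, -1 }, { "4", 4, -1 },
	{ "4.0", 4, 0 }, { "4.1", 4, 1 }, { "4.2", 4, 2 },
};

int main(void)
{
	const char *arg = "4.1";
	size_t i;

	for (i = 0; i < sizeof(vers_tokens) / sizeof(vers_tokens[0]); i++)
		if (strcmp(vers_tokens[i].name, arg) == 0)
			printf("version=%d minorversion=%d\n",
			       vers_tokens[i].major, vers_tokens[i].minor);
	return 0;
}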
+
+/*
+ * Parse a single mount parameter.
+ */
+static int nfs_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct fs_parse_result result;
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ unsigned short protofamily, mountfamily;
+ unsigned int len;
+ int ret, opt;
+
+ dfprintk(MOUNT, "NFS: parsing nfs mount option '%s'\n", param->key);
+
+ opt = fs_parse(fc, nfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return ctx->sloppy ? 1 : opt;
+
+ switch (opt) {
+ case Opt_source:
+ if (fc->source)
+ return nfs_invalf(fc, "NFS: Multiple sources not supported");
+ fc->source = param->string;
+ param->string = NULL;
+ break;
+
+ /*
+ * boolean options: foo/nofoo
+ */
+ case Opt_soft:
+ ctx->flags |= NFS_MOUNT_SOFT;
+ ctx->flags &= ~NFS_MOUNT_SOFTERR;
+ break;
+ case Opt_softerr:
+ ctx->flags |= NFS_MOUNT_SOFTERR | NFS_MOUNT_SOFTREVAL;
+ ctx->flags &= ~NFS_MOUNT_SOFT;
+ break;
+ case Opt_hard:
+ ctx->flags &= ~(NFS_MOUNT_SOFT |
+ NFS_MOUNT_SOFTERR |
+ NFS_MOUNT_SOFTREVAL);
+ break;
+ case Opt_softreval:
+ if (result.negated)
+ ctx->flags &= ~NFS_MOUNT_SOFTREVAL;
+ else
+ ctx->flags |= NFS_MOUNT_SOFTREVAL;
+ break;
+ case Opt_posix:
+ if (result.negated)
+ ctx->flags &= ~NFS_MOUNT_POSIX;
+ else
+ ctx->flags |= NFS_MOUNT_POSIX;
+ break;
+ case Opt_cto:
+ if (result.negated)
+ ctx->flags |= NFS_MOUNT_NOCTO;
+ else
+ ctx->flags &= ~NFS_MOUNT_NOCTO;
+ break;
+ case Opt_ac:
+ if (result.negated)
+ ctx->flags |= NFS_MOUNT_NOAC;
+ else
+ ctx->flags &= ~NFS_MOUNT_NOAC;
+ break;
+ case Opt_lock:
+ if (result.negated) {
+ ctx->flags |= NFS_MOUNT_NONLM;
+ ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
+ } else {
+ ctx->flags &= ~NFS_MOUNT_NONLM;
+ ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
+ }
+ break;
+ case Opt_udp:
+ ctx->flags &= ~NFS_MOUNT_TCP;
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+ break;
+ case Opt_tcp:
+ ctx->flags |= NFS_MOUNT_TCP;
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ break;
+ case Opt_rdma:
+ ctx->flags |= NFS_MOUNT_TCP; /* for side protocols */
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
+ xprt_load_transport(param->key);
+ break;
+ case Opt_acl:
+ if (result.negated)
+ ctx->flags |= NFS_MOUNT_NOACL;
+ else
+ ctx->flags &= ~NFS_MOUNT_NOACL;
+ break;
+ case Opt_rdirplus:
+ if (result.negated)
+ ctx->flags |= NFS_MOUNT_NORDIRPLUS;
+ else
+ ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ break;
+ case Opt_sharecache:
+ if (result.negated)
+ ctx->flags |= NFS_MOUNT_UNSHARED;
+ else
+ ctx->flags &= ~NFS_MOUNT_UNSHARED;
+ break;
+ case Opt_resvport:
+ if (result.negated)
+ ctx->flags |= NFS_MOUNT_NORESVPORT;
+ else
+ ctx->flags &= ~NFS_MOUNT_NORESVPORT;
+ break;
+ case Opt_fscache_flag:
+ if (result.negated)
+ ctx->options &= ~NFS_OPTION_FSCACHE;
+ else
+ ctx->options |= NFS_OPTION_FSCACHE;
+ kfree(ctx->fscache_uniq);
+ ctx->fscache_uniq = NULL;
+ break;
+ case Opt_fscache:
+ ctx->options |= NFS_OPTION_FSCACHE;
+ kfree(ctx->fscache_uniq);
+ ctx->fscache_uniq = param->string;
+ param->string = NULL;
+ break;
+ case Opt_migration:
+ if (result.negated)
+ ctx->options &= ~NFS_OPTION_MIGRATION;
+ else
+ ctx->options |= NFS_OPTION_MIGRATION;
+ break;
+
+ /*
+ * options that take numeric values
+ */
+ case Opt_port:
+ if (result.uint_32 > USHRT_MAX)
+ goto out_of_bounds;
+ ctx->nfs_server.port = result.uint_32;
+ break;
+ case Opt_rsize:
+ ctx->rsize = result.uint_32;
+ break;
+ case Opt_wsize:
+ ctx->wsize = result.uint_32;
+ break;
+ case Opt_bsize:
+ ctx->bsize = result.uint_32;
+ break;
+ case Opt_timeo:
+ if (result.uint_32 < 1 || result.uint_32 > INT_MAX)
+ goto out_of_bounds;
+ ctx->timeo = result.uint_32;
+ break;
+ case Opt_retrans:
+ if (result.uint_32 > INT_MAX)
+ goto out_of_bounds;
+ ctx->retrans = result.uint_32;
+ break;
+ case Opt_acregmin:
+ ctx->acregmin = result.uint_32;
+ break;
+ case Opt_acregmax:
+ ctx->acregmax = result.uint_32;
+ break;
+ case Opt_acdirmin:
+ ctx->acdirmin = result.uint_32;
+ break;
+ case Opt_acdirmax:
+ ctx->acdirmax = result.uint_32;
+ break;
+ case Opt_actimeo:
+ ctx->acregmin = result.uint_32;
+ ctx->acregmax = result.uint_32;
+ ctx->acdirmin = result.uint_32;
+ ctx->acdirmax = result.uint_32;
+ break;
+ case Opt_namelen:
+ ctx->namlen = result.uint_32;
+ break;
+ case Opt_mountport:
+ if (result.uint_32 > USHRT_MAX)
+ goto out_of_bounds;
+ ctx->mount_server.port = result.uint_32;
+ break;
+ case Opt_mountvers:
+ if (result.uint_32 < NFS_MNT_VERSION ||
+ result.uint_32 > NFS_MNT3_VERSION)
+ goto out_of_bounds;
+ ctx->mount_server.version = result.uint_32;
+ break;
+ case Opt_minorversion:
+ if (result.uint_32 > NFS4_MAX_MINOR_VERSION)
+ goto out_of_bounds;
+ ctx->minorversion = result.uint_32;
+ break;
+
+ /*
+ * options that take text values
+ */
+ case Opt_v:
+ ret = nfs_parse_version_string(fc, param->key + 1);
+ if (ret < 0)
+ return ret;
+ break;
+ case Opt_vers:
+ ret = nfs_parse_version_string(fc, param->string);
+ if (ret < 0)
+ return ret;
+ break;
+ case Opt_sec:
+ ret = nfs_parse_security_flavors(fc, param);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case Opt_proto:
+ protofamily = AF_INET;
+ switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
+ case Opt_xprt_udp6:
+ protofamily = AF_INET6;
+ /* fall through */
+ case Opt_xprt_udp:
+ ctx->flags &= ~NFS_MOUNT_TCP;
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+ break;
+ case Opt_xprt_tcp6:
+ protofamily = AF_INET6;
+ /* fall through */
+ case Opt_xprt_tcp:
+ ctx->flags |= NFS_MOUNT_TCP;
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ break;
+ case Opt_xprt_rdma6:
+ protofamily = AF_INET6;
+ /* fall through */
+ case Opt_xprt_rdma:
+ /* vector side protocols to TCP */
+ ctx->flags |= NFS_MOUNT_TCP;
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
+ xprt_load_transport(param->string);
+ break;
+ default:
+ return nfs_invalf(fc, "NFS: Unrecognized transport protocol");
+ }
+
+ ctx->protofamily = protofamily;
+ break;
+
+ case Opt_mountproto:
+ mountfamily = AF_INET;
+ switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
+ case Opt_xprt_udp6:
+ mountfamily = AF_INET6;
+ /* fall through */
+ case Opt_xprt_udp:
+ ctx->mount_server.protocol = XPRT_TRANSPORT_UDP;
+ break;
+ case Opt_xprt_tcp6:
+ mountfamily = AF_INET6;
+ /* fall through */
+ case Opt_xprt_tcp:
+ ctx->mount_server.protocol = XPRT_TRANSPORT_TCP;
+ break;
+ case Opt_xprt_rdma: /* not used for side protocols */
+ default:
+ return nfs_invalf(fc, "NFS: Unrecognized transport protocol");
+ }
+ ctx->mountfamily = mountfamily;
+ break;
+
+ case Opt_addr:
+ len = rpc_pton(fc->net_ns, param->string, param->size,
+ &ctx->nfs_server.address,
+ sizeof(ctx->nfs_server._address));
+ if (len == 0)
+ goto out_invalid_address;
+ ctx->nfs_server.addrlen = len;
+ break;
+ case Opt_clientaddr:
+ kfree(ctx->client_address);
+ ctx->client_address = param->string;
+ param->string = NULL;
+ break;
+ case Opt_mounthost:
+ kfree(ctx->mount_server.hostname);
+ ctx->mount_server.hostname = param->string;
+ param->string = NULL;
+ break;
+ case Opt_mountaddr:
+ len = rpc_pton(fc->net_ns, param->string, param->size,
+ &ctx->mount_server.address,
+ sizeof(ctx->mount_server._address));
+ if (len == 0)
+ goto out_invalid_address;
+ ctx->mount_server.addrlen = len;
+ break;
+ case Opt_nconnect:
+ if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_CONNECTIONS)
+ goto out_of_bounds;
+ ctx->nfs_server.nconnect = result.uint_32;
+ break;
+ case Opt_lookupcache:
+ switch (result.uint_32) {
+ case Opt_lookupcache_all:
+ ctx->flags &= ~(NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE);
+ break;
+ case Opt_lookupcache_positive:
+ ctx->flags &= ~NFS_MOUNT_LOOKUP_CACHE_NONE;
+ ctx->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG;
+ break;
+ case Opt_lookupcache_none:
+ ctx->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ break;
+ case Opt_local_lock:
+ switch (result.uint_32) {
+ case Opt_local_lock_all:
+ ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
+ break;
+ case Opt_local_lock_flock:
+ ctx->flags |= NFS_MOUNT_LOCAL_FLOCK;
+ break;
+ case Opt_local_lock_posix:
+ ctx->flags |= NFS_MOUNT_LOCAL_FCNTL;
+ break;
+ case Opt_local_lock_none:
+ ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ break;
+
+ /*
+ * Special options
+ */
+ case Opt_sloppy:
+ ctx->sloppy = true;
+ dfprintk(MOUNT, "NFS: relaxing parsing rules\n");
+ break;
+ }
+
+ return 0;
+
+out_invalid_value:
+ return nfs_invalf(fc, "NFS: Bad mount option value specified");
+out_invalid_address:
+ return nfs_invalf(fc, "NFS: Bad IP address specified");
+out_of_bounds:
+ return nfs_invalf(fc, "NFS: Value for '%s' out of range", param->key);
+}
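/*
 * Illustrative sketch, not part of the patch: how userspace reaches
 * nfs_fs_context_parse_param() through the new mount API.  Each
 * FSCONFIG_SET_STRING call is delivered as one fs_parameter.  Assumes
 * kernel headers and syscall numbers for the fsopen(2) family
 * (Linux >= 5.2); the server name and "/mnt" are placeholders.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/mount.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fsfd, mfd;

	fsfd = syscall(SYS_fsopen, "nfs4", FSOPEN_CLOEXEC);
	if (fsfd < 0) {
		perror("fsopen");
		return 1;
	}
	/* each of these lands in nfs_fs_context_parse_param() */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
		"server:/export", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "vers", "4.2", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, 0);
	if (mfd < 0) {
		perror("fsmount");
		return 1;
	}
	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
		MOVE_MOUNT_F_EMPTY_PATH);
	return 0;
}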
+
+/*
+ * Split fc->source into "hostname:export_path".
+ *
+ * The leftmost colon demarks the split between the server's hostname
+ * and the export path. If the hostname starts with a left square
+ * bracket, then it may contain colons.
+ *
+ * Note: caller frees hostname and export path, even on error.
+ */
+static int nfs_parse_source(struct fs_context *fc,
+ size_t maxnamlen, size_t maxpathlen)
+{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ const char *dev_name = fc->source;
+ size_t len;
+ const char *end;
+
+ if (unlikely(!dev_name || !*dev_name)) {
+ dfprintk(MOUNT, "NFS: device name not specified\n");
+ return -EINVAL;
+ }
+
+ /* Is the host name protected with square brackets? */
+ if (*dev_name == '[') {
+ end = strchr(++dev_name, ']');
+ if (end == NULL || end[1] != ':')
+ goto out_bad_devname;
+
+ len = end - dev_name;
+ end++;
+ } else {
+ const char *comma;
+
+ end = strchr(dev_name, ':');
+ if (end == NULL)
+ goto out_bad_devname;
+ len = end - dev_name;
+
+ /* kill possible hostname list: not supported */
+ comma = memchr(dev_name, ',', len);
+ if (comma)
+ len = comma - dev_name;
+ }
+
+ if (len > maxnamlen)
+ goto out_hostname;
+
+ /* N.B. caller will free nfs_server.hostname in all cases */
+ ctx->nfs_server.hostname = kmemdup_nul(dev_name, len, GFP_KERNEL);
+ if (!ctx->nfs_server.hostname)
+ goto out_nomem;
+ len = strlen(++end);
+ if (len > maxpathlen)
+ goto out_path;
+ ctx->nfs_server.export_path = kmemdup_nul(end, len, GFP_KERNEL);
+ if (!ctx->nfs_server.export_path)
+ goto out_nomem;
+
+ dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", ctx->nfs_server.export_path);
+ return 0;
+
+out_bad_devname:
+ return nfs_invalf(fc, "NFS: device name not in host:path format");
+out_nomem:
+ nfs_errorf(fc, "NFS: not enough memory to parse device name");
+ return -ENOMEM;
+out_hostname:
+ nfs_errorf(fc, "NFS: server hostname too long");
+ return -ENAMETOOLONG;
+out_path:
+ nfs_errorf(fc, "NFS: export pathname too long");
+ return -ENAMETOOLONG;
+}
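/*
 * Illustrative sketch, not part of the patch: the device-name split
 * above as a standalone program.  The leftmost ':' separates host from
 * export path unless the host is bracketed, e.g. "[fe80::1]:/export".
 * All names and buffer sizes here are made up.
 */
#include <stdio.h>
#include <string.h>

static int split_source(const char *dev, char *host, size_t hlen,
			const char **path)
{
	const char *end;
	size_t len;

	if (*dev == '[') {
		end = strchr(++dev, ']');
		if (!end || end[1] != ':')
			return -1;
		len = end - dev;
		end += 2;		/* skip "]:" */
	} else {
		end = strchr(dev, ':');
		if (!end)
			return -1;
		len = end - dev;
		end++;			/* skip ":" */
	}
	if (len >= hlen)
		return -1;
	memcpy(host, dev, len);
	host[len] = '\0';
	*path = end;
	return 0;
}

int main(void)
{
	char host[64];
	const char *path;

	if (!split_source("[fe80::1]:/export", host, sizeof(host), &path))
		printf("host=%s path=%s\n", host, path);
	return 0;
}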
+
+static inline bool is_remount_fc(struct fs_context *fc)
+{
+ return fc->root != NULL;
+}
+
+/*
+ * Parse monolithic NFS2/NFS3 mount data
+ * - fills in the mount root filehandle
+ *
+ * For option strings, user space handles the following behaviors:
+ *
+ * + DNS: mapping server host name to IP address ("addr=" option)
+ *
+ * + failure mode: how to behave if a mount request can't be handled
+ * immediately ("fg/bg" option)
+ *
+ * + retry: how often to retry a mount request ("retry=" option)
+ *
+ * + breaking back: trying proto=udp after proto=tcp, v2 after v3,
+ * mountproto=tcp after mountproto=udp, and so on
+ */
+static int nfs23_parse_monolithic(struct fs_context *fc,
+ struct nfs_mount_data *data)
+{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ struct nfs_fh *mntfh = ctx->mntfh;
+ struct sockaddr *sap = (struct sockaddr *)&ctx->nfs_server.address;
+ int extra_flags = NFS_MOUNT_LEGACY_INTERFACE;
+
+ if (data == NULL)
+ goto out_no_data;
+
+ ctx->version = NFS_DEFAULT_VERSION;
+ switch (data->version) {
+ case 1:
+ data->namlen = 0; /* fall through */
+ case 2:
+ data->bsize = 0; /* fall through */
+ case 3:
+ if (data->flags & NFS_MOUNT_VER3)
+ goto out_no_v3;
+ data->root.size = NFS2_FHSIZE;
+ memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
+ /* Turn off security negotiation */
+ extra_flags |= NFS_MOUNT_SECFLAVOUR;
+ /* fall through */
+ case 4:
+ if (data->flags & NFS_MOUNT_SECFLAVOUR)
+ goto out_no_sec;
+ /* fall through */
+ case 5:
+ memset(data->context, 0, sizeof(data->context));
+ /* fall through */
+ case 6:
+ if (data->flags & NFS_MOUNT_VER3) {
+ if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
+ goto out_invalid_fh;
+ mntfh->size = data->root.size;
+ ctx->version = 3;
+ } else {
+ mntfh->size = NFS2_FHSIZE;
+ ctx->version = 2;
+ }
+
+ memcpy(mntfh->data, data->root.data, mntfh->size);
+ if (mntfh->size < sizeof(mntfh->data))
+ memset(mntfh->data + mntfh->size, 0,
+ sizeof(mntfh->data) - mntfh->size);
+
+ /*
+ * Translate to nfs_fs_context, which nfs_fill_super
+ * can deal with.
+ */
+ ctx->flags = data->flags & NFS_MOUNT_FLAGMASK;
+ ctx->flags |= extra_flags;
+ ctx->rsize = data->rsize;
+ ctx->wsize = data->wsize;
+ ctx->timeo = data->timeo;
+ ctx->retrans = data->retrans;
+ ctx->acregmin = data->acregmin;
+ ctx->acregmax = data->acregmax;
+ ctx->acdirmin = data->acdirmin;
+ ctx->acdirmax = data->acdirmax;
+ ctx->need_mount = false;
+
+ memcpy(sap, &data->addr, sizeof(data->addr));
+ ctx->nfs_server.addrlen = sizeof(data->addr);
+ ctx->nfs_server.port = ntohs(data->addr.sin_port);
+ if (sap->sa_family != AF_INET ||
+ !nfs_verify_server_address(sap))
+ goto out_no_address;
+
+ if (!(data->flags & NFS_MOUNT_TCP))
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+ /* N.B. caller will free nfs_server.hostname in all cases */
+ ctx->nfs_server.hostname = kstrdup(data->hostname, GFP_KERNEL);
+ if (!ctx->nfs_server.hostname)
+ goto out_nomem;
+
+ ctx->namlen = data->namlen;
+ ctx->bsize = data->bsize;
+
+ if (data->flags & NFS_MOUNT_SECFLAVOUR)
+ ctx->selected_flavor = data->pseudoflavor;
+ else
+ ctx->selected_flavor = RPC_AUTH_UNIX;
+
+ if (!(data->flags & NFS_MOUNT_NONLM))
+ ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK|
+ NFS_MOUNT_LOCAL_FCNTL);
+ else
+ ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK|
+ NFS_MOUNT_LOCAL_FCNTL);
+
+ /*
+ * The legacy version 6 binary mount data from userspace has a
+ * field used only to transport selinux information into the
+ * kernel. To continue to support that functionality we
+ * have a touch of selinux knowledge here in the NFS code. The
+ * userspace code converted context=blah to just blah so we are
+ * converting back to the full string selinux understands.
+ */
+ if (data->context[0]) {
+#ifdef CONFIG_SECURITY_SELINUX
+ int ret;
+
+ data->context[NFS_MAX_CONTEXT_LEN] = '\0';
+ ret = vfs_parse_fs_string(fc, "context",
+ data->context, strlen(data->context));
+ if (ret < 0)
+ return ret;
+#else
+ return -EINVAL;
+#endif
+ }
+
+ break;
+ default:
+ goto generic;
+ }
+
+ ctx->skip_reconfig_option_check = true;
+ return 0;
+
+generic:
+ return generic_parse_monolithic(fc, data);
+
+out_no_data:
+ if (is_remount_fc(fc)) {
+ ctx->skip_reconfig_option_check = true;
+ return 0;
+ }
+ return nfs_invalf(fc, "NFS: mount program didn't pass any mount data");
+
+out_no_v3:
+ return nfs_invalf(fc, "NFS: nfs_mount_data version does not support v3");
+
+out_no_sec:
+ return nfs_invalf(fc, "NFS: nfs_mount_data version supports only AUTH_SYS");
+
+out_nomem:
+ dfprintk(MOUNT, "NFS: not enough memory to handle mount options");
+ return -ENOMEM;
+
+out_no_address:
+ return nfs_invalf(fc, "NFS: mount program didn't pass remote address");
+
+out_invalid_fh:
+ return nfs_invalf(fc, "NFS: invalid root filehandle");
+}
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+/*
+ * Validate NFSv4 mount options
+ */
+static int nfs4_parse_monolithic(struct fs_context *fc,
+ struct nfs4_mount_data *data)
+{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ struct sockaddr *sap = (struct sockaddr *)&ctx->nfs_server.address;
+ char *c;
+
+ if (data == NULL)
+ goto out_no_data;
+
+ ctx->version = 4;
+
+ switch (data->version) {
+ case 1:
+ if (data->host_addrlen > sizeof(ctx->nfs_server.address))
+ goto out_no_address;
+ if (data->host_addrlen == 0)
+ goto out_no_address;
+ ctx->nfs_server.addrlen = data->host_addrlen;
+ if (copy_from_user(sap, data->host_addr, data->host_addrlen))
+ return -EFAULT;
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
+ ctx->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
+
+ if (data->auth_flavourlen) {
+ rpc_authflavor_t pseudoflavor;
+ if (data->auth_flavourlen > 1)
+ goto out_inval_auth;
+ if (copy_from_user(&pseudoflavor,
+ data->auth_flavours,
+ sizeof(pseudoflavor)))
+ return -EFAULT;
+ ctx->selected_flavor = pseudoflavor;
+ } else
+ ctx->selected_flavor = RPC_AUTH_UNIX;
+
+ c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ ctx->nfs_server.hostname = c;
+
+ c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ ctx->nfs_server.export_path = c;
+ dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", c);
+
+ c = strndup_user(data->client_addr.data, 16);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ ctx->client_address = c;
+
+ /*
+ * Translate to nfs_fs_context, which nfs_fill_super
+ * can deal with.
+ */
+
+ ctx->flags = data->flags & NFS4_MOUNT_FLAGMASK;
+ ctx->rsize = data->rsize;
+ ctx->wsize = data->wsize;
+ ctx->timeo = data->timeo;
+ ctx->retrans = data->retrans;
+ ctx->acregmin = data->acregmin;
+ ctx->acregmax = data->acregmax;
+ ctx->acdirmin = data->acdirmin;
+ ctx->acdirmax = data->acdirmax;
+ ctx->nfs_server.protocol = data->proto;
+ nfs_validate_transport_protocol(ctx);
+ if (ctx->nfs_server.protocol == XPRT_TRANSPORT_UDP)
+ goto out_invalid_transport_udp;
+
+ break;
+ default:
+ goto generic;
+ }
+
+ ctx->skip_reconfig_option_check = true;
+ return 0;
+
+generic:
+ return generic_parse_monolithic(fc, data);
+
+out_no_data:
+ if (is_remount_fc(fc)) {
+ ctx->skip_reconfig_option_check = true;
+ return 0;
+ }
+ return nfs_invalf(fc, "NFS4: mount program didn't pass any mount data");
+
+out_inval_auth:
+ return nfs_invalf(fc, "NFS4: Invalid number of RPC auth flavours %d",
+ data->auth_flavourlen);
+
+out_no_address:
+ return nfs_invalf(fc, "NFS4: mount program didn't pass remote address");
+
+out_invalid_transport_udp:
+ return nfs_invalf(fc, "NFSv4: Unsupported transport protocol udp");
+}
+#endif
+
+/*
+ * Parse a monolithic block of data from sys_mount().
+ */
+static int nfs_fs_context_parse_monolithic(struct fs_context *fc,
+ void *data)
+{
+ if (fc->fs_type == &nfs_fs_type)
+ return nfs23_parse_monolithic(fc, data);
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+ if (fc->fs_type == &nfs4_fs_type)
+ return nfs4_parse_monolithic(fc, data);
+#endif
+
+ return nfs_invalf(fc, "NFS: Unsupported monolithic data version");
+}
+
+/*
+ * Validate the preparsed information in the config.
+ */
+static int nfs_fs_context_validate(struct fs_context *fc)
+{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ struct nfs_subversion *nfs_mod;
+ struct sockaddr *sap = (struct sockaddr *)&ctx->nfs_server.address;
+ int max_namelen = PAGE_SIZE;
+ int max_pathlen = NFS_MAXPATHLEN;
+ int port = 0;
+ int ret;
+
+ if (!fc->source)
+ goto out_no_device_name;
+
+ /* Check for sanity first. */
+ if (ctx->minorversion && ctx->version != 4)
+ goto out_minorversion_mismatch;
+
+ if (ctx->options & NFS_OPTION_MIGRATION &&
+ (ctx->version != 4 || ctx->minorversion != 0))
+ goto out_migration_misuse;
+
+ /* Verify that any proto=/mountproto= options match the address
+ * families in the addr=/mountaddr= options.
+ */
+ if (ctx->protofamily != AF_UNSPEC &&
+ ctx->protofamily != ctx->nfs_server.address.sa_family)
+ goto out_proto_mismatch;
+
+ if (ctx->mountfamily != AF_UNSPEC) {
+ if (ctx->mount_server.addrlen) {
+ if (ctx->mountfamily != ctx->mount_server.address.sa_family)
+ goto out_mountproto_mismatch;
+ } else {
+ if (ctx->mountfamily != ctx->nfs_server.address.sa_family)
+ goto out_mountproto_mismatch;
+ }
+ }
+
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
+
+ if (ctx->version == 4) {
+ if (IS_ENABLED(CONFIG_NFS_V4)) {
+ if (ctx->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
+ port = NFS_RDMA_PORT;
+ else
+ port = NFS_PORT;
+ max_namelen = NFS4_MAXNAMLEN;
+ max_pathlen = NFS4_MAXPATHLEN;
+ nfs_validate_transport_protocol(ctx);
+ if (ctx->nfs_server.protocol == XPRT_TRANSPORT_UDP)
+ goto out_invalid_transport_udp;
+ ctx->flags &= ~(NFS_MOUNT_NONLM | NFS_MOUNT_NOACL |
+ NFS_MOUNT_VER3 | NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
+ } else {
+ goto out_v4_not_compiled;
+ }
+ } else {
+ nfs_set_mount_transport_protocol(ctx);
+#ifdef CONFIG_NFS_DISABLE_UDP_SUPPORT
+ if (ctx->nfs_server.protocol == XPRT_TRANSPORT_UDP)
+ goto out_invalid_transport_udp;
+#endif
+ if (ctx->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
+ port = NFS_RDMA_PORT;
+ }
+
+ nfs_set_port(sap, &ctx->nfs_server.port, port);
+
+ ret = nfs_parse_source(fc, max_namelen, max_pathlen);
+ if (ret < 0)
+ return ret;
+
+ /* Load the NFS protocol module if we haven't done so yet */
+ if (!ctx->nfs_mod) {
+ nfs_mod = get_nfs_version(ctx->version);
+ if (IS_ERR(nfs_mod)) {
+ ret = PTR_ERR(nfs_mod);
+ goto out_version_unavailable;
+ }
+ ctx->nfs_mod = nfs_mod;
+ }
+ return 0;
+
+out_no_device_name:
+ return nfs_invalf(fc, "NFS: Device name not specified");
+out_v4_not_compiled:
+ nfs_errorf(fc, "NFS: NFSv4 is not compiled into kernel");
+ return -EPROTONOSUPPORT;
+out_invalid_transport_udp:
+ return nfs_invalf(fc, "NFSv4: Unsupported transport protocol udp");
+out_no_address:
+ return nfs_invalf(fc, "NFS: mount program didn't pass remote address");
+out_mountproto_mismatch:
+ return nfs_invalf(fc, "NFS: Mount server address does not match mountproto= option");
+out_proto_mismatch:
+ return nfs_invalf(fc, "NFS: Server address does not match proto= option");
+out_minorversion_mismatch:
+ return nfs_invalf(fc, "NFS: Mount option vers=%u does not support minorversion=%u",
+ ctx->version, ctx->minorversion);
+out_migration_misuse:
+ return nfs_invalf(fc, "NFS: 'Migration' not supported for this NFS version");
+out_version_unavailable:
+ nfs_errorf(fc, "NFS: Version unavailable");
+ return ret;
+}
+
+/*
+ * Create an NFS superblock by the appropriate method.
+ */
+static int nfs_get_tree(struct fs_context *fc)
+{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ int err = nfs_fs_context_validate(fc);
+
+ if (err)
+ return err;
+ if (!ctx->internal)
+ return ctx->nfs_mod->rpc_ops->try_get_tree(fc);
+ else
+ return nfs_get_tree_common(fc);
+}
+
+/*
+ * Handle duplication of a configuration. The caller copied *src into *sc, but
+ * it can't deal with resource pointers in the filesystem context, so we have
+ * to do that. We need to clear pointers, copy data or get extra refs as
+ * appropriate.
+ */
+static int nfs_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
+{
+ struct nfs_fs_context *src = nfs_fc2context(src_fc), *ctx;
+
+ ctx = kmemdup(src, sizeof(struct nfs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->mntfh = nfs_alloc_fhandle();
+ if (!ctx->mntfh) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ nfs_copy_fh(ctx->mntfh, src->mntfh);
+
+ __module_get(ctx->nfs_mod->owner);
+ ctx->client_address = NULL;
+ ctx->mount_server.hostname = NULL;
+ ctx->nfs_server.export_path = NULL;
+ ctx->nfs_server.hostname = NULL;
+ ctx->fscache_uniq = NULL;
+ ctx->clone_data.fattr = NULL;
+ fc->fs_private = ctx;
+ return 0;
+}
+
+static void nfs_fs_context_free(struct fs_context *fc)
+{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+
+ if (ctx) {
+ if (ctx->server)
+ nfs_free_server(ctx->server);
+ if (ctx->nfs_mod)
+ put_nfs_version(ctx->nfs_mod);
+ kfree(ctx->client_address);
+ kfree(ctx->mount_server.hostname);
+ kfree(ctx->nfs_server.export_path);
+ kfree(ctx->nfs_server.hostname);
+ kfree(ctx->fscache_uniq);
+ nfs_free_fhandle(ctx->mntfh);
+ nfs_free_fattr(ctx->clone_data.fattr);
+ kfree(ctx);
+ }
+}
+
+static const struct fs_context_operations nfs_fs_context_ops = {
+ .free = nfs_fs_context_free,
+ .dup = nfs_fs_context_dup,
+ .parse_param = nfs_fs_context_parse_param,
+ .parse_monolithic = nfs_fs_context_parse_monolithic,
+ .get_tree = nfs_get_tree,
+ .reconfigure = nfs_reconfigure,
+};
+
+/*
+ * Prepare superblock configuration. We use the namespaces attached to the
+ * context. This may be the current process's namespaces, or it may be a
+ * container's namespaces.
+ */
+static int nfs_init_fs_context(struct fs_context *fc)
+{
+ struct nfs_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(struct nfs_fs_context), GFP_KERNEL);
+ if (unlikely(!ctx))
+ return -ENOMEM;
+
+ ctx->mntfh = nfs_alloc_fhandle();
+ if (unlikely(!ctx->mntfh)) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
+
+ ctx->protofamily = AF_UNSPEC;
+ ctx->mountfamily = AF_UNSPEC;
+ ctx->mount_server.port = NFS_UNSPEC_PORT;
+
+ if (fc->root) {
+ /* reconfigure, start with the current config */
+ struct nfs_server *nfss = fc->root->d_sb->s_fs_info;
+ struct net *net = nfss->nfs_client->cl_net;
+
+ ctx->flags = nfss->flags;
+ ctx->rsize = nfss->rsize;
+ ctx->wsize = nfss->wsize;
+ ctx->retrans = nfss->client->cl_timeout->to_retries;
+ ctx->selected_flavor = nfss->client->cl_auth->au_flavor;
+ ctx->acregmin = nfss->acregmin / HZ;
+ ctx->acregmax = nfss->acregmax / HZ;
+ ctx->acdirmin = nfss->acdirmin / HZ;
+ ctx->acdirmax = nfss->acdirmax / HZ;
+ ctx->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ;
+ ctx->nfs_server.port = nfss->port;
+ ctx->nfs_server.addrlen = nfss->nfs_client->cl_addrlen;
+ ctx->version = nfss->nfs_client->rpc_ops->version;
+ ctx->minorversion = nfss->nfs_client->cl_minorversion;
+
+ memcpy(&ctx->nfs_server.address, &nfss->nfs_client->cl_addr,
+ ctx->nfs_server.addrlen);
+
+ if (fc->net_ns != net) {
+ put_net(fc->net_ns);
+ fc->net_ns = get_net(net);
+ }
+
+ ctx->nfs_mod = nfss->nfs_client->cl_nfs_mod;
+ __module_get(ctx->nfs_mod->owner);
+ } else {
+ /* defaults */
+ ctx->timeo = NFS_UNSPEC_TIMEO;
+ ctx->retrans = NFS_UNSPEC_RETRANS;
+ ctx->acregmin = NFS_DEF_ACREGMIN;
+ ctx->acregmax = NFS_DEF_ACREGMAX;
+ ctx->acdirmin = NFS_DEF_ACDIRMIN;
+ ctx->acdirmax = NFS_DEF_ACDIRMAX;
+ ctx->nfs_server.port = NFS_UNSPEC_PORT;
+ ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ ctx->selected_flavor = RPC_AUTH_MAXFLAVOR;
+ ctx->minorversion = 0;
+ ctx->need_mount = true;
+ }
+ fc->fs_private = ctx;
+ fc->ops = &nfs_fs_context_ops;
+ return 0;
+}
+
+struct file_system_type nfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs",
+ .init_fs_context = nfs_init_fs_context,
+ .parameters = nfs_fs_parameters,
+ .kill_sb = nfs_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
+};
+MODULE_ALIAS_FS("nfs");
+EXPORT_SYMBOL_GPL(nfs_fs_type);
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+struct file_system_type nfs4_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .init_fs_context = nfs_init_fs_context,
+ .parameters = nfs_fs_parameters,
+ .kill_sb = nfs_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
+};
+MODULE_ALIAS_FS("nfs4");
+MODULE_ALIAS("nfs4");
+EXPORT_SYMBOL_GPL(nfs4_fs_type);
+#endif /* CONFIG_NFS_V4 */
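
With init_fs_context wired into both file_system_type records above, these
filesystems become reachable through the fs_context mount API as well as the
legacy mount(2) path. As a rough userspace sketch (not part of this patch; it
assumes kernel headers new enough to define SYS_fsopen and the FSCONFIG_*
constants in <linux/mount.h>, i.e. Linux 5.2+, and "server:/export" is a
placeholder), a mount driven through that interface looks like:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

/* glibc ships no wrappers for these syscalls yet */
static int fsopen(const char *fstype, unsigned int flags)
{
        return syscall(SYS_fsopen, fstype, flags);
}
static int fsconfig(int fd, unsigned int cmd, const char *key,
                    const void *value, int aux)
{
        return syscall(SYS_fsconfig, fd, cmd, key, value, aux);
}
static int fsmount(int fd, unsigned int flags, unsigned int attr_flags)
{
        return syscall(SYS_fsmount, fd, flags, attr_flags);
}
static int move_mount(int from_dfd, const char *from_path,
                      int to_dfd, const char *to_path, unsigned int flags)
{
        return syscall(SYS_move_mount, from_dfd, from_path,
                       to_dfd, to_path, flags);
}

int main(void)
{
        int fsfd, mntfd;

        fsfd = fsopen("nfs", 0);        /* -> nfs_init_fs_context() */
        if (fsfd < 0) {
                perror("fsopen");
                return 1;
        }
        /* each key/value lands in nfs_fs_context_parse_param() */
        fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "server:/export", 0);
        fsconfig(fsfd, FSCONFIG_SET_STRING, "vers", "4.2", 0);
        /* FSCONFIG_CMD_CREATE ends up in nfs_get_tree() */
        if (fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
                perror("fsconfig");
                return 1;
        }
        mntfd = fsmount(fsfd, 0, 0);
        move_mount(mntfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
        return 0;
}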
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
index 15f271401dcc..573b1da9342c 100644
--- a/fs/nfs/fscache-index.c
+++ b/fs/nfs/fscache-index.c
@@ -84,8 +84,10 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
return FSCACHE_CHECKAUX_OBSOLETE;
memset(&auxdata, 0, sizeof(auxdata));
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+ auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 3800ab6f08fa..52270bfac120 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -128,7 +128,7 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
return;
key->nfs_client = nfss->nfs_client;
- key->key.super.s_flags = sb->s_flags & NFS_MS_MASK;
+ key->key.super.s_flags = sb->s_flags & NFS_SB_MASK;
key->key.nfs_server.flags = nfss->flags;
key->key.nfs_server.rsize = nfss->rsize;
key->key.nfs_server.wsize = nfss->wsize;
@@ -238,8 +238,10 @@ void nfs_fscache_init_inode(struct inode *inode)
return;
memset(&auxdata, 0, sizeof(auxdata));
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+ auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
@@ -263,8 +265,10 @@ void nfs_fscache_clear_inode(struct inode *inode)
dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
memset(&auxdata, 0, sizeof(auxdata));
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+ auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
fscache_relinquish_cookie(cookie, &auxdata, false);
nfsi->fscache = NULL;
}
@@ -305,8 +309,10 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp)
return;
memset(&auxdata, 0, sizeof(auxdata));
- auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
- auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
+ auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
if (inode_is_open_for_write(inode)) {
dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index ad041cfbf9ec..6754c8607230 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -62,9 +62,11 @@ struct nfs_fscache_key {
* cache object.
*/
struct nfs_fscache_inode_auxdata {
- struct timespec mtime;
- struct timespec ctime;
- u64 change_attr;
+ s64 mtime_sec;
+ s64 mtime_nsec;
+ s64 ctime_sec;
+ s64 ctime_nsec;
+ u64 change_attr;
};
/*
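
One note on the auxdata change above: struct timespec embeds long fields, so
the size of the fscache auxiliary-data blob compared on cache lookup differed
between 32-bit and 64-bit ABIs, and it was not y2038-clean. Explicit s64/u64
members pin the layout. A compile-time guard along these lines (hypothetical,
not in the patch) would document the invariant:

#include <linux/build_bug.h>

/* hypothetical guard: fixed, padding-free layout on every arch */
static_assert(sizeof(struct nfs_fscache_inode_auxdata) ==
              4 * sizeof(s64) + sizeof(u64));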
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 878c4c5982d9..b012c2668a1f 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -64,66 +64,71 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
/*
* get an NFS2/NFS3 root dentry from the root filehandle
*/
-struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh,
- const char *devname)
+int nfs_get_root(struct super_block *s, struct fs_context *fc)
{
- struct nfs_server *server = NFS_SB(sb);
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ struct nfs_server *server = NFS_SB(s);
struct nfs_fsinfo fsinfo;
- struct dentry *ret;
+ struct dentry *root;
struct inode *inode;
- void *name = kstrdup(devname, GFP_KERNEL);
- int error;
+ char *name;
+ int error = -ENOMEM;
+ name = kstrdup(fc->source, GFP_KERNEL);
if (!name)
- return ERR_PTR(-ENOMEM);
+ goto out;
/* get the actual root for this mount */
fsinfo.fattr = nfs_alloc_fattr();
- if (fsinfo.fattr == NULL) {
- kfree(name);
- return ERR_PTR(-ENOMEM);
- }
+ if (fsinfo.fattr == NULL)
+ goto out_name;
- error = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
+ error = server->nfs_client->rpc_ops->getroot(server, ctx->mntfh, &fsinfo);
if (error < 0) {
dprintk("nfs_get_root: getattr error = %d\n", -error);
- ret = ERR_PTR(error);
- goto out;
+ nfs_errorf(fc, "NFS: Couldn't getattr on root");
+ goto out_fattr;
}
- inode = nfs_fhget(sb, mntfh, fsinfo.fattr, NULL);
+ inode = nfs_fhget(s, ctx->mntfh, fsinfo.fattr, NULL);
if (IS_ERR(inode)) {
dprintk("nfs_get_root: get root inode failed\n");
- ret = ERR_CAST(inode);
- goto out;
+ error = PTR_ERR(inode);
+ nfs_errorf(fc, "NFS: Couldn't get root inode");
+ goto out_fattr;
}
- error = nfs_superblock_set_dummy_root(sb, inode);
- if (error != 0) {
- ret = ERR_PTR(error);
- goto out;
- }
+ error = nfs_superblock_set_dummy_root(s, inode);
+ if (error != 0)
+ goto out_fattr;
/* root dentries normally start off anonymous and get spliced in later
* if the dentry tree reaches them; however if the dentry already
* exists, we'll pick it up at this point and use it as the root
*/
- ret = d_obtain_root(inode);
- if (IS_ERR(ret)) {
+ root = d_obtain_root(inode);
+ if (IS_ERR(root)) {
dprintk("nfs_get_root: get root dentry failed\n");
- goto out;
+ error = PTR_ERR(root);
+ nfs_errorf(fc, "NFS: Couldn't get root dentry");
+ goto out_fattr;
}
- security_d_instantiate(ret, inode);
- spin_lock(&ret->d_lock);
- if (IS_ROOT(ret) && !ret->d_fsdata &&
- !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
- ret->d_fsdata = name;
+ security_d_instantiate(root, inode);
+ spin_lock(&root->d_lock);
+ if (IS_ROOT(root) && !root->d_fsdata &&
+ !(root->d_flags & DCACHE_NFSFS_RENAMED)) {
+ root->d_fsdata = name;
name = NULL;
}
- spin_unlock(&ret->d_lock);
-out:
- kfree(name);
+ spin_unlock(&root->d_lock);
+ fc->root = root;
+ error = 0;
+
+out_fattr:
nfs_free_fattr(fsinfo.fattr);
- return ret;
+out_name:
+ kfree(name);
+out:
+ return error;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b0b4b9f303fd..11bf15800ac9 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1061,7 +1061,7 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct
rcu_read_lock();
list_for_each_entry_rcu(pos, &nfsi->open_files, list) {
- if (cred != NULL && pos->cred != cred)
+ if (cred != NULL && cred_fscmp(pos->cred, cred) != 0)
continue;
if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
continue;
@@ -1156,7 +1156,13 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Lu) getattr failed, error=%d\n",
inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode), status);
- if (status == -ESTALE) {
+ switch (status) {
+ case -ETIMEDOUT:
+ /* A soft timeout occurred. Use cached information? */
+ if (server->flags & NFS_MOUNT_SOFTREVAL)
+ status = 0;
+ break;
+ case -ESTALE:
nfs_zap_caches(inode);
if (!S_ISDIR(inode->i_mode))
set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
@@ -2108,6 +2114,7 @@ static void init_once(void *foo)
init_rwsem(&nfsi->rmdir_sem);
mutex_init(&nfsi->commit_mutex);
nfs4_init_once(nfsi);
+ nfsi->cache_change_attribute = 0;
}
static int __init nfs_init_inodecache(void)
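
The new -ETIMEDOUT arm in __nfs_revalidate_inode() captures the softreval
contract: when a mount carries NFS_MOUNT_SOFTREVAL, a soft timeout during
attribute revalidation is swallowed and the cached attributes are trusted
instead. Boiled down to a predicate (an illustrative helper, not one the
patch adds):

static inline bool nfs_softreval_masks_timeout(int status,
                                               unsigned int server_flags)
{
        /* GETATTR timed out on a softreval mount: keep the cache */
        return status == -ETIMEDOUT && (server_flags & NFS_MOUNT_SOFTREVAL);
}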
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 24a65da58aa9..f80c47d5ff27 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -4,17 +4,19 @@
*/
#include "nfs4_fs.h"
-#include <linux/mount.h>
+#include <linux/fs_context.h>
#include <linux/security.h>
#include <linux/crc32.h>
+#include <linux/sunrpc/addr.h>
#include <linux/nfs_page.h>
#include <linux/wait_bit.h>
-#define NFS_MS_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
+#define NFS_SB_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
extern const struct export_operations nfs_export_ops;
struct nfs_string;
+struct nfs_pageio_descriptor;
static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr)
{
@@ -31,17 +33,14 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
return 1;
}
-struct nfs_clone_mount {
- const struct super_block *sb;
- const struct dentry *dentry;
- struct nfs_fh *fh;
- struct nfs_fattr *fattr;
- char *hostname;
- char *mnt_path;
- struct sockaddr *addr;
- size_t addrlen;
- rpc_authflavor_t authflavor;
-};
+static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry)
+{
+ if (!(NFS_SB(dentry->d_sb)->flags & NFS_MOUNT_SOFTREVAL))
+ return false;
+ if (!d_is_positive(dentry) || !NFS_FH(d_inode(dentry))->size)
+ return false;
+ return true;
+}
/*
* Note: RFC 1813 doesn't limit the number of auth flavors that
@@ -82,12 +81,16 @@ struct nfs_client_initdata {
/*
* In-kernel mount arguments
*/
-struct nfs_parsed_mount_data {
- int flags;
+struct nfs_fs_context {
+ bool internal;
+ bool skip_reconfig_option_check;
+ bool need_mount;
+ bool sloppy;
+ unsigned int flags; /* NFS{,4}_MOUNT_* flags */
unsigned int rsize, wsize;
unsigned int timeo, retrans;
- unsigned int acregmin, acregmax,
- acdirmin, acdirmax;
+ unsigned int acregmin, acregmax;
+ unsigned int acdirmin, acdirmax;
unsigned int namlen;
unsigned int options;
unsigned int bsize;
@@ -97,10 +100,14 @@ struct nfs_parsed_mount_data {
unsigned int version;
unsigned int minorversion;
char *fscache_uniq;
- bool need_mount;
+ unsigned short protofamily;
+ unsigned short mountfamily;
struct {
- struct sockaddr_storage address;
+ union {
+ struct sockaddr address;
+ struct sockaddr_storage _address;
+ };
size_t addrlen;
char *hostname;
u32 version;
@@ -109,19 +116,41 @@ struct nfs_parsed_mount_data {
} mount_server;
struct {
- struct sockaddr_storage address;
+ union {
+ struct sockaddr address;
+ struct sockaddr_storage _address;
+ };
size_t addrlen;
char *hostname;
char *export_path;
int port;
unsigned short protocol;
unsigned short nconnect;
+ unsigned short export_path_len;
} nfs_server;
- void *lsm_opts;
- struct net *net;
+ struct nfs_fh *mntfh;
+ struct nfs_server *server;
+ struct nfs_subversion *nfs_mod;
+
+ /* Information for a cloned mount. */
+ struct nfs_clone_mount {
+ struct super_block *sb;
+ struct dentry *dentry;
+ struct nfs_fattr *fattr;
+ unsigned int inherited_bsize;
+ } clone_data;
};
+#define nfs_errorf(fc, fmt, ...) errorf(fc, fmt, ## __VA_ARGS__)
+#define nfs_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)
+#define nfs_warnf(fc, fmt, ...) warnf(fc, fmt, ## __VA_ARGS__)
+
+static inline struct nfs_fs_context *nfs_fc2context(const struct fs_context *fc)
+{
+ return fc->fs_private;
+}
+
/* mount_clnt.c */
struct nfs_mount_request {
struct sockaddr *sap;
@@ -137,14 +166,6 @@ struct nfs_mount_request {
struct net *net;
};
-struct nfs_mount_info {
- void (*fill_super)(struct super_block *, struct nfs_mount_info *);
- int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
- struct nfs_parsed_mount_data *parsed;
- struct nfs_clone_mount *cloned;
- struct nfs_fh *mntfh;
-};
-
extern int nfs_mount(struct nfs_mount_request *info);
extern void nfs_umount(const struct nfs_mount_request *info);
@@ -170,13 +191,9 @@ extern struct nfs_client *nfs4_find_client_ident(struct net *, int);
extern struct nfs_client *
nfs4_find_client_sessionid(struct net *, const struct sockaddr *,
struct nfs4_sessionid *, u32);
-extern struct nfs_server *nfs_create_server(struct nfs_mount_info *,
- struct nfs_subversion *);
-extern struct nfs_server *nfs4_create_server(
- struct nfs_mount_info *,
- struct nfs_subversion *);
-extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
- struct nfs_fh *);
+extern struct nfs_server *nfs_create_server(struct fs_context *);
+extern struct nfs_server *nfs4_create_server(struct fs_context *);
+extern struct nfs_server *nfs4_create_referral_server(struct fs_context *);
extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
struct sockaddr *sap, size_t salen,
struct net *net);
@@ -227,7 +244,9 @@ static inline void nfs_fs_proc_exit(void)
extern const struct svc_version nfs4_callback_version1;
extern const struct svc_version nfs4_callback_version4;
-struct nfs_pageio_descriptor;
+/* fs_context.c */
+extern struct file_system_type nfs_fs_type;
+
/* pagelist.c */
extern int __init nfs_init_nfspagecache(void);
extern void nfs_destroy_nfspagecache(void);
@@ -387,23 +406,10 @@ extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode);
/* super.c */
extern const struct super_operations nfs_sops;
-extern struct file_system_type nfs_fs_type;
-extern struct file_system_type nfs_xdev_fs_type;
-#if IS_ENABLED(CONFIG_NFS_V4)
-extern struct file_system_type nfs4_referral_fs_type;
-#endif
bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t);
-struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
- struct nfs_subversion *);
-int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
-int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
-struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *,
- struct nfs_mount_info *, struct nfs_subversion *);
-struct dentry *nfs_fs_mount(struct file_system_type *, int, const char *, void *);
-struct dentry * nfs_xdev_mount_common(struct file_system_type *, int,
- const char *, struct nfs_mount_info *);
+int nfs_try_get_tree(struct fs_context *);
+int nfs_get_tree_common(struct fs_context *);
void nfs_kill_super(struct super_block *);
-void nfs_fill_super(struct super_block *, struct nfs_mount_info *);
extern struct rpc_stat nfs_rpcstat;
@@ -430,18 +436,12 @@ static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
extern char *nfs_path(char **p, struct dentry *dentry,
char *buffer, ssize_t buflen, unsigned flags);
extern struct vfsmount *nfs_d_automount(struct path *path);
-struct vfsmount *nfs_submount(struct nfs_server *, struct dentry *,
- struct nfs_fh *, struct nfs_fattr *);
-struct vfsmount *nfs_do_submount(struct dentry *, struct nfs_fh *,
- struct nfs_fattr *, rpc_authflavor_t);
+int nfs_submount(struct fs_context *, struct nfs_server *);
+int nfs_do_submount(struct fs_context *);
/* getroot.c */
-extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
- const char *);
+extern int nfs_get_root(struct super_block *s, struct fs_context *fc);
#if IS_ENABLED(CONFIG_NFS_V4)
-extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *,
- const char *);
-
extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool);
#endif
@@ -460,7 +460,7 @@ int nfs_show_options(struct seq_file *, struct dentry *);
int nfs_show_devname(struct seq_file *, struct dentry *);
int nfs_show_path(struct seq_file *, struct dentry *);
int nfs_show_stats(struct seq_file *, struct dentry *);
-int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
+int nfs_reconfigure(struct fs_context *);
/* write.c */
extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
@@ -706,9 +706,9 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
}
/*
- * Convert a struct timespec into a 64-bit change attribute
+ * Convert a struct timespec64 into a 64-bit change attribute
*
- * This does approximately the same thing as timespec_to_ns(),
+ * This does approximately the same thing as timespec64_to_ns(),
* but for calculation efficiency, we multiply the seconds by
* 1024*1024*1024.
*/
@@ -777,3 +777,16 @@ static inline bool nfs_error_is_fatal_on_server(int err)
}
return nfs_error_is_fatal(err);
}
+
+/*
+ * Select between a default port value and a user-specified port value.
+ * If a zero value is set, then autobind will be used.
+ */
+static inline void nfs_set_port(struct sockaddr *sap, int *port,
+ const unsigned short default_port)
+{
+ if (*port == NFS_UNSPEC_PORT)
+ *port = default_port;
+
+ rpc_set_port(sap, *port);
+}
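
nfs_set_port() above leans on rpc_set_port() to store the chosen port in the
sockaddr in network byte order for either address family. A userspace
analogue of the combined defaulting logic (names here are illustrative; only
the behaviour mirrors the kernel helpers) would be:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#define EX_UNSPEC_PORT (-1)     /* mirrors NFS_UNSPEC_PORT */

static void ex_set_port(struct sockaddr *sap, int *port,
                        unsigned short default_port)
{
        if (*port == EX_UNSPEC_PORT)
                *port = default_port;   /* a port of 0 requests autobind */

        switch (sap->sa_family) {
        case AF_INET:
                ((struct sockaddr_in *)sap)->sin_port = htons(*port);
                break;
        case AF_INET6:
                ((struct sockaddr_in6 *)sap)->sin6_port = htons(*port);
                break;
        }
}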
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index cb7c10e9721e..35c8cb2d7637 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -29,9 +29,7 @@
*/
#define encode_dirpath_sz (1 + XDR_QUADLEN(MNTPATHLEN))
#define MNT_status_sz (1)
-#define MNT_fhs_status_sz (1)
#define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE)
-#define MNT_fhandle3_sz (1 + XDR_QUADLEN(NFS3_FHSIZE))
#define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS)
/*
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 5e0e9d29f5c5..ad6077404947 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -19,6 +19,7 @@
#include <linux/vfs.h>
#include <linux/sunrpc/gss_api.h>
#include "internal.h"
+#include "nfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -139,34 +140,65 @@ EXPORT_SYMBOL_GPL(nfs_path);
*/
struct vfsmount *nfs_d_automount(struct path *path)
{
- struct vfsmount *mnt;
+ struct nfs_fs_context *ctx;
+ struct fs_context *fc;
+ struct vfsmount *mnt = ERR_PTR(-ENOMEM);
struct nfs_server *server = NFS_SERVER(d_inode(path->dentry));
- struct nfs_fh *fh = NULL;
- struct nfs_fattr *fattr = NULL;
+ struct nfs_client *client = server->nfs_client;
+ int ret;
if (IS_ROOT(path->dentry))
return ERR_PTR(-ESTALE);
- mnt = ERR_PTR(-ENOMEM);
- fh = nfs_alloc_fhandle();
- fattr = nfs_alloc_fattr();
- if (fh == NULL || fattr == NULL)
- goto out;
+ /* Open a new filesystem context, transferring parameters from the
+ * parent superblock, including the network namespace.
+ */
+ fc = fs_context_for_submount(&nfs_fs_type, path->dentry);
+ if (IS_ERR(fc))
+ return ERR_CAST(fc);
- mnt = server->nfs_client->rpc_ops->submount(server, path->dentry, fh, fattr);
+ ctx = nfs_fc2context(fc);
+ ctx->clone_data.dentry = path->dentry;
+ ctx->clone_data.sb = path->dentry->d_sb;
+ ctx->clone_data.fattr = nfs_alloc_fattr();
+ if (!ctx->clone_data.fattr)
+ goto out_fc;
+
+ if (fc->net_ns != client->cl_net) {
+ put_net(fc->net_ns);
+ fc->net_ns = get_net(client->cl_net);
+ }
+
+ /* for submounts we want the same server; referrals will reassign */
+ memcpy(&ctx->nfs_server.address, &client->cl_addr, client->cl_addrlen);
+ ctx->nfs_server.addrlen = client->cl_addrlen;
+ ctx->nfs_server.port = server->port;
+
+ ctx->version = client->rpc_ops->version;
+ ctx->minorversion = client->cl_minorversion;
+ ctx->nfs_mod = client->cl_nfs_mod;
+ __module_get(ctx->nfs_mod->owner);
+
+ ret = client->rpc_ops->submount(fc, server);
+ if (ret < 0) {
+ mnt = ERR_PTR(ret);
+ goto out_fc;
+ }
+
+ up_write(&fc->root->d_sb->s_umount);
+ mnt = vfs_create_mount(fc);
if (IS_ERR(mnt))
- goto out;
+ goto out_fc;
if (nfs_mountpoint_expiry_timeout < 0)
- goto out;
+ goto out_fc;
mntget(mnt); /* prevent immediate expiration */
mnt_set_expiry(mnt, &nfs_automount_list);
schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
-out:
- nfs_free_fattr(fattr);
- nfs_free_fhandle(fh);
+out_fc:
+ put_fs_context(fc);
return mnt;
}
@@ -213,16 +245,6 @@ void nfs_release_automount_timer(void)
cancel_delayed_work(&nfs_automount_task);
}
-/*
- * Clone a mountpoint of the appropriate type
- */
-static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
- const char *devname,
- struct nfs_clone_mount *mountdata)
-{
- return vfs_submount(mountdata->dentry, &nfs_xdev_fs_type, devname, mountdata);
-}
-
/**
* nfs_do_submount - set up mountpoint when crossing a filesystem boundary
* @dentry: parent directory
@@ -231,46 +253,62 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
* @authflavor: security flavor to use when performing the mount
*
*/
-struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
- struct nfs_fattr *fattr, rpc_authflavor_t authflavor)
+int nfs_do_submount(struct fs_context *fc)
{
- struct nfs_clone_mount mountdata = {
- .sb = dentry->d_sb,
- .dentry = dentry,
- .fh = fh,
- .fattr = fattr,
- .authflavor = authflavor,
- };
- struct vfsmount *mnt;
- char *page = (char *) __get_free_page(GFP_USER);
- char *devname;
-
- if (page == NULL)
- return ERR_PTR(-ENOMEM);
-
- devname = nfs_devname(dentry, page, PAGE_SIZE);
- if (IS_ERR(devname))
- mnt = ERR_CAST(devname);
- else
- mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata);
-
- free_page((unsigned long)page);
- return mnt;
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ struct dentry *dentry = ctx->clone_data.dentry;
+ struct nfs_server *server;
+ char *buffer, *p;
+ int ret;
+
+ /* create a new volume representation */
+ server = ctx->nfs_mod->rpc_ops->clone_server(NFS_SB(ctx->clone_data.sb),
+ ctx->mntfh,
+ ctx->clone_data.fattr,
+ ctx->selected_flavor);
+
+ if (IS_ERR(server))
+ return PTR_ERR(server);
+
+ ctx->server = server;
+
+ buffer = kmalloc(4096, GFP_USER);
+ if (!buffer)
+ return -ENOMEM;
+
+ ctx->internal = true;
+ ctx->clone_data.inherited_bsize = ctx->clone_data.sb->s_blocksize_bits;
+
+ p = nfs_devname(dentry, buffer, 4096);
+ if (IS_ERR(p)) {
+ nfs_errorf(fc, "NFS: Couldn't determine submount pathname");
+ ret = PTR_ERR(p);
+ } else {
+ ret = vfs_parse_fs_string(fc, "source", p, buffer + 4096 - p);
+ if (!ret)
+ ret = vfs_get_tree(fc);
+ }
+ kfree(buffer);
+ return ret;
}
EXPORT_SYMBOL_GPL(nfs_do_submount);
-struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry,
- struct nfs_fh *fh, struct nfs_fattr *fattr)
+int nfs_submount(struct fs_context *fc, struct nfs_server *server)
{
- int err;
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ struct dentry *dentry = ctx->clone_data.dentry;
struct dentry *parent = dget_parent(dentry);
+ int err;
/* Look it up again to get its attributes */
- err = server->nfs_client->rpc_ops->lookup(d_inode(parent), &dentry->d_name, fh, fattr, NULL);
+ err = server->nfs_client->rpc_ops->lookup(d_inode(parent), dentry,
+ ctx->mntfh, ctx->clone_data.fattr,
+ NULL);
dput(parent);
if (err != 0)
- return ERR_PTR(err);
+ return err;
- return nfs_do_submount(dentry, fh, fattr, server->client->cl_auth->au_flavor);
+ ctx->selected_flavor = server->client->cl_auth->au_flavor;
+ return nfs_do_submount(fc);
}
EXPORT_SYMBOL_GPL(nfs_submount);
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index d94c7abdf25a..f6676af37d5d 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -360,17 +360,17 @@ static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr,
else
*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
- if (attr->ia_valid & ATTR_ATIME_SET) {
+ if (attr->ia_valid & ATTR_ATIME_SET)
p = xdr_encode_time(p, &attr->ia_atime);
- } else if (attr->ia_valid & ATTR_ATIME) {
+ else if (attr->ia_valid & ATTR_ATIME)
p = xdr_encode_current_server_time(p, &attr->ia_atime);
- } else
+ else
p = xdr_time_not_set(p);
- if (attr->ia_valid & ATTR_MTIME_SET) {
+ if (attr->ia_valid & ATTR_MTIME_SET)
xdr_encode_time(p, &attr->ia_mtime);
- } else if (attr->ia_valid & ATTR_MTIME) {
+ else if (attr->ia_valid & ATTR_MTIME)
xdr_encode_current_server_time(p, &attr->ia_mtime);
- } else
+ else
xdr_time_not_set(p);
}
diff --git a/fs/nfs/nfs3_fs.h b/fs/nfs/nfs3_fs.h
index f82e11c4cb56..1b950b66b3bb 100644
--- a/fs/nfs/nfs3_fs.h
+++ b/fs/nfs/nfs3_fs.h
@@ -27,7 +27,7 @@ static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
#endif /* CONFIG_NFS_V3_ACL */
/* nfs3client.c */
-struct nfs_server *nfs3_create_server(struct nfs_mount_info *, struct nfs_subversion *);
+struct nfs_server *nfs3_create_server(struct fs_context *);
struct nfs_server *nfs3_clone_server(struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, rpc_authflavor_t);
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index 223904bc40a7..5601e47360c2 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -46,10 +46,10 @@ static inline void nfs_init_server_aclclient(struct nfs_server *server)
}
#endif
-struct nfs_server *nfs3_create_server(struct nfs_mount_info *mount_info,
- struct nfs_subversion *nfs_mod)
+struct nfs_server *nfs3_create_server(struct fs_context *fc)
{
- struct nfs_server *server = nfs_create_server(mount_info, nfs_mod);
+ struct nfs_server *server = nfs_create_server(fc);
+
/* Create a client RPC handle for the NFS v3 ACL management interface */
if (!IS_ERR(server))
nfs_init_server_aclclient(server);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 9eb2f1a503ab..a46d1d5d16d8 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -110,10 +110,15 @@ nfs3_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = fattr,
};
int status;
+ unsigned short task_flags = 0;
+
+ /* Is this an attribute revalidation, subject to softreval? */
+ if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
+ task_flags |= RPC_TASK_TIMEOUT;
dprintk("NFS call getattr\n");
nfs_fattr_init(fattr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = rpc_call_sync(server->client, &msg, task_flags);
dprintk("NFS reply getattr: %d\n", status);
return status;
}
@@ -140,23 +145,23 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
if (status == 0) {
+ nfs_setattr_update_inode(inode, sattr, fattr);
if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
nfs_zap_acl_cache(inode);
- nfs_setattr_update_inode(inode, sattr, fattr);
}
dprintk("NFS reply setattr: %d\n", status);
return status;
}
static int
-nfs3_proc_lookup(struct inode *dir, const struct qstr *name,
+nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
struct nfs_fh *fhandle, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
struct nfs3_diropargs arg = {
.fh = NFS_FH(dir),
- .name = name->name,
- .len = name->len
+ .name = dentry->d_name.name,
+ .len = dentry->d_name.len
};
struct nfs3_diropres res = {
.fh = fhandle,
@@ -168,20 +173,25 @@ nfs3_proc_lookup(struct inode *dir, const struct qstr *name,
.rpc_resp = &res,
};
int status;
+ unsigned short task_flags = 0;
- dprintk("NFS call lookup %s\n", name->name);
+ /* Is this an attribute revalidation, subject to softreval? */
+ if (nfs_lookup_is_soft_revalidate(dentry))
+ task_flags |= RPC_TASK_TIMEOUT;
+
+ dprintk("NFS call lookup %pd2\n", dentry);
res.dir_attr = nfs_alloc_fattr();
if (res.dir_attr == NULL)
return -ENOMEM;
nfs_fattr_init(fattr);
- status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, task_flags);
nfs_refresh_inode(dir, res.dir_attr);
if (status >= 0 && !(fattr->valid & NFS_ATTR_FATTR)) {
msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
msg.rpc_argp = fhandle;
msg.rpc_resp = fattr;
- status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, task_flags);
}
nfs_free_fattr(res.dir_attr);
dprintk("NFS reply lookup: %d\n", status);
@@ -990,7 +1000,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.nlmclnt_ops = &nlmclnt_fl_close_lock_ops,
.getroot = nfs3_proc_get_root,
.submount = nfs_submount,
- .try_mount = nfs_try_mount,
+ .try_get_tree = nfs_try_get_tree,
.getattr = nfs3_proc_getattr,
.setattr = nfs3_proc_setattr,
.lookup = nfs3_proc_lookup,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 927eb680f161..69971f6c840d 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -2334,6 +2334,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
void *data)
{
struct nfs_commitres *result = data;
+ struct nfs_writeverf *verf = result->verf;
enum nfs_stat status;
int error;
@@ -2346,7 +2347,9 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
result->op_status = status;
if (status != NFS3_OK)
goto out_status;
- error = decode_writeverf3(xdr, &result->verf->verifier);
+ error = decode_writeverf3(xdr, &verf->verifier);
+ if (!error)
+ verf->committed = NFS_FILE_SYNC;
out:
return error;
out_status:
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 1fe83e0f663e..e2ae54b35dfe 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -61,8 +61,11 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
lock, FMODE_WRITE);
- if (status)
+ if (status) {
+ if (status == -EAGAIN)
+ status = -NFS4ERR_BAD_STATEID;
return status;
+ }
res.falloc_fattr = nfs_alloc_fattr();
if (!res.falloc_fattr)
@@ -287,8 +290,11 @@ static ssize_t _nfs42_proc_copy(struct file *src,
} else {
status = nfs4_set_rw_stateid(&args->src_stateid,
src_lock->open_context, src_lock, FMODE_READ);
- if (status)
+ if (status) {
+ if (status == -EAGAIN)
+ status = -NFS4ERR_BAD_STATEID;
return status;
+ }
}
status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
pos_src, pos_src + (loff_t)count - 1);
@@ -297,8 +303,11 @@ static ssize_t _nfs42_proc_copy(struct file *src,
status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
dst_lock, FMODE_WRITE);
- if (status)
+ if (status) {
+ if (status == -EAGAIN)
+ status = -NFS4ERR_BAD_STATEID;
return status;
+ }
status = nfs_sync_inode(dst_inode);
if (status)
@@ -334,14 +343,14 @@ static ssize_t _nfs42_proc_copy(struct file *src,
status = handle_async_copy(res, dst_server, src_server, src,
dst, &args->src_stateid, restart);
if (status)
- return status;
+ goto out;
}
if ((!res->synchronous || !args->sync) &&
res->write_res.verifier.committed != NFS_FILE_SYNC) {
status = process_copy_commit(dst, pos_dst, res);
if (status)
- return status;
+ goto out;
}
truncate_pagecache_range(dst_inode, pos_dst,
@@ -546,8 +555,11 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
FMODE_READ);
nfs_put_lock_context(l_ctx);
- if (status)
+ if (status) {
+ if (status == -EAGAIN)
+ status = -NFS4ERR_BAD_STATEID;
return status;
+ }
status = nfs4_call_sync(src_server->client, src_server, &msg,
&args->cna_seq_args, &res->cnr_seq_res, 0);
@@ -618,8 +630,11 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
lock, FMODE_READ);
- if (status)
+ if (status) {
+ if (status == -EAGAIN)
+ status = -NFS4ERR_BAD_STATEID;
return status;
+ }
status = nfs_filemap_write_and_wait_range(inode->i_mapping,
offset, LLONG_MAX);
@@ -994,13 +1009,18 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
src_lock, FMODE_READ);
- if (status)
+ if (status) {
+ if (status == -EAGAIN)
+ status = -NFS4ERR_BAD_STATEID;
return status;
-
+ }
status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
dst_lock, FMODE_WRITE);
- if (status)
+ if (status) {
+ if (status == -EAGAIN)
+ status = -NFS4ERR_BAD_STATEID;
return status;
+ }
res.dst_fattr = nfs_alloc_fattr();
if (!res.dst_fattr)
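
Each nfs4_set_rw_stateid() call site in this file now carries the same
three-line -EAGAIN to -NFS4ERR_BAD_STATEID translation, five times over. If a
later cleanup wanted to factor it, a small wrapper would cover most of the
call sites (hypothetical, not part of this patch):

static int nfs42_set_rw_stateid(nfs4_stateid *dst,
                                struct nfs_lock_context *lock, fmode_t mode)
{
        int status = nfs4_set_rw_stateid(dst, lock->open_context, lock, mode);

        /* map "try again" to a stateid error so callers fail cleanly */
        return status == -EAGAIN ? -NFS4ERR_BAD_STATEID : status;
}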
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index a7a73b1d1fec..8be1ba7c62bb 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -268,14 +268,13 @@ extern const struct dentry_operations nfs4_dentry_operations;
int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
unsigned, umode_t);
-/* super.c */
+/* fs_context.c */
extern struct file_system_type nfs4_fs_type;
/* nfs4namespace.c */
struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *,
const struct qstr *);
-struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
- struct nfs_fh *, struct nfs_fattr *);
+int nfs4_submount(struct fs_context *, struct nfs_server *);
int nfs4_replace_transport(struct nfs_server *server,
const struct nfs4_fs_locations *locations);
@@ -303,8 +302,10 @@ extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struc
extern int nfs4_proc_get_locations(struct inode *, struct nfs4_fs_locations *,
struct page *page, const struct cred *);
extern int nfs4_proc_fsid_present(struct inode *, const struct cred *);
-extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, const struct qstr *,
- struct nfs_fh *, struct nfs_fattr *);
+extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *,
+ struct dentry *,
+ struct nfs_fh *,
+ struct nfs_fattr *);
extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
extern const struct xattr_handler *nfs4_xattr_handlers[];
extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
@@ -446,9 +447,7 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *);
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
extern void nfs4_kill_renewd(struct nfs_client *);
extern void nfs4_renew_state(struct work_struct *);
-extern void nfs4_set_lease_period(struct nfs_client *clp,
- unsigned long lease,
- unsigned long lastrenewed);
+extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease);
/* nfs4state.c */
@@ -526,7 +525,6 @@ extern const nfs4_stateid invalid_stateid;
/* nfs4super.c */
struct nfs_mount_info;
extern struct nfs_subversion nfs_v4;
-struct dentry *nfs4_try_mount(int, const char *, struct nfs_mount_info *, struct nfs_subversion *);
extern bool nfs4_disable_idmapping;
extern unsigned short max_session_slots;
extern unsigned short max_session_cb_slots;
@@ -536,6 +534,9 @@ extern bool recover_lost_locks;
#define NFS4_CLIENT_ID_UNIQ_LEN (64)
extern char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN];
+extern int nfs4_try_get_tree(struct fs_context *);
+extern int nfs4_get_referral_tree(struct fs_context *);
+
/* nfs4sysctl.c */
#ifdef CONFIG_SYSCTL
int nfs4_register_sysctl(void);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 460d6251c405..0cd767e5c977 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1055,66 +1055,64 @@ out:
/*
* Create a version 4 volume record
*/
-static int nfs4_init_server(struct nfs_server *server,
- struct nfs_parsed_mount_data *data)
+static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc)
{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct rpc_timeout timeparms;
int error;
- nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
- data->timeo, data->retrans);
+ nfs_init_timeout_values(&timeparms, ctx->nfs_server.protocol,
+ ctx->timeo, ctx->retrans);
/* Initialise the client representation from the mount data */
- server->flags = data->flags;
- server->options = data->options;
- server->auth_info = data->auth_info;
+ server->flags = ctx->flags;
+ server->options = ctx->options;
+ server->auth_info = ctx->auth_info;
/* Use the first specified auth flavor. If this flavor isn't
* allowed by the server, use the SECINFO path to try the
* other specified flavors */
- if (data->auth_info.flavor_len >= 1)
- data->selected_flavor = data->auth_info.flavors[0];
+ if (ctx->auth_info.flavor_len >= 1)
+ ctx->selected_flavor = ctx->auth_info.flavors[0];
else
- data->selected_flavor = RPC_AUTH_UNIX;
+ ctx->selected_flavor = RPC_AUTH_UNIX;
/* Get a client record */
error = nfs4_set_client(server,
- data->nfs_server.hostname,
- (const struct sockaddr *)&data->nfs_server.address,
- data->nfs_server.addrlen,
- data->client_address,
- data->nfs_server.protocol,
- &timeparms,
- data->minorversion,
- data->nfs_server.nconnect,
- data->net);
+ ctx->nfs_server.hostname,
+ &ctx->nfs_server.address,
+ ctx->nfs_server.addrlen,
+ ctx->client_address,
+ ctx->nfs_server.protocol,
+ &timeparms,
+ ctx->minorversion,
+ ctx->nfs_server.nconnect,
+ fc->net_ns);
if (error < 0)
return error;
- if (data->rsize)
- server->rsize = nfs_block_size(data->rsize, NULL);
- if (data->wsize)
- server->wsize = nfs_block_size(data->wsize, NULL);
+ if (ctx->rsize)
+ server->rsize = nfs_block_size(ctx->rsize, NULL);
+ if (ctx->wsize)
+ server->wsize = nfs_block_size(ctx->wsize, NULL);
- server->acregmin = data->acregmin * HZ;
- server->acregmax = data->acregmax * HZ;
- server->acdirmin = data->acdirmin * HZ;
- server->acdirmax = data->acdirmax * HZ;
- server->port = data->nfs_server.port;
+ server->acregmin = ctx->acregmin * HZ;
+ server->acregmax = ctx->acregmax * HZ;
+ server->acdirmin = ctx->acdirmin * HZ;
+ server->acdirmax = ctx->acdirmax * HZ;
+ server->port = ctx->nfs_server.port;
return nfs_init_server_rpcclient(server, &timeparms,
- data->selected_flavor);
+ ctx->selected_flavor);
}
/*
* Create a version 4 volume record
* - keyed on server and FSID
*/
-/*struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
- struct nfs_fh *mntfh)*/
-struct nfs_server *nfs4_create_server(struct nfs_mount_info *mount_info,
- struct nfs_subversion *nfs_mod)
+struct nfs_server *nfs4_create_server(struct fs_context *fc)
{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct nfs_server *server;
bool auth_probe;
int error;
@@ -1125,14 +1123,14 @@ struct nfs_server *nfs4_create_server(struct nfs_mount_info *mount_info,
server->cred = get_cred(current_cred());
- auth_probe = mount_info->parsed->auth_info.flavor_len < 1;
+ auth_probe = ctx->auth_info.flavor_len < 1;
/* set up the general RPC client */
- error = nfs4_init_server(server, mount_info->parsed);
+ error = nfs4_init_server(server, fc);
if (error < 0)
goto error;
- error = nfs4_server_common_setup(server, mount_info->mntfh, auth_probe);
+ error = nfs4_server_common_setup(server, ctx->mntfh, auth_probe);
if (error < 0)
goto error;
@@ -1146,9 +1144,9 @@ error:
/*
* Create an NFS4 referral server record
*/
-struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
- struct nfs_fh *mntfh)
+struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct nfs_client *parent_client;
struct nfs_server *server, *parent_server;
bool auth_probe;
@@ -1158,7 +1156,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (!server)
return ERR_PTR(-ENOMEM);
- parent_server = NFS_SB(data->sb);
+ parent_server = NFS_SB(ctx->clone_data.sb);
parent_client = parent_server->nfs_client;
server->cred = get_cred(parent_server->cred);
@@ -1168,10 +1166,11 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
/* Get a client representation */
#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
- rpc_set_port(data->addr, NFS_RDMA_PORT);
- error = nfs4_set_client(server, data->hostname,
- data->addr,
- data->addrlen,
+ rpc_set_port(&ctx->nfs_server.address, NFS_RDMA_PORT);
+ error = nfs4_set_client(server,
+ ctx->nfs_server.hostname,
+ &ctx->nfs_server.address,
+ ctx->nfs_server.addrlen,
parent_client->cl_ipaddr,
XPRT_TRANSPORT_RDMA,
parent_server->client->cl_timeout,
@@ -1182,10 +1181,11 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
goto init_server;
#endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */
- rpc_set_port(data->addr, NFS_PORT);
- error = nfs4_set_client(server, data->hostname,
- data->addr,
- data->addrlen,
+ rpc_set_port(&ctx->nfs_server.address, NFS_PORT);
+ error = nfs4_set_client(server,
+ ctx->nfs_server.hostname,
+ &ctx->nfs_server.address,
+ ctx->nfs_server.addrlen,
parent_client->cl_ipaddr,
XPRT_TRANSPORT_TCP,
parent_server->client->cl_timeout,
@@ -1198,13 +1198,14 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
init_server:
#endif
- error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
+ error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout,
+ ctx->selected_flavor);
if (error < 0)
goto error;
auth_probe = parent_server->auth_info.flavor_len < 1;
- error = nfs4_server_common_setup(server, mntfh, auth_probe);
+ error = nfs4_server_common_setup(server, ctx->mntfh, auth_probe);
if (error < 0)
goto error;
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 620de905cba9..1297919e0fce 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/falloc.h>
+#include <linux/mount.h>
#include <linux/nfs_fs.h>
#include "delegation.h"
#include "internal.h"
@@ -86,7 +87,6 @@ nfs4_file_open(struct inode *inode, struct file *filp)
if (inode != d_inode(dentry))
goto out_drop;
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
nfs_file_set_open_context(filp, ctx);
nfs_fscache_open_file(inode, filp);
err = 0;
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 2e460c33ae48..84026e7b8a5f 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -8,6 +8,7 @@
* NFSv4 namespace
*/
+#include <linux/module.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
@@ -21,37 +22,64 @@
#include <linux/inet.h>
#include "internal.h"
#include "nfs4_fs.h"
+#include "nfs.h"
#include "dns_resolve.h"
#define NFSDBG_FACILITY NFSDBG_VFS
/*
+ * Work out the length that an NFSv4 path would render to as a standard posix
+ * path, with a leading slash but no terminating slash.
+ */
+static ssize_t nfs4_pathname_len(const struct nfs4_pathname *pathname)
+{
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < pathname->ncomponents; i++) {
+ const struct nfs4_string *component = &pathname->components[i];
+
+ if (component->len > NAME_MAX)
+ goto too_long;
+ len += 1 + component->len; /* Adding "/foo" */
+ if (len > PATH_MAX)
+ goto too_long;
+ }
+ return len;
+
+too_long:
+ return -ENAMETOOLONG;
+}
+
+/*
* Convert the NFSv4 pathname components into a standard posix path.
- *
- * Note that the resulting string will be placed at the end of the buffer
*/
-static inline char *nfs4_pathname_string(const struct nfs4_pathname *pathname,
- char *buffer, ssize_t buflen)
+static char *nfs4_pathname_string(const struct nfs4_pathname *pathname,
+ unsigned short *_len)
{
- char *end = buffer + buflen;
- int n;
+ ssize_t len;
+ char *buf, *p;
+ int i;
+
+ len = nfs4_pathname_len(pathname);
+ if (len < 0)
+ return ERR_PTR(len);
+ *_len = len;
+
+ p = buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < pathname->ncomponents; i++) {
+ const struct nfs4_string *component = &pathname->components[i];
- *--end = '\0';
- buflen--;
-
- n = pathname->ncomponents;
- while (--n >= 0) {
- const struct nfs4_string *component = &pathname->components[n];
- buflen -= component->len + 1;
- if (buflen < 0)
- goto Elong;
- end -= component->len;
- memcpy(end, component->data, component->len);
- *--end = '/';
+ *p++ = '/';
+ memcpy(p, component->data, component->len);
+ p += component->len;
}
- return end;
-Elong:
- return ERR_PTR(-ENAMETOOLONG);
+
+ *p = 0;
+ return buf;
}
/*
@@ -100,21 +128,36 @@ static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen)
*/
static int nfs4_validate_fspath(struct dentry *dentry,
const struct nfs4_fs_locations *locations,
- char *page, char *page2)
+ struct nfs_fs_context *ctx)
{
- const char *path, *fs_path;
+ const char *path;
+ char *fs_path;
+ unsigned short len;
+ char *buf;
+ int n;
- path = nfs4_path(dentry, page, PAGE_SIZE);
- if (IS_ERR(path))
+ buf = kmalloc(4096, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ path = nfs4_path(dentry, buf, 4096);
+ if (IS_ERR(path)) {
+ kfree(buf);
return PTR_ERR(path);
+ }
- fs_path = nfs4_pathname_string(&locations->fs_path, page2, PAGE_SIZE);
- if (IS_ERR(fs_path))
+ fs_path = nfs4_pathname_string(&locations->fs_path, &len);
+ if (IS_ERR(fs_path)) {
+ kfree(buf);
return PTR_ERR(fs_path);
+ }
- if (strncmp(path, fs_path, strlen(fs_path)) != 0) {
+ n = strncmp(path, fs_path, len);
+ kfree(buf);
+ kfree(fs_path);
+ if (n != 0) {
dprintk("%s: path %s does not begin with fsroot %s\n",
- __func__, path, fs_path);
+ __func__, path, ctx->nfs_server.export_path);
return -ENOENT;
}
@@ -236,55 +279,77 @@ out:
return new;
}
-static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
- char *page, char *page2,
- const struct nfs4_fs_location *location)
+static int try_location(struct fs_context *fc,
+ const struct nfs4_fs_location *location)
{
- const size_t addr_bufsize = sizeof(struct sockaddr_storage);
- struct net *net = rpc_net_ns(NFS_SB(mountdata->sb)->client);
- struct vfsmount *mnt = ERR_PTR(-ENOENT);
- char *mnt_path;
- unsigned int maxbuflen;
- unsigned int s;
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ unsigned int len, s;
+ char *export_path, *source, *p;
+ int ret = -ENOENT;
+
+ /* Allocate a buffer big enough to hold any of the hostnames plus a
+ * NUL terminator, and a second buffer big enough to hold any hostname
+ * plus a colon plus the path.
+ */
+ len = 0;
+ for (s = 0; s < location->nservers; s++) {
+ const struct nfs4_string *buf = &location->servers[s];
+ if (buf->len > len)
+ len = buf->len;
+ }
- mnt_path = nfs4_pathname_string(&location->rootpath, page2, PAGE_SIZE);
- if (IS_ERR(mnt_path))
- return ERR_CAST(mnt_path);
- mountdata->mnt_path = mnt_path;
- maxbuflen = mnt_path - 1 - page2;
+ kfree(ctx->nfs_server.hostname);
+ ctx->nfs_server.hostname = kmalloc(len + 1, GFP_KERNEL);
+ if (!ctx->nfs_server.hostname)
+ return -ENOMEM;
- mountdata->addr = kmalloc(addr_bufsize, GFP_KERNEL);
- if (mountdata->addr == NULL)
- return ERR_PTR(-ENOMEM);
+ export_path = nfs4_pathname_string(&location->rootpath,
+ &ctx->nfs_server.export_path_len);
+ if (IS_ERR(export_path))
+ return PTR_ERR(export_path);
+
+ ctx->nfs_server.export_path = export_path;
+
+ source = kmalloc(len + 1 + ctx->nfs_server.export_path_len + 1,
+ GFP_KERNEL);
+ if (!source)
+ return -ENOMEM;
+ kfree(fc->source);
+ fc->source = source;
for (s = 0; s < location->nservers; s++) {
const struct nfs4_string *buf = &location->servers[s];
- if (buf->len <= 0 || buf->len >= maxbuflen)
- continue;
-
if (memchr(buf->data, IPV6_SCOPE_DELIMITER, buf->len))
continue;
- mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
- mountdata->addr, addr_bufsize, net);
- if (mountdata->addrlen == 0)
+ ctx->nfs_server.addrlen =
+ nfs_parse_server_name(buf->data, buf->len,
+ &ctx->nfs_server.address,
+ sizeof(ctx->nfs_server._address),
+ fc->net_ns);
+ if (ctx->nfs_server.addrlen == 0)
continue;
- memcpy(page2, buf->data, buf->len);
- page2[buf->len] = '\0';
- mountdata->hostname = page2;
+ rpc_set_port(&ctx->nfs_server.address, NFS_PORT);
- snprintf(page, PAGE_SIZE, "%s:%s",
- mountdata->hostname,
- mountdata->mnt_path);
+ memcpy(ctx->nfs_server.hostname, buf->data, buf->len);
+ ctx->nfs_server.hostname[buf->len] = '\0';
- mnt = vfs_submount(mountdata->dentry, &nfs4_referral_fs_type, page, mountdata);
- if (!IS_ERR(mnt))
- break;
+ p = source;
+ memcpy(p, buf->data, buf->len);
+ p += buf->len;
+ *p++ = ':';
+ memcpy(p, ctx->nfs_server.export_path, ctx->nfs_server.export_path_len);
+ p += ctx->nfs_server.export_path_len;
+ *p = 0;
+
+ ret = nfs4_get_referral_tree(fc);
+ if (ret == 0)
+ return 0;
}
- kfree(mountdata->addr);
- return mnt;
+
+ return ret;
}
/**
@@ -293,38 +358,23 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
* @locations: array of NFSv4 server location information
*
*/
-static struct vfsmount *nfs_follow_referral(struct dentry *dentry,
- const struct nfs4_fs_locations *locations)
+static int nfs_follow_referral(struct fs_context *fc,
+ const struct nfs4_fs_locations *locations)
{
- struct vfsmount *mnt = ERR_PTR(-ENOENT);
- struct nfs_clone_mount mountdata = {
- .sb = dentry->d_sb,
- .dentry = dentry,
- .authflavor = NFS_SB(dentry->d_sb)->client->cl_auth->au_flavor,
- };
- char *page = NULL, *page2 = NULL;
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
int loc, error;
if (locations == NULL || locations->nlocations <= 0)
- goto out;
-
- dprintk("%s: referral at %pd2\n", __func__, dentry);
-
- page = (char *) __get_free_page(GFP_USER);
- if (!page)
- goto out;
+ return -ENOENT;
- page2 = (char *) __get_free_page(GFP_USER);
- if (!page2)
- goto out;
+ dprintk("%s: referral at %pd2\n", __func__, ctx->clone_data.dentry);
/* Ensure fs path is a prefix of current dentry path */
- error = nfs4_validate_fspath(dentry, locations, page, page2);
- if (error < 0) {
- mnt = ERR_PTR(error);
- goto out;
- }
+ error = nfs4_validate_fspath(ctx->clone_data.dentry, locations, ctx);
+ if (error < 0)
+ return error;
+ error = -ENOENT;
for (loc = 0; loc < locations->nlocations; loc++) {
const struct nfs4_fs_location *location = &locations->locations[loc];
@@ -332,15 +382,12 @@ static struct vfsmount *nfs_follow_referral(struct dentry *dentry,
location->rootpath.ncomponents == 0)
continue;
- mnt = try_location(&mountdata, page, page2, location);
- if (!IS_ERR(mnt))
- break;
+ error = try_location(fc, location);
+ if (error == 0)
+ return 0;
}
-out:
- free_page((unsigned long) page);
- free_page((unsigned long) page2);
- return mnt;
+ return error;
}
/*
@@ -348,71 +395,72 @@ out:
* @dentry - dentry of referral
*
*/
-static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
+static int nfs_do_refmount(struct fs_context *fc, struct rpc_clnt *client)
{
- struct vfsmount *mnt = ERR_PTR(-ENOMEM);
- struct dentry *parent;
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ struct dentry *dentry, *parent;
struct nfs4_fs_locations *fs_locations = NULL;
struct page *page;
- int err;
+ int err = -ENOMEM;
/* BUG_ON(IS_ROOT(dentry)); */
page = alloc_page(GFP_KERNEL);
- if (page == NULL)
- return mnt;
+ if (!page)
+ return -ENOMEM;
fs_locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
- if (fs_locations == NULL)
+ if (!fs_locations)
goto out_free;
/* Get locations */
- mnt = ERR_PTR(-ENOENT);
-
+ dentry = ctx->clone_data.dentry;
parent = dget_parent(dentry);
dprintk("%s: getting locations for %pd2\n",
__func__, dentry);
err = nfs4_proc_fs_locations(client, d_inode(parent), &dentry->d_name, fs_locations, page);
dput(parent);
- if (err != 0 ||
- fs_locations->nlocations <= 0 ||
+ if (err != 0)
+ goto out_free_2;
+
+ err = -ENOENT;
+ if (fs_locations->nlocations <= 0 ||
fs_locations->fs_path.ncomponents <= 0)
- goto out_free;
+ goto out_free_2;
- mnt = nfs_follow_referral(dentry, fs_locations);
+ err = nfs_follow_referral(fc, fs_locations);
+out_free_2:
+ kfree(fs_locations);
out_free:
__free_page(page);
- kfree(fs_locations);
- return mnt;
+ return err;
}
-struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
- struct nfs_fh *fh, struct nfs_fattr *fattr)
+int nfs4_submount(struct fs_context *fc, struct nfs_server *server)
{
- rpc_authflavor_t flavor = server->client->cl_auth->au_flavor;
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ struct dentry *dentry = ctx->clone_data.dentry;
struct dentry *parent = dget_parent(dentry);
struct inode *dir = d_inode(parent);
- const struct qstr *name = &dentry->d_name;
struct rpc_clnt *client;
- struct vfsmount *mnt;
+ int ret;
/* Look it up again to get its attributes and sec flavor */
- client = nfs4_proc_lookup_mountpoint(dir, name, fh, fattr);
+ client = nfs4_proc_lookup_mountpoint(dir, dentry, ctx->mntfh,
+ ctx->clone_data.fattr);
dput(parent);
if (IS_ERR(client))
- return ERR_CAST(client);
+ return PTR_ERR(client);
- if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
- mnt = nfs_do_refmount(client, dentry);
- goto out;
+ ctx->selected_flavor = client->cl_auth->au_flavor;
+ if (ctx->clone_data.fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
+ ret = nfs_do_refmount(fc, client);
+ else
+ ret = nfs_do_submount(fc);
- if (client->cl_auth->au_flavor != flavor)
- flavor = client->cl_auth->au_flavor;
- mnt = nfs_do_submount(dentry, fh, fattr, flavor);
-out:
rpc_shutdown_client(client);
- return mnt;
+ return ret;
}
/*
@@ -453,7 +501,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
rpc_set_port(sap, NFS_PORT);
error = -ENOMEM;
- hostname = kstrndup(buf->data, buf->len, GFP_KERNEL);
+ hostname = kmemdup_nul(buf->data, buf->len, GFP_KERNEL);
if (hostname == NULL)
break;
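
The nfs4_pathname_string() rewrite above swaps the old
build-backwards-from-the-end-of-a-page trick for a two-pass scheme: measure,
allocate exactly, fill forwards. The same pattern in freestanding C, with
illustrative types standing in for the nfs4_string structs:

#include <stdlib.h>
#include <string.h>

struct comp { const char *data; size_t len; };

/* Join components as "/a/b/c"; caller frees. Returns NULL on OOM. */
static char *join_path(const struct comp *c, int n)
{
        size_t len = 0;
        char *buf, *p;
        int i;

        for (i = 0; i < n; i++)         /* pass 1: measure the "/name" runs */
                len += 1 + c[i].len;

        p = buf = malloc(len + 1);
        if (!buf)
                return NULL;

        for (i = 0; i < n; i++) {       /* pass 2: fill forwards */
                *p++ = '/';
                memcpy(p, c[i].data, c[i].len);
                p += c[i].len;
        }
        *p = '\0';
        return buf;
}

For components {"export", "home"} this yields "/export/home", matching what
the old code produced at the tail of its page buffer.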
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 76d37161409a..69b7ab7a5815 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1097,11 +1097,12 @@ static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
return ret;
}
-static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
- struct nfs_server *server,
- struct rpc_message *msg,
- struct nfs4_sequence_args *args,
- struct nfs4_sequence_res *res)
+static int nfs4_do_call_sync(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ unsigned short task_flags)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_call_sync_data data = {
@@ -1113,12 +1114,23 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
.rpc_client = clnt,
.rpc_message = msg,
.callback_ops = clp->cl_mvops->call_sync_ops,
- .callback_data = &data
+ .callback_data = &data,
+ .flags = task_flags,
};
return nfs4_call_sync_custom(&task_setup);
}
+static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res)
+{
+ return nfs4_do_call_sync(clnt, server, msg, args, res, 0);
+}
+
int nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
@@ -2962,10 +2974,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
struct dentry *dentry;
struct nfs4_state *state;
fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
+ struct inode *dir = d_inode(opendata->dir);
+ unsigned long dir_verifier;
unsigned int seq;
int ret;
seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+ dir_verifier = nfs_save_change_attribute(dir);
ret = _nfs4_proc_open(opendata, ctx);
if (ret != 0)
@@ -2993,8 +3008,19 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
dput(ctx->dentry);
ctx->dentry = dentry = alias;
}
- nfs_set_verifier(dentry,
- nfs_save_change_attribute(d_inode(opendata->dir)));
+ }
+
+ switch (opendata->o_arg.claim) {
+ default:
+ break;
+ case NFS4_OPEN_CLAIM_NULL:
+ case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+ case NFS4_OPEN_CLAIM_DELEGATE_PREV:
+ if (!opendata->rpc_done)
+ break;
+ if (opendata->o_res.delegation_type != 0)
+ dir_verifier = nfs_save_change_attribute(dir);
+ nfs_set_verifier(dentry, dir_verifier);
}
/* Parse layoutget results before we check for access */
@@ -3187,6 +3213,11 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
exception.retry = 1;
continue;
}
+ if (status == -NFS4ERR_EXPIRED) {
+ nfs4_schedule_lease_recovery(server->nfs_client);
+ exception.retry = 1;
+ continue;
+ }
if (status == -EAGAIN) {
/* We must have found a delegation */
exception.retry = 1;
@@ -3239,6 +3270,8 @@ static int _nfs4_do_setattr(struct inode *inode,
nfs_put_lock_context(l_ctx);
if (status == -EIO)
return -EBADF;
+ else if (status == -EAGAIN)
+ goto zero_stateid;
} else {
zero_stateid:
nfs4_stateid_copy(&arg->stateid, &zero_stateid);
@@ -4064,11 +4097,18 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_argp = &args,
.rpc_resp = &res,
};
+ unsigned short task_flags = 0;
+
+ /* Is this an attribute revalidation, subject to softreval? */
+ if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
+ task_flags |= RPC_TASK_TIMEOUT;
nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
nfs_fattr_init(fattr);
- return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
+ return nfs4_do_call_sync(server->client, server, &msg,
+ &args.seq_args, &res.seq_res, task_flags);
}
int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
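
The wrapper split above is a pattern this patch repeats (NFSv4 lookup below, and the NFSv2 getattr/lookup paths in fs/nfs/proc.c): nfs4_call_sync_sequence() keeps its old zero-flag behaviour by forwarding to nfs4_do_call_sync(), while revalidation callers on a softreval mount pass RPC_TASK_TIMEOUT so the synchronous RPC fails fast instead of hanging on an unresponsive server. A stand-alone C sketch of the shape, assuming illustrative flag values (the real ones live in the sunrpc and nfs_mount headers):

#include <stdio.h>

#define RPC_TASK_TIMEOUT	0x0004	/* illustrative value only */
#define NFS_MOUNT_SOFTREVAL	0x0800	/* illustrative value only */

static int do_call_sync_model(unsigned short task_flags)
{
	printf("sync RPC, task_flags=0x%x\n", task_flags);
	return 0;
}

static int call_sync_sequence_model(void)
{
	return do_call_sync_model(0);	/* legacy callers: no extra flags */
}

int main(void)
{
	unsigned int server_flags = NFS_MOUNT_SOFTREVAL;
	unsigned short task_flags = 0;

	if (server_flags & NFS_MOUNT_SOFTREVAL)
		task_flags |= RPC_TASK_TIMEOUT;	/* fail fast, don't hang */

	call_sync_sequence_model();		/* old path */
	return do_call_sync_model(task_flags);	/* softreval path */
}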
@@ -4156,7 +4196,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
- const struct qstr *name, struct nfs_fh *fhandle,
+ struct dentry *dentry, struct nfs_fh *fhandle,
struct nfs_fattr *fattr, struct nfs4_label *label)
{
struct nfs_server *server = NFS_SERVER(dir);
@@ -4164,7 +4204,7 @@ static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
struct nfs4_lookup_arg args = {
.bitmask = server->attr_bitmask,
.dir_fh = NFS_FH(dir),
- .name = name,
+ .name = &dentry->d_name,
};
struct nfs4_lookup_res res = {
.server = server,
@@ -4177,13 +4217,20 @@ static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
.rpc_argp = &args,
.rpc_resp = &res,
};
+ unsigned short task_flags = 0;
+
+ /* Is this an attribute revalidation, subject to softreval? */
+ if (nfs_lookup_is_soft_revalidate(dentry))
+ task_flags |= RPC_TASK_TIMEOUT;
args.bitmask = nfs4_bitmask(server, label);
nfs_fattr_init(fattr);
- dprintk("NFS call lookup %s\n", name->name);
- status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
+ dprintk("NFS call lookup %pd2\n", dentry);
+ nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
+ status = nfs4_do_call_sync(clnt, server, &msg,
+ &args.seq_args, &res.seq_res, task_flags);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
@@ -4197,16 +4244,17 @@ static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
}
static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
- const struct qstr *name, struct nfs_fh *fhandle,
+ struct dentry *dentry, struct nfs_fh *fhandle,
struct nfs_fattr *fattr, struct nfs4_label *label)
{
struct nfs4_exception exception = {
.interruptible = true,
};
struct rpc_clnt *client = *clnt;
+ const struct qstr *name = &dentry->d_name;
int err;
do {
- err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
+ err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr, label);
trace_nfs4_lookup(dir, name, err);
switch (err) {
case -NFS4ERR_BADNAME:
@@ -4241,14 +4289,14 @@ out:
return err;
}
-static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
+static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
struct nfs_fh *fhandle, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
int status;
struct rpc_clnt *client = NFS_CLIENT(dir);
- status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
+ status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, label);
if (client != NFS_CLIENT(dir)) {
rpc_shutdown_client(client);
nfs_fixup_secinfo_attributes(fattr);
@@ -4257,13 +4305,13 @@ static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
}
struct rpc_clnt *
-nfs4_proc_lookup_mountpoint(struct inode *dir, const struct qstr *name,
+nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct rpc_clnt *client = NFS_CLIENT(dir);
int status;
- status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
+ status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, NULL);
if (status < 0)
return ERR_PTR(status);
return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
@@ -5019,16 +5067,13 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
struct nfs4_exception exception = {
.interruptible = true,
};
- unsigned long now = jiffies;
int err;
do {
err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
if (err == 0) {
- nfs4_set_lease_period(server->nfs_client,
- fsinfo->lease_time * HZ,
- now);
+ nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
break;
}
err = nfs4_handle_exception(server, err, &exception);
@@ -5291,7 +5336,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
hdr->timestamp = jiffies;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
- nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1, 0);
+ nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
}
@@ -5582,10 +5627,9 @@ out:
*/
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
- struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
+ struct page **pages;
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
- .acl_pages = pages,
.acl_len = buflen,
};
struct nfs_getaclres res = {
@@ -5596,11 +5640,19 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
.rpc_argp = &args,
.rpc_resp = &res,
};
- unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
+ unsigned int npages;
int ret = -ENOMEM, i;
+ struct nfs_server *server = NFS_SERVER(inode);
- if (npages > ARRAY_SIZE(pages))
- return -ERANGE;
+ if (buflen == 0)
+ buflen = server->rsize;
+
+ npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
+ pages = kmalloc_array(npages, sizeof(struct page *), GFP_NOFS);
+ if (!pages)
+ return -ENOMEM;
+
+ args.acl_pages = pages;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
@@ -5646,6 +5698,7 @@ out_free:
__free_page(pages[i]);
if (res.acl_scratch)
__free_page(res.acl_scratch);
+ kfree(pages);
return ret;
}
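
The getacl hunks above drop the on-stack pages[NFS4ACL_MAXPAGES + 1] array, which rejected ACLs beyond a compile-time cap with -ERANGE, in favour of a kmalloc_array() sized from buflen; a zero buflen (a size-probe call) is budgeted at one rsize, and the extra page absorbs data that is not page-aligned within the XDR reply. A user-space sketch of the sizing arithmetic (function name is hypothetical):

#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void **alloc_acl_page_array(size_t buflen, size_t rsize)
{
	size_t npages;

	if (buflen == 0)
		buflen = rsize;	/* size-probe call: budget one rsize chunk */
	npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;	/* +1 for unaligned data */
	return calloc(npages, sizeof(void *));
}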
@@ -6084,6 +6137,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
.callback_data = &setclientid,
.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
};
+ unsigned long now = jiffies;
int status;
/* nfs_client_id4 */
@@ -6116,6 +6170,9 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
put_rpccred(setclientid.sc_cred);
}
+
+ if (status == 0)
+ do_renew_lease(clp, now);
out:
trace_nfs4_setclientid(clp, status);
dprintk("NFS reply setclientid: %d\n", status);
@@ -6859,7 +6916,7 @@ static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_
case -NFS4ERR_STALE_STATEID:
lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
nfs4_schedule_lease_recovery(server->nfs_client);
- };
+ }
}
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
@@ -8203,6 +8260,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
struct rpc_task *task;
struct nfs41_exchange_id_args *argp;
struct nfs41_exchange_id_res *resp;
+ unsigned long now = jiffies;
int status;
task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
@@ -8223,6 +8281,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
if (status != 0)
goto out;
+ do_renew_lease(clp, now);
+
clp->cl_clientid = resp->clientid;
clp->cl_exchange_flags = resp->flags;
clp->cl_seqid = resp->seqid;
@@ -8626,7 +8686,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
case -EACCES:
case -EAGAIN:
goto out;
- };
+ }
clp->cl_seqid++;
if (!status) {
@@ -10001,7 +10061,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.file_ops = &nfs4_file_operations,
.getroot = nfs4_proc_get_root,
.submount = nfs4_submount,
- .try_mount = nfs4_try_mount,
+ .try_get_tree = nfs4_try_get_tree,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
.lookup = nfs4_proc_lookup,
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 6ea431b067dd..ff876dda7f06 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -138,15 +138,12 @@ nfs4_kill_renewd(struct nfs_client *clp)
*
* @clp: pointer to nfs_client
* @lease: new value for lease period
- * @lastrenewed: time at which lease was last renewed
*/
void nfs4_set_lease_period(struct nfs_client *clp,
- unsigned long lease,
- unsigned long lastrenewed)
+ unsigned long lease)
{
spin_lock(&clp->cl_lock);
clp->cl_lease_time = lease;
- clp->cl_last_renewal = lastrenewed;
spin_unlock(&clp->cl_lock);
/* Cap maximum reconnect timeout at 1/2 lease period */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 34552329233d..f7723d221945 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -92,17 +92,15 @@ static int nfs4_setup_state_renewal(struct nfs_client *clp)
{
int status;
struct nfs_fsinfo fsinfo;
- unsigned long now;
if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
nfs4_schedule_state_renewal(clp);
return 0;
}
- now = jiffies;
status = nfs4_proc_get_lease_time(clp, &fsinfo);
if (status == 0) {
- nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
+ nfs4_set_lease_period(clp, fsinfo.lease_time * HZ);
nfs4_schedule_state_renewal(clp);
}
@@ -766,6 +764,7 @@ void nfs4_put_open_state(struct nfs4_state *state)
list_del(&state->open_states);
spin_unlock(&inode->i_lock);
spin_unlock(&owner->so_lock);
+ nfs4_inode_return_delegation_on_close(inode);
iput(inode);
nfs4_free_open_state(state);
nfs4_put_state_owner(owner);
@@ -1135,7 +1134,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
case -NFS4ERR_MOVED:
/* Non-seqid mutating errors */
return;
- };
+ }
/*
* Note: no locking needed as we are guaranteed to be first
* on the sequence list
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 2c9cbade561a..1475f932d7da 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -4,6 +4,7 @@
*/
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mount.h>
#include <linux/nfs4_mount.h>
#include <linux/nfs_fs.h>
#include "delegation.h"
@@ -18,36 +19,6 @@
static int nfs4_write_inode(struct inode *inode, struct writeback_control *wbc);
static void nfs4_evict_inode(struct inode *inode);
-static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data);
-static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data);
-static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data);
-
-static struct file_system_type nfs4_remote_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs4",
- .mount = nfs4_remote_mount,
- .kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
-
-static struct file_system_type nfs4_remote_referral_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs4",
- .mount = nfs4_remote_referral_mount,
- .kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
-
-struct file_system_type nfs4_referral_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs4",
- .mount = nfs4_referral_mount,
- .kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
static const struct super_operations nfs4_sops = {
.alloc_inode = nfs_alloc_inode,
@@ -61,16 +32,15 @@ static const struct super_operations nfs4_sops = {
.show_devname = nfs_show_devname,
.show_path = nfs_show_path,
.show_stats = nfs_show_stats,
- .remount_fs = nfs_remount,
};
struct nfs_subversion nfs_v4 = {
- .owner = THIS_MODULE,
- .nfs_fs = &nfs4_fs_type,
- .rpc_vers = &nfs_version4,
- .rpc_ops = &nfs_v4_clientops,
- .sops = &nfs4_sops,
- .xattr = nfs4_xattr_handlers,
+ .owner = THIS_MODULE,
+ .nfs_fs = &nfs4_fs_type,
+ .rpc_vers = &nfs_version4,
+ .rpc_ops = &nfs_v4_clientops,
+ .sops = &nfs4_sops,
+ .xattr = nfs4_xattr_handlers,
};
static int nfs4_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -101,53 +71,6 @@ static void nfs4_evict_inode(struct inode *inode)
nfs_clear_inode(inode);
}
-/*
- * Get the superblock for the NFS4 root partition
- */
-static struct dentry *
-nfs4_remote_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *info)
-{
- struct nfs_mount_info *mount_info = info;
- struct nfs_server *server;
- struct dentry *mntroot = ERR_PTR(-ENOMEM);
-
- mount_info->set_security = nfs_set_sb_security;
-
- /* Get a volume representation */
- server = nfs4_create_server(mount_info, &nfs_v4);
- if (IS_ERR(server)) {
- mntroot = ERR_CAST(server);
- goto out;
- }
-
- mntroot = nfs_fs_mount_common(server, flags, dev_name, mount_info, &nfs_v4);
-
-out:
- return mntroot;
-}
-
-static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
- int flags, void *data, const char *hostname)
-{
- struct vfsmount *root_mnt;
- char *root_devname;
- size_t len;
-
- len = strlen(hostname) + 5;
- root_devname = kmalloc(len, GFP_KERNEL);
- if (root_devname == NULL)
- return ERR_PTR(-ENOMEM);
- /* Does hostname needs to be enclosed in brackets? */
- if (strchr(hostname, ':'))
- snprintf(root_devname, len, "[%s]:/", hostname);
- else
- snprintf(root_devname, len, "%s:/", hostname);
- root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
- kfree(root_devname);
- return root_mnt;
-}
-
struct nfs_referral_count {
struct list_head list;
const struct task_struct *task;
@@ -214,111 +137,125 @@ static void nfs_referral_loop_unprotect(void)
kfree(p);
}
-static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
- const char *export_path)
+static int do_nfs4_mount(struct nfs_server *server,
+ struct fs_context *fc,
+ const char *hostname,
+ const char *export_path)
{
+ struct nfs_fs_context *root_ctx;
+ struct fs_context *root_fc;
+ struct vfsmount *root_mnt;
struct dentry *dentry;
- int err;
+ size_t len;
+ int ret;
- if (IS_ERR(root_mnt))
- return ERR_CAST(root_mnt);
+ struct fs_parameter param = {
+ .key = "source",
+ .type = fs_value_is_string,
+ .dirfd = -1,
+ };
- err = nfs_referral_loop_protect();
- if (err) {
- mntput(root_mnt);
- return ERR_PTR(err);
+ if (IS_ERR(server))
+ return PTR_ERR(server);
+
+ root_fc = vfs_dup_fs_context(fc);
+ if (IS_ERR(root_fc)) {
+ nfs_free_server(server);
+ return PTR_ERR(root_fc);
}
+ kfree(root_fc->source);
+ root_fc->source = NULL;
- dentry = mount_subtree(root_mnt, export_path);
- nfs_referral_loop_unprotect();
+ root_ctx = nfs_fc2context(root_fc);
+ root_ctx->internal = true;
+ root_ctx->server = server;
+ /* We leave export_path unset as it's not used to find the root. */
- return dentry;
-}
+ len = strlen(hostname) + 5;
+ param.string = kmalloc(len, GFP_KERNEL);
+ if (param.string == NULL) {
+ put_fs_context(root_fc);
+ return -ENOMEM;
+ }
-struct dentry *nfs4_try_mount(int flags, const char *dev_name,
- struct nfs_mount_info *mount_info,
- struct nfs_subversion *nfs_mod)
-{
- char *export_path;
- struct vfsmount *root_mnt;
- struct dentry *res;
- struct nfs_parsed_mount_data *data = mount_info->parsed;
+ /* Does hostname need to be enclosed in brackets? */
+ if (strchr(hostname, ':'))
+ param.size = snprintf(param.string, len, "[%s]:/", hostname);
+ else
+ param.size = snprintf(param.string, len, "%s:/", hostname);
+ ret = vfs_parse_fs_param(root_fc, &param);
+ kfree(param.string);
+ if (ret < 0) {
+ put_fs_context(root_fc);
+ return ret;
+ }
+ root_mnt = fc_mount(root_fc);
+ put_fs_context(root_fc);
+
+ if (IS_ERR(root_mnt))
+ return PTR_ERR(root_mnt);
- dfprintk(MOUNT, "--> nfs4_try_mount()\n");
+ ret = nfs_referral_loop_protect();
+ if (ret) {
+ mntput(root_mnt);
+ return ret;
+ }
- export_path = data->nfs_server.export_path;
- data->nfs_server.export_path = "/";
- root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
- data->nfs_server.hostname);
- data->nfs_server.export_path = export_path;
+ dentry = mount_subtree(root_mnt, export_path);
+ nfs_referral_loop_unprotect();
- res = nfs_follow_remote_path(root_mnt, export_path);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
- dfprintk(MOUNT, "<-- nfs4_try_mount() = %d%s\n",
- PTR_ERR_OR_ZERO(res),
- IS_ERR(res) ? " [error]" : "");
- return res;
+ fc->root = dentry;
+ return 0;
}
-static struct dentry *
-nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data)
+int nfs4_try_get_tree(struct fs_context *fc)
{
- struct nfs_mount_info mount_info = {
- .fill_super = nfs_fill_super,
- .set_security = nfs_clone_sb_security,
- .cloned = raw_data,
- };
- struct nfs_server *server;
- struct dentry *mntroot = ERR_PTR(-ENOMEM);
-
- dprintk("--> nfs4_referral_get_sb()\n");
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ int err;
- mount_info.mntfh = nfs_alloc_fhandle();
- if (mount_info.cloned == NULL || mount_info.mntfh == NULL)
- goto out;
+ dfprintk(MOUNT, "--> nfs4_try_get_tree()\n");
- /* create a new volume representation */
- server = nfs4_create_referral_server(mount_info.cloned, mount_info.mntfh);
- if (IS_ERR(server)) {
- mntroot = ERR_CAST(server);
- goto out;
+ /* We create a mount for the server's root, walk to the requested
+ * location and then create another mount for that.
+ */
+ err = do_nfs4_mount(nfs4_create_server(fc),
+ fc, ctx->nfs_server.hostname,
+ ctx->nfs_server.export_path);
+ if (err) {
+ nfs_errorf(fc, "NFS4: Couldn't follow remote path");
+ dfprintk(MOUNT, "<-- nfs4_try_get_tree() = %d [error]\n", err);
+ } else {
+ dfprintk(MOUNT, "<-- nfs4_try_get_tree() = 0\n");
}
-
- mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, &nfs_v4);
-out:
- nfs_free_fhandle(mount_info.mntfh);
- return mntroot;
+ return err;
}
/*
* Create an NFS4 server record on referral traversal
*/
-static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
+int nfs4_get_referral_tree(struct fs_context *fc)
{
- struct nfs_clone_mount *data = raw_data;
- char *export_path;
- struct vfsmount *root_mnt;
- struct dentry *res;
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ int err;
dprintk("--> nfs4_referral_mount()\n");
- export_path = data->mnt_path;
- data->mnt_path = "/";
-
- root_mnt = nfs_do_root_mount(&nfs4_remote_referral_fs_type,
- flags, data, data->hostname);
- data->mnt_path = export_path;
-
- res = nfs_follow_remote_path(root_mnt, export_path);
- dprintk("<-- nfs4_referral_mount() = %d%s\n",
- PTR_ERR_OR_ZERO(res),
- IS_ERR(res) ? " [error]" : "");
- return res;
+ /* create a new volume representation */
+ err = do_nfs4_mount(nfs4_create_referral_server(fc),
+ fc, ctx->nfs_server.hostname,
+ ctx->nfs_server.export_path);
+ if (err) {
+ nfs_errorf(fc, "NFS4: Couldn't follow remote path");
+ dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = %d [error]\n", err);
+ } else {
+ dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = 0\n");
+ }
+ return err;
}
-
static int __init init_nfs_v4(void)
{
int err;
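
do_nfs4_mount() above rebuilds the old two-step NFSv4 mount on the fs_context API: duplicate the caller's context, point its source at the server's root, fc_mount() that, then walk down export_path with mount_subtree() and install the result as fc->root. The one piece of string handling is the root device name, where an IPv6 host (anything containing ':') must be bracketed; a runnable user-space sketch of that construction:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *root_devname(const char *hostname)
{
	size_t len = strlen(hostname) + 5;	/* '[' ']' ':' '/' NUL */
	char *s = malloc(len);

	if (!s)
		return NULL;
	if (strchr(hostname, ':'))
		snprintf(s, len, "[%s]:/", hostname);	/* IPv6 literal */
	else
		snprintf(s, len, "%s:/", hostname);
	return s;
}

int main(void)
{
	char *a = root_devname("server.example.com");
	char *b = root_devname("fd00::1");

	printf("%s\n%s\n", a, b);	/* server.example.com:/ and [fd00::1]:/ */
	free(a);
	free(b);
	return 0;
}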
diff --git a/fs/nfs/nfs4trace.c b/fs/nfs/nfs4trace.c
index 1a8f376b3f73..d9ac556bebcf 100644
--- a/fs/nfs/nfs4trace.c
+++ b/fs/nfs/nfs4trace.c
@@ -24,4 +24,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_pagelist);
EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_pagelist);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_read_error);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_write_error);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_commit_error);
#endif
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index e60b6fbd5ada..1e97e5e04cb4 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -155,6 +155,9 @@ TRACE_DEFINE_ENUM(NFS4ERR_WRONG_CRED);
TRACE_DEFINE_ENUM(NFS4ERR_WRONG_TYPE);
TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
+TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_MDS);
+TRACE_DEFINE_ENUM(NFS4ERR_RESET_TO_PNFS);
+
#define show_nfsv4_errors(error) \
__print_symbolic(error, \
{ NFS4_OK, "OK" }, \
@@ -305,7 +308,10 @@ TRACE_DEFINE_ENUM(NFS4ERR_XDEV);
{ NFS4ERR_WRONGSEC, "WRONGSEC" }, \
{ NFS4ERR_WRONG_CRED, "WRONG_CRED" }, \
{ NFS4ERR_WRONG_TYPE, "WRONG_TYPE" }, \
- { NFS4ERR_XDEV, "XDEV" })
+ { NFS4ERR_XDEV, "XDEV" }, \
+ /* ***** Internal to Linux NFS client ***** */ \
+ { NFS4ERR_RESET_TO_MDS, "RESET_TO_MDS" }, \
+ { NFS4ERR_RESET_TO_PNFS, "RESET_TO_PNFS" })
#define show_open_flags(flags) \
__print_flags(flags, "|", \
@@ -352,7 +358,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
),
TP_fast_assign(
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__assign_str(dstaddr, clp->cl_hostname);
),
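
The `error < 0 ? -error : 0` edits repeated throughout this file normalise what lands in the tracepoint's unsigned error field: a negative errno/NFS4ERR status is stored as its magnitude, and anything non-negative (success) collapses to 0, keeping the show_nfsv4_errors() symbol lookup sane. As a one-line model:

/* Model of the normalisation applied in each TP_fast_assign. */
static unsigned long tp_error(long status)
{
	return status < 0 ? -status : 0;
}
/* tp_error(-10011) == 10011 (NFS4ERR_EXPIRED); tp_error(0) == 0 */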
@@ -432,7 +438,8 @@ TRACE_EVENT(nfs4_sequence_done,
__entry->target_highest_slotid =
res->sr_target_highest_slotid;
__entry->status_flags = res->sr_status_flags;
- __entry->error = res->sr_status;
+ __entry->error = res->sr_status < 0 ?
+ -res->sr_status : 0;
),
TP_printk(
"error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
@@ -640,7 +647,7 @@ TRACE_EVENT(nfs4_state_mgr_failed,
),
TP_fast_assign(
- __entry->error = status;
+ __entry->error = status < 0 ? -status : 0;
__entry->state = clp->cl_state;
__assign_str(hostname, clp->cl_hostname);
__assign_str(section, section);
@@ -659,7 +666,7 @@ TRACE_EVENT(nfs4_xdr_status,
TP_PROTO(
const struct xdr_stream *xdr,
u32 op,
- int error
+ u32 error
),
TP_ARGS(xdr, op, error),
@@ -691,6 +698,41 @@ TRACE_EVENT(nfs4_xdr_status,
)
);
+DECLARE_EVENT_CLASS(nfs4_cb_error_class,
+ TP_PROTO(
+ __be32 xid,
+ u32 cb_ident
+ ),
+
+ TP_ARGS(xid, cb_ident),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, cbident)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(xid);
+ __entry->cbident = cb_ident;
+ ),
+
+ TP_printk(
+ "xid=0x%08x cb_ident=0x%08x",
+ __entry->xid, __entry->cbident
+ )
+);
+
+#define DEFINE_CB_ERROR_EVENT(name) \
+ DEFINE_EVENT(nfs4_cb_error_class, nfs_cb_##name, \
+ TP_PROTO( \
+ __be32 xid, \
+ u32 cb_ident \
+ ), \
+ TP_ARGS(xid, cb_ident))
+
+DEFINE_CB_ERROR_EVENT(no_clp);
+DEFINE_CB_ERROR_EVENT(badprinc);
+
DECLARE_EVENT_CLASS(nfs4_open_event,
TP_PROTO(
const struct nfs_open_context *ctx,
@@ -849,7 +891,7 @@ TRACE_EVENT(nfs4_close,
__entry->fileid = NFS_FILEID(inode);
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
__entry->fmode = (__force unsigned int)state->state;
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(args->stateid.seqid);
__entry->stateid_hash =
@@ -914,7 +956,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
TP_fast_assign(
const struct inode *inode = state->inode;
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->cmd = cmd;
__entry->type = request->fl_type;
__entry->start = request->fl_start;
@@ -986,7 +1028,7 @@ TRACE_EVENT(nfs4_set_lock,
TP_fast_assign(
const struct inode *inode = state->inode;
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->cmd = cmd;
__entry->type = request->fl_type;
__entry->start = request->fl_start;
@@ -1164,7 +1206,7 @@ TRACE_EVENT(nfs4_delegreturn_exit,
TP_fast_assign(
__entry->dev = res->server->s_dev;
__entry->fhandle = nfs_fhandle_hash(args->fhandle);
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(args->stateid->seqid);
__entry->stateid_hash =
@@ -1204,7 +1246,7 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
TP_fast_assign(
const struct inode *inode = state->inode;
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = NFS_FILEID(inode);
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
@@ -1306,7 +1348,7 @@ TRACE_EVENT(nfs4_lookupp,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = NFS_FILEID(inode);
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
),
TP_printk(
@@ -1342,7 +1384,7 @@ TRACE_EVENT(nfs4_rename,
__entry->dev = olddir->i_sb->s_dev;
__entry->olddir = NFS_FILEID(olddir);
__entry->newdir = NFS_FILEID(newdir);
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__assign_str(oldname, oldname->name);
__assign_str(newname, newname->name);
),
@@ -1433,7 +1475,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = NFS_FILEID(inode);
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(stateid->seqid);
__entry->stateid_hash =
@@ -1489,7 +1531,7 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
__entry->valid = fattr->valid;
__entry->fhandle = nfs_fhandle_hash(fhandle);
__entry->fileid = (fattr->valid & NFS_ATTR_FATTR_FILEID) ? fattr->fileid : 0;
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
),
TP_printk(
@@ -1536,7 +1578,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
),
TP_fast_assign(
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->fhandle = nfs_fhandle_hash(fhandle);
if (!IS_ERR_OR_NULL(inode)) {
__entry->fileid = NFS_FILEID(inode);
@@ -1593,7 +1635,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
),
TP_fast_assign(
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->fhandle = nfs_fhandle_hash(fhandle);
if (!IS_ERR_OR_NULL(inode)) {
__entry->fileid = NFS_FILEID(inode);
@@ -1694,7 +1736,8 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
__field(u32, fhandle)
__field(u64, fileid)
__field(loff_t, offset)
- __field(size_t, count)
+ __field(u32, arg_count)
+ __field(u32, res_count)
__field(unsigned long, error)
__field(int, stateid_seq)
__field(u32, stateid_hash)
@@ -1702,13 +1745,18 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
TP_fast_assign(
const struct inode *inode = hdr->inode;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = hdr->args.fh ?
+ hdr->args.fh : &nfsi->fh;
const struct nfs4_state *state =
hdr->args.context->state;
+
__entry->dev = inode->i_sb->s_dev;
- __entry->fileid = NFS_FILEID(inode);
- __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
__entry->offset = hdr->args.offset;
- __entry->count = hdr->args.count;
+ __entry->arg_count = hdr->args.count;
+ __entry->res_count = hdr->res.count;
__entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(state->stateid.seqid);
@@ -1718,14 +1766,14 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
TP_printk(
"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%zu stateid=%d:0x%08x",
+ "offset=%lld count=%u res=%u stateid=%d:0x%08x",
-__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
(long long)__entry->offset,
- __entry->count,
+ __entry->arg_count, __entry->res_count,
__entry->stateid_seq, __entry->stateid_hash
)
);
@@ -1754,7 +1802,8 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
__field(u32, fhandle)
__field(u64, fileid)
__field(loff_t, offset)
- __field(size_t, count)
+ __field(u32, arg_count)
+ __field(u32, res_count)
__field(unsigned long, error)
__field(int, stateid_seq)
__field(u32, stateid_hash)
@@ -1762,13 +1811,18 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
TP_fast_assign(
const struct inode *inode = hdr->inode;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = hdr->args.fh ?
+ hdr->args.fh : &nfsi->fh;
const struct nfs4_state *state =
hdr->args.context->state;
+
__entry->dev = inode->i_sb->s_dev;
- __entry->fileid = NFS_FILEID(inode);
- __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
__entry->offset = hdr->args.offset;
- __entry->count = hdr->args.count;
+ __entry->arg_count = hdr->args.count;
+ __entry->res_count = hdr->res.count;
__entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(state->stateid.seqid);
@@ -1778,14 +1832,14 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
TP_printk(
"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%zu stateid=%d:0x%08x",
+ "offset=%lld count=%u res=%u stateid=%d:0x%08x",
-__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
(long long)__entry->offset,
- __entry->count,
+ __entry->arg_count, __entry->res_count,
__entry->stateid_seq, __entry->stateid_hash
)
);
@@ -1814,24 +1868,28 @@ DECLARE_EVENT_CLASS(nfs4_commit_event,
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
- __field(loff_t, offset)
- __field(size_t, count)
__field(unsigned long, error)
+ __field(loff_t, offset)
+ __field(u32, count)
),
TP_fast_assign(
const struct inode *inode = data->inode;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = data->args.fh ?
+ data->args.fh : &nfsi->fh;
+
__entry->dev = inode->i_sb->s_dev;
- __entry->fileid = NFS_FILEID(inode);
- __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
__entry->offset = data->args.offset;
__entry->count = data->args.count;
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
),
TP_printk(
"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%zu",
+ "offset=%lld count=%u",
-__entry->error,
show_nfsv4_errors(__entry->error),
MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -1896,7 +1954,7 @@ TRACE_EVENT(nfs4_layoutget,
__entry->iomode = args->iomode;
__entry->offset = args->offset;
__entry->count = args->length;
- __entry->error = error;
+ __entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(state->stateid.seqid);
__entry->stateid_hash =
@@ -2094,6 +2152,115 @@ DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_write_done);
DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_read_pagelist);
DEFINE_PNFS_LAYOUT_EVENT(pnfs_mds_fallback_write_pagelist);
+DECLARE_EVENT_CLASS(nfs4_flexfiles_io_event,
+ TP_PROTO(
+ const struct nfs_pgio_header *hdr
+ ),
+
+ TP_ARGS(hdr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, error)
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, count)
+ __field(int, stateid_seq)
+ __field(u32, stateid_hash)
+ __string(dstaddr, hdr->ds_clp ?
+ rpc_peeraddr2str(hdr->ds_clp->cl_rpcclient,
+ RPC_DISPLAY_ADDR) : "unknown")
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = hdr->inode;
+
+ __entry->error = hdr->res.op_status;
+ __entry->fhandle = nfs_fhandle_hash(hdr->args.fh);
+ __entry->fileid = NFS_FILEID(inode);
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->offset = hdr->args.offset;
+ __entry->count = hdr->args.count;
+ __entry->stateid_seq =
+ be32_to_cpu(hdr->args.stateid.seqid);
+ __entry->stateid_hash =
+ nfs_stateid_hash(&hdr->args.stateid);
+ __assign_str(dstaddr, hdr->ds_clp ?
+ rpc_peeraddr2str(hdr->ds_clp->cl_rpcclient,
+ RPC_DISPLAY_ADDR) : "unknown");
+ ),
+
+ TP_printk(
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%llu count=%u stateid=%d:0x%08x dstaddr=%s",
+ -__entry->error,
+ show_nfsv4_errors(__entry->error),
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle,
+ __entry->offset, __entry->count,
+ __entry->stateid_seq, __entry->stateid_hash,
+ __get_str(dstaddr)
+ )
+);
+
+#define DEFINE_NFS4_FLEXFILES_IO_EVENT(name) \
+ DEFINE_EVENT(nfs4_flexfiles_io_event, name, \
+ TP_PROTO( \
+ const struct nfs_pgio_header *hdr \
+ ), \
+ TP_ARGS(hdr))
+DEFINE_NFS4_FLEXFILES_IO_EVENT(ff_layout_read_error);
+DEFINE_NFS4_FLEXFILES_IO_EVENT(ff_layout_write_error);
+
+TRACE_EVENT(ff_layout_commit_error,
+ TP_PROTO(
+ const struct nfs_commit_data *data
+ ),
+
+ TP_ARGS(data),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, error)
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, count)
+ __string(dstaddr, data->ds_clp ?
+ rpc_peeraddr2str(data->ds_clp->cl_rpcclient,
+ RPC_DISPLAY_ADDR) : "unknown")
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = data->inode;
+
+ __entry->error = data->res.op_status;
+ __entry->fhandle = nfs_fhandle_hash(data->args.fh);
+ __entry->fileid = NFS_FILEID(inode);
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->offset = data->args.offset;
+ __entry->count = data->args.count;
+ __assign_str(dstaddr, data->ds_clp ?
+ rpc_peeraddr2str(data->ds_clp->cl_rpcclient,
+ RPC_DISPLAY_ADDR) : "unknown");
+ ),
+
+ TP_printk(
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%llu count=%u dstaddr=%s",
+ -__entry->error,
+ show_nfsv4_errors(__entry->error),
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle,
+ __entry->offset, __entry->count,
+ __get_str(dstaddr)
+ )
+);
+
+
#endif /* CONFIG_NFS_V4_1 */
#endif /* _TRACE_NFS4_H */
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 936c57779ff4..47817ef0aadb 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1061,7 +1061,7 @@ static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *ve
static __be32 *
xdr_encode_nfstime4(__be32 *p, const struct timespec64 *t)
{
- p = xdr_encode_hyper(p, (__s64)t->tv_sec);
+ p = xdr_encode_hyper(p, t->tv_sec);
*p++ = cpu_to_be32(t->tv_nsec);
return p;
}
@@ -4097,7 +4097,7 @@ static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, str
status = NFS_ATTR_FATTR_ATIME;
bitmap[1] &= ~FATTR4_WORD1_TIME_ACCESS;
}
- dprintk("%s: atime=%ld\n", __func__, (long)time->tv_sec);
+ dprintk("%s: atime=%lld\n", __func__, time->tv_sec);
return status;
}
@@ -4115,7 +4115,7 @@ static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, s
status = NFS_ATTR_FATTR_CTIME;
bitmap[1] &= ~FATTR4_WORD1_TIME_METADATA;
}
- dprintk("%s: ctime=%ld\n", __func__, (long)time->tv_sec);
+ dprintk("%s: ctime=%lld\n", __func__, time->tv_sec);
return status;
}
@@ -4132,8 +4132,8 @@ static int decode_attr_time_delta(struct xdr_stream *xdr, uint32_t *bitmap,
status = decode_attr_time(xdr, time);
bitmap[1] &= ~FATTR4_WORD1_TIME_DELTA;
}
- dprintk("%s: time_delta=%ld %ld\n", __func__, (long)time->tv_sec,
- (long)time->tv_nsec);
+ dprintk("%s: time_delta=%lld %ld\n", __func__, time->tv_sec,
+ time->tv_nsec);
return status;
}
@@ -4197,7 +4197,7 @@ static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, str
status = NFS_ATTR_FATTR_MTIME;
bitmap[1] &= ~FATTR4_WORD1_TIME_MODIFY;
}
- dprintk("%s: mtime=%ld\n", __func__, (long)time->tv_sec);
+ dprintk("%s: mtime=%lld\n", __func__, time->tv_sec);
return status;
}
@@ -4313,11 +4313,14 @@ static int decode_write_verifier(struct xdr_stream *xdr, struct nfs_write_verifi
static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
{
+ struct nfs_writeverf *verf = res->verf;
int status;
status = decode_op_hdr(xdr, OP_COMMIT);
if (!status)
- status = decode_write_verifier(xdr, &res->verf->verifier);
+ status = decode_write_verifier(xdr, &verf->verifier);
+ if (!status)
+ verf->committed = NFS_FILE_SYNC;
return status;
}
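
decode_commit() above gains a post-condition: a successful COMMIT marks the verifier as NFS_FILE_SYNC, i.e. the committed range is now stable, which the writeback and pnfs resend paths changed later in this patch consume. A sketch of that post-condition (the enum mirrors the NFSv3 stable_how values, shown only for illustration):

enum stable_how_model { NFS_UNSTABLE = 0, NFS_DATA_SYNC = 1, NFS_FILE_SYNC = 2 };

struct writeverf_model {
	unsigned long long	verifier;
	enum stable_how_model	committed;
};

static int decode_commit_model(int status, struct writeverf_model *verf)
{
	if (!status)
		verf->committed = NFS_FILE_SYNC;	/* range is now stable */
	return status;
}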
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 2a82dcce5fc1..a9588d19a5ae 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -198,7 +198,66 @@ DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit);
DEFINE_NFS_INODE_EVENT(nfs_fsync_enter);
DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit);
DEFINE_NFS_INODE_EVENT(nfs_access_enter);
-DEFINE_NFS_INODE_EVENT_DONE(nfs_access_exit);
+
+TRACE_EVENT(nfs_access_exit,
+ TP_PROTO(
+ const struct inode *inode,
+ unsigned int mask,
+ unsigned int permitted,
+ int error
+ ),
+
+ TP_ARGS(inode, mask, permitted, error),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, error)
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(unsigned char, type)
+ __field(u64, fileid)
+ __field(u64, version)
+ __field(loff_t, size)
+ __field(unsigned long, nfsi_flags)
+ __field(unsigned long, cache_validity)
+ __field(unsigned int, mask)
+ __field(unsigned int, permitted)
+ ),
+
+ TP_fast_assign(
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ __entry->error = error < 0 ? -error : 0;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->type = nfs_umode_to_dtype(inode->i_mode);
+ __entry->version = inode_peek_iversion_raw(inode);
+ __entry->size = i_size_read(inode);
+ __entry->nfsi_flags = nfsi->flags;
+ __entry->cache_validity = nfsi->cache_validity;
+ __entry->mask = mask;
+ __entry->permitted = permitted;
+ ),
+
+ TP_printk(
+ "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "type=%u (%s) version=%llu size=%lld "
+ "cache_validity=0x%lx (%s) nfs_flags=0x%lx (%s) "
+ "mask=0x%x permitted=0x%x",
+ -__entry->error, nfs_show_status(__entry->error),
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle,
+ __entry->type,
+ nfs_show_file_type(__entry->type),
+ (unsigned long long)__entry->version,
+ (long long)__entry->size,
+ __entry->cache_validity,
+ nfs_show_cache_validity(__entry->cache_validity),
+ __entry->nfsi_flags,
+ nfs_show_nfsi_flags(__entry->nfsi_flags),
+ __entry->mask, __entry->permitted
+ )
+);
TRACE_DEFINE_ENUM(LOOKUP_FOLLOW);
TRACE_DEFINE_ENUM(LOOKUP_DIRECTORY);
@@ -818,75 +877,85 @@ TRACE_EVENT(nfs_sillyrename_unlink,
TRACE_EVENT(nfs_initiate_read,
TP_PROTO(
- const struct inode *inode,
- loff_t offset, unsigned long count
+ const struct nfs_pgio_header *hdr
),
- TP_ARGS(inode, offset, count),
+ TP_ARGS(hdr),
TP_STRUCT__entry(
- __field(loff_t, offset)
- __field(unsigned long, count)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, count)
),
TP_fast_assign(
+ const struct inode *inode = hdr->inode;
const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = hdr->args.fh ?
+ hdr->args.fh : &nfsi->fh;
- __entry->offset = offset;
- __entry->count = count;
+ __entry->offset = hdr->args.offset;
+ __entry->count = hdr->args.count;
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = nfsi->fileid;
- __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->fhandle = nfs_fhandle_hash(fh);
),
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%lu",
+ "offset=%lld count=%u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
- __entry->offset, __entry->count
+ (long long)__entry->offset, __entry->count
)
);
TRACE_EVENT(nfs_readpage_done,
TP_PROTO(
- const struct inode *inode,
- int status, loff_t offset, bool eof
+ const struct rpc_task *task,
+ const struct nfs_pgio_header *hdr
),
- TP_ARGS(inode, status, offset, eof),
+ TP_ARGS(task, hdr),
TP_STRUCT__entry(
- __field(int, status)
- __field(loff_t, offset)
- __field(bool, eof)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, arg_count)
+ __field(u32, res_count)
+ __field(bool, eof)
+ __field(int, status)
),
TP_fast_assign(
+ const struct inode *inode = hdr->inode;
const struct nfs_inode *nfsi = NFS_I(inode);
-
- __entry->status = status;
- __entry->offset = offset;
- __entry->eof = eof;
+ const struct nfs_fh *fh = hdr->args.fh ?
+ hdr->args.fh : &nfsi->fh;
+
+ __entry->status = task->tk_status;
+ __entry->offset = hdr->args.offset;
+ __entry->arg_count = hdr->args.count;
+ __entry->res_count = hdr->res.count;
+ __entry->eof = hdr->res.eof;
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = nfsi->fileid;
- __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->fhandle = nfs_fhandle_hash(fh);
),
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld status=%d%s",
+ "offset=%lld count=%u res=%u status=%d%s",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
- __entry->offset, __entry->status,
+ (long long)__entry->offset, __entry->arg_count,
+ __entry->res_count, __entry->status,
__entry->eof ? " eof" : ""
)
);
@@ -903,90 +972,144 @@ TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
TRACE_EVENT(nfs_initiate_write,
TP_PROTO(
- const struct inode *inode,
- loff_t offset, unsigned long count,
- enum nfs3_stable_how stable
+ const struct nfs_pgio_header *hdr
),
- TP_ARGS(inode, offset, count, stable),
+ TP_ARGS(hdr),
TP_STRUCT__entry(
- __field(loff_t, offset)
- __field(unsigned long, count)
- __field(enum nfs3_stable_how, stable)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, count)
+ __field(enum nfs3_stable_how, stable)
),
TP_fast_assign(
+ const struct inode *inode = hdr->inode;
const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = hdr->args.fh ?
+ hdr->args.fh : &nfsi->fh;
- __entry->offset = offset;
- __entry->count = count;
- __entry->stable = stable;
+ __entry->offset = hdr->args.offset;
+ __entry->count = hdr->args.count;
+ __entry->stable = hdr->args.stable;
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = nfsi->fileid;
- __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->fhandle = nfs_fhandle_hash(fh);
),
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%lu stable=%s",
+ "offset=%lld count=%u stable=%s",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
- __entry->offset, __entry->count,
+ (long long)__entry->offset, __entry->count,
nfs_show_stable(__entry->stable)
)
);
TRACE_EVENT(nfs_writeback_done,
TP_PROTO(
- const struct inode *inode,
- int status,
- loff_t offset,
- struct nfs_writeverf *writeverf
+ const struct rpc_task *task,
+ const struct nfs_pgio_header *hdr
),
- TP_ARGS(inode, status, offset, writeverf),
+ TP_ARGS(task, hdr),
TP_STRUCT__entry(
- __field(int, status)
- __field(loff_t, offset)
- __field(enum nfs3_stable_how, stable)
- __field(unsigned long long, verifier)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, arg_count)
+ __field(u32, res_count)
+ __field(int, status)
+ __field(enum nfs3_stable_how, stable)
+ __array(char, verifier, NFS4_VERIFIER_SIZE)
),
TP_fast_assign(
+ const struct inode *inode = hdr->inode;
const struct nfs_inode *nfsi = NFS_I(inode);
-
- __entry->status = status;
- __entry->offset = offset;
- __entry->stable = writeverf->committed;
- memcpy(&__entry->verifier, &writeverf->verifier,
- sizeof(__entry->verifier));
+ const struct nfs_fh *fh = hdr->args.fh ?
+ hdr->args.fh : &nfsi->fh;
+ const struct nfs_writeverf *verf = hdr->res.verf;
+
+ __entry->status = task->tk_status;
+ __entry->offset = hdr->args.offset;
+ __entry->arg_count = hdr->args.count;
+ __entry->res_count = hdr->res.count;
+ __entry->stable = verf->committed;
+ memcpy(__entry->verifier,
+ &verf->verifier,
+ NFS4_VERIFIER_SIZE);
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = nfsi->fileid;
- __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->fhandle = nfs_fhandle_hash(fh);
),
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld status=%d stable=%s "
- "verifier 0x%016llx",
+ "offset=%lld count=%u res=%u status=%d stable=%s "
+ "verifier=%s",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
- __entry->offset, __entry->status,
+ (long long)__entry->offset, __entry->arg_count,
+ __entry->res_count, __entry->status,
nfs_show_stable(__entry->stable),
- __entry->verifier
+ __print_hex_str(__entry->verifier, NFS4_VERIFIER_SIZE)
)
);
+DECLARE_EVENT_CLASS(nfs_page_error_class,
+ TP_PROTO(
+ const struct nfs_page *req,
+ int error
+ ),
+
+ TP_ARGS(req, error),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(pgoff_t, index)
+ __field(unsigned int, offset)
+ __field(unsigned int, pgbase)
+ __field(unsigned int, bytes)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->index = req->wb_index;
+ __entry->offset = req->wb_offset;
+ __entry->pgbase = req->wb_pgbase;
+ __entry->bytes = req->wb_bytes;
+ __entry->error = error;
+ ),
+
+ TP_printk(
+ "req=%p index=%lu offset=%u pgbase=%u bytes=%u error=%d",
+ __entry->req, __entry->index, __entry->offset,
+ __entry->pgbase, __entry->bytes, __entry->error
+ )
+);
+
+#define DEFINE_NFS_PAGEERR_EVENT(name) \
+ DEFINE_EVENT(nfs_page_error_class, name, \
+ TP_PROTO( \
+ const struct nfs_page *req, \
+ int error \
+ ), \
+ TP_ARGS(req, error))
+
+DEFINE_NFS_PAGEERR_EVENT(nfs_write_error);
+DEFINE_NFS_PAGEERR_EVENT(nfs_comp_error);
+DEFINE_NFS_PAGEERR_EVENT(nfs_commit_error);
+
TRACE_EVENT(nfs_initiate_commit,
TP_PROTO(
const struct nfs_commit_data *data
@@ -995,71 +1118,81 @@ TRACE_EVENT(nfs_initiate_commit,
TP_ARGS(data),
TP_STRUCT__entry(
- __field(loff_t, offset)
- __field(unsigned long, count)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, count)
),
TP_fast_assign(
const struct inode *inode = data->inode;
const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = data->args.fh ?
+ data->args.fh : &nfsi->fh;
__entry->offset = data->args.offset;
__entry->count = data->args.count;
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = nfsi->fileid;
- __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->fhandle = nfs_fhandle_hash(fh);
),
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%lu",
+ "offset=%lld count=%u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
- __entry->offset, __entry->count
+ (long long)__entry->offset, __entry->count
)
);
TRACE_EVENT(nfs_commit_done,
TP_PROTO(
+ const struct rpc_task *task,
const struct nfs_commit_data *data
),
- TP_ARGS(data),
+ TP_ARGS(task, data),
TP_STRUCT__entry(
- __field(int, status)
- __field(loff_t, offset)
- __field(unsigned long long, verifier)
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
+ __field(loff_t, offset)
+ __field(int, status)
+ __field(enum nfs3_stable_how, stable)
+ __array(char, verifier, NFS4_VERIFIER_SIZE)
),
TP_fast_assign(
const struct inode *inode = data->inode;
const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = data->args.fh ?
+ data->args.fh : &nfsi->fh;
+ const struct nfs_writeverf *verf = data->res.verf;
- __entry->status = data->res.op_status;
+ __entry->status = task->tk_status;
__entry->offset = data->args.offset;
- memcpy(&__entry->verifier, &data->verf.verifier,
- sizeof(__entry->verifier));
+ __entry->stable = verf->committed;
+ memcpy(__entry->verifier,
+ &verf->verifier,
+ NFS4_VERIFIER_SIZE);
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = nfsi->fileid;
- __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
+ __entry->fhandle = nfs_fhandle_hash(fh);
),
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld status=%d verifier 0x%016llx",
+ "offset=%lld status=%d stable=%s verifier=%s",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
- __entry->offset, __entry->status,
- __entry->verifier
+ (long long)__entry->offset, __entry->status,
+ nfs_show_stable(__entry->stable),
+ __print_hex_str(__entry->verifier, NFS4_VERIFIER_SIZE)
)
);
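
Storing the write verifier as a raw NFS4_VERIFIER_SIZE byte array printed via __print_hex_str() replaces the old 0x%016llx output, whose byte order depended on how the eight bytes happened to land in a host u64. A user-space sketch of the equivalent formatting:

#include <stdio.h>

#define NFS4_VERIFIER_SIZE	8

static void print_verifier(const unsigned char v[NFS4_VERIFIER_SIZE])
{
	for (int i = 0; i < NFS4_VERIFIER_SIZE; i++)
		printf("%02x", v[i]);	/* wire byte order, no endian games */
	putchar('\n');
}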
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index cec3070ab577..542ea8dfd1bc 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1425,7 +1425,7 @@ retry:
/* lo ref dropped in pnfs_roc_release() */
layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
/* If the creds don't match, we can't compound the layoutreturn */
- if (!layoutreturn || cred != lo->plh_lc_cred)
+ if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0)
goto out_noroc;
roc = layoutreturn;
@@ -1998,8 +1998,6 @@ lookup_again:
trace_pnfs_update_layout(ino, pos, count,
iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_INVALID_OPEN);
- if (status != -EAGAIN)
- goto out_unlock;
spin_unlock(&ino->i_lock);
nfs4_schedule_stateid_recovery(server, ctx->state);
pnfs_clear_first_layoutget(lo);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index f8a38065c7e4..0fafdadc9c8d 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -79,6 +79,10 @@ enum pnfs_try_status {
PNFS_TRY_AGAIN = 2,
};
+/* error codes for internal use */
+#define NFS4ERR_RESET_TO_MDS 12001
+#define NFS4ERR_RESET_TO_PNFS 12002
+
#ifdef CONFIG_NFS_V4_1
#define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4"
@@ -91,10 +95,6 @@ enum pnfs_try_status {
#define NFS4_DEF_DS_RETRANS 5
#define PNFS_DEVICE_RETRY_TIMEOUT (120*HZ)
-/* error codes for internal use */
-#define NFS4ERR_RESET_TO_MDS 12001
-#define NFS4ERR_RESET_TO_PNFS 12002
-
enum {
NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */
NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 82af4809b869..8b37e7f8e789 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -31,12 +31,11 @@ EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
/* Fake up some data that will cause nfs_commit_release to retry the writes. */
void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
{
- struct nfs_page *first = nfs_list_entry(data->pages.next);
+ struct nfs_writeverf *verf = data->res.verf;
data->task.tk_status = 0;
- memcpy(&data->verf.verifier, &first->wb_verf,
- sizeof(data->verf.verifier));
- data->verf.verifier.data[0]++; /* ensure verifier mismatch */
+ memset(&verf->verifier, 0, sizeof(verf->verifier));
+ verf->committed = NFS_UNSTABLE;
}
EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 0f7288b94633..15c865cc837f 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -108,10 +108,15 @@ nfs_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_resp = fattr,
};
int status;
+ unsigned short task_flags = 0;
+
+ /* Is this an attribute revalidation, subject to softreval? */
+ if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
+ task_flags |= RPC_TASK_TIMEOUT;
dprintk("NFS call getattr\n");
nfs_fattr_init(fattr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = rpc_call_sync(server->client, &msg, task_flags);
dprintk("NFS reply getattr: %d\n", status);
return status;
}
@@ -147,14 +152,14 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs_proc_lookup(struct inode *dir, const struct qstr *name,
+nfs_proc_lookup(struct inode *dir, struct dentry *dentry,
struct nfs_fh *fhandle, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
struct nfs_diropargs arg = {
.fh = NFS_FH(dir),
- .name = name->name,
- .len = name->len
+ .name = dentry->d_name.name,
+ .len = dentry->d_name.len
};
struct nfs_diropok res = {
.fh = fhandle,
@@ -166,10 +171,15 @@ nfs_proc_lookup(struct inode *dir, const struct qstr *name,
.rpc_resp = &res,
};
int status;
+ unsigned short task_flags = 0;
+
+ /* Is this an attribute revalidation, subject to softreval? */
+ if (nfs_lookup_is_soft_revalidate(dentry))
+ task_flags |= RPC_TASK_TIMEOUT;
- dprintk("NFS call lookup %s\n", name->name);
+ dprintk("NFS call lookup %pd2\n", dentry);
nfs_fattr_init(fattr);
- status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, task_flags);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
@@ -710,7 +720,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.file_ops = &nfs_file_operations,
.getroot = nfs_proc_get_root,
.submount = nfs_submount,
- .try_mount = nfs_try_mount,
+ .try_get_tree = nfs_try_get_tree,
.getattr = nfs_proc_getattr,
.setattr = nfs_proc_setattr,
.lookup = nfs_proc_lookup,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index cfe0b586eadd..34bb9add2302 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -214,7 +214,7 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr,
task_setup_data->flags |= swap_flags;
rpc_ops->read_setup(hdr, msg);
- trace_nfs_initiate_read(inode, hdr->io_start, hdr->good_bytes);
+ trace_nfs_initiate_read(hdr);
}
static void
@@ -247,8 +247,7 @@ static int nfs_readpage_done(struct rpc_task *task,
return status;
nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
- trace_nfs_readpage_done(inode, task->tk_status,
- hdr->args.offset, hdr->res.eof);
+ trace_nfs_readpage_done(task, hdr);
if (task->tk_status == -ESTALE) {
set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
@@ -282,6 +281,8 @@ static void nfs_readpage_retry(struct rpc_task *task,
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
+ resp->count = 0;
+ resp->eof = 0;
rpc_restart_call_prepare(task);
}
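
The read.c hunk above tightens short-read retries: the argument side advances past the bytes already received, and res.count/res.eof are now cleared so stale values from the first reply cannot leak into the restarted call. A stand-alone model of the bookkeeping (struct names are illustrative):

struct pgio_args_model {
	unsigned long long	offset;
	unsigned int		pgbase;
	unsigned int		count;
};

struct pgio_res_model {
	unsigned int		count;
	int			eof;
};

static void read_retry_model(struct pgio_args_model *argp,
			     struct pgio_res_model *resp)
{
	argp->offset += resp->count;	/* continue after received bytes */
	argp->pgbase += resp->count;
	argp->count  -= resp->count;
	resp->count = 0;		/* don't let stale reply state */
	resp->eof   = 0;		/* influence the restarted call */
}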
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 8d8d04bb9d64..dada09b391c6 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -69,250 +69,6 @@
#include "nfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
-#define NFS_TEXT_DATA 1
-
-#if IS_ENABLED(CONFIG_NFS_V3)
-#define NFS_DEFAULT_VERSION 3
-#else
-#define NFS_DEFAULT_VERSION 2
-#endif
-
-#define NFS_MAX_CONNECTIONS 16
-
-enum {
- /* Mount options that take no arguments */
- Opt_soft, Opt_softerr, Opt_hard,
- Opt_posix, Opt_noposix,
- Opt_cto, Opt_nocto,
- Opt_ac, Opt_noac,
- Opt_lock, Opt_nolock,
- Opt_udp, Opt_tcp, Opt_rdma,
- Opt_acl, Opt_noacl,
- Opt_rdirplus, Opt_nordirplus,
- Opt_sharecache, Opt_nosharecache,
- Opt_resvport, Opt_noresvport,
- Opt_fscache, Opt_nofscache,
- Opt_migration, Opt_nomigration,
-
- /* Mount options that take integer arguments */
- Opt_port,
- Opt_rsize, Opt_wsize, Opt_bsize,
- Opt_timeo, Opt_retrans,
- Opt_acregmin, Opt_acregmax,
- Opt_acdirmin, Opt_acdirmax,
- Opt_actimeo,
- Opt_namelen,
- Opt_mountport,
- Opt_mountvers,
- Opt_minorversion,
-
- /* Mount options that take string arguments */
- Opt_nfsvers,
- Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost,
- Opt_addr, Opt_mountaddr, Opt_clientaddr,
- Opt_nconnect,
- Opt_lookupcache,
- Opt_fscache_uniq,
- Opt_local_lock,
-
- /* Special mount options */
- Opt_userspace, Opt_deprecated, Opt_sloppy,
-
- Opt_err
-};
-
-static const match_table_t nfs_mount_option_tokens = {
- { Opt_userspace, "bg" },
- { Opt_userspace, "fg" },
- { Opt_userspace, "retry=%s" },
-
- { Opt_sloppy, "sloppy" },
-
- { Opt_soft, "soft" },
- { Opt_softerr, "softerr" },
- { Opt_hard, "hard" },
- { Opt_deprecated, "intr" },
- { Opt_deprecated, "nointr" },
- { Opt_posix, "posix" },
- { Opt_noposix, "noposix" },
- { Opt_cto, "cto" },
- { Opt_nocto, "nocto" },
- { Opt_ac, "ac" },
- { Opt_noac, "noac" },
- { Opt_lock, "lock" },
- { Opt_nolock, "nolock" },
- { Opt_udp, "udp" },
- { Opt_tcp, "tcp" },
- { Opt_rdma, "rdma" },
- { Opt_acl, "acl" },
- { Opt_noacl, "noacl" },
- { Opt_rdirplus, "rdirplus" },
- { Opt_nordirplus, "nordirplus" },
- { Opt_sharecache, "sharecache" },
- { Opt_nosharecache, "nosharecache" },
- { Opt_resvport, "resvport" },
- { Opt_noresvport, "noresvport" },
- { Opt_fscache, "fsc" },
- { Opt_nofscache, "nofsc" },
- { Opt_migration, "migration" },
- { Opt_nomigration, "nomigration" },
-
- { Opt_port, "port=%s" },
- { Opt_rsize, "rsize=%s" },
- { Opt_wsize, "wsize=%s" },
- { Opt_bsize, "bsize=%s" },
- { Opt_timeo, "timeo=%s" },
- { Opt_retrans, "retrans=%s" },
- { Opt_acregmin, "acregmin=%s" },
- { Opt_acregmax, "acregmax=%s" },
- { Opt_acdirmin, "acdirmin=%s" },
- { Opt_acdirmax, "acdirmax=%s" },
- { Opt_actimeo, "actimeo=%s" },
- { Opt_namelen, "namlen=%s" },
- { Opt_mountport, "mountport=%s" },
- { Opt_mountvers, "mountvers=%s" },
- { Opt_minorversion, "minorversion=%s" },
-
- { Opt_nfsvers, "nfsvers=%s" },
- { Opt_nfsvers, "vers=%s" },
-
- { Opt_sec, "sec=%s" },
- { Opt_proto, "proto=%s" },
- { Opt_mountproto, "mountproto=%s" },
- { Opt_addr, "addr=%s" },
- { Opt_clientaddr, "clientaddr=%s" },
- { Opt_mounthost, "mounthost=%s" },
- { Opt_mountaddr, "mountaddr=%s" },
-
- { Opt_nconnect, "nconnect=%s" },
-
- { Opt_lookupcache, "lookupcache=%s" },
- { Opt_fscache_uniq, "fsc=%s" },
- { Opt_local_lock, "local_lock=%s" },
-
- /* The following needs to be listed after all other options */
- { Opt_nfsvers, "v%s" },
-
- { Opt_err, NULL }
-};
-
-enum {
- Opt_xprt_udp, Opt_xprt_udp6, Opt_xprt_tcp, Opt_xprt_tcp6, Opt_xprt_rdma,
- Opt_xprt_rdma6,
-
- Opt_xprt_err
-};
-
-static const match_table_t nfs_xprt_protocol_tokens = {
- { Opt_xprt_udp, "udp" },
- { Opt_xprt_udp6, "udp6" },
- { Opt_xprt_tcp, "tcp" },
- { Opt_xprt_tcp6, "tcp6" },
- { Opt_xprt_rdma, "rdma" },
- { Opt_xprt_rdma6, "rdma6" },
-
- { Opt_xprt_err, NULL }
-};
-
-enum {
- Opt_sec_none, Opt_sec_sys,
- Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
- Opt_sec_lkey, Opt_sec_lkeyi, Opt_sec_lkeyp,
- Opt_sec_spkm, Opt_sec_spkmi, Opt_sec_spkmp,
-
- Opt_sec_err
-};
-
-static const match_table_t nfs_secflavor_tokens = {
- { Opt_sec_none, "none" },
- { Opt_sec_none, "null" },
- { Opt_sec_sys, "sys" },
-
- { Opt_sec_krb5, "krb5" },
- { Opt_sec_krb5i, "krb5i" },
- { Opt_sec_krb5p, "krb5p" },
-
- { Opt_sec_lkey, "lkey" },
- { Opt_sec_lkeyi, "lkeyi" },
- { Opt_sec_lkeyp, "lkeyp" },
-
- { Opt_sec_spkm, "spkm3" },
- { Opt_sec_spkmi, "spkm3i" },
- { Opt_sec_spkmp, "spkm3p" },
-
- { Opt_sec_err, NULL }
-};
-
-enum {
- Opt_lookupcache_all, Opt_lookupcache_positive,
- Opt_lookupcache_none,
-
- Opt_lookupcache_err
-};
-
-static match_table_t nfs_lookupcache_tokens = {
- { Opt_lookupcache_all, "all" },
- { Opt_lookupcache_positive, "pos" },
- { Opt_lookupcache_positive, "positive" },
- { Opt_lookupcache_none, "none" },
-
- { Opt_lookupcache_err, NULL }
-};
-
-enum {
- Opt_local_lock_all, Opt_local_lock_flock, Opt_local_lock_posix,
- Opt_local_lock_none,
-
- Opt_local_lock_err
-};
-
-static match_table_t nfs_local_lock_tokens = {
- { Opt_local_lock_all, "all" },
- { Opt_local_lock_flock, "flock" },
- { Opt_local_lock_posix, "posix" },
- { Opt_local_lock_none, "none" },
-
- { Opt_local_lock_err, NULL }
-};
-
-enum {
- Opt_vers_2, Opt_vers_3, Opt_vers_4, Opt_vers_4_0,
- Opt_vers_4_1, Opt_vers_4_2,
-
- Opt_vers_err
-};
-
-static match_table_t nfs_vers_tokens = {
- { Opt_vers_2, "2" },
- { Opt_vers_3, "3" },
- { Opt_vers_4, "4" },
- { Opt_vers_4_0, "4.0" },
- { Opt_vers_4_1, "4.1" },
- { Opt_vers_4_2, "4.2" },
-
- { Opt_vers_err, NULL }
-};
-
-static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data);
-
-struct file_system_type nfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs",
- .mount = nfs_fs_mount,
- .kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
-MODULE_ALIAS_FS("nfs");
-EXPORT_SYMBOL_GPL(nfs_fs_type);
-
-struct file_system_type nfs_xdev_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs",
- .mount = nfs_xdev_mount,
- .kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
const struct super_operations nfs_sops = {
.alloc_inode = nfs_alloc_inode,
@@ -326,26 +82,10 @@ const struct super_operations nfs_sops = {
.show_devname = nfs_show_devname,
.show_path = nfs_show_path,
.show_stats = nfs_show_stats,
- .remount_fs = nfs_remount,
};
EXPORT_SYMBOL_GPL(nfs_sops);
#if IS_ENABLED(CONFIG_NFS_V4)
-static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
-static int nfs4_validate_mount_data(void *options,
- struct nfs_parsed_mount_data *args, const char *dev_name);
-
-struct file_system_type nfs4_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs4",
- .mount = nfs_fs_mount,
- .kill_sb = nfs_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
-};
-MODULE_ALIAS_FS("nfs4");
-MODULE_ALIAS("nfs4");
-EXPORT_SYMBOL_GPL(nfs4_fs_type);
-
static int __init register_nfs4_fs(void)
{
return register_filesystem(&nfs4_fs_type);
@@ -635,6 +375,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
} nfs_info[] = {
{ NFS_MOUNT_SOFT, ",soft", "" },
{ NFS_MOUNT_SOFTERR, ",softerr", "" },
+ { NFS_MOUNT_SOFTREVAL, ",softreval", "" },
{ NFS_MOUNT_POSIX, ",posix", "" },
{ NFS_MOUNT_NOCTO, ",nocto", "" },
{ NFS_MOUNT_NOAC, ",noac", "" },
@@ -931,141 +672,6 @@ void nfs_umount_begin(struct super_block *sb)
}
EXPORT_SYMBOL_GPL(nfs_umount_begin);
-static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
-{
- struct nfs_parsed_mount_data *data;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (data) {
- data->timeo = NFS_UNSPEC_TIMEO;
- data->retrans = NFS_UNSPEC_RETRANS;
- data->acregmin = NFS_DEF_ACREGMIN;
- data->acregmax = NFS_DEF_ACREGMAX;
- data->acdirmin = NFS_DEF_ACDIRMIN;
- data->acdirmax = NFS_DEF_ACDIRMAX;
- data->mount_server.port = NFS_UNSPEC_PORT;
- data->nfs_server.port = NFS_UNSPEC_PORT;
- data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
- data->selected_flavor = RPC_AUTH_MAXFLAVOR;
- data->minorversion = 0;
- data->need_mount = true;
- data->net = current->nsproxy->net_ns;
- data->lsm_opts = NULL;
- }
- return data;
-}
-
-static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
-{
- if (data) {
- kfree(data->client_address);
- kfree(data->mount_server.hostname);
- kfree(data->nfs_server.export_path);
- kfree(data->nfs_server.hostname);
- kfree(data->fscache_uniq);
- security_free_mnt_opts(&data->lsm_opts);
- kfree(data);
- }
-}
-
-/*
- * Sanity-check a server address provided by the mount command.
- *
- * Address family must be initialized, and address must not be
- * the ANY address for that family.
- */
-static int nfs_verify_server_address(struct sockaddr *addr)
-{
- switch (addr->sa_family) {
- case AF_INET: {
- struct sockaddr_in *sa = (struct sockaddr_in *)addr;
- return sa->sin_addr.s_addr != htonl(INADDR_ANY);
- }
- case AF_INET6: {
- struct in6_addr *sa = &((struct sockaddr_in6 *)addr)->sin6_addr;
- return !ipv6_addr_any(sa);
- }
- }
-
- dfprintk(MOUNT, "NFS: Invalid IP address specified\n");
- return 0;
-}
-
-/*
- * Select between a default port value and a user-specified port value.
- * If a zero value is set, then autobind will be used.
- */
-static void nfs_set_port(struct sockaddr *sap, int *port,
- const unsigned short default_port)
-{
- if (*port == NFS_UNSPEC_PORT)
- *port = default_port;
-
- rpc_set_port(sap, *port);
-}
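/*
 * Editor's illustration, not part of this patch: with no "port="
 * option the parsed value stays NFS_UNSPEC_PORT, so the transport
 * gets the caller's default; an explicit "port=0" survives and asks
 * the RPC layer to autobind.  The demo_* name is invented.
 */
static void demo_set_ports(struct nfs_parsed_mount_data *mnt)
{
        nfs_set_port((struct sockaddr *)&mnt->nfs_server.address,
                     &mnt->nfs_server.port, NFS_PORT);
        nfs_set_port((struct sockaddr *)&mnt->mount_server.address,
                     &mnt->mount_server.port, 0);       /* mountd autobinds */
}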
-
-/*
- * Sanity check the NFS transport protocol.
- */
-static void nfs_validate_transport_protocol(struct nfs_parsed_mount_data *mnt)
-{
- switch (mnt->nfs_server.protocol) {
- case XPRT_TRANSPORT_UDP:
- case XPRT_TRANSPORT_TCP:
- case XPRT_TRANSPORT_RDMA:
- break;
- default:
- mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
- }
-}
-
-/*
- * For text based NFSv2/v3 mounts, the mount protocol transport default
- * settings should depend upon the specified NFS transport.
- */
-static void nfs_set_mount_transport_protocol(struct nfs_parsed_mount_data *mnt)
-{
- nfs_validate_transport_protocol(mnt);
-
- if (mnt->mount_server.protocol == XPRT_TRANSPORT_UDP ||
- mnt->mount_server.protocol == XPRT_TRANSPORT_TCP)
- return;
- switch (mnt->nfs_server.protocol) {
- case XPRT_TRANSPORT_UDP:
- mnt->mount_server.protocol = XPRT_TRANSPORT_UDP;
- break;
- case XPRT_TRANSPORT_TCP:
- case XPRT_TRANSPORT_RDMA:
- mnt->mount_server.protocol = XPRT_TRANSPORT_TCP;
- }
-}
-
-/*
- * Add 'flavor' to 'auth_info' if not already present.
- * Returns true if 'flavor' ends up in the list, false otherwise
- */
-static bool nfs_auth_info_add(struct nfs_auth_info *auth_info,
- rpc_authflavor_t flavor)
-{
- unsigned int i;
- unsigned int max_flavor_len = ARRAY_SIZE(auth_info->flavors);
-
- /* make sure this flavor isn't already in the list */
- for (i = 0; i < auth_info->flavor_len; i++) {
- if (flavor == auth_info->flavors[i])
- return true;
- }
-
- if (auth_info->flavor_len + 1 >= max_flavor_len) {
- dfprintk(MOUNT, "NFS: too many sec= flavors\n");
- return false;
- }
-
- auth_info->flavors[auth_info->flavor_len++] = flavor;
- return true;
-}
-
/*
* Return true if 'match' is in auth_info or auth_info is empty.
* Return false otherwise.
@@ -1087,633 +693,13 @@ bool nfs_auth_info_match(const struct nfs_auth_info *auth_info,
EXPORT_SYMBOL_GPL(nfs_auth_info_match);
/*
- * Parse the value of the 'sec=' option.
- */
-static int nfs_parse_security_flavors(char *value,
- struct nfs_parsed_mount_data *mnt)
-{
- substring_t args[MAX_OPT_ARGS];
- rpc_authflavor_t pseudoflavor;
- char *p;
-
- dfprintk(MOUNT, "NFS: parsing sec=%s option\n", value);
-
- while ((p = strsep(&value, ":")) != NULL) {
- switch (match_token(p, nfs_secflavor_tokens, args)) {
- case Opt_sec_none:
- pseudoflavor = RPC_AUTH_NULL;
- break;
- case Opt_sec_sys:
- pseudoflavor = RPC_AUTH_UNIX;
- break;
- case Opt_sec_krb5:
- pseudoflavor = RPC_AUTH_GSS_KRB5;
- break;
- case Opt_sec_krb5i:
- pseudoflavor = RPC_AUTH_GSS_KRB5I;
- break;
- case Opt_sec_krb5p:
- pseudoflavor = RPC_AUTH_GSS_KRB5P;
- break;
- case Opt_sec_lkey:
- pseudoflavor = RPC_AUTH_GSS_LKEY;
- break;
- case Opt_sec_lkeyi:
- pseudoflavor = RPC_AUTH_GSS_LKEYI;
- break;
- case Opt_sec_lkeyp:
- pseudoflavor = RPC_AUTH_GSS_LKEYP;
- break;
- case Opt_sec_spkm:
- pseudoflavor = RPC_AUTH_GSS_SPKM;
- break;
- case Opt_sec_spkmi:
- pseudoflavor = RPC_AUTH_GSS_SPKMI;
- break;
- case Opt_sec_spkmp:
- pseudoflavor = RPC_AUTH_GSS_SPKMP;
- break;
- default:
- dfprintk(MOUNT,
- "NFS: sec= option '%s' not recognized\n", p);
- return 0;
- }
-
- if (!nfs_auth_info_add(&mnt->auth_info, pseudoflavor))
- return 0;
- }
-
- return 1;
-}
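/*
 * Editor's example, not part of this patch: "sec=krb5i:sys" is split
 * on ':' and queues RPC_AUTH_GSS_KRB5I then RPC_AUTH_UNIX in
 * mnt->auth_info, preserving the admin's preference order.  Note the
 * writable array: strsep() modifies the string it parses.
 */
static int demo_parse_sec(struct nfs_parsed_mount_data *mnt)
{
        char value[] = "krb5i:sys";

        return nfs_parse_security_flavors(value, mnt);  /* 1 on success */
}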
-
-static int nfs_parse_version_string(char *string,
- struct nfs_parsed_mount_data *mnt,
- substring_t *args)
-{
- mnt->flags &= ~NFS_MOUNT_VER3;
- switch (match_token(string, nfs_vers_tokens, args)) {
- case Opt_vers_2:
- mnt->version = 2;
- break;
- case Opt_vers_3:
- mnt->flags |= NFS_MOUNT_VER3;
- mnt->version = 3;
- break;
- case Opt_vers_4:
- /* Backward compatibility option. In future,
- * the mount program should always supply
- * an NFSv4 minor version number.
- */
- mnt->version = 4;
- break;
- case Opt_vers_4_0:
- mnt->version = 4;
- mnt->minorversion = 0;
- break;
- case Opt_vers_4_1:
- mnt->version = 4;
- mnt->minorversion = 1;
- break;
- case Opt_vers_4_2:
- mnt->version = 4;
- mnt->minorversion = 2;
- break;
- default:
- return 0;
- }
- return 1;
-}
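/*
 * Editor's example, not part of this patch: "vers=4.1" ends up here
 * with the string "4.1", matches Opt_vers_4_1 and yields version 4,
 * minorversion 1, with NFS_MOUNT_VER3 cleared.
 */
static int demo_parse_vers(struct nfs_parsed_mount_data *mnt)
{
        substring_t args[MAX_OPT_ARGS];
        char string[] = "4.1";

        return nfs_parse_version_string(string, mnt, args);
}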
-
-static int nfs_get_option_str(substring_t args[], char **option)
-{
- kfree(*option);
- *option = match_strdup(args);
- return !*option;
-}
-
-static int nfs_get_option_ul(substring_t args[], unsigned long *option)
-{
- int rc;
- char *string;
-
- string = match_strdup(args);
- if (string == NULL)
- return -ENOMEM;
- rc = kstrtoul(string, 10, option);
- kfree(string);
-
- return rc;
-}
-
-static int nfs_get_option_ul_bound(substring_t args[], unsigned long *option,
- unsigned long l_bound, unsigned long u_bound)
-{
- int ret;
-
- ret = nfs_get_option_ul(args, option);
- if (ret != 0)
- return ret;
- if (*option < l_bound || *option > u_bound)
- return -ERANGE;
- return 0;
-}
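/*
 * Editor's example, not part of this patch: the bounded helper folds
 * an unparsable string (a kstrtoul error) and an out-of-range value
 * (-ERANGE) into a single non-zero result, so "timeo=abc" and
 * "timeo=0" are rejected identically by the caller below.
 */
static int demo_get_timeo(substring_t args[], unsigned long *timeo)
{
        return nfs_get_option_ul_bound(args, timeo, 1, INT_MAX);
}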
-
-/*
- * Error-check and convert a string of mount options from user space into
- * a data structure. The whole mount string is processed; bad options are
- * skipped as they are encountered. If there were no errors, return 1;
- * otherwise return 0 (zero).
- */
-static int nfs_parse_mount_options(char *raw,
- struct nfs_parsed_mount_data *mnt)
-{
- char *p, *string;
- int rc, sloppy = 0, invalid_option = 0;
- unsigned short protofamily = AF_UNSPEC;
- unsigned short mountfamily = AF_UNSPEC;
-
- if (!raw) {
- dfprintk(MOUNT, "NFS: mount options string was NULL.\n");
- return 1;
- }
- dfprintk(MOUNT, "NFS: nfs mount opts='%s'\n", raw);
-
- rc = security_sb_eat_lsm_opts(raw, &mnt->lsm_opts);
- if (rc)
- goto out_security_failure;
-
- while ((p = strsep(&raw, ",")) != NULL) {
- substring_t args[MAX_OPT_ARGS];
- unsigned long option;
- int token;
-
- if (!*p)
- continue;
-
- dfprintk(MOUNT, "NFS: parsing nfs mount option '%s'\n", p);
-
- token = match_token(p, nfs_mount_option_tokens, args);
- switch (token) {
-
- /*
- * boolean options: foo/nofoo
- */
- case Opt_soft:
- mnt->flags |= NFS_MOUNT_SOFT;
- mnt->flags &= ~NFS_MOUNT_SOFTERR;
- break;
- case Opt_softerr:
- mnt->flags |= NFS_MOUNT_SOFTERR;
- mnt->flags &= ~NFS_MOUNT_SOFT;
- break;
- case Opt_hard:
- mnt->flags &= ~(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR);
- break;
- case Opt_posix:
- mnt->flags |= NFS_MOUNT_POSIX;
- break;
- case Opt_noposix:
- mnt->flags &= ~NFS_MOUNT_POSIX;
- break;
- case Opt_cto:
- mnt->flags &= ~NFS_MOUNT_NOCTO;
- break;
- case Opt_nocto:
- mnt->flags |= NFS_MOUNT_NOCTO;
- break;
- case Opt_ac:
- mnt->flags &= ~NFS_MOUNT_NOAC;
- break;
- case Opt_noac:
- mnt->flags |= NFS_MOUNT_NOAC;
- break;
- case Opt_lock:
- mnt->flags &= ~NFS_MOUNT_NONLM;
- mnt->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
- NFS_MOUNT_LOCAL_FCNTL);
- break;
- case Opt_nolock:
- mnt->flags |= NFS_MOUNT_NONLM;
- mnt->flags |= (NFS_MOUNT_LOCAL_FLOCK |
- NFS_MOUNT_LOCAL_FCNTL);
- break;
- case Opt_udp:
- mnt->flags &= ~NFS_MOUNT_TCP;
- mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
- break;
- case Opt_tcp:
- mnt->flags |= NFS_MOUNT_TCP;
- mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
- break;
- case Opt_rdma:
- mnt->flags |= NFS_MOUNT_TCP; /* for side protocols */
- mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
- xprt_load_transport(p);
- break;
- case Opt_acl:
- mnt->flags &= ~NFS_MOUNT_NOACL;
- break;
- case Opt_noacl:
- mnt->flags |= NFS_MOUNT_NOACL;
- break;
- case Opt_rdirplus:
- mnt->flags &= ~NFS_MOUNT_NORDIRPLUS;
- break;
- case Opt_nordirplus:
- mnt->flags |= NFS_MOUNT_NORDIRPLUS;
- break;
- case Opt_sharecache:
- mnt->flags &= ~NFS_MOUNT_UNSHARED;
- break;
- case Opt_nosharecache:
- mnt->flags |= NFS_MOUNT_UNSHARED;
- break;
- case Opt_resvport:
- mnt->flags &= ~NFS_MOUNT_NORESVPORT;
- break;
- case Opt_noresvport:
- mnt->flags |= NFS_MOUNT_NORESVPORT;
- break;
- case Opt_fscache:
- mnt->options |= NFS_OPTION_FSCACHE;
- kfree(mnt->fscache_uniq);
- mnt->fscache_uniq = NULL;
- break;
- case Opt_nofscache:
- mnt->options &= ~NFS_OPTION_FSCACHE;
- kfree(mnt->fscache_uniq);
- mnt->fscache_uniq = NULL;
- break;
- case Opt_migration:
- mnt->options |= NFS_OPTION_MIGRATION;
- break;
- case Opt_nomigration:
- mnt->options &= ~NFS_OPTION_MIGRATION;
- break;
-
- /*
- * options that take numeric values
- */
- case Opt_port:
- if (nfs_get_option_ul(args, &option) ||
- option > USHRT_MAX)
- goto out_invalid_value;
- mnt->nfs_server.port = option;
- break;
- case Opt_rsize:
- if (nfs_get_option_ul(args, &option))
- goto out_invalid_value;
- mnt->rsize = option;
- break;
- case Opt_wsize:
- if (nfs_get_option_ul(args, &option))
- goto out_invalid_value;
- mnt->wsize = option;
- break;
- case Opt_bsize:
- if (nfs_get_option_ul(args, &option))
- goto out_invalid_value;
- mnt->bsize = option;
- break;
- case Opt_timeo:
- if (nfs_get_option_ul_bound(args, &option, 1, INT_MAX))
- goto out_invalid_value;
- mnt->timeo = option;
- break;
- case Opt_retrans:
- if (nfs_get_option_ul_bound(args, &option, 0, INT_MAX))
- goto out_invalid_value;
- mnt->retrans = option;
- break;
- case Opt_acregmin:
- if (nfs_get_option_ul(args, &option))
- goto out_invalid_value;
- mnt->acregmin = option;
- break;
- case Opt_acregmax:
- if (nfs_get_option_ul(args, &option))
- goto out_invalid_value;
- mnt->acregmax = option;
- break;
- case Opt_acdirmin:
- if (nfs_get_option_ul(args, &option))
- goto out_invalid_value;
- mnt->acdirmin = option;
- break;
- case Opt_acdirmax:
- if (nfs_get_option_ul(args, &option))
- goto out_invalid_value;
- mnt->acdirmax = option;
- break;
- case Opt_actimeo:
- if (nfs_get_option_ul(args, &option))
- goto out_invalid_value;
- mnt->acregmin = mnt->acregmax =
- mnt->acdirmin = mnt->acdirmax = option;
- break;
- case Opt_namelen:
- if (nfs_get_option_ul(args, &option))
- goto out_invalid_value;
- mnt->namlen = option;
- break;
- case Opt_mountport:
- if (nfs_get_option_ul(args, &option) ||
- option > USHRT_MAX)
- goto out_invalid_value;
- mnt->mount_server.port = option;
- break;
- case Opt_mountvers:
- if (nfs_get_option_ul(args, &option) ||
- option < NFS_MNT_VERSION ||
- option > NFS_MNT3_VERSION)
- goto out_invalid_value;
- mnt->mount_server.version = option;
- break;
- case Opt_minorversion:
- if (nfs_get_option_ul(args, &option))
- goto out_invalid_value;
- if (option > NFS4_MAX_MINOR_VERSION)
- goto out_invalid_value;
- mnt->minorversion = option;
- break;
-
- /*
- * options that take text values
- */
- case Opt_nfsvers:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = nfs_parse_version_string(string, mnt, args);
- kfree(string);
- if (!rc)
- goto out_invalid_value;
- break;
- case Opt_sec:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = nfs_parse_security_flavors(string, mnt);
- kfree(string);
- if (!rc) {
- dfprintk(MOUNT, "NFS: unrecognized "
- "security flavor\n");
- return 0;
- }
- break;
- case Opt_proto:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- token = match_token(string,
- nfs_xprt_protocol_tokens, args);
-
- protofamily = AF_INET;
- switch (token) {
- case Opt_xprt_udp6:
- protofamily = AF_INET6;
- /* fall through */
- case Opt_xprt_udp:
- mnt->flags &= ~NFS_MOUNT_TCP;
- mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
- break;
- case Opt_xprt_tcp6:
- protofamily = AF_INET6;
- /* fall through */
- case Opt_xprt_tcp:
- mnt->flags |= NFS_MOUNT_TCP;
- mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
- break;
- case Opt_xprt_rdma6:
- protofamily = AF_INET6;
- /* fall through */
- case Opt_xprt_rdma:
- /* vector side protocols to TCP */
- mnt->flags |= NFS_MOUNT_TCP;
- mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
- xprt_load_transport(string);
- break;
- default:
- dfprintk(MOUNT, "NFS: unrecognized "
- "transport protocol\n");
- kfree(string);
- return 0;
- }
- kfree(string);
- break;
- case Opt_mountproto:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- token = match_token(string,
- nfs_xprt_protocol_tokens, args);
- kfree(string);
-
- mountfamily = AF_INET;
- switch (token) {
- case Opt_xprt_udp6:
- mountfamily = AF_INET6;
- /* fall through */
- case Opt_xprt_udp:
- mnt->mount_server.protocol = XPRT_TRANSPORT_UDP;
- break;
- case Opt_xprt_tcp6:
- mountfamily = AF_INET6;
- /* fall through */
- case Opt_xprt_tcp:
- mnt->mount_server.protocol = XPRT_TRANSPORT_TCP;
- break;
- case Opt_xprt_rdma: /* not used for side protocols */
- default:
- dfprintk(MOUNT, "NFS: unrecognized "
- "transport protocol\n");
- return 0;
- }
- break;
- case Opt_addr:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- mnt->nfs_server.addrlen =
- rpc_pton(mnt->net, string, strlen(string),
- (struct sockaddr *)
- &mnt->nfs_server.address,
- sizeof(mnt->nfs_server.address));
- kfree(string);
- if (mnt->nfs_server.addrlen == 0)
- goto out_invalid_address;
- break;
- case Opt_clientaddr:
- if (nfs_get_option_str(args, &mnt->client_address))
- goto out_nomem;
- break;
- case Opt_mounthost:
- if (nfs_get_option_str(args,
- &mnt->mount_server.hostname))
- goto out_nomem;
- break;
- case Opt_mountaddr:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- mnt->mount_server.addrlen =
- rpc_pton(mnt->net, string, strlen(string),
- (struct sockaddr *)
- &mnt->mount_server.address,
- sizeof(mnt->mount_server.address));
- kfree(string);
- if (mnt->mount_server.addrlen == 0)
- goto out_invalid_address;
- break;
- case Opt_nconnect:
- if (nfs_get_option_ul_bound(args, &option, 1, NFS_MAX_CONNECTIONS))
- goto out_invalid_value;
- mnt->nfs_server.nconnect = option;
- break;
- case Opt_lookupcache:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- token = match_token(string,
- nfs_lookupcache_tokens, args);
- kfree(string);
- switch (token) {
- case Opt_lookupcache_all:
- mnt->flags &= ~(NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE);
- break;
- case Opt_lookupcache_positive:
- mnt->flags &= ~NFS_MOUNT_LOOKUP_CACHE_NONE;
- mnt->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG;
- break;
- case Opt_lookupcache_none:
- mnt->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE;
- break;
- default:
- dfprintk(MOUNT, "NFS: invalid "
- "lookupcache argument\n");
- return 0;
- }
- break;
- case Opt_fscache_uniq:
- if (nfs_get_option_str(args, &mnt->fscache_uniq))
- goto out_nomem;
- mnt->options |= NFS_OPTION_FSCACHE;
- break;
- case Opt_local_lock:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- token = match_token(string, nfs_local_lock_tokens,
- args);
- kfree(string);
- switch (token) {
- case Opt_local_lock_all:
- mnt->flags |= (NFS_MOUNT_LOCAL_FLOCK |
- NFS_MOUNT_LOCAL_FCNTL);
- break;
- case Opt_local_lock_flock:
- mnt->flags |= NFS_MOUNT_LOCAL_FLOCK;
- break;
- case Opt_local_lock_posix:
- mnt->flags |= NFS_MOUNT_LOCAL_FCNTL;
- break;
- case Opt_local_lock_none:
- mnt->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
- NFS_MOUNT_LOCAL_FCNTL);
- break;
- default:
- dfprintk(MOUNT, "NFS: invalid "
- "local_lock argument\n");
- return 0;
- }
- break;
-
- /*
- * Special options
- */
- case Opt_sloppy:
- sloppy = 1;
- dfprintk(MOUNT, "NFS: relaxing parsing rules\n");
- break;
- case Opt_userspace:
- case Opt_deprecated:
- dfprintk(MOUNT, "NFS: ignoring mount option "
- "'%s'\n", p);
- break;
-
- default:
- invalid_option = 1;
- dfprintk(MOUNT, "NFS: unrecognized mount option "
- "'%s'\n", p);
- }
- }
-
- if (!sloppy && invalid_option)
- return 0;
-
- if (mnt->minorversion && mnt->version != 4)
- goto out_minorversion_mismatch;
-
- if (mnt->options & NFS_OPTION_MIGRATION &&
- (mnt->version != 4 || mnt->minorversion != 0))
- goto out_migration_misuse;
-
- /*
- * verify that any proto=/mountproto= options match the address
- * families in the addr=/mountaddr= options.
- */
- if (protofamily != AF_UNSPEC &&
- protofamily != mnt->nfs_server.address.ss_family)
- goto out_proto_mismatch;
-
- if (mountfamily != AF_UNSPEC) {
- if (mnt->mount_server.addrlen) {
- if (mountfamily != mnt->mount_server.address.ss_family)
- goto out_mountproto_mismatch;
- } else {
- if (mountfamily != mnt->nfs_server.address.ss_family)
- goto out_mountproto_mismatch;
- }
- }
-
- return 1;
-
-out_mountproto_mismatch:
- printk(KERN_INFO "NFS: mount server address does not match mountproto= "
- "option\n");
- return 0;
-out_proto_mismatch:
- printk(KERN_INFO "NFS: server address does not match proto= option\n");
- return 0;
-out_invalid_address:
- printk(KERN_INFO "NFS: bad IP address specified: %s\n", p);
- return 0;
-out_invalid_value:
- printk(KERN_INFO "NFS: bad mount option value specified: %s\n", p);
- return 0;
-out_minorversion_mismatch:
- printk(KERN_INFO "NFS: mount option vers=%u does not support "
- "minorversion=%u\n", mnt->version, mnt->minorversion);
- return 0;
-out_migration_misuse:
- printk(KERN_INFO
- "NFS: 'migration' not supported for this NFS version\n");
- return 0;
-out_nomem:
- printk(KERN_INFO "NFS: not enough memory to parse option\n");
- return 0;
-out_security_failure:
- printk(KERN_INFO "NFS: security options invalid: %d\n", rc);
- return 0;
-}
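/*
 * Editor's worked example, not part of this patch: parsing
 *   "soft,vers=4.1,proto=tcp6,addr=fe80::1,sec=krb5"
 * leaves NFS_MOUNT_SOFT and NFS_MOUNT_TCP set in mnt->flags, version
 * 4 / minorversion 1, XPRT_TRANSPORT_TCP as the transport, and
 * RPC_AUTH_GSS_KRB5 queued in mnt->auth_info; the final family check
 * passes because proto=tcp6 and the addr= value are both AF_INET6.
 */
static int demo_parse_options(struct nfs_parsed_mount_data *mnt)
{
        /* mnt from nfs_alloc_parsed_mount_data(), so mnt->net is set */
        char raw[] = "soft,vers=4.1,proto=tcp6,addr=fe80::1,sec=krb5";

        return nfs_parse_mount_options(raw, mnt);       /* 1 on success */
}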
-
-/*
- * Ensure that a specified authtype in args->auth_info is supported by
- * the server. Returns 0 and sets args->selected_flavor if it's ok, and
+ * Ensure that a specified authtype in ctx->auth_info is supported by
+ * the server. Returns 0 and sets ctx->selected_flavor if it's ok, and
* -EACCES if not.
*/
-static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
- rpc_authflavor_t *server_authlist, unsigned int count)
+static int nfs_verify_authflavors(struct nfs_fs_context *ctx,
+ rpc_authflavor_t *server_authlist,
+ unsigned int count)
{
rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
bool found_auth_null = false;
@@ -1734,7 +720,7 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
for (i = 0; i < count; i++) {
flavor = server_authlist[i];
- if (nfs_auth_info_match(&args->auth_info, flavor))
+ if (nfs_auth_info_match(&ctx->auth_info, flavor))
goto out;
if (flavor == RPC_AUTH_NULL)
@@ -1742,7 +728,7 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
}
if (found_auth_null) {
- flavor = args->auth_info.flavors[0];
+ flavor = ctx->auth_info.flavors[0];
goto out;
}
@@ -1751,8 +737,8 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
return -EACCES;
out:
- args->selected_flavor = flavor;
- dfprintk(MOUNT, "NFS: using auth flavor %u\n", args->selected_flavor);
+ ctx->selected_flavor = flavor;
+ dfprintk(MOUNT, "NFS: using auth flavor %u\n", ctx->selected_flavor);
return 0;
}
@@ -1760,50 +746,51 @@ out:
* Use the remote server's MOUNT service to request the NFS file handle
* corresponding to the provided path.
*/
-static int nfs_request_mount(struct nfs_parsed_mount_data *args,
+static int nfs_request_mount(struct fs_context *fc,
struct nfs_fh *root_fh,
rpc_authflavor_t *server_authlist,
unsigned int *server_authlist_len)
{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct nfs_mount_request request = {
.sap = (struct sockaddr *)
- &args->mount_server.address,
- .dirpath = args->nfs_server.export_path,
- .protocol = args->mount_server.protocol,
+ &ctx->mount_server.address,
+ .dirpath = ctx->nfs_server.export_path,
+ .protocol = ctx->mount_server.protocol,
.fh = root_fh,
- .noresvport = args->flags & NFS_MOUNT_NORESVPORT,
+ .noresvport = ctx->flags & NFS_MOUNT_NORESVPORT,
.auth_flav_len = server_authlist_len,
.auth_flavs = server_authlist,
- .net = args->net,
+ .net = fc->net_ns,
};
int status;
- if (args->mount_server.version == 0) {
- switch (args->version) {
+ if (ctx->mount_server.version == 0) {
+ switch (ctx->version) {
default:
- args->mount_server.version = NFS_MNT3_VERSION;
+ ctx->mount_server.version = NFS_MNT3_VERSION;
break;
case 2:
- args->mount_server.version = NFS_MNT_VERSION;
+ ctx->mount_server.version = NFS_MNT_VERSION;
}
}
- request.version = args->mount_server.version;
+ request.version = ctx->mount_server.version;
- if (args->mount_server.hostname)
- request.hostname = args->mount_server.hostname;
+ if (ctx->mount_server.hostname)
+ request.hostname = ctx->mount_server.hostname;
else
- request.hostname = args->nfs_server.hostname;
+ request.hostname = ctx->nfs_server.hostname;
/*
* Construct the mount server's address.
*/
- if (args->mount_server.address.ss_family == AF_UNSPEC) {
- memcpy(request.sap, &args->nfs_server.address,
- args->nfs_server.addrlen);
- args->mount_server.addrlen = args->nfs_server.addrlen;
+ if (ctx->mount_server.address.sa_family == AF_UNSPEC) {
+ memcpy(request.sap, &ctx->nfs_server.address,
+ ctx->nfs_server.addrlen);
+ ctx->mount_server.addrlen = ctx->nfs_server.addrlen;
}
- request.salen = args->mount_server.addrlen;
- nfs_set_port(request.sap, &args->mount_server.port, 0);
+ request.salen = ctx->mount_server.addrlen;
+ nfs_set_port(request.sap, &ctx->mount_server.port, 0);
/*
* Now ask the mount server to map our export path
@@ -1819,20 +806,18 @@ static int nfs_request_mount(struct nfs_parsed_mount_data *args,
return 0;
}
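/*
 * Editor's sketch, not part of this patch: a successful
 * nfs_request_mount() leaves the export's root filehandle in *fh and
 * the flavors the server advertised (e.g. { RPC_AUTH_GSS_KRB5,
 * RPC_AUTH_UNIX, RPC_AUTH_NULL }) in flavors[], with count updated.
 */
static int demo_request_mount(struct fs_context *fc, struct nfs_fh *fh)
{
        rpc_authflavor_t flavors[NFS_MAX_SECFLAVORS];
        unsigned int count = ARRAY_SIZE(flavors);

        return nfs_request_mount(fc, fh, flavors, &count);
}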
-static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_info,
- struct nfs_subversion *nfs_mod)
+static struct nfs_server *nfs_try_mount_request(struct fs_context *fc)
{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
int status;
unsigned int i;
bool tried_auth_unix = false;
bool auth_null_in_list = false;
struct nfs_server *server = ERR_PTR(-EACCES);
- struct nfs_parsed_mount_data *args = mount_info->parsed;
rpc_authflavor_t authlist[NFS_MAX_SECFLAVORS];
unsigned int authlist_len = ARRAY_SIZE(authlist);
- status = nfs_request_mount(args, mount_info->mntfh, authlist,
- &authlist_len);
+ status = nfs_request_mount(fc, ctx->mntfh, authlist, &authlist_len);
if (status)
return ERR_PTR(status);
@@ -1840,13 +825,13 @@ static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_inf
* Was a sec= authflavor specified in the options? First, verify
* whether the server supports it, and then just try to use it if so.
*/
- if (args->auth_info.flavor_len > 0) {
- status = nfs_verify_authflavors(args, authlist, authlist_len);
+ if (ctx->auth_info.flavor_len > 0) {
+ status = nfs_verify_authflavors(ctx, authlist, authlist_len);
dfprintk(MOUNT, "NFS: using auth flavor %u\n",
- args->selected_flavor);
+ ctx->selected_flavor);
if (status)
return ERR_PTR(status);
- return nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
+ return ctx->nfs_mod->rpc_ops->create_server(fc);
}
/*
@@ -1872,8 +857,8 @@ static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_inf
/* Fallthrough */
}
dfprintk(MOUNT, "NFS: attempting to use auth flavor %u\n", flavor);
- args->selected_flavor = flavor;
- server = nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
+ ctx->selected_flavor = flavor;
+ server = ctx->nfs_mod->rpc_ops->create_server(fc);
if (!IS_ERR(server))
return server;
}
@@ -1888,348 +873,23 @@ static struct nfs_server *nfs_try_mount_request(struct nfs_mount_info *mount_inf
/* Last chance! Try AUTH_UNIX */
dfprintk(MOUNT, "NFS: attempting to use auth flavor %u\n", RPC_AUTH_UNIX);
- args->selected_flavor = RPC_AUTH_UNIX;
- return nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
+ ctx->selected_flavor = RPC_AUTH_UNIX;
+ return ctx->nfs_mod->rpc_ops->create_server(fc);
}
-struct dentry *nfs_try_mount(int flags, const char *dev_name,
- struct nfs_mount_info *mount_info,
- struct nfs_subversion *nfs_mod)
+int nfs_try_get_tree(struct fs_context *fc)
{
- struct nfs_server *server;
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
- if (mount_info->parsed->need_mount)
- server = nfs_try_mount_request(mount_info, nfs_mod);
+ if (ctx->need_mount)
+ ctx->server = nfs_try_mount_request(fc);
else
- server = nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
-
- if (IS_ERR(server))
- return ERR_CAST(server);
-
- return nfs_fs_mount_common(server, flags, dev_name, mount_info, nfs_mod);
-}
-EXPORT_SYMBOL_GPL(nfs_try_mount);
-
-/*
- * Split "dev_name" into "hostname:export_path".
- *
- * The leftmost colon demarks the split between the server's hostname
- * and the export path. If the hostname starts with a left square
- * bracket, then it may contain colons.
- *
- * Note: caller frees hostname and export path, even on error.
- */
-static int nfs_parse_devname(const char *dev_name,
- char **hostname, size_t maxnamlen,
- char **export_path, size_t maxpathlen)
-{
- size_t len;
- char *end;
-
- if (unlikely(!dev_name || !*dev_name)) {
- dfprintk(MOUNT, "NFS: device name not specified\n");
- return -EINVAL;
- }
-
- /* Is the host name protected with square brackets? */
- if (*dev_name == '[') {
- end = strchr(++dev_name, ']');
- if (end == NULL || end[1] != ':')
- goto out_bad_devname;
-
- len = end - dev_name;
- end++;
- } else {
- char *comma;
-
- end = strchr(dev_name, ':');
- if (end == NULL)
- goto out_bad_devname;
- len = end - dev_name;
-
- /* kill possible hostname list: not supported */
- comma = strchr(dev_name, ',');
- if (comma != NULL && comma < end)
- len = comma - dev_name;
- }
-
- if (len > maxnamlen)
- goto out_hostname;
-
- /* N.B. caller will free nfs_server.hostname in all cases */
- *hostname = kstrndup(dev_name, len, GFP_KERNEL);
- if (*hostname == NULL)
- goto out_nomem;
- len = strlen(++end);
- if (len > maxpathlen)
- goto out_path;
- *export_path = kstrndup(end, len, GFP_KERNEL);
- if (!*export_path)
- goto out_nomem;
-
- dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", *export_path);
- return 0;
-
-out_bad_devname:
- dfprintk(MOUNT, "NFS: device name not in host:path format\n");
- return -EINVAL;
+ ctx->server = ctx->nfs_mod->rpc_ops->create_server(fc);
-out_nomem:
- dfprintk(MOUNT, "NFS: not enough memory to parse device name\n");
- return -ENOMEM;
-
-out_hostname:
- dfprintk(MOUNT, "NFS: server hostname too long\n");
- return -ENAMETOOLONG;
-
-out_path:
- dfprintk(MOUNT, "NFS: export pathname too long\n");
- return -ENAMETOOLONG;
+ return nfs_get_tree_common(fc);
}
+EXPORT_SYMBOL_GPL(nfs_try_get_tree);
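/*
 * Editor's illustration, not part of this patch: with get_tree wired
 * up, userspace can drive an NFS mount through the new mount API
 * syscalls (Linux 5.2+) instead of a monolithic option string; error
 * messages set with nfs_errorf() are readable from the fs_context fd.
 * Error handling is omitted for brevity.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

static int demo_mount_nfs(void)
{
        int fsfd = syscall(SYS_fsopen, "nfs4", 0);

        syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
                "server:/export", 0);
        syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "vers", "4.2", 0);
        syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
        return syscall(SYS_fsmount, fsfd, 0, 0);        /* detached mount fd */
}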
-/*
- * Validate the NFS2/NFS3 mount data
- * - fills in the mount root filehandle
- *
- * For option strings, user space handles the following behaviors:
- *
- * + DNS: mapping server host name to IP address ("addr=" option)
- *
- * + failure mode: how to behave if a mount request can't be handled
- * immediately ("fg/bg" option)
- *
- * + retry: how often to retry a mount request ("retry=" option)
- *
- * + breaking back: trying proto=udp after proto=tcp, v2 after v3,
- * mountproto=tcp after mountproto=udp, and so on
- */
-static int nfs23_validate_mount_data(void *options,
- struct nfs_parsed_mount_data *args,
- struct nfs_fh *mntfh,
- const char *dev_name)
-{
- struct nfs_mount_data *data = (struct nfs_mount_data *)options;
- struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
- int extra_flags = NFS_MOUNT_LEGACY_INTERFACE;
-
- if (data == NULL)
- goto out_no_data;
-
- args->version = NFS_DEFAULT_VERSION;
- switch (data->version) {
- case 1:
- data->namlen = 0; /* fall through */
- case 2:
- data->bsize = 0; /* fall through */
- case 3:
- if (data->flags & NFS_MOUNT_VER3)
- goto out_no_v3;
- data->root.size = NFS2_FHSIZE;
- memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
- /* Turn off security negotiation */
- extra_flags |= NFS_MOUNT_SECFLAVOUR;
- /* fall through */
- case 4:
- if (data->flags & NFS_MOUNT_SECFLAVOUR)
- goto out_no_sec;
- /* fall through */
- case 5:
- memset(data->context, 0, sizeof(data->context));
- /* fall through */
- case 6:
- if (data->flags & NFS_MOUNT_VER3) {
- if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
- goto out_invalid_fh;
- mntfh->size = data->root.size;
- args->version = 3;
- } else {
- mntfh->size = NFS2_FHSIZE;
- args->version = 2;
- }
-
- memcpy(mntfh->data, data->root.data, mntfh->size);
- if (mntfh->size < sizeof(mntfh->data))
- memset(mntfh->data + mntfh->size, 0,
- sizeof(mntfh->data) - mntfh->size);
-
- /*
- * Translate to nfs_parsed_mount_data, which nfs_fill_super
- * can deal with.
- */
- args->flags = data->flags & NFS_MOUNT_FLAGMASK;
- args->flags |= extra_flags;
- args->rsize = data->rsize;
- args->wsize = data->wsize;
- args->timeo = data->timeo;
- args->retrans = data->retrans;
- args->acregmin = data->acregmin;
- args->acregmax = data->acregmax;
- args->acdirmin = data->acdirmin;
- args->acdirmax = data->acdirmax;
- args->need_mount = false;
-
- memcpy(sap, &data->addr, sizeof(data->addr));
- args->nfs_server.addrlen = sizeof(data->addr);
- args->nfs_server.port = ntohs(data->addr.sin_port);
- if (sap->sa_family != AF_INET ||
- !nfs_verify_server_address(sap))
- goto out_no_address;
-
- if (!(data->flags & NFS_MOUNT_TCP))
- args->nfs_server.protocol = XPRT_TRANSPORT_UDP;
- /* N.B. caller will free nfs_server.hostname in all cases */
- args->nfs_server.hostname = kstrdup(data->hostname, GFP_KERNEL);
- args->namlen = data->namlen;
- args->bsize = data->bsize;
-
- if (data->flags & NFS_MOUNT_SECFLAVOUR)
- args->selected_flavor = data->pseudoflavor;
- else
- args->selected_flavor = RPC_AUTH_UNIX;
- if (!args->nfs_server.hostname)
- goto out_nomem;
-
- if (!(data->flags & NFS_MOUNT_NONLM))
- args->flags &= ~(NFS_MOUNT_LOCAL_FLOCK|
- NFS_MOUNT_LOCAL_FCNTL);
- else
- args->flags |= (NFS_MOUNT_LOCAL_FLOCK|
- NFS_MOUNT_LOCAL_FCNTL);
- /*
- * The legacy version 6 binary mount data from userspace has a
- * field used only to transport selinux information into
- * the kernel. To continue to support that functionality we
- * have a touch of selinux knowledge here in the NFS code. The
- * userspace code converted context=blah to just blah so we are
- * converting back to the full string selinux understands.
- */
- if (data->context[0]){
-#ifdef CONFIG_SECURITY_SELINUX
- int rc;
- data->context[NFS_MAX_CONTEXT_LEN] = '\0';
- rc = security_add_mnt_opt("context", data->context,
- strlen(data->context), &args->lsm_opts);
- if (rc)
- return rc;
-#else
- return -EINVAL;
-#endif
- }
-
- break;
- default:
- return NFS_TEXT_DATA;
- }
-
- return 0;
-
-out_no_data:
- dfprintk(MOUNT, "NFS: mount program didn't pass any mount data\n");
- return -EINVAL;
-
-out_no_v3:
- dfprintk(MOUNT, "NFS: nfs_mount_data version %d does not support v3\n",
- data->version);
- return -EINVAL;
-
-out_no_sec:
- dfprintk(MOUNT, "NFS: nfs_mount_data version supports only AUTH_SYS\n");
- return -EINVAL;
-
-out_nomem:
- dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
- return -ENOMEM;
-
-out_no_address:
- dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
- return -EINVAL;
-
-out_invalid_fh:
- dfprintk(MOUNT, "NFS: invalid root filehandle\n");
- return -EINVAL;
-}
-
-#if IS_ENABLED(CONFIG_NFS_V4)
-static int nfs_validate_mount_data(struct file_system_type *fs_type,
- void *options,
- struct nfs_parsed_mount_data *args,
- struct nfs_fh *mntfh,
- const char *dev_name)
-{
- if (fs_type == &nfs_fs_type)
- return nfs23_validate_mount_data(options, args, mntfh, dev_name);
- return nfs4_validate_mount_data(options, args, dev_name);
-}
-#else
-static int nfs_validate_mount_data(struct file_system_type *fs_type,
- void *options,
- struct nfs_parsed_mount_data *args,
- struct nfs_fh *mntfh,
- const char *dev_name)
-{
- return nfs23_validate_mount_data(options, args, mntfh, dev_name);
-}
-#endif
-
-static int nfs_validate_text_mount_data(void *options,
- struct nfs_parsed_mount_data *args,
- const char *dev_name)
-{
- int port = 0;
- int max_namelen = PAGE_SIZE;
- int max_pathlen = NFS_MAXPATHLEN;
- struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
-
- if (nfs_parse_mount_options((char *)options, args) == 0)
- return -EINVAL;
-
- if (!nfs_verify_server_address(sap))
- goto out_no_address;
-
- if (args->version == 4) {
-#if IS_ENABLED(CONFIG_NFS_V4)
- if (args->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
- port = NFS_RDMA_PORT;
- else
- port = NFS_PORT;
- max_namelen = NFS4_MAXNAMLEN;
- max_pathlen = NFS4_MAXPATHLEN;
- nfs_validate_transport_protocol(args);
- if (args->nfs_server.protocol == XPRT_TRANSPORT_UDP)
- goto out_invalid_transport_udp;
- nfs4_validate_mount_flags(args);
-#else
- goto out_v4_not_compiled;
-#endif /* CONFIG_NFS_V4 */
- } else {
- nfs_set_mount_transport_protocol(args);
- if (args->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
- port = NFS_RDMA_PORT;
- }
-
- nfs_set_port(sap, &args->nfs_server.port, port);
-
- return nfs_parse_devname(dev_name,
- &args->nfs_server.hostname,
- max_namelen,
- &args->nfs_server.export_path,
- max_pathlen);
-
-#if !IS_ENABLED(CONFIG_NFS_V4)
-out_v4_not_compiled:
- dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
- return -EPROTONOSUPPORT;
-#else
-out_invalid_transport_udp:
- dfprintk(MOUNT, "NFSv4: Unsupported transport protocol udp\n");
- return -EINVAL;
-#endif /* !CONFIG_NFS_V4 */
-
-out_no_address:
- dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
- return -EINVAL;
-}
#define NFS_REMOUNT_CMP_FLAGMASK ~(NFS_MOUNT_INTR \
| NFS_MOUNT_SECURE \
@@ -2246,39 +906,35 @@ out_no_address:
static int
nfs_compare_remount_data(struct nfs_server *nfss,
- struct nfs_parsed_mount_data *data)
-{
- if ((data->flags ^ nfss->flags) & NFS_REMOUNT_CMP_FLAGMASK ||
- data->rsize != nfss->rsize ||
- data->wsize != nfss->wsize ||
- data->version != nfss->nfs_client->rpc_ops->version ||
- data->minorversion != nfss->nfs_client->cl_minorversion ||
- data->retrans != nfss->client->cl_timeout->to_retries ||
- !nfs_auth_info_match(&data->auth_info, nfss->client->cl_auth->au_flavor) ||
- data->acregmin != nfss->acregmin / HZ ||
- data->acregmax != nfss->acregmax / HZ ||
- data->acdirmin != nfss->acdirmin / HZ ||
- data->acdirmax != nfss->acdirmax / HZ ||
- data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
- (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
- data->nfs_server.port != nfss->port ||
- data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
- !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
+ struct nfs_fs_context *ctx)
+{
+ if ((ctx->flags ^ nfss->flags) & NFS_REMOUNT_CMP_FLAGMASK ||
+ ctx->rsize != nfss->rsize ||
+ ctx->wsize != nfss->wsize ||
+ ctx->version != nfss->nfs_client->rpc_ops->version ||
+ ctx->minorversion != nfss->nfs_client->cl_minorversion ||
+ ctx->retrans != nfss->client->cl_timeout->to_retries ||
+ !nfs_auth_info_match(&ctx->auth_info, nfss->client->cl_auth->au_flavor) ||
+ ctx->acregmin != nfss->acregmin / HZ ||
+ ctx->acregmax != nfss->acregmax / HZ ||
+ ctx->acdirmin != nfss->acdirmin / HZ ||
+ ctx->acdirmax != nfss->acdirmax / HZ ||
+ ctx->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
+ (ctx->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
+ ctx->nfs_server.port != nfss->port ||
+ ctx->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
+ !rpc_cmp_addr((struct sockaddr *)&ctx->nfs_server.address,
(struct sockaddr *)&nfss->nfs_client->cl_addr))
return -EINVAL;
return 0;
}
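/*
 * Editor's example, not part of this patch: the comparison is
 * all-or-nothing, so a remount that drifts on any compared field,
 * e.g. "mount -o remount,rsize=8192" against an rsize=32768 mount,
 * fails with -EINVAL rather than silently changing the server record.
 */
static int demo_remount_drift(struct super_block *sb, struct nfs_fs_context *ctx)
{
        ctx->rsize = NFS_SB(sb)->rsize + 1;
        return nfs_compare_remount_data(NFS_SB(sb), ctx);       /* -EINVAL */
}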
-int
-nfs_remount(struct super_block *sb, int *flags, char *raw_data)
+int nfs_reconfigure(struct fs_context *fc)
{
- int error;
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
+ struct super_block *sb = fc->root->d_sb;
struct nfs_server *nfss = sb->s_fs_info;
- struct nfs_parsed_mount_data *data;
- struct nfs_mount_data *options = (struct nfs_mount_data *)raw_data;
- struct nfs4_mount_data *options4 = (struct nfs4_mount_data *)raw_data;
- u32 nfsvers = nfss->nfs_client->rpc_ops->version;
sync_filesystem(sb);
@@ -2288,92 +944,38 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
* ones were explicitly specified. Fall back to legacy behavior and
* just return success.
*/
- if ((nfsvers == 4 && (!options4 || options4->version == 1)) ||
- (nfsvers <= 3 && (!options || (options->version >= 1 &&
- options->version <= 6))))
+ if (ctx->skip_reconfig_option_check)
return 0;
- data = nfs_alloc_parsed_mount_data();
- if (data == NULL)
- return -ENOMEM;
-
- /* fill out struct with values from existing mount */
- data->flags = nfss->flags;
- data->rsize = nfss->rsize;
- data->wsize = nfss->wsize;
- data->retrans = nfss->client->cl_timeout->to_retries;
- data->selected_flavor = nfss->client->cl_auth->au_flavor;
- data->acregmin = nfss->acregmin / HZ;
- data->acregmax = nfss->acregmax / HZ;
- data->acdirmin = nfss->acdirmin / HZ;
- data->acdirmax = nfss->acdirmax / HZ;
- data->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ;
- data->nfs_server.port = nfss->port;
- data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen;
- data->version = nfsvers;
- data->minorversion = nfss->nfs_client->cl_minorversion;
- data->net = current->nsproxy->net_ns;
- memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr,
- data->nfs_server.addrlen);
-
- /* overwrite those values with any that were specified */
- error = -EINVAL;
- if (!nfs_parse_mount_options((char *)options, data))
- goto out;
-
/*
* noac is a special case. It implies -o sync, but that's not
- * necessarily reflected in the mtab options. do_remount_sb
+ * necessarily reflected in the mtab options. reconfigure_super
* will clear SB_SYNCHRONOUS if -o sync wasn't specified in the
* remount options, so we have to explicitly reset it.
*/
- if (data->flags & NFS_MOUNT_NOAC)
- *flags |= SB_SYNCHRONOUS;
+ if (ctx->flags & NFS_MOUNT_NOAC) {
+ fc->sb_flags |= SB_SYNCHRONOUS;
+ fc->sb_flags_mask |= SB_SYNCHRONOUS;
+ }
/* compare new mount options with old ones */
- error = nfs_compare_remount_data(nfss, data);
- if (!error)
- error = security_sb_remount(sb, data->lsm_opts);
-out:
- nfs_free_parsed_mount_data(data);
- return error;
-}
-EXPORT_SYMBOL_GPL(nfs_remount);
-
-/*
- * Initialise the common bits of the superblock
- */
-static void nfs_initialise_sb(struct super_block *sb)
-{
- struct nfs_server *server = NFS_SB(sb);
-
- sb->s_magic = NFS_SUPER_MAGIC;
-
- /* We probably want something more informative here */
- snprintf(sb->s_id, sizeof(sb->s_id),
- "%u:%u", MAJOR(sb->s_dev), MINOR(sb->s_dev));
-
- if (sb->s_blocksize == 0)
- sb->s_blocksize = nfs_block_bits(server->wsize,
- &sb->s_blocksize_bits);
-
- nfs_super_set_maxbytes(sb, server->maxfilesize);
+ return nfs_compare_remount_data(nfss, ctx);
}
+EXPORT_SYMBOL_GPL(nfs_reconfigure);
/*
- * Finish setting up an NFS2/3 superblock
+ * Finish setting up an NFS superblock
*/
-void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
{
- struct nfs_parsed_mount_data *data = mount_info->parsed;
struct nfs_server *server = NFS_SB(sb);
sb->s_blocksize_bits = 0;
sb->s_blocksize = 0;
sb->s_xattr = server->nfs_client->cl_nfs_mod->xattr;
sb->s_op = server->nfs_client->cl_nfs_mod->sops;
- if (data && data->bsize)
- sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
+ if (ctx && ctx->bsize)
+ sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits);
if (server->nfs_client->rpc_ops->version != 2) {
/* The VFS shouldn't apply the umask to mode bits. We will do
@@ -2393,53 +995,27 @@ void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
sb->s_time_max = S64_MAX;
}
- nfs_initialise_sb(sb);
-}
-EXPORT_SYMBOL_GPL(nfs_fill_super);
-
-/*
- * Finish setting up a cloned NFS2/3/4 superblock
- */
-static void nfs_clone_super(struct super_block *sb,
- struct nfs_mount_info *mount_info)
-{
- const struct super_block *old_sb = mount_info->cloned->sb;
- struct nfs_server *server = NFS_SB(sb);
-
- sb->s_blocksize_bits = old_sb->s_blocksize_bits;
- sb->s_blocksize = old_sb->s_blocksize;
- sb->s_maxbytes = old_sb->s_maxbytes;
- sb->s_xattr = old_sb->s_xattr;
- sb->s_op = old_sb->s_op;
- sb->s_export_op = old_sb->s_export_op;
+ sb->s_magic = NFS_SUPER_MAGIC;
- if (server->nfs_client->rpc_ops->version != 2) {
- /* The VFS shouldn't apply the umask to mode bits. We will do
- * so ourselves when necessary.
- */
- sb->s_flags |= SB_POSIXACL;
- sb->s_time_gran = 1;
- } else
- sb->s_time_gran = 1000;
+ /* We probably want something more informative here */
+ snprintf(sb->s_id, sizeof(sb->s_id),
+ "%u:%u", MAJOR(sb->s_dev), MINOR(sb->s_dev));
- if (server->nfs_client->rpc_ops->version != 4) {
- sb->s_time_min = 0;
- sb->s_time_max = U32_MAX;
- } else {
- sb->s_time_min = S64_MIN;
- sb->s_time_max = S64_MAX;
- }
+ if (sb->s_blocksize == 0)
+ sb->s_blocksize = nfs_block_bits(server->wsize,
+ &sb->s_blocksize_bits);
- nfs_initialise_sb(sb);
+ nfs_super_set_maxbytes(sb, server->maxfilesize);
}
-static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags)
+static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b,
+ const struct fs_context *fc)
{
const struct nfs_server *a = s->s_fs_info;
const struct rpc_clnt *clnt_a = a->client;
const struct rpc_clnt *clnt_b = b->client;
- if ((s->s_flags & NFS_MS_MASK) != (flags & NFS_MS_MASK))
+ if ((s->s_flags & NFS_SB_MASK) != (fc->sb_flags & NFS_SB_MASK))
goto Ebusy;
if (a->nfs_client != b->nfs_client)
goto Ebusy;
@@ -2464,19 +1040,11 @@ Ebusy:
return 0;
}
-struct nfs_sb_mountdata {
- struct nfs_server *server;
- int mntflags;
-};
-
-static int nfs_set_super(struct super_block *s, void *data)
+static int nfs_set_super(struct super_block *s, struct fs_context *fc)
{
- struct nfs_sb_mountdata *sb_mntdata = data;
- struct nfs_server *server = sb_mntdata->server;
+ struct nfs_server *server = fc->s_fs_info;
int ret;
- s->s_flags = sb_mntdata->mntflags;
- s->s_fs_info = server;
s->s_d_op = server->nfs_client->rpc_ops->dentry_ops;
ret = set_anon_super(s, server);
if (ret == 0)
@@ -2541,11 +1109,9 @@ static int nfs_compare_userns(const struct nfs_server *old,
return 1;
}
-static int nfs_compare_super(struct super_block *sb, void *data)
+static int nfs_compare_super(struct super_block *sb, struct fs_context *fc)
{
- struct nfs_sb_mountdata *sb_mntdata = data;
- struct nfs_server *server = sb_mntdata->server, *old = NFS_SB(sb);
- int mntflags = sb_mntdata->mntflags;
+ struct nfs_server *server = fc->s_fs_info, *old = NFS_SB(sb);
if (!nfs_compare_super_address(old, server))
return 0;
@@ -2556,13 +1122,12 @@ static int nfs_compare_super(struct super_block *sb, void *data)
return 0;
if (!nfs_compare_userns(old, server))
return 0;
- return nfs_compare_mount_options(sb, server, mntflags);
+ return nfs_compare_mount_options(sb, server, fc);
}
#ifdef CONFIG_NFS_FSCACHE
static void nfs_get_cache_cookie(struct super_block *sb,
- struct nfs_parsed_mount_data *parsed,
- struct nfs_clone_mount *cloned)
+ struct nfs_fs_context *ctx)
{
struct nfs_server *nfss = NFS_SB(sb);
char *uniq = NULL;
@@ -2571,80 +1136,36 @@ static void nfs_get_cache_cookie(struct super_block *sb,
nfss->fscache_key = NULL;
nfss->fscache = NULL;
- if (parsed) {
- if (!(parsed->options & NFS_OPTION_FSCACHE))
- return;
- if (parsed->fscache_uniq) {
- uniq = parsed->fscache_uniq;
- ulen = strlen(parsed->fscache_uniq);
- }
- } else if (cloned) {
- struct nfs_server *mnt_s = NFS_SB(cloned->sb);
+ if (!ctx)
+ return;
+
+ if (ctx->clone_data.sb) {
+ struct nfs_server *mnt_s = NFS_SB(ctx->clone_data.sb);
if (!(mnt_s->options & NFS_OPTION_FSCACHE))
return;
if (mnt_s->fscache_key) {
uniq = mnt_s->fscache_key->key.uniquifier;
ulen = mnt_s->fscache_key->key.uniq_len;
}
- } else
+ } else {
+ if (!(ctx->options & NFS_OPTION_FSCACHE))
+ return;
+ if (ctx->fscache_uniq) {
+ uniq = ctx->fscache_uniq;
+ ulen = strlen(ctx->fscache_uniq);
+ }
- return;
+ }
nfs_fscache_get_super_cookie(sb, uniq, ulen);
}
#else
static void nfs_get_cache_cookie(struct super_block *sb,
- struct nfs_parsed_mount_data *parsed,
- struct nfs_clone_mount *cloned)
+ struct nfs_fs_context *ctx)
{
}
#endif
-int nfs_set_sb_security(struct super_block *s, struct dentry *mntroot,
- struct nfs_mount_info *mount_info)
-{
- int error;
- unsigned long kflags = 0, kflags_out = 0;
- if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL)
- kflags |= SECURITY_LSM_NATIVE_LABELS;
-
- error = security_sb_set_mnt_opts(s, mount_info->parsed->lsm_opts,
- kflags, &kflags_out);
- if (error)
- goto err;
-
- if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL &&
- !(kflags_out & SECURITY_LSM_NATIVE_LABELS))
- NFS_SB(s)->caps &= ~NFS_CAP_SECURITY_LABEL;
-err:
- return error;
-}
-EXPORT_SYMBOL_GPL(nfs_set_sb_security);
-
-int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
- struct nfs_mount_info *mount_info)
-{
- int error;
- unsigned long kflags = 0, kflags_out = 0;
-
- /* clone any lsm security options from the parent to the new sb */
- if (d_inode(mntroot)->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
- return -ESTALE;
-
- if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL)
- kflags |= SECURITY_LSM_NATIVE_LABELS;
-
- error = security_sb_clone_mnt_opts(mount_info->cloned->sb, s, kflags,
- &kflags_out);
- if (error)
- return error;
-
- if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL &&
- !(kflags_out & SECURITY_LSM_NATIVE_LABELS))
- NFS_SB(s)->caps &= ~NFS_CAP_SECURITY_LABEL;
- return 0;
-}
-EXPORT_SYMBOL_GPL(nfs_clone_sb_security);
-
static void nfs_set_readahead(struct backing_dev_info *bdi,
unsigned long iomax_pages)
{
@@ -2652,35 +1173,40 @@ static void nfs_set_readahead(struct backing_dev_info *bdi,
bdi->io_pages = iomax_pages;
}
-struct dentry *nfs_fs_mount_common(struct nfs_server *server,
- int flags, const char *dev_name,
- struct nfs_mount_info *mount_info,
- struct nfs_subversion *nfs_mod)
+int nfs_get_tree_common(struct fs_context *fc)
{
+ struct nfs_fs_context *ctx = nfs_fc2context(fc);
struct super_block *s;
- struct dentry *mntroot = ERR_PTR(-ENOMEM);
- int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
- struct nfs_sb_mountdata sb_mntdata = {
- .mntflags = flags,
- .server = server,
- };
+ int (*compare_super)(struct super_block *, struct fs_context *) = nfs_compare_super;
+ struct nfs_server *server = ctx->server;
+ unsigned long kflags = 0, kflags_out = 0;
int error;
+ ctx->server = NULL;
+ if (IS_ERR(server))
+ return PTR_ERR(server);
+
if (server->flags & NFS_MOUNT_UNSHARED)
compare_super = NULL;
/* -o noac implies -o sync */
if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= SB_SYNCHRONOUS;
+ fc->sb_flags |= SB_SYNCHRONOUS;
- if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
- if (mount_info->cloned->sb->s_flags & SB_SYNCHRONOUS)
- sb_mntdata.mntflags |= SB_SYNCHRONOUS;
+ if (ctx->clone_data.sb)
+ if (ctx->clone_data.sb->s_flags & SB_SYNCHRONOUS)
+ fc->sb_flags |= SB_SYNCHRONOUS;
+
+ if (server->caps & NFS_CAP_SECURITY_LABEL)
+ fc->lsm_flags |= SECURITY_LSM_NATIVE_LABELS;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
+ fc->s_fs_info = server;
+ s = sget_fc(fc, compare_super, nfs_set_super);
+ fc->s_fs_info = NULL;
if (IS_ERR(s)) {
- mntroot = ERR_CAST(s);
+ error = PTR_ERR(s);
+ nfs_errorf(fc, "NFS: Couldn't get superblock");
goto out_err_nosb;
}
@@ -2690,88 +1216,66 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
} else {
error = super_setup_bdi_name(s, "%u:%u", MAJOR(server->s_dev),
MINOR(server->s_dev));
- if (error) {
- mntroot = ERR_PTR(error);
+ if (error)
goto error_splat_super;
- }
nfs_set_readahead(s->s_bdi, server->rpages);
server->super = s;
}
if (!s->s_root) {
+ unsigned bsize = ctx->clone_data.inherited_bsize;
/* initial superblock/root creation */
- mount_info->fill_super(s, mount_info);
- nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
- if (!(server->flags & NFS_MOUNT_UNSHARED))
- s->s_iflags |= SB_I_MULTIROOT;
+ nfs_fill_super(s, ctx);
+ if (bsize) {
+ s->s_blocksize_bits = bsize;
+ s->s_blocksize = 1U << bsize;
+ }
+ nfs_get_cache_cookie(s, ctx);
}
- mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
- if (IS_ERR(mntroot))
+ error = nfs_get_root(s, fc);
+ if (error < 0) {
+ nfs_errorf(fc, "NFS: Couldn't get root dentry");
goto error_splat_super;
+ }
- error = mount_info->set_security(s, mntroot, mount_info);
+ if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL)
+ kflags |= SECURITY_LSM_NATIVE_LABELS;
+ if (ctx->clone_data.sb) {
+ if (d_inode(fc->root)->i_fop != &nfs_dir_operations) {
+ error = -ESTALE;
+ goto error_splat_root;
+ }
+ /* clone any lsm security options from the parent to the new sb */
+ error = security_sb_clone_mnt_opts(ctx->clone_data.sb, s, kflags,
+ &kflags_out);
+ } else {
+ error = security_sb_set_mnt_opts(s, fc->security,
+ kflags, &kflags_out);
+ }
if (error)
goto error_splat_root;
+ if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL &&
+ !(kflags_out & SECURITY_LSM_NATIVE_LABELS))
+ NFS_SB(s)->caps &= ~NFS_CAP_SECURITY_LABEL;
s->s_flags |= SB_ACTIVE;
+ error = 0;
out:
- return mntroot;
+ return error;
out_err_nosb:
nfs_free_server(server);
goto out;
error_splat_root:
- dput(mntroot);
- mntroot = ERR_PTR(error);
+ dput(fc->root);
+ fc->root = NULL;
error_splat_super:
deactivate_locked_super(s);
goto out;
}
-EXPORT_SYMBOL_GPL(nfs_fs_mount_common);
-
-struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
-{
- struct nfs_mount_info mount_info = {
- .fill_super = nfs_fill_super,
- .set_security = nfs_set_sb_security,
- };
- struct dentry *mntroot = ERR_PTR(-ENOMEM);
- struct nfs_subversion *nfs_mod;
- int error;
-
- mount_info.parsed = nfs_alloc_parsed_mount_data();
- mount_info.mntfh = nfs_alloc_fhandle();
- if (mount_info.parsed == NULL || mount_info.mntfh == NULL)
- goto out;
-
- /* Validate the mount data */
- error = nfs_validate_mount_data(fs_type, raw_data, mount_info.parsed, mount_info.mntfh, dev_name);
- if (error == NFS_TEXT_DATA)
- error = nfs_validate_text_mount_data(raw_data, mount_info.parsed, dev_name);
- if (error < 0) {
- mntroot = ERR_PTR(error);
- goto out;
- }
-
- nfs_mod = get_nfs_version(mount_info.parsed->version);
- if (IS_ERR(nfs_mod)) {
- mntroot = ERR_CAST(nfs_mod);
- goto out;
- }
-
- mntroot = nfs_mod->rpc_ops->try_mount(flags, dev_name, &mount_info, nfs_mod);
-
- put_nfs_version(nfs_mod);
-out:
- nfs_free_parsed_mount_data(mount_info.parsed);
- nfs_free_fhandle(mount_info.mntfh);
- return mntroot;
-}
-EXPORT_SYMBOL_GPL(nfs_fs_mount);
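/*
 * Editor's sketch, assuming the fs_context wiring added elsewhere in
 * this series (the ops-table entries here are illustrative): the
 * removed nfs_fs_mount()/nfs_fs_mount_common() flow now runs through
 * the fs_context operations instead of file_system_type->mount().
 */
static const struct fs_context_operations demo_nfs_fs_context_ops = {
        .get_tree       = nfs_try_get_tree,     /* replaces nfs_fs_mount() */
        .reconfigure    = nfs_reconfigure,      /* replaces nfs_remount() */
};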
/*
* Destroy an NFS2/3 superblock
@@ -2790,150 +1294,8 @@ void nfs_kill_super(struct super_block *s)
}
EXPORT_SYMBOL_GPL(nfs_kill_super);
-/*
- * Clone an NFS2/3/4 server record on xdev traversal (FSID-change)
- */
-static struct dentry *
-nfs_xdev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data)
-{
- struct nfs_clone_mount *data = raw_data;
- struct nfs_mount_info mount_info = {
- .fill_super = nfs_clone_super,
- .set_security = nfs_clone_sb_security,
- .cloned = data,
- };
- struct nfs_server *server;
- struct dentry *mntroot = ERR_PTR(-ENOMEM);
- struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod;
-
- dprintk("--> nfs_xdev_mount()\n");
-
- mount_info.mntfh = mount_info.cloned->fh;
-
- /* create a new volume representation */
- server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
-
- if (IS_ERR(server))
- mntroot = ERR_CAST(server);
- else
- mntroot = nfs_fs_mount_common(server, flags,
- dev_name, &mount_info, nfs_mod);
-
- dprintk("<-- nfs_xdev_mount() = %ld\n",
- IS_ERR(mntroot) ? PTR_ERR(mntroot) : 0L);
- return mntroot;
-}
-
#if IS_ENABLED(CONFIG_NFS_V4)
-static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
-{
- args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3|
- NFS_MOUNT_LOCAL_FLOCK|NFS_MOUNT_LOCAL_FCNTL);
-}
-
-/*
- * Validate NFSv4 mount options
- */
-static int nfs4_validate_mount_data(void *options,
- struct nfs_parsed_mount_data *args,
- const char *dev_name)
-{
- struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
- struct nfs4_mount_data *data = (struct nfs4_mount_data *)options;
- char *c;
-
- if (data == NULL)
- goto out_no_data;
-
- args->version = 4;
-
- switch (data->version) {
- case 1:
- if (data->host_addrlen > sizeof(args->nfs_server.address))
- goto out_no_address;
- if (data->host_addrlen == 0)
- goto out_no_address;
- args->nfs_server.addrlen = data->host_addrlen;
- if (copy_from_user(sap, data->host_addr, data->host_addrlen))
- return -EFAULT;
- if (!nfs_verify_server_address(sap))
- goto out_no_address;
- args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
-
- if (data->auth_flavourlen) {
- rpc_authflavor_t pseudoflavor;
- if (data->auth_flavourlen > 1)
- goto out_inval_auth;
- if (copy_from_user(&pseudoflavor,
- data->auth_flavours,
- sizeof(pseudoflavor)))
- return -EFAULT;
- args->selected_flavor = pseudoflavor;
- } else
- args->selected_flavor = RPC_AUTH_UNIX;
-
- c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN);
- if (IS_ERR(c))
- return PTR_ERR(c);
- args->nfs_server.hostname = c;
-
- c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN);
- if (IS_ERR(c))
- return PTR_ERR(c);
- args->nfs_server.export_path = c;
- dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", c);
-
- c = strndup_user(data->client_addr.data, 16);
- if (IS_ERR(c))
- return PTR_ERR(c);
- args->client_address = c;
-
- /*
- * Translate to nfs_parsed_mount_data, which nfs4_fill_super
- * can deal with.
- */
-
- args->flags = data->flags & NFS4_MOUNT_FLAGMASK;
- args->rsize = data->rsize;
- args->wsize = data->wsize;
- args->timeo = data->timeo;
- args->retrans = data->retrans;
- args->acregmin = data->acregmin;
- args->acregmax = data->acregmax;
- args->acdirmin = data->acdirmin;
- args->acdirmax = data->acdirmax;
- args->nfs_server.protocol = data->proto;
- nfs_validate_transport_protocol(args);
- if (args->nfs_server.protocol == XPRT_TRANSPORT_UDP)
- goto out_invalid_transport_udp;
-
- break;
- default:
- return NFS_TEXT_DATA;
- }
-
- return 0;
-
-out_no_data:
- dfprintk(MOUNT, "NFS4: mount program didn't pass any mount data\n");
- return -EINVAL;
-
-out_inval_auth:
- dfprintk(MOUNT, "NFS4: Invalid number of RPC auth flavours %d\n",
- data->auth_flavourlen);
- return -EINVAL;
-
-out_no_address:
- dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n");
- return -EINVAL;
-
-out_invalid_transport_udp:
- dfprintk(MOUNT, "NFSv4: Unsupported transport protocol udp\n");
- return -EINVAL;
-}
-
/*
* NFS v4 module parameters need to stay in the
* NFS client for backwards compatibility
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 52cab65f91cf..c478b772cc49 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -243,13 +243,24 @@ out:
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct address_space *mapping)
{
+ struct inode *inode = mapping->host;
+
nfs_zap_mapping(mapping->host, mapping);
+ /* Force file size revalidation */
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
+ NFS_INO_REVAL_PAGECACHE |
+ NFS_INO_INVALID_SIZE;
+ spin_unlock(&inode->i_lock);
}
static void nfs_mapping_set_error(struct page *page, int error)
{
+ struct address_space *mapping = page_file_mapping(page);
+
SetPageError(page);
- mapping_set_error(page_file_mapping(page), error);
+ mapping_set_error(mapping, error);
+ nfs_set_pageerror(mapping);
}
/*
@@ -592,7 +603,7 @@ release_request:
static void nfs_write_error(struct nfs_page *req, int error)
{
- nfs_set_pageerror(page_file_mapping(req->wb_page));
+ trace_nfs_write_error(req, error);
nfs_mapping_set_error(req->wb_page, error);
nfs_inode_remove_request(req);
nfs_end_page_writeback(req);
@@ -998,7 +1009,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
nfs_list_remove_request(req);
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
(hdr->good_bytes < bytes)) {
- nfs_set_pageerror(page_file_mapping(req->wb_page));
+ trace_nfs_comp_error(req, hdr->error);
nfs_mapping_set_error(req->wb_page, hdr->error);
goto remove_req;
}
@@ -1403,8 +1414,7 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
task_setup_data->priority = priority;
rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
- trace_nfs_initiate_write(hdr->inode, hdr->io_start, hdr->good_bytes,
- hdr->args.stable);
+ trace_nfs_initiate_write(hdr);
}
/* If a nfs_flush_* function fails, it should remove reqs from @head and
@@ -1568,8 +1578,7 @@ static int nfs_writeback_done(struct rpc_task *task,
return status;
nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
- trace_nfs_writeback_done(inode, task->tk_status,
- hdr->args.offset, hdr->res.verf);
+ trace_nfs_writeback_done(task, hdr);
if (hdr->res.verf->committed < hdr->args.stable &&
task->tk_status >= 0) {
@@ -1649,6 +1658,8 @@ static void nfs_writeback_result(struct rpc_task *task,
*/
argp->stable = NFS_FILE_SYNC;
}
+ resp->count = 0;
+ resp->verf->committed = 0;
rpc_restart_call_prepare(task);
}
}
@@ -1824,11 +1835,12 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
/* Call the NFS version-specific code */
NFS_PROTO(data->inode)->commit_done(task, data);
- trace_nfs_commit_done(data);
+ trace_nfs_commit_done(task, data);
}
static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
+ const struct nfs_writeverf *verf = data->res.verf;
struct nfs_page *req;
int status = data->task.tk_status;
struct nfs_commit_info cinfo;
@@ -1847,6 +1859,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
(long long)req_offset(req));
if (status < 0) {
if (req->wb_page) {
+ trace_nfs_commit_error(req, status);
nfs_mapping_set_error(req->wb_page, status);
nfs_inode_remove_request(req);
}
@@ -1856,7 +1869,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
/* Okay, COMMIT succeeded, apparently. Check the verifier
* returned by the server against all stored verfs. */
- if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
+ if (verf->committed > NFS_UNSTABLE &&
+ !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
/* We have a match */
if (req->wb_page)
nfs_inode_remove_request(req);
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index f2f81561ebb6..f368f3215f88 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -134,6 +134,16 @@ config NFSD_FLEXFILELAYOUT
If unsure, say N.
+config NFSD_V4_2_INTER_SSC
+ bool "NFSv4.2 inter server to server COPY"
+ depends on NFSD_V4 && NFS_V4_1 && NFS_V4_2
+ help
+	  This option enables support for NFSv4.2 inter-server-to-server
+	  COPY, where the destination server calls the NFSv4.2 client to
+	  read the data to be copied from the source server.
+
+ If unsure, say N.
+
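[A configuration sketch, illustrative rather than part of the patch: enabling this on the destination server means satisfying the depends line above, e.g.

        CONFIG_NFSD_V4=y
        CONFIG_NFS_V4_1=y
        CONFIG_NFS_V4_2=y
        CONFIG_NFSD_V4_2_INTER_SSC=y

since the destination side reuses the in-kernel NFS client to read from the source.]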
config NFSD_V4_SECURITY_LABEL
bool "Provide Security Label support for NFSv4 server"
depends on NFSD_V4 && SECURITY
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 32a9bf22ac08..22e77ede9f14 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -27,7 +27,6 @@
#define NFSD_FILE_HASH_SIZE (1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY (2 * HZ)
-#define NFSD_FILE_LRU_RESCAN (0)
#define NFSD_FILE_SHUTDOWN (1)
#define NFSD_FILE_LRU_THRESHOLD (4096UL)
#define NFSD_FILE_LRU_LIMIT (NFSD_FILE_LRU_THRESHOLD << 2)
@@ -44,6 +43,17 @@ struct nfsd_fcache_bucket {
static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
+struct nfsd_fcache_disposal {
+ struct list_head list;
+ struct work_struct work;
+ struct net *net;
+ spinlock_t lock;
+ struct list_head freeme;
+ struct rcu_head rcu;
+};
+
+static struct workqueue_struct *nfsd_filecache_wq __read_mostly;
+
static struct kmem_cache *nfsd_file_slab;
static struct kmem_cache *nfsd_file_mark_slab;
static struct nfsd_fcache_bucket *nfsd_file_hashtbl;
@@ -52,32 +62,21 @@ static long nfsd_file_lru_flags;
static struct fsnotify_group *nfsd_file_fsnotify_group;
static atomic_long_t nfsd_filecache_count;
static struct delayed_work nfsd_filecache_laundrette;
+static DEFINE_SPINLOCK(laundrette_lock);
+static LIST_HEAD(laundrettes);
-enum nfsd_file_laundrette_ctl {
- NFSD_FILE_LAUNDRETTE_NOFLUSH = 0,
- NFSD_FILE_LAUNDRETTE_MAY_FLUSH
-};
+static void nfsd_file_gc(void);
static void
-nfsd_file_schedule_laundrette(enum nfsd_file_laundrette_ctl ctl)
+nfsd_file_schedule_laundrette(void)
{
long count = atomic_long_read(&nfsd_filecache_count);
if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
return;
- /* Be more aggressive about scanning if over the threshold */
- if (count > NFSD_FILE_LRU_THRESHOLD)
- mod_delayed_work(system_wq, &nfsd_filecache_laundrette, 0);
- else
- schedule_delayed_work(&nfsd_filecache_laundrette, NFSD_LAUNDRETTE_DELAY);
-
- if (ctl == NFSD_FILE_LAUNDRETTE_NOFLUSH)
- return;
-
- /* ...and don't delay flushing if we're out of control */
- if (count >= NFSD_FILE_LRU_LIMIT)
- flush_delayed_work(&nfsd_filecache_laundrette);
+ queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
+ NFSD_LAUNDRETTE_DELAY);
}
static void
@@ -101,7 +100,7 @@ nfsd_file_mark_free(struct fsnotify_mark *mark)
static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
- if (!atomic_inc_not_zero(&nfm->nfm_ref))
+ if (!refcount_inc_not_zero(&nfm->nfm_ref))
return NULL;
return nfm;
}
@@ -109,8 +108,7 @@ nfsd_file_mark_get(struct nfsd_file_mark *nfm)
static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
- if (atomic_dec_and_test(&nfm->nfm_ref)) {
-
+ if (refcount_dec_and_test(&nfm->nfm_ref)) {
fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
fsnotify_put_mark(&nfm->nfm_mark);
}
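[The nfm_ref changes above (and the nf_ref ones below) are the stock atomic_t-to-refcount_t conversion, which gains saturation on overflow and warnings on underflow. A minimal sketch of the get/put pair this API expresses; the object and function names here are hypothetical, not from the patch:

        #include <linux/refcount.h>
        #include <linux/slab.h>

        struct obj {
                refcount_t ref;
        };

        /* get: returns NULL once the count has already hit zero */
        static struct obj *obj_get(struct obj *o)
        {
                return refcount_inc_not_zero(&o->ref) ? o : NULL;
        }

        /* put: whoever drops the final reference frees the object */
        static void obj_put(struct obj *o)
        {
                if (refcount_dec_and_test(&o->ref))
                        kfree(o);
        }
]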
@@ -133,9 +131,13 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf)
struct nfsd_file_mark,
nfm_mark));
mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
- fsnotify_put_mark(mark);
- if (likely(nfm))
+ if (nfm) {
+ fsnotify_put_mark(mark);
break;
+ }
+ /* Avoid soft lockup race with nfsd_file_mark_put() */
+ fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
+ fsnotify_put_mark(mark);
} else
mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
@@ -145,7 +147,7 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf)
return NULL;
fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
- atomic_set(&new->nfm_ref, 1);
+ refcount_set(&new->nfm_ref, 1);
err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);
@@ -183,7 +185,7 @@ nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
nf->nf_flags = 0;
nf->nf_inode = inode;
nf->nf_hashval = hashval;
- atomic_set(&nf->nf_ref, 1);
+ refcount_set(&nf->nf_ref, 1);
nf->nf_may = may & NFSD_FILE_MAY_MASK;
if (may & NFSD_MAY_NOT_BREAK_LEASE) {
if (may & NFSD_MAY_WRITE)
@@ -192,6 +194,7 @@ nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
}
nf->nf_mark = NULL;
+ init_rwsem(&nf->nf_rwsem);
trace_nfsd_file_alloc(nf);
}
return nf;
@@ -238,13 +241,6 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
}
-static bool
-nfsd_file_in_use(struct nfsd_file *nf)
-{
- return nfsd_file_check_writeback(nf) ||
- nfsd_file_check_write_error(nf);
-}
-
static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
@@ -256,8 +252,6 @@ nfsd_file_do_unhash(struct nfsd_file *nf)
nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
hlist_del_rcu(&nf->nf_node);
- if (!list_empty(&nf->nf_lru))
- list_lru_del(&nfsd_file_lru, &nf->nf_lru);
atomic_long_dec(&nfsd_filecache_count);
}
@@ -266,6 +260,8 @@ nfsd_file_unhash(struct nfsd_file *nf)
{
if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
nfsd_file_do_unhash(nf);
+ if (!list_empty(&nf->nf_lru))
+ list_lru_del(&nfsd_file_lru, &nf->nf_lru);
return true;
}
return false;
@@ -283,42 +279,48 @@ nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *disp
if (!nfsd_file_unhash(nf))
return false;
/* keep final reference for nfsd_file_lru_dispose */
- if (atomic_add_unless(&nf->nf_ref, -1, 1))
+ if (refcount_dec_not_one(&nf->nf_ref))
return true;
list_add(&nf->nf_lru, dispose);
return true;
}
-static int
+static void
nfsd_file_put_noref(struct nfsd_file *nf)
{
- int count;
trace_nfsd_file_put(nf);
- count = atomic_dec_return(&nf->nf_ref);
- if (!count) {
+ if (refcount_dec_and_test(&nf->nf_ref)) {
WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
nfsd_file_free(nf);
}
- return count;
}
void
nfsd_file_put(struct nfsd_file *nf)
{
- bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
- bool unused = !nfsd_file_in_use(nf);
+ bool is_hashed;
set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
- if (nfsd_file_put_noref(nf) == 1 && is_hashed && unused)
- nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_MAY_FLUSH);
+ if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
+ nfsd_file_put_noref(nf);
+ return;
+ }
+
+ filemap_flush(nf->nf_file->f_mapping);
+ is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
+ nfsd_file_put_noref(nf);
+ if (is_hashed)
+ nfsd_file_schedule_laundrette();
+ if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
+ nfsd_file_gc();
}
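[Reading the rewritten put path: files that are still shared (refcount above 2) or that never got an open struct file are released immediately; otherwise the near-final holder kicks off writeback with filemap_flush() before dropping its reference, presumably so dirty data is already on its way before the laundrette can close the file, then schedules the laundrette and, once the cache has grown past NFSD_FILE_LRU_LIMIT, garbage-collects synchronously.]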
struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
- if (likely(atomic_inc_not_zero(&nf->nf_ref)))
+ if (likely(refcount_inc_not_zero(&nf->nf_ref)))
return nf;
return NULL;
}
@@ -344,7 +346,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
while(!list_empty(dispose)) {
nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
list_del(&nf->nf_lru);
- if (!atomic_dec_and_test(&nf->nf_ref))
+ if (!refcount_dec_and_test(&nf->nf_ref))
continue;
if (nfsd_file_free(nf))
flush = true;
@@ -353,6 +355,58 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
flush_delayed_fput();
}
+static void
+nfsd_file_list_remove_disposal(struct list_head *dst,
+ struct nfsd_fcache_disposal *l)
+{
+ spin_lock(&l->lock);
+ list_splice_init(&l->freeme, dst);
+ spin_unlock(&l->lock);
+}
+
+static void
+nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
+{
+ struct nfsd_fcache_disposal *l;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(l, &laundrettes, list) {
+ if (l->net == net) {
+ spin_lock(&l->lock);
+ list_splice_tail_init(files, &l->freeme);
+ spin_unlock(&l->lock);
+ queue_work(nfsd_filecache_wq, &l->work);
+ break;
+ }
+ }
+ rcu_read_unlock();
+}
+
+static void
+nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
+ struct net *net)
+{
+ struct nfsd_file *nf, *tmp;
+
+ list_for_each_entry_safe(nf, tmp, src, nf_lru) {
+ if (nf->nf_net == net)
+ list_move_tail(&nf->nf_lru, dst);
+ }
+}
+
+static void
+nfsd_file_dispose_list_delayed(struct list_head *dispose)
+{
+ LIST_HEAD(list);
+ struct nfsd_file *nf;
+
+	while (!list_empty(dispose)) {
+ nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
+ nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
+ nfsd_file_list_add_disposal(&list, nf->nf_net);
+ }
+}
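[The helpers above are the producer half of a standard deferred-free pattern: dead entries are spliced onto a per-net list under a spinlock and a workqueue is kicked, and the worker (nfsd_file_delayed_close, further down) drains the list in process context. A generic sketch of the pattern, with all names hypothetical:

        #include <linux/list.h>
        #include <linux/spinlock.h>
        #include <linux/workqueue.h>

        struct disposal {
                spinlock_t lock;
                struct list_head freeme;
                struct work_struct work;
                struct workqueue_struct *wq;
        };

        static void disposal_add(struct disposal *d, struct list_head *items)
        {
                spin_lock(&d->lock);
                list_splice_tail_init(items, &d->freeme);
                spin_unlock(&d->lock);
                queue_work(d->wq, &d->work);    /* actual freeing runs later */
        }

        static void disposal_worker(struct work_struct *work)
        {
                struct disposal *d = container_of(work, struct disposal, work);
                LIST_HEAD(tmp);

                spin_lock(&d->lock);
                list_splice_init(&d->freeme, &tmp);
                spin_unlock(&d->lock);
                /* free everything on tmp here, without holding the lock */
        }
]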
+
/*
* Note this can deadlock with nfsd_file_cache_purge.
*/
@@ -375,7 +429,7 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
* counter. Here we check the counter and then test and clear the flag.
* That order is deliberate to ensure that we can do this locklessly.
*/
- if (atomic_read(&nf->nf_ref) > 1)
+ if (refcount_read(&nf->nf_ref) > 1)
goto out_skip;
/*
@@ -386,31 +440,51 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
goto out_skip;
if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
- goto out_rescan;
+ goto out_skip;
if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
goto out_skip;
list_lru_isolate_move(lru, &nf->nf_lru, head);
return LRU_REMOVED;
-out_rescan:
- set_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags);
out_skip:
return LRU_SKIP;
}
-static void
-nfsd_file_lru_dispose(struct list_head *head)
+static unsigned long
+nfsd_file_lru_walk_list(struct shrink_control *sc)
{
- while(!list_empty(head)) {
- struct nfsd_file *nf = list_first_entry(head,
- struct nfsd_file, nf_lru);
- list_del_init(&nf->nf_lru);
+ LIST_HEAD(head);
+ struct nfsd_file *nf;
+ unsigned long ret;
+
+ if (sc)
+ ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
+ nfsd_file_lru_cb, &head);
+ else
+ ret = list_lru_walk(&nfsd_file_lru,
+ nfsd_file_lru_cb,
+ &head, LONG_MAX);
+ list_for_each_entry(nf, &head, nf_lru) {
spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
nfsd_file_do_unhash(nf);
spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
- nfsd_file_put_noref(nf);
}
+ nfsd_file_dispose_list_delayed(&head);
+ return ret;
+}
+
+static void
+nfsd_file_gc(void)
+{
+ nfsd_file_lru_walk_list(NULL);
+}
+
+static void
+nfsd_file_gc_worker(struct work_struct *work)
+{
+ nfsd_file_gc();
+ nfsd_file_schedule_laundrette();
}
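[Net effect of the worker above: each pass runs a full LRU scan and then re-arms the laundrette, so while the cache holds entries the scan repeats roughly every NFSD_LAUNDRETTE_DELAY (2 * HZ, i.e. two seconds), and the count == 0 check in nfsd_file_schedule_laundrette() lets the cycle die out once the cache empties.]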
static unsigned long
@@ -422,12 +496,7 @@ nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
- LIST_HEAD(head);
- unsigned long ret;
-
- ret = list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, &head);
- nfsd_file_lru_dispose(&head);
- return ret;
+ return nfsd_file_lru_walk_list(sc);
}
static struct shrinker nfsd_file_shrinker = {
@@ -489,7 +558,7 @@ nfsd_file_close_inode(struct inode *inode)
__nfsd_file_close_inode(inode, hashval, &dispose);
trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
- nfsd_file_dispose_list(&dispose);
+ nfsd_file_dispose_list_delayed(&dispose);
}
/**
@@ -505,16 +574,11 @@ static void
nfsd_file_delayed_close(struct work_struct *work)
{
LIST_HEAD(head);
+ struct nfsd_fcache_disposal *l = container_of(work,
+ struct nfsd_fcache_disposal, work);
- list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, &head, LONG_MAX);
-
- if (test_and_clear_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags))
- nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_NOFLUSH);
-
- if (!list_empty(&head)) {
- nfsd_file_lru_dispose(&head);
- flush_delayed_fput();
- }
+ nfsd_file_list_remove_disposal(&head, l);
+ nfsd_file_dispose_list(&head);
}
static int
@@ -575,6 +639,10 @@ nfsd_file_cache_init(void)
if (nfsd_file_hashtbl)
return 0;
+ nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
+ if (!nfsd_filecache_wq)
+ goto out;
+
nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
if (!nfsd_file_hashtbl) {
@@ -628,7 +696,7 @@ nfsd_file_cache_init(void)
spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
}
- INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_delayed_close);
+ INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
return ret;
out_notifier:
@@ -644,6 +712,8 @@ out_err:
nfsd_file_mark_slab = NULL;
kfree(nfsd_file_hashtbl);
nfsd_file_hashtbl = NULL;
+ destroy_workqueue(nfsd_filecache_wq);
+ nfsd_filecache_wq = NULL;
goto out;
}
@@ -682,6 +752,88 @@ nfsd_file_cache_purge(struct net *net)
}
}
+static struct nfsd_fcache_disposal *
+nfsd_alloc_fcache_disposal(struct net *net)
+{
+ struct nfsd_fcache_disposal *l;
+
+ l = kmalloc(sizeof(*l), GFP_KERNEL);
+ if (!l)
+ return NULL;
+ INIT_WORK(&l->work, nfsd_file_delayed_close);
+ l->net = net;
+ spin_lock_init(&l->lock);
+ INIT_LIST_HEAD(&l->freeme);
+ return l;
+}
+
+static void
+nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
+{
+ rcu_assign_pointer(l->net, NULL);
+ cancel_work_sync(&l->work);
+ nfsd_file_dispose_list(&l->freeme);
+ kfree_rcu(l, rcu);
+}
+
+static void
+nfsd_add_fcache_disposal(struct nfsd_fcache_disposal *l)
+{
+ spin_lock(&laundrette_lock);
+ list_add_tail_rcu(&l->list, &laundrettes);
+ spin_unlock(&laundrette_lock);
+}
+
+static void
+nfsd_del_fcache_disposal(struct nfsd_fcache_disposal *l)
+{
+ spin_lock(&laundrette_lock);
+ list_del_rcu(&l->list);
+ spin_unlock(&laundrette_lock);
+}
+
+static int
+nfsd_alloc_fcache_disposal_net(struct net *net)
+{
+ struct nfsd_fcache_disposal *l;
+
+ l = nfsd_alloc_fcache_disposal(net);
+ if (!l)
+ return -ENOMEM;
+ nfsd_add_fcache_disposal(l);
+ return 0;
+}
+
+static void
+nfsd_free_fcache_disposal_net(struct net *net)
+{
+ struct nfsd_fcache_disposal *l;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(l, &laundrettes, list) {
+ if (l->net != net)
+ continue;
+ nfsd_del_fcache_disposal(l);
+ rcu_read_unlock();
+ nfsd_free_fcache_disposal(l);
+ return;
+ }
+ rcu_read_unlock();
+}
+
+int
+nfsd_file_cache_start_net(struct net *net)
+{
+ return nfsd_alloc_fcache_disposal_net(net);
+}
+
+void
+nfsd_file_cache_shutdown_net(struct net *net)
+{
+ nfsd_file_cache_purge(net);
+ nfsd_free_fcache_disposal_net(net);
+}
+
void
nfsd_file_cache_shutdown(void)
{
@@ -706,6 +858,8 @@ nfsd_file_cache_shutdown(void)
nfsd_file_mark_slab = NULL;
kfree(nfsd_file_hashtbl);
nfsd_file_hashtbl = NULL;
+ destroy_workqueue(nfsd_filecache_wq);
+ nfsd_filecache_wq = NULL;
}
static bool
@@ -789,6 +943,7 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct nfsd_file *nf, *new;
struct inode *inode;
unsigned int hashval;
+ bool retry = true;
/* FIXME: skip this if fh_dentry is already set? */
status = fh_verify(rqstp, fhp, S_IFREG,
@@ -824,6 +979,11 @@ wait_for_construction:
/* Did construction of this file fail? */
if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
+ if (!retry) {
+ status = nfserr_jukebox;
+ goto out;
+ }
+ retry = false;
nfsd_file_put_noref(nf);
goto retry;
}
@@ -858,7 +1018,7 @@ out:
open_file:
nf = new;
/* Take reference for the hashtable */
- atomic_inc(&nf->nf_ref);
+ refcount_inc(&nf->nf_ref);
__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
list_lru_add(&nfsd_file_lru, &nf->nf_lru);
@@ -867,7 +1027,8 @@ open_file:
nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
nfsd_file_hashtbl[hashval].nfb_count);
spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
- atomic_long_inc(&nfsd_filecache_count);
+ if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
+ nfsd_file_gc();
nf->nf_mark = nfsd_file_mark_find_or_create(nf);
if (nf->nf_mark)
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index 851d9abf54c2..7872df5a0fe3 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -19,7 +19,7 @@
*/
struct nfsd_file_mark {
struct fsnotify_mark nfm_mark;
- atomic_t nfm_ref;
+ refcount_t nfm_ref;
};
/*
@@ -43,14 +43,17 @@ struct nfsd_file {
unsigned long nf_flags;
struct inode *nf_inode;
unsigned int nf_hashval;
- atomic_t nf_ref;
+ refcount_t nf_ref;
unsigned char nf_may;
struct nfsd_file_mark *nf_mark;
+ struct rw_semaphore nf_rwsem;
};
int nfsd_file_cache_init(void);
void nfsd_file_cache_purge(struct net *);
void nfsd_file_cache_shutdown(void);
+int nfsd_file_cache_start_net(struct net *net);
+void nfsd_file_cache_shutdown_net(struct net *net);
void nfsd_file_put(struct nfsd_file *nf);
struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
void nfsd_file_close_inode_sync(struct inode *inode);
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 9a4ef815fb8c..2baf32311e00 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -40,7 +40,7 @@ struct nfsd_net {
struct lock_manager nfsd4_manager;
bool grace_ended;
- time_t boot_time;
+ time64_t boot_time;
/* internal mount of the "nfsd" pseudofilesystem: */
struct vfsmount *nfsd_mnt;
@@ -92,8 +92,8 @@ struct nfsd_net {
bool in_grace;
const struct nfsd4_client_tracking_ops *client_tracking_ops;
- time_t nfsd4_lease;
- time_t nfsd4_grace;
+ time64_t nfsd4_lease;
+ time64_t nfsd4_grace;
bool somebody_reclaimed;
bool track_reclaim_completes;
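[The time_t to time64_t switches in this header are y2038 hardening: 32-bit time_t overflows on 2038-01-19, while time64_t is 64-bit everywhere, which is also why the sprintf/snprintf formats later in the series move from %ld to %lld. A standalone userspace illustration, not kernel code:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                int64_t t = INT32_MAX;  /* 2038-01-19T03:14:07Z as a Unix time */

                /* truncating back to 32 bits wraps to a date in 1901 on
                 * two's-complement systems; 64 bits has no such problem */
                printf("32-bit: %d\n", (int32_t)(t + 1));
                printf("64-bit: %lld\n", (long long)(t + 1));
                return 0;
        }
]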
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index cea68d8411ac..288bc76b4574 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -203,7 +203,7 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
RETURN_STATUS(nfserr_io);
nfserr = nfsd_write(rqstp, &resp->fh, argp->offset,
rqstp->rq_vec, nvecs, &cnt,
- resp->committed);
+ resp->committed, resp->verf);
resp->count = cnt;
RETURN_STATUS(nfserr);
}
@@ -683,7 +683,8 @@ nfsd3_proc_commit(struct svc_rqst *rqstp)
RETURN_STATUS(nfserr_inval);
fh_copy(&resp->fh, &argp->fh);
- nfserr = nfsd_commit(rqstp, &resp->fh, argp->offset, argp->count);
+ nfserr = nfsd_commit(rqstp, &resp->fh, argp->offset, argp->count,
+ resp->verf);
RETURN_STATUS(nfserr);
}
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 195ab7a0fc89..aae514d40b64 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -32,14 +32,14 @@ static u32 nfs3_ftypes[] = {
* XDR functions for basic NFS types
*/
static __be32 *
-encode_time3(__be32 *p, struct timespec *time)
+encode_time3(__be32 *p, struct timespec64 *time)
{
*p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec);
return p;
}
static __be32 *
-decode_time3(__be32 *p, struct timespec *time)
+decode_time3(__be32 *p, struct timespec64 *time)
{
time->tv_sec = ntohl(*p++);
time->tv_nsec = ntohl(*p++);
@@ -167,7 +167,6 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
struct kstat *stat)
{
struct user_namespace *userns = nfsd_user_namespace(rqstp);
- struct timespec ts;
*p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
*p++ = htonl((u32) (stat->mode & S_IALLUGO));
*p++ = htonl((u32) stat->nlink);
@@ -183,12 +182,9 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
*p++ = htonl((u32) MINOR(stat->rdev));
p = encode_fsid(p, fhp);
p = xdr_encode_hyper(p, stat->ino);
- ts = timespec64_to_timespec(stat->atime);
- p = encode_time3(p, &ts);
- ts = timespec64_to_timespec(stat->mtime);
- p = encode_time3(p, &ts);
- ts = timespec64_to_timespec(stat->ctime);
- p = encode_time3(p, &ts);
+ p = encode_time3(p, &stat->atime);
+ p = encode_time3(p, &stat->mtime);
+ p = encode_time3(p, &stat->ctime);
return p;
}
@@ -277,8 +273,8 @@ void fill_pre_wcc(struct svc_fh *fhp)
stat.size = inode->i_size;
}
- fhp->fh_pre_mtime = timespec64_to_timespec(stat.mtime);
- fhp->fh_pre_ctime = timespec64_to_timespec(stat.ctime);
+ fhp->fh_pre_mtime = stat.mtime;
+ fhp->fh_pre_ctime = stat.ctime;
fhp->fh_pre_size = stat.size;
fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
fhp->fh_pre_saved = true;
@@ -330,7 +326,7 @@ nfs3svc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p)
p = decode_sattr3(p, &args->attrs, nfsd_user_namespace(rqstp));
if ((args->check_guard = ntohl(*p++)) != 0) {
- struct timespec time;
+ struct timespec64 time;
p = decode_time3(p, &time);
args->guardtime = time.tv_sec;
}
@@ -751,17 +747,13 @@ int
nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p)
{
struct nfsd3_writeres *resp = rqstp->rq_resp;
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- __be32 verf[2];
p = encode_wcc_data(rqstp, p, &resp->fh);
if (resp->status == 0) {
*p++ = htonl(resp->count);
*p++ = htonl(resp->committed);
- /* unique identifier, y2038 overflow can be ignored */
- nfsd_copy_boot_verifier(verf, nn);
- *p++ = verf[0];
- *p++ = verf[1];
+ *p++ = resp->verf[0];
+ *p++ = resp->verf[1];
}
return xdr_ressize_check(rqstp, p);
}
@@ -1125,16 +1117,12 @@ int
nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p)
{
struct nfsd3_commitres *resp = rqstp->rq_resp;
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- __be32 verf[2];
p = encode_wcc_data(rqstp, p, &resp->fh);
/* Write verifier */
if (resp->status == 0) {
- /* unique identifier, y2038 overflow can be ignored */
- nfsd_copy_boot_verifier(verf, nn);
- *p++ = verf[0];
- *p++ = verf[1];
+ *p++ = resp->verf[0];
+ *p++ = resp->verf[1];
}
return xdr_ressize_check(rqstp, p);
}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 24534db87e86..c3b11a715082 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -823,7 +823,16 @@ static const struct rpc_program cb_program = {
static int max_cb_time(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
+
+ /*
+ * nfsd4_lease is set to at most one hour in __nfsd4_write_time,
+ * so we can use 32-bit math on it. Warn if that assumption
+ * ever stops being true.
+ */
+ if (WARN_ON_ONCE(nn->nfsd4_lease > 3600))
+ return 360 * HZ;
+
+ return max(((u32)nn->nfsd4_lease)/10, 1u) * HZ;
}
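[Worked through with the default 90-second lease, the new expression gives max(90/10, 1) * HZ = 9 * HZ, i.e. a nine-second cap on callback RPC time, matching the old time_t arithmetic; the 360 * HZ fallback is what the formula would yield at the capped one-hour lease (3600/10).]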
static struct workqueue_struct *callback_wq;
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 2681c70283ce..e12409eca7cc 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -675,7 +675,7 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
/* Client gets 2 lease periods to return it */
cutoff = ktime_add_ns(task->tk_start,
- nn->nfsd4_lease * NSEC_PER_SEC * 2);
+ (u64)nn->nfsd4_lease * NSEC_PER_SEC * 2);
if (ktime_before(now, cutoff)) {
rpc_delay(task, HZ/100); /* 10 milliseconds */
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 4798667af647..0e75f7fb5fec 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -37,6 +37,7 @@
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/sunrpc/addr.h>
#include "idmap.h"
#include "cache.h"
@@ -232,7 +233,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
if (!*resfh)
return nfserr_jukebox;
fh_init(*resfh, NFS4_FHSIZE);
- open->op_truncate = 0;
+ open->op_truncate = false;
if (open->op_create) {
/* FIXME: check session persistence and pnfs flags.
@@ -365,7 +366,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
return nfserr_inval;
- open->op_created = 0;
+ open->op_created = false;
/*
* RFC5661 18.51.3
* Before RECLAIM_COMPLETE done, server should deny new lock
@@ -503,12 +504,20 @@ nfsd4_putfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_putfh *putfh = &u->putfh;
+ __be32 ret;
fh_put(&cstate->current_fh);
cstate->current_fh.fh_handle.fh_size = putfh->pf_fhlen;
memcpy(&cstate->current_fh.fh_handle.fh_base, putfh->pf_fhval,
putfh->pf_fhlen);
- return fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS);
+ ret = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_BYPASS_GSS);
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+ if (ret == nfserr_stale && putfh->no_verify) {
+ SET_FH_FLAG(&cstate->current_fh, NFSD4_FH_FOREIGN);
+ ret = 0;
+ }
+#endif
+ return ret;
}
static __be32
@@ -530,9 +539,9 @@ nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_restorefh;
fh_dup2(&cstate->current_fh, &cstate->save_fh);
- if (HAS_STATE_ID(cstate, SAVED_STATE_ID_FLAG)) {
+ if (HAS_CSTATE_FLAG(cstate, SAVED_STATE_ID_FLAG)) {
memcpy(&cstate->current_stateid, &cstate->save_stateid, sizeof(stateid_t));
- SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
+ SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}
return nfs_ok;
}
@@ -542,9 +551,9 @@ nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
fh_dup2(&cstate->save_fh, &cstate->current_fh);
- if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG)) {
+ if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG)) {
memcpy(&cstate->save_stateid, &cstate->current_stateid, sizeof(stateid_t));
- SET_STATE_ID(cstate, SAVED_STATE_ID_FLAG);
+ SET_CSTATE_FLAG(cstate, SAVED_STATE_ID_FLAG);
}
return nfs_ok;
}
@@ -581,9 +590,9 @@ nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct nfsd4_commit *commit = &u->commit;
- gen_boot_verifier(&commit->co_verf, SVC_NET(rqstp));
return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
- commit->co_count);
+ commit->co_count,
+ (__be32 *)commit->co_verf.data);
}
static __be32
@@ -776,7 +785,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* check stateid */
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&read->rd_stateid, RD_STATE,
- &read->rd_nf);
+ &read->rd_nf, NULL);
if (status) {
dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
goto out;
@@ -948,7 +957,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
status = nfs4_preprocess_stateid_op(rqstp, cstate,
&cstate->current_fh, &setattr->sa_stateid,
- WR_STATE, NULL);
+ WR_STATE, NULL, NULL);
if (status) {
dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
return status;
@@ -975,7 +984,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
status = nfsd_setattr(rqstp, &cstate->current_fh, &setattr->sa_iattr,
- 0, (time_t)0);
+ 0, (time64_t)0);
out:
fh_drop_write(&cstate->current_fh);
return status;
@@ -999,22 +1008,22 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
trace_nfsd_write_start(rqstp, &cstate->current_fh,
write->wr_offset, cnt);
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
- stateid, WR_STATE, &nf);
+ stateid, WR_STATE, &nf, NULL);
if (status) {
dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
return status;
}
write->wr_how_written = write->wr_stable_how;
- gen_boot_verifier(&write->wr_verifier, SVC_NET(rqstp));
nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist,
&write->wr_head, write->wr_buflen);
WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
- status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf->nf_file,
+ status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf,
write->wr_offset, rqstp->rq_vec, nvecs, &cnt,
- write->wr_how_written);
+ write->wr_how_written,
+ (__be32 *)write->wr_verifier.data);
nfsd_file_put(nf);
write->wr_bytes_written = cnt;
@@ -1034,14 +1043,14 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_nofilehandle;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
- src_stateid, RD_STATE, src);
+ src_stateid, RD_STATE, src, NULL);
if (status) {
dprintk("NFSD: %s: couldn't process src stateid!\n", __func__);
goto out;
}
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
- dst_stateid, WR_STATE, dst);
+ dst_stateid, WR_STATE, dst, NULL);
if (status) {
dprintk("NFSD: %s: couldn't process dst stateid!\n", __func__);
goto out_put_src;
@@ -1076,8 +1085,8 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
- status = nfsd4_clone_file_range(src->nf_file, clone->cl_src_pos,
- dst->nf_file, clone->cl_dst_pos, clone->cl_count,
+ status = nfsd4_clone_file_range(src, clone->cl_src_pos,
+ dst, clone->cl_dst_pos, clone->cl_count,
EX_ISSYNC(cstate->current_fh.fh_export));
nfsd_file_put(dst);
@@ -1135,6 +1144,207 @@ void nfsd4_shutdown_copy(struct nfs4_client *clp)
while ((copy = nfsd4_get_copy(clp)) != NULL)
nfsd4_stop_copy(copy);
}
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+
+extern struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
+ struct nfs_fh *src_fh,
+ nfs4_stateid *stateid);
+extern void nfs42_ssc_close(struct file *filep);
+
+extern void nfs_sb_deactive(struct super_block *sb);
+
+#define NFSD42_INTERSSC_MOUNTOPS "vers=4.2,addr=%s,sec=sys"
+
+/*
+ * Support one copy source server for now.
+ */
+static __be32
+nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+ struct vfsmount **mount)
+{
+ struct file_system_type *type;
+ struct vfsmount *ss_mnt;
+ struct nfs42_netaddr *naddr;
+ struct sockaddr_storage tmp_addr;
+ size_t tmp_addrlen, match_netid_len = 3;
+ char *startsep = "", *endsep = "", *match_netid = "tcp";
+ char *ipaddr, *dev_name, *raw_data;
+ int len, raw_len;
+ __be32 status = nfserr_inval;
+
+ naddr = &nss->u.nl4_addr;
+ tmp_addrlen = rpc_uaddr2sockaddr(SVC_NET(rqstp), naddr->addr,
+ naddr->addr_len,
+ (struct sockaddr *)&tmp_addr,
+ sizeof(tmp_addr));
+ if (tmp_addrlen == 0)
+ goto out_err;
+
+ if (tmp_addr.ss_family == AF_INET6) {
+ startsep = "[";
+ endsep = "]";
+ match_netid = "tcp6";
+ match_netid_len = 4;
+ }
+
+ if (naddr->netid_len != match_netid_len ||
+ strncmp(naddr->netid, match_netid, naddr->netid_len))
+ goto out_err;
+
+ /* Construct the raw data for the vfs_kern_mount call */
+ len = RPC_MAX_ADDRBUFLEN + 1;
+ ipaddr = kzalloc(len, GFP_KERNEL);
+ if (!ipaddr)
+ goto out_err;
+
+ rpc_ntop((struct sockaddr *)&tmp_addr, ipaddr, len);
+
+	/* 2 for ipv6 endsep and startsep. 3 for ":/" and trailing '\0' */
+
+ raw_len = strlen(NFSD42_INTERSSC_MOUNTOPS) + strlen(ipaddr);
+ raw_data = kzalloc(raw_len, GFP_KERNEL);
+ if (!raw_data)
+ goto out_free_ipaddr;
+
+ snprintf(raw_data, raw_len, NFSD42_INTERSSC_MOUNTOPS, ipaddr);
+
+ status = nfserr_nodev;
+ type = get_fs_type("nfs");
+ if (!type)
+ goto out_free_rawdata;
+
+ /* Set the server:<export> for the vfs_kern_mount call */
+ dev_name = kzalloc(len + 5, GFP_KERNEL);
+ if (!dev_name)
+ goto out_free_rawdata;
+ snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
+
+ /* Use an 'internal' mount: SB_KERNMOUNT -> MNT_INTERNAL */
+ ss_mnt = vfs_kern_mount(type, SB_KERNMOUNT, dev_name, raw_data);
+ module_put(type->owner);
+ if (IS_ERR(ss_mnt))
+ goto out_free_devname;
+
+ status = 0;
+ *mount = ss_mnt;
+
+out_free_devname:
+ kfree(dev_name);
+out_free_rawdata:
+ kfree(raw_data);
+out_free_ipaddr:
+ kfree(ipaddr);
+out_err:
+ return status;
+}
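[To make the string assembly above concrete, using documentation addresses rather than anything from the patch: for an IPv4 source server rpc_ntop() might yield ipaddr = "192.0.2.1", so raw_data becomes "vers=4.2,addr=192.0.2.1,sec=sys" and dev_name becomes "192.0.2.1:/"; for IPv6, startsep/endsep add the brackets, giving a dev_name like "[2001:db8::1]:/".]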
+
+static void
+nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
+{
+ nfs_sb_deactive(ss_mnt->mnt_sb);
+ mntput(ss_mnt);
+}
+
+/**
+ * nfsd4_setup_inter_ssc
+ *
+ * Verify COPY destination stateid.
+ * Connect to the source server with NFSv4.1.
+ * Create the source struct file for nfsd_copy_range.
+ * Called with COPY cstate:
+ * SAVED_FH: source filehandle
+ * CURRENT_FH: destination filehandle
+ */
+static __be32
+nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_copy *copy, struct vfsmount **mount)
+{
+ struct svc_fh *s_fh = NULL;
+ stateid_t *s_stid = &copy->cp_src_stateid;
+ __be32 status = nfserr_inval;
+
+	/* Verify the destination stateid and set dst struct file */
+ status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+ &copy->cp_dst_stateid,
+ WR_STATE, &copy->nf_dst, NULL);
+ if (status)
+ goto out;
+
+ status = nfsd4_interssc_connect(&copy->cp_src, rqstp, mount);
+ if (status)
+ goto out;
+
+ s_fh = &cstate->save_fh;
+
+ copy->c_fh.size = s_fh->fh_handle.fh_size;
+ memcpy(copy->c_fh.data, &s_fh->fh_handle.fh_base, copy->c_fh.size);
+ copy->stateid.seqid = cpu_to_be32(s_stid->si_generation);
+ memcpy(copy->stateid.other, (void *)&s_stid->si_opaque,
+ sizeof(stateid_opaque_t));
+
+ status = 0;
+out:
+ return status;
+}
+
+static void
+nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+ struct nfsd_file *dst)
+{
+ nfs42_ssc_close(src->nf_file);
+ nfsd_file_put(src);
+ nfsd_file_put(dst);
+ mntput(ss_mnt);
+}
+
+#else /* CONFIG_NFSD_V4_2_INTER_SSC */
+
+static __be32
+nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_copy *copy,
+ struct vfsmount **mount)
+{
+ *mount = NULL;
+ return nfserr_inval;
+}
+
+static void
+nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
+ struct nfsd_file *dst)
+{
+}
+
+static void
+nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
+{
+}
+
+static struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
+ struct nfs_fh *src_fh,
+ nfs4_stateid *stateid)
+{
+ return NULL;
+}
+#endif /* CONFIG_NFSD_V4_2_INTER_SSC */
+
+static __be32
+nfsd4_setup_intra_ssc(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ struct nfsd4_copy *copy)
+{
+ return nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid,
+ &copy->nf_src, &copy->cp_dst_stateid,
+ &copy->nf_dst);
+}
+
+static void
+nfsd4_cleanup_intra_ssc(struct nfsd_file *src, struct nfsd_file *dst)
+{
+ nfsd_file_put(src);
+ nfsd_file_put(dst);
+}
static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
{
@@ -1200,12 +1410,16 @@ static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
status = nfs_ok;
}
- nfsd_file_put(copy->nf_src);
- nfsd_file_put(copy->nf_dst);
+ if (!copy->cp_intra) /* Inter server SSC */
+ nfsd4_cleanup_inter_ssc(copy->ss_mnt, copy->nf_src,
+ copy->nf_dst);
+ else
+ nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
+
return status;
}
-static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
+static int dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
{
dst->cp_src_pos = src->cp_src_pos;
dst->cp_dst_pos = src->cp_dst_pos;
@@ -1215,15 +1429,25 @@ static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
memcpy(&dst->fh, &src->fh, sizeof(src->fh));
dst->cp_clp = src->cp_clp;
dst->nf_dst = nfsd_file_get(src->nf_dst);
- dst->nf_src = nfsd_file_get(src->nf_src);
+ dst->cp_intra = src->cp_intra;
+ if (src->cp_intra) /* for inter, file_src doesn't exist yet */
+ dst->nf_src = nfsd_file_get(src->nf_src);
+
memcpy(&dst->cp_stateid, &src->cp_stateid, sizeof(src->cp_stateid));
+ memcpy(&dst->cp_src, &src->cp_src, sizeof(struct nl4_server));
+ memcpy(&dst->stateid, &src->stateid, sizeof(src->stateid));
+ memcpy(&dst->c_fh, &src->c_fh, sizeof(src->c_fh));
+ dst->ss_mnt = src->ss_mnt;
+
+ return 0;
}
static void cleanup_async_copy(struct nfsd4_copy *copy)
{
- nfs4_free_cp_state(copy);
+ nfs4_free_copy_state(copy);
nfsd_file_put(copy->nf_dst);
- nfsd_file_put(copy->nf_src);
+ if (copy->cp_intra)
+ nfsd_file_put(copy->nf_src);
spin_lock(&copy->cp_clp->async_lock);
list_del(&copy->copies);
spin_unlock(&copy->cp_clp->async_lock);
@@ -1235,7 +1459,24 @@ static int nfsd4_do_async_copy(void *data)
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
struct nfsd4_copy *cb_copy;
+ if (!copy->cp_intra) { /* Inter server SSC */
+ copy->nf_src = kzalloc(sizeof(struct nfsd_file), GFP_KERNEL);
+ if (!copy->nf_src) {
+ copy->nfserr = nfserr_serverfault;
+ nfsd4_interssc_disconnect(copy->ss_mnt);
+ goto do_callback;
+ }
+ copy->nf_src->nf_file = nfs42_ssc_open(copy->ss_mnt, &copy->c_fh,
+ &copy->stateid);
+ if (IS_ERR(copy->nf_src->nf_file)) {
+ copy->nfserr = nfserr_offload_denied;
+ nfsd4_interssc_disconnect(copy->ss_mnt);
+ goto do_callback;
+ }
+ }
+
copy->nfserr = nfsd4_do_copy(copy, 0);
+do_callback:
cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
if (!cb_copy)
goto out;
@@ -1247,6 +1488,8 @@ static int nfsd4_do_async_copy(void *data)
&nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD);
nfsd4_run_cb(&cb_copy->cp_cb);
out:
+ if (!copy->cp_intra)
+ kfree(copy->nf_src);
cleanup_async_copy(copy);
return 0;
}
@@ -1259,11 +1502,20 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd4_copy *async_copy = NULL;
- status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid,
- &copy->nf_src, &copy->cp_dst_stateid,
- &copy->nf_dst);
- if (status)
- goto out;
+ if (!copy->cp_intra) { /* Inter server SSC */
+ if (!inter_copy_offload_enable || copy->cp_synchronous) {
+ status = nfserr_notsupp;
+ goto out;
+ }
+ status = nfsd4_setup_inter_ssc(rqstp, cstate, copy,
+ &copy->ss_mnt);
+ if (status)
+ return nfserr_offload_denied;
+ } else {
+ status = nfsd4_setup_intra_ssc(rqstp, cstate, copy);
+ if (status)
+ return status;
+ }
copy->cp_clp = cstate->clp;
memcpy(&copy->fh, &cstate->current_fh.fh_handle,
@@ -1274,15 +1526,15 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserrno(-ENOMEM);
async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
if (!async_copy)
- goto out;
- if (!nfs4_init_cp_state(nn, copy)) {
- kfree(async_copy);
- goto out;
- }
+ goto out_err;
+ if (!nfs4_init_copy_state(nn, copy))
+ goto out_err;
refcount_set(&async_copy->refcount, 1);
memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
sizeof(copy->cp_stateid));
- dup_copy_fields(copy, async_copy);
+ status = dup_copy_fields(copy, async_copy);
+ if (status)
+ goto out_err;
async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
async_copy, "%s", "copy thread");
if (IS_ERR(async_copy->copy_task))
@@ -1293,13 +1545,17 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
spin_unlock(&async_copy->cp_clp->async_lock);
wake_up_process(async_copy->copy_task);
status = nfs_ok;
- } else
+ } else {
status = nfsd4_do_copy(copy, 1);
+ }
out:
return status;
out_err:
if (async_copy)
cleanup_async_copy(async_copy);
+ status = nfserrno(-ENOMEM);
+ if (!copy->cp_intra)
+ nfsd4_interssc_disconnect(copy->ss_mnt);
goto out;
}
@@ -1310,7 +1566,7 @@ find_async_copy(struct nfs4_client *clp, stateid_t *stateid)
spin_lock(&clp->async_lock);
list_for_each_entry(copy, &clp->async_copies, copies) {
- if (memcmp(&copy->cp_stateid, stateid, NFS4_STATEID_SIZE))
+ if (memcmp(&copy->cp_stateid.stid, stateid, NFS4_STATEID_SIZE))
continue;
refcount_inc(&copy->refcount);
spin_unlock(&clp->async_lock);
@@ -1326,16 +1582,61 @@ nfsd4_offload_cancel(struct svc_rqst *rqstp,
union nfsd4_op_u *u)
{
struct nfsd4_offload_status *os = &u->offload_status;
- __be32 status = 0;
struct nfsd4_copy *copy;
struct nfs4_client *clp = cstate->clp;
copy = find_async_copy(clp, &os->stateid);
- if (copy)
+ if (!copy) {
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+
+ return manage_cpntf_state(nn, &os->stateid, clp, NULL);
+ } else
nfsd4_stop_copy(copy);
- else
- status = nfserr_bad_stateid;
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_copy_notify *cn = &u->copy_notify;
+ __be32 status;
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfs4_stid *stid;
+ struct nfs4_cpntf_state *cps;
+ struct nfs4_client *clp = cstate->clp;
+
+ status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
+ &cn->cpn_src_stateid, RD_STATE, NULL,
+ &stid);
+ if (status)
+ return status;
+
+ cn->cpn_sec = nn->nfsd4_lease;
+ cn->cpn_nsec = 0;
+
+ status = nfserrno(-ENOMEM);
+ cps = nfs4_alloc_init_cpntf_state(nn, stid);
+ if (!cps)
+ goto out;
+ memcpy(&cn->cpn_cnr_stateid, &cps->cp_stateid.stid, sizeof(stateid_t));
+ memcpy(&cps->cp_p_stateid, &stid->sc_stateid, sizeof(stateid_t));
+ memcpy(&cps->cp_p_clid, &clp->cl_clientid, sizeof(clientid_t));
+
+ /* For now, only return one server address in cpn_src, the
+ * address used by the client to connect to this server.
+ */
+ cn->cpn_src.nl4_type = NL4_NETADDR;
+ status = nfsd4_set_netaddr((struct sockaddr *)&rqstp->rq_daddr,
+ &cn->cpn_src.u.nl4_addr);
+ WARN_ON_ONCE(status);
+ if (status) {
+ nfs4_put_cpntf_state(nn, cps);
+ goto out;
+ }
+out:
+ nfs4_put_stid(stid);
return status;
}
@@ -1348,7 +1649,7 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&fallocate->falloc_stateid,
- WR_STATE, &nf);
+ WR_STATE, &nf, NULL);
if (status != nfs_ok) {
dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
return status;
@@ -1407,7 +1708,7 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&seek->seek_stateid,
- RD_STATE, &nf);
+ RD_STATE, &nf, NULL);
if (status) {
dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
return status;
@@ -1912,6 +2213,45 @@ static void svcxdr_init_encode(struct svc_rqst *rqstp,
- rqstp->rq_auth_slack;
}
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+static void
+check_if_stalefh_allowed(struct nfsd4_compoundargs *args)
+{
+ struct nfsd4_op *op, *current_op = NULL, *saved_op = NULL;
+ struct nfsd4_copy *copy;
+ struct nfsd4_putfh *putfh;
+ int i;
+
+	/* traverse all operations and, if the compound contains a COPY,
+	 * mark the source filehandle to skip verification
+	 */
+ for (i = 0; i < args->opcnt; i++) {
+ op = &args->ops[i];
+ if (op->opnum == OP_PUTFH)
+ current_op = op;
+ else if (op->opnum == OP_SAVEFH)
+ saved_op = current_op;
+ else if (op->opnum == OP_RESTOREFH)
+ current_op = saved_op;
+ else if (op->opnum == OP_COPY) {
+ copy = (struct nfsd4_copy *)&op->u;
+ if (!saved_op) {
+ op->status = nfserr_nofilehandle;
+ return;
+ }
+ putfh = (struct nfsd4_putfh *)&saved_op->u;
+ if (!copy->cp_intra)
+ putfh->no_verify = true;
+ }
+ }
+}
+#else
+static void
+check_if_stalefh_allowed(struct nfsd4_compoundargs *args)
+{
+}
+#endif
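[For orientation, the compound this walker expects is the inter-server COPY sent to the destination server, roughly PUTFH(source fh) -> SAVEFH -> PUTFH(destination fh) -> COPY; the saved_op tracked above is therefore the PUTFH carrying the foreign source filehandle, and that is the one that gets no_verify so fh_verify() can tolerate it.]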
+
/*
* COMPOUND call.
*/
@@ -1960,6 +2300,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
resp->opcnt = 1;
goto encode_op;
}
+ check_if_stalefh_allowed(args);
trace_nfsd_compound(rqstp, args->opcnt);
while (!status && resp->opcnt < args->opcnt) {
@@ -1975,13 +2316,14 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
op->status = nfsd4_open_omfg(rqstp, cstate, op);
goto encode_op;
}
-
- if (!current_fh->fh_dentry) {
+ if (!current_fh->fh_dentry &&
+ !HAS_FH_FLAG(current_fh, NFSD4_FH_FOREIGN)) {
if (!(op->opdesc->op_flags & ALLOWED_WITHOUT_FH)) {
op->status = nfserr_nofilehandle;
goto encode_op;
}
- } else if (current_fh->fh_export->ex_fslocs.migrated &&
+ } else if (current_fh->fh_export &&
+ current_fh->fh_export->ex_fslocs.migrated &&
!(op->opdesc->op_flags & ALLOWED_ON_ABSENT_FS)) {
op->status = nfserr_moved;
goto encode_op;
@@ -2025,7 +2367,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
if (op->opdesc->op_flags & OP_CLEAR_STATEID)
clear_current_stateid(cstate);
- if (need_wrongsec_check(rqstp))
+ if (current_fh->fh_export &&
+ need_wrongsec_check(rqstp))
op->status = check_nfsd_access(current_fh->fh_export, rqstp);
}
encode_op:
@@ -2292,6 +2635,21 @@ static inline u32 nfsd4_offload_status_rsize(struct svc_rqst *rqstp,
1 /* osr_complete<1> optional 0 for now */) * sizeof(__be32);
}
+static inline u32 nfsd4_copy_notify_rsize(struct svc_rqst *rqstp,
+ struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size +
+ 3 /* cnr_lease_time */ +
+ 1 /* We support one cnr_source_server */ +
+ 1 /* cnr_stateid seq */ +
+ op_encode_stateid_maxsz /* cnr_stateid */ +
+		1 /* num cnr_source_server */ +
+		1 /* nl4_type */ +
+		1 /* nl4 size */ +
+		XDR_QUADLEN(NFS4_OPAQUE_LIMIT) /* nl4_loc + nl4_loc_sz */)
+ * sizeof(__be32);
+}
+
#ifdef CONFIG_NFSD_PNFS
static inline u32 nfsd4_getdeviceinfo_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
@@ -2716,6 +3074,12 @@ static const struct nfsd4_operation nfsd4_ops[] = {
.op_name = "OP_OFFLOAD_CANCEL",
.op_rsize_bop = nfsd4_only_status_rsize,
},
+ [OP_COPY_NOTIFY] = {
+ .op_func = nfsd4_copy_notify,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_COPY_NOTIFY",
+ .op_rsize_bop = nfsd4_copy_notify_rsize,
+ },
};
/**
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 2481e7662128..a8fb18609146 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -1445,7 +1445,7 @@ nfsd4_cld_grace_done_v0(struct nfsd_net *nn)
}
cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone;
- cup->cu_u.cu_msg.cm_u.cm_gracetime = (int64_t)nn->boot_time;
+ cup->cu_u.cu_msg.cm_u.cm_gracetime = nn->boot_time;
ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
if (!ret)
ret = cup->cu_u.cu_msg.cm_status;
@@ -1782,7 +1782,7 @@ nfsd4_cltrack_client_has_session(struct nfs4_client *clp)
}
static char *
-nfsd4_cltrack_grace_start(time_t grace_start)
+nfsd4_cltrack_grace_start(time64_t grace_start)
{
int copied;
size_t len;
@@ -1795,7 +1795,7 @@ nfsd4_cltrack_grace_start(time_t grace_start)
if (!result)
return result;
- copied = snprintf(result, len, GRACE_START_ENV_PREFIX "%ld",
+ copied = snprintf(result, len, GRACE_START_ENV_PREFIX "%lld",
grace_start);
if (copied >= len) {
/* just return nothing if output was truncated */
@@ -2004,7 +2004,7 @@ nfsd4_umh_cltrack_grace_done(struct nfsd_net *nn)
char *legacy;
char timestr[22]; /* FIXME: better way to determine max size? */
- sprintf(timestr, "%ld", nn->boot_time);
+ sprintf(timestr, "%lld", nn->boot_time);
legacy = nfsd4_cltrack_legacy_topdir();
nfsd4_umh_cltrack_upcall("gracedone", timestr, legacy, NULL);
kfree(legacy);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 369e574c5092..65cfe9ab47be 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -80,6 +80,7 @@ static u64 current_sessionid = 1;
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
+static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
/* Locking: */
@@ -170,7 +171,7 @@ renew_client_locked(struct nfs4_client *clp)
clp->cl_clientid.cl_boot,
clp->cl_clientid.cl_id);
list_move_tail(&clp->cl_lru, &nn->client_lru);
- clp->cl_time = get_seconds();
+ clp->cl_time = ktime_get_boottime_seconds();
}
static void put_client_renew_locked(struct nfs4_client *clp)
@@ -722,6 +723,7 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla
/* Will be incremented before return to client: */
refcount_set(&stid->sc_count, 1);
spin_lock_init(&stid->sc_lock);
+ INIT_LIST_HEAD(&stid->sc_cp_list);
/*
* It shouldn't be a problem to reuse an opaque stateid value.
@@ -741,30 +743,76 @@ out_free:
/*
* Create a unique stateid_t to represent each COPY.
*/
-int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
+static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
+ unsigned char sc_type)
{
int new_id;
+ stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
+ stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
+ stid->sc_type = sc_type;
+
idr_preload(GFP_KERNEL);
spin_lock(&nn->s2s_cp_lock);
- new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, copy, 0, 0, GFP_NOWAIT);
+ new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
+ stid->stid.si_opaque.so_id = new_id;
spin_unlock(&nn->s2s_cp_lock);
idr_preload_end();
if (new_id < 0)
return 0;
- copy->cp_stateid.si_opaque.so_id = new_id;
- copy->cp_stateid.si_opaque.so_clid.cl_boot = nn->boot_time;
- copy->cp_stateid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
return 1;
}
-void nfs4_free_cp_state(struct nfsd4_copy *copy)
+int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
+{
+ return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
+}
+
+struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
+ struct nfs4_stid *p_stid)
+{
+ struct nfs4_cpntf_state *cps;
+
+ cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
+ if (!cps)
+ return NULL;
+ cps->cpntf_time = ktime_get_boottime_seconds();
+ refcount_set(&cps->cp_stateid.sc_count, 1);
+ if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
+ goto out_free;
+ spin_lock(&nn->s2s_cp_lock);
+ list_add(&cps->cp_list, &p_stid->sc_cp_list);
+ spin_unlock(&nn->s2s_cp_lock);
+ return cps;
+out_free:
+ kfree(cps);
+ return NULL;
+}
+
+void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
struct nfsd_net *nn;
+ WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
nn = net_generic(copy->cp_clp->net, nfsd_net_id);
spin_lock(&nn->s2s_cp_lock);
- idr_remove(&nn->s2s_cp_stateids, copy->cp_stateid.si_opaque.so_id);
+ idr_remove(&nn->s2s_cp_stateids,
+ copy->cp_stateid.stid.si_opaque.so_id);
+ spin_unlock(&nn->s2s_cp_lock);
+}
+
+static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
+{
+ struct nfs4_cpntf_state *cps;
+ struct nfsd_net *nn;
+
+ nn = net_generic(net, nfsd_net_id);
+ spin_lock(&nn->s2s_cp_lock);
+ while (!list_empty(&stid->sc_cp_list)) {
+ cps = list_first_entry(&stid->sc_cp_list,
+ struct nfs4_cpntf_state, cp_list);
+ _free_cpntf_state_locked(nn, cps);
+ }
spin_unlock(&nn->s2s_cp_lock);
}
@@ -806,7 +854,7 @@ static void nfs4_free_deleg(struct nfs4_stid *stid)
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
int entries, old_entries;
- time_t swap_time;
+ time64_t swap_time;
int new; /* index into 'set' */
DECLARE_BITMAP(set[2], 256);
} blocked_delegations;
@@ -818,15 +866,15 @@ static int delegation_blocked(struct knfsd_fh *fh)
if (bd->entries == 0)
return 0;
- if (seconds_since_boot() - bd->swap_time > 30) {
+ if (ktime_get_seconds() - bd->swap_time > 30) {
spin_lock(&blocked_delegations_lock);
- if (seconds_since_boot() - bd->swap_time > 30) {
+ if (ktime_get_seconds() - bd->swap_time > 30) {
bd->entries -= bd->old_entries;
bd->old_entries = bd->entries;
memset(bd->set[bd->new], 0,
sizeof(bd->set[0]));
bd->new = 1-bd->new;
- bd->swap_time = seconds_since_boot();
+ bd->swap_time = ktime_get_seconds();
}
spin_unlock(&blocked_delegations_lock);
}
@@ -856,7 +904,7 @@ static void block_delegations(struct knfsd_fh *fh)
__set_bit((hash>>8)&255, bd->set[bd->new]);
__set_bit((hash>>16)&255, bd->set[bd->new]);
if (bd->entries == 0)
- bd->swap_time = seconds_since_boot();
+ bd->swap_time = ktime_get_seconds();
bd->entries += 1;
spin_unlock(&blocked_delegations_lock);
}
@@ -915,6 +963,7 @@ nfs4_put_stid(struct nfs4_stid *s)
return;
}
idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
+ nfs4_free_cpntf_statelist(clp->net, s);
spin_unlock(&clp->cl_lock);
s->sc_free(s);
if (fp)
@@ -1862,7 +1911,7 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
*/
if (clid->cl_boot == (u32)nn->boot_time)
return 0;
- dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
+ dprintk("NFSD stale clientid (%08x/%08x) boot_time %08llx\n",
clid->cl_boot, clid->cl_id, nn->boot_time);
return 1;
}
@@ -2215,14 +2264,14 @@ static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
* This is opaque to client, so no need to byte-swap. Use
* __force to keep sparse happy
*/
- verf[0] = (__force __be32)get_seconds();
+ verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
verf[1] = (__force __be32)nn->clverifier_counter++;
memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
- clp->cl_clientid.cl_boot = nn->boot_time;
+ clp->cl_clientid.cl_boot = (u32)nn->boot_time;
clp->cl_clientid.cl_id = nn->clientid_counter++;
gen_confirm(clp, nn);
}
@@ -2292,7 +2341,7 @@ static int client_info_show(struct seq_file *m, void *v)
clp->cl_nii_domain.len);
seq_printf(m, "\nImplementation name: ");
seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
- seq_printf(m, "\nImplementation time: [%ld, %ld]\n",
+ seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
}
drop_client(clp);
@@ -2612,7 +2661,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
gen_clid(clp, nn);
kref_init(&clp->cl_nfsdfs.cl_ref);
nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
- clp->cl_time = get_seconds();
+ clp->cl_time = ktime_get_boottime_seconds();
clear_bit(0, &clp->cl_cb_slot_busy);
copy_verf(clp, verf);
memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
@@ -2946,8 +2995,7 @@ static __be32 copy_impl_id(struct nfs4_client *clp,
xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
if (!clp->cl_nii_name.data)
return nfserr_jukebox;
- clp->cl_nii_time.tv_sec = exid->nii_time.tv_sec;
- clp->cl_nii_time.tv_nsec = exid->nii_time.tv_nsec;
+ clp->cl_nii_time = exid->nii_time;
return 0;
}
@@ -3373,7 +3421,7 @@ static __be32 nfsd4_map_bcts_dir(u32 *dir)
case NFS4_CDFC4_BACK_OR_BOTH:
*dir = NFS4_CDFC4_BOTH;
return nfs_ok;
- };
+ }
return nfserr_inval;
}
@@ -4283,7 +4331,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
last = oo->oo_last_closed_stid;
oo->oo_last_closed_stid = s;
list_move_tail(&oo->oo_close_lru, &nn->close_lru);
- oo->oo_time = get_seconds();
+ oo->oo_time = ktime_get_boottime_seconds();
spin_unlock(&nn->client_lock);
if (last)
nfs4_put_stid(&last->st_stid);
@@ -4378,7 +4426,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
*/
spin_lock(&state_lock);
if (dp->dl_time == 0) {
- dp->dl_time = get_seconds();
+ dp->dl_time = ktime_get_boottime_seconds();
list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
}
spin_unlock(&state_lock);
@@ -4490,7 +4538,8 @@ static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4
static __be32 lookup_clientid(clientid_t *clid,
struct nfsd4_compound_state *cstate,
- struct nfsd_net *nn)
+ struct nfsd_net *nn,
+ bool sessions)
{
struct nfs4_client *found;
@@ -4511,7 +4560,7 @@ static __be32 lookup_clientid(clientid_t *clid,
*/
WARN_ON_ONCE(cstate->session);
spin_lock(&nn->client_lock);
- found = find_confirmed_client(clid, false, nn);
+ found = find_confirmed_client(clid, sessions, nn);
if (!found) {
spin_unlock(&nn->client_lock);
return nfserr_expired;
@@ -4544,7 +4593,7 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
if (open->op_file == NULL)
return nfserr_jukebox;
- status = lookup_clientid(clientid, cstate, nn);
+ status = lookup_clientid(clientid, cstate, nn, false);
if (status)
return status;
clp = cstate->clp;
@@ -4672,7 +4721,7 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
return 0;
if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
return nfserr_inval;
- return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
+ return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
}
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
@@ -5133,7 +5182,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("process_renew(%08x/%08x): starting\n",
clid->cl_boot, clid->cl_id);
- status = lookup_clientid(clid, cstate, nn);
+ status = lookup_clientid(clid, cstate, nn, false);
if (status)
goto out;
clp = cstate->clp;
@@ -5184,9 +5233,8 @@ nfsd4_end_grace(struct nfsd_net *nn)
*/
static bool clients_still_reclaiming(struct nfsd_net *nn)
{
- unsigned long now = get_seconds();
- unsigned long double_grace_period_end = nn->boot_time +
- 2 * nn->nfsd4_lease;
+ time64_t double_grace_period_end = nn->boot_time +
+ 2 * nn->nfsd4_lease;
if (nn->track_reclaim_completes &&
atomic_read(&nn->nr_reclaim_complete) ==
@@ -5199,12 +5247,12 @@ static bool clients_still_reclaiming(struct nfsd_net *nn)
* If we've given them *two* lease times to reclaim, and they're
* still not done, give up:
*/
- if (time_after(now, double_grace_period_end))
+ if (ktime_get_boottime_seconds() > double_grace_period_end)
return false;
return true;
}
-static time_t
+static time64_t
nfs4_laundromat(struct nfsd_net *nn)
{
struct nfs4_client *clp;
@@ -5213,8 +5261,11 @@ nfs4_laundromat(struct nfsd_net *nn)
struct nfs4_ol_stateid *stp;
struct nfsd4_blocked_lock *nbl;
struct list_head *pos, *next, reaplist;
- time_t cutoff = get_seconds() - nn->nfsd4_lease;
- time_t t, new_timeo = nn->nfsd4_lease;
+ time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease;
+ time64_t t, new_timeo = nn->nfsd4_lease;
+ struct nfs4_cpntf_state *cps;
+ copy_stateid_t *cps_t;
+ int i;
dprintk("NFSD: laundromat service - starting\n");
@@ -5225,10 +5276,20 @@ nfs4_laundromat(struct nfsd_net *nn)
dprintk("NFSD: end of grace period\n");
nfsd4_end_grace(nn);
INIT_LIST_HEAD(&reaplist);
+
+ spin_lock(&nn->s2s_cp_lock);
+ idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
+ cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
+ if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
+ cps->cpntf_time > cutoff)
+ _free_cpntf_state_locked(nn, cps);
+ }
+ spin_unlock(&nn->s2s_cp_lock);
+
spin_lock(&nn->client_lock);
list_for_each_safe(pos, next, &nn->client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
- if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
+ if (clp->cl_time > cutoff) {
t = clp->cl_time - cutoff;
new_timeo = min(new_timeo, t);
break;
@@ -5251,7 +5312,7 @@ nfs4_laundromat(struct nfsd_net *nn)
spin_lock(&state_lock);
list_for_each_safe(pos, next, &nn->del_recall_lru) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
- if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
+ if (dp->dl_time > cutoff) {
t = dp->dl_time - cutoff;
new_timeo = min(new_timeo, t);
break;
@@ -5271,8 +5332,7 @@ nfs4_laundromat(struct nfsd_net *nn)
while (!list_empty(&nn->close_lru)) {
oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
oo_close_lru);
- if (time_after((unsigned long)oo->oo_time,
- (unsigned long)cutoff)) {
+ if (oo->oo_time > cutoff) {
t = oo->oo_time - cutoff;
new_timeo = min(new_timeo, t);
break;
@@ -5302,8 +5362,7 @@ nfs4_laundromat(struct nfsd_net *nn)
while (!list_empty(&nn->blocked_locks_lru)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
struct nfsd4_blocked_lock, nbl_lru);
- if (time_after((unsigned long)nbl->nbl_time,
- (unsigned long)cutoff)) {
+ if (nbl->nbl_time > cutoff) {
t = nbl->nbl_time - cutoff;
new_timeo = min(new_timeo, t);
break;
@@ -5320,7 +5379,7 @@ nfs4_laundromat(struct nfsd_net *nn)
free_blocked_lock(nbl);
}
out:
- new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
+ new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
return new_timeo;
}
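
Each LRU scan above follows the same shape: entries last used before cutoff = now - lease are reaped, and the first survivor bounds how long the laundromat may sleep. The core of one such scan, as a standalone sketch (illustrative types):

#include <stdint.h>
#include <stdio.h>

/* Scan an array ordered by last-use time (oldest first), as the LRUs are.
 * Returns seconds until the next entry would expire; expired count via *reaped. */
static int64_t scan_lru(const int64_t *last_used, int n, int64_t now,
			int64_t lease, int *reaped)
{
	int64_t cutoff = now - lease;
	int64_t new_timeo = lease;
	int i;

	*reaped = 0;
	for (i = 0; i < n; i++) {
		if (last_used[i] > cutoff) {	/* still within lease */
			int64_t t = last_used[i] - cutoff;
			if (t < new_timeo)
				new_timeo = t;
			break;
		}
		(*reaped)++;			/* expired: would be freed */
	}
	return new_timeo;
}

int main(void)
{
	int64_t used[] = { 100, 2000, 9000 };
	int reaped;
	int64_t next = scan_lru(used, 3, 10000, 90 * 60, &reaped);

	printf("reaped=%d next=%lld\n", reaped, (long long)next);	/* reaped=2 next=4400 */
	return 0;
}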
@@ -5330,13 +5389,13 @@ static void laundromat_main(struct work_struct *);
static void
laundromat_main(struct work_struct *laundry)
{
- time_t t;
+ time64_t t;
struct delayed_work *dwork = to_delayed_work(laundry);
struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
laundromat_work);
t = nfs4_laundromat(nn);
- dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
+ dprintk("NFSD: laundromat_main - sleeping for %lld seconds\n", t);
queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
@@ -5521,7 +5580,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return nfserr_bad_stateid;
- status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
+ status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn,
+ false);
if (status == nfserr_stale_clientid) {
if (cstate->session)
return nfserr_bad_stateid;
@@ -5600,6 +5660,85 @@ nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
out:
return status;
}
+static void
+_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
+{
+ WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
+ if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
+ return;
+ list_del(&cps->cp_list);
+ idr_remove(&nn->s2s_cp_stateids,
+ cps->cp_stateid.stid.si_opaque.so_id);
+ kfree(cps);
+}
+/*
+ * A READ from an inter-server-to-server COPY will have a
+ * copy stateid. Look up the copy notify stateid from the
+ * idr structure and take a reference on it.
+ */
+__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
+ struct nfs4_client *clp,
+ struct nfs4_cpntf_state **cps)
+{
+ copy_stateid_t *cps_t;
+ struct nfs4_cpntf_state *state = NULL;
+
+ if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
+ return nfserr_bad_stateid;
+ spin_lock(&nn->s2s_cp_lock);
+ cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
+ if (cps_t) {
+ state = container_of(cps_t, struct nfs4_cpntf_state,
+ cp_stateid);
+ if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
+ state = NULL;
+ goto unlock;
+ }
+ if (!clp)
+ refcount_inc(&state->cp_stateid.sc_count);
+ else
+ _free_cpntf_state_locked(nn, state);
+ }
+unlock:
+ spin_unlock(&nn->s2s_cp_lock);
+ if (!state)
+ return nfserr_bad_stateid;
+ if (!clp && state)
+ *cps = state;
+ return 0;
+}
+
+static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
+ struct nfs4_stid **stid)
+{
+ __be32 status;
+ struct nfs4_cpntf_state *cps = NULL;
+ struct nfsd4_compound_state cstate;
+
+ status = manage_cpntf_state(nn, st, NULL, &cps);
+ if (status)
+ return status;
+
+ cps->cpntf_time = ktime_get_boottime_seconds();
+ memset(&cstate, 0, sizeof(cstate));
+ status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true);
+ if (status)
+ goto out;
+ status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid,
+ NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
+ stid, nn);
+ put_client_renew(cstate.clp);
+out:
+ nfs4_put_cpntf_state(nn, cps);
+ return status;
+}
+
+void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
+{
+ spin_lock(&nn->s2s_cp_lock);
+ _free_cpntf_state_locked(nn, cps);
+ spin_unlock(&nn->s2s_cp_lock);
+}
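
All three entry points funnel into _free_cpntf_state_locked, which only unlinks and frees once the last reference is gone, and always under s2s_cp_lock. The put-under-lock pattern, sketched in userspace with a mutex standing in for the spinlock (names illustrative):

#include <pthread.h>
#include <stdlib.h>

struct cpntf_sketch {
	int refcount;
	/* ... stateid, per-parent list linkage, idr slot ... */
};

static pthread_mutex_t s2s_cp_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_cpntf_locked(struct cpntf_sketch *cps)
{
	if (--cps->refcount)		/* an in-flight READ still holds it */
		return;
	/* list_del() and idr_remove() would happen here, under the lock */
	free(cps);
}

static void put_cpntf_state(struct cpntf_sketch *cps)
{
	pthread_mutex_lock(&s2s_cp_lock);
	free_cpntf_locked(cps);
	pthread_mutex_unlock(&s2s_cp_lock);
}

int main(void)
{
	struct cpntf_sketch *cps = calloc(1, sizeof(*cps));

	cps->refcount = 2;		/* COPY_NOTIFY ref + one READ */
	put_cpntf_state(cps);		/* drops to 1: kept */
	put_cpntf_state(cps);		/* drops to 0: unlinked and freed */
	return 0;
}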
/*
* Checks for stateid operations
@@ -5607,7 +5746,8 @@ out:
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
- stateid_t *stateid, int flags, struct nfsd_file **nfp)
+ stateid_t *stateid, int flags, struct nfsd_file **nfp,
+ struct nfs4_stid **cstid)
{
struct inode *ino = d_inode(fhp->fh_dentry);
struct net *net = SVC_NET(rqstp);
@@ -5629,6 +5769,8 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
status = nfsd4_lookup_stateid(cstate, stateid,
NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
&s, nn);
+ if (status == nfserr_bad_stateid)
+ status = find_cpntf_state(nn, stateid, &s);
if (status)
return status;
status = nfsd4_stid_check_stateid_generation(stateid, s,
@@ -5656,8 +5798,12 @@ done:
if (status == nfs_ok && nfp)
status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
out:
- if (s)
- nfs4_put_stid(s);
+ if (s) {
+ if (!status && cstid)
+ *cstid = s;
+ else
+ nfs4_put_stid(s);
+ }
return status;
}
@@ -6550,7 +6696,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
if (fl_flags & FL_SLEEP) {
- nbl->nbl_time = jiffies;
+ nbl->nbl_time = ktime_get_boottime_seconds();
spin_lock(&nn->blocked_locks_lock);
list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
@@ -6657,7 +6803,8 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_inval;
if (!nfsd4_has_session(cstate)) {
- status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
+ status = lookup_clientid(&lockt->lt_clientid, cstate, nn,
+ false);
if (status)
goto out;
}
@@ -6841,7 +6988,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
clid->cl_boot, clid->cl_id);
- status = lookup_clientid(clid, cstate, nn);
+ status = lookup_clientid(clid, cstate, nn, false);
if (status)
return status;
@@ -6988,7 +7135,7 @@ nfs4_check_open_reclaim(clientid_t *clid,
__be32 status;
/* find clientid in conf_id_hashtbl */
- status = lookup_clientid(clid, cstate, nn);
+ status = lookup_clientid(clid, cstate, nn, false);
if (status)
return nfserr_reclaim_bad;
@@ -7641,7 +7788,7 @@ static int nfs4_state_create_net(struct net *net)
INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
nn->conf_name_tree = RB_ROOT;
nn->unconf_name_tree = RB_ROOT;
- nn->boot_time = get_seconds();
+ nn->boot_time = ktime_get_real_seconds();
nn->grace_ended = false;
nn->nfsd4_manager.block_opens = true;
INIT_LIST_HEAD(&nn->nfsd4_manager.list);
@@ -7710,7 +7857,7 @@ nfs4_state_start_net(struct net *net)
nfsd4_client_tracking_init(net);
if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
goto skip_grace;
- printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
+ printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
nn->nfsd4_grace, net->ns.inum);
queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
return 0;
@@ -7786,7 +7933,8 @@ nfs4_state_shutdown(void)
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
- if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
+ if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
+ CURRENT_STATEID(stateid))
memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}
@@ -7795,14 +7943,14 @@ put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
if (cstate->minorversion) {
memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
- SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
+ SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}
}
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
- CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
+ CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}
/*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index d2dc4c0e22e8..9761512674a0 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -40,6 +40,7 @@
#include <linux/utsname.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/svcauth_gss.h>
+#include <linux/sunrpc/addr.h>
#include "idmap.h"
#include "acl.h"
@@ -1744,10 +1745,47 @@ nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
DECODE_TAIL;
}
+static __be32 nfsd4_decode_nl4_server(struct nfsd4_compoundargs *argp,
+ struct nl4_server *ns)
+{
+ DECODE_HEAD;
+ struct nfs42_netaddr *naddr;
+
+ READ_BUF(4);
+ ns->nl4_type = be32_to_cpup(p++);
+
+ /* currently only one inter-server source server is supported */
+ switch (ns->nl4_type) {
+ case NL4_NETADDR:
+ naddr = &ns->u.nl4_addr;
+
+ READ_BUF(4);
+ naddr->netid_len = be32_to_cpup(p++);
+ if (naddr->netid_len > RPCBIND_MAXNETIDLEN)
+ goto xdr_error;
+
+ READ_BUF(naddr->netid_len + 4); /* 4 for uaddr len */
+ COPYMEM(naddr->netid, naddr->netid_len);
+
+ naddr->addr_len = be32_to_cpup(p++);
+ if (naddr->addr_len > RPCBIND_MAXUADDRLEN)
+ goto xdr_error;
+
+ READ_BUF(naddr->addr_len);
+ COPYMEM(naddr->addr, naddr->addr_len);
+ break;
+ default:
+ goto xdr_error;
+ }
+ DECODE_TAIL;
+}
+
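
The decoder above is plain XDR: each field is a 4-byte big-endian length followed by opaque bytes, bounds-checked against the rpcbind maxima before copying. One such field as a standalone sketch (flat buffer in place of the xdr stream; the 4-byte XDR padding of the data is omitted for brevity):

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>

/* Decode one XDR opaque<maxlen>: 4-byte big-endian length, then bytes.
 * Returns bytes consumed, or -1 on a short buffer or oversize field. */
static ssize_t decode_opaque(const uint8_t *buf, size_t buflen,
			     char *out, uint32_t maxlen, uint32_t *outlen)
{
	uint32_t len;

	if (buflen < 4)
		return -1;
	memcpy(&len, buf, 4);
	len = ntohl(len);
	if (len > maxlen || buflen - 4 < len)
		return -1;
	memcpy(out, buf + 4, len);
	*outlen = len;
	return 4 + len;
}

int main(void)
{
	const uint8_t wire[] = { 0, 0, 0, 3, 't', 'c', 'p' };
	char netid[16];
	uint32_t len;

	return decode_opaque(wire, sizeof(wire), netid, sizeof(netid) - 1, &len) < 0;
}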
static __be32
nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
{
DECODE_HEAD;
+ struct nl4_server *ns_dummy;
+ int i, count;
status = nfsd4_decode_stateid(argp, &copy->cp_src_stateid);
if (status)
@@ -1762,7 +1800,32 @@ nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
p = xdr_decode_hyper(p, &copy->cp_count);
p++; /* ca_consecutive: we always do consecutive copies */
copy->cp_synchronous = be32_to_cpup(p++);
- /* tmp = be32_to_cpup(p); Source server list not supported */
+
+ count = be32_to_cpup(p++);
+
+ copy->cp_intra = false;
+ if (count == 0) { /* intra-server copy */
+ copy->cp_intra = true;
+ goto intra;
+ }
+
+ /* decode all the supplied server addresses but use first */
+ status = nfsd4_decode_nl4_server(argp, &copy->cp_src);
+ if (status)
+ return status;
+
+ ns_dummy = kmalloc(sizeof(struct nl4_server), GFP_KERNEL);
+ if (ns_dummy == NULL)
+ return nfserrno(-ENOMEM);
+ for (i = 0; i < count - 1; i++) {
+ status = nfsd4_decode_nl4_server(argp, ns_dummy);
+ if (status) {
+ kfree(ns_dummy);
+ return status;
+ }
+ }
+ kfree(ns_dummy);
+intra:
DECODE_TAIL;
}
@@ -1775,6 +1838,18 @@ nfsd4_decode_offload_status(struct nfsd4_compoundargs *argp,
}
static __be32
+nfsd4_decode_copy_notify(struct nfsd4_compoundargs *argp,
+ struct nfsd4_copy_notify *cn)
+{
+ int status;
+
+ status = nfsd4_decode_stateid(argp, &cn->cpn_src_stateid);
+ if (status)
+ return status;
+ return nfsd4_decode_nl4_server(argp, &cn->cpn_dst);
+}
+
+static __be32
nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
{
DECODE_HEAD;
@@ -1875,7 +1950,7 @@ static const nfsd4_dec nfsd4_dec_ops[] = {
/* new operations for NFSv4.2 */
[OP_ALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
[OP_COPY] = (nfsd4_dec)nfsd4_decode_copy,
- [OP_COPY_NOTIFY] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_COPY_NOTIFY] = (nfsd4_dec)nfsd4_decode_copy_notify,
[OP_DEALLOCATE] = (nfsd4_dec)nfsd4_decode_fallocate,
[OP_IO_ADVISE] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_LAYOUTERROR] = (nfsd4_dec)nfsd4_decode_notsupp,
@@ -2024,11 +2099,11 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
*/
static __be32 *encode_time_delta(__be32 *p, struct inode *inode)
{
- struct timespec ts;
+ struct timespec64 ts;
u32 ns;
ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran);
- ts = ns_to_timespec(ns);
+ ts = ns_to_timespec64(ns);
p = xdr_encode_hyper(p, ts.tv_sec);
*p++ = cpu_to_be32(ts.tv_nsec);
@@ -4244,6 +4319,46 @@ nfsd42_encode_write_res(struct nfsd4_compoundres *resp,
}
static __be32
+nfsd42_encode_nl4_server(struct nfsd4_compoundres *resp, struct nl4_server *ns)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ struct nfs42_netaddr *addr;
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4);
+ *p++ = cpu_to_be32(ns->nl4_type);
+
+ switch (ns->nl4_type) {
+ case NL4_NETADDR:
+ addr = &ns->u.nl4_addr;
+
+ /* netid_len, netid, uaddr_len, uaddr (port included
+ * in RPCBIND_MAXUADDRLEN)
+ */
+ p = xdr_reserve_space(xdr,
+ 4 /* netid len */ +
+ (XDR_QUADLEN(addr->netid_len) * 4) +
+ 4 /* uaddr len */ +
+ (XDR_QUADLEN(addr->addr_len) * 4));
+ if (!p)
+ return nfserr_resource;
+
+ *p++ = cpu_to_be32(addr->netid_len);
+ p = xdr_encode_opaque_fixed(p, addr->netid,
+ addr->netid_len);
+ *p++ = cpu_to_be32(addr->addr_len);
+ p = xdr_encode_opaque_fixed(p, addr->addr,
+ addr->addr_len);
+ break;
+ default:
+ WARN_ON_ONCE(ns->nl4_type != NL4_NETADDR);
+ return nfserr_inval;
+ }
+
+ return 0;
+}
+
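
The xdr_reserve_space arithmetic above rounds each opaque up to XDR's 4-byte quantum; that is all XDR_QUADLEN(n) * 4 computes. For example:

#include <stdint.h>
#include <stdio.h>

/* XDR_QUADLEN(n): number of 4-byte XDR units needed for n bytes. */
#define XDR_QUADLEN(n)	(((n) + 3) >> 2)

int main(void)
{
	/* "tcp" takes 4 length bytes + 4 padded data bytes on the wire */
	uint32_t netid_len = 3;

	printf("reserve %u bytes\n", 4 + XDR_QUADLEN(netid_len) * 4);	/* 8 */
	return 0;
}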
+static __be32
nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_copy *copy)
{
@@ -4277,6 +4392,40 @@ nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
}
static __be32
+nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
+ struct nfsd4_copy_notify *cn)
+{
+ struct xdr_stream *xdr = &resp->xdr;
+ __be32 *p;
+
+ if (nfserr)
+ return nfserr;
+
+ /* 8 sec, 4 nsec */
+ p = xdr_reserve_space(xdr, 12);
+ if (!p)
+ return nfserr_resource;
+
+ /* cnr_lease_time */
+ p = xdr_encode_hyper(p, cn->cpn_sec);
+ *p++ = cpu_to_be32(cn->cpn_nsec);
+
+ /* cnr_stateid */
+ nfserr = nfsd4_encode_stateid(xdr, &cn->cpn_cnr_stateid);
+ if (nfserr)
+ return nfserr;
+
+ /* cnr_src.nl_nsvr */
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ return nfserr_resource;
+
+ *p++ = cpu_to_be32(1);
+
+ return nfsd42_encode_nl4_server(resp, &cn->cpn_src);
+}
+
+static __be32
nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_seek *seek)
{
@@ -4373,7 +4522,7 @@ static const nfsd4_enc nfsd4_enc_ops[] = {
/* NFSv4.2 operations */
[OP_ALLOCATE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_COPY] = (nfsd4_enc)nfsd4_encode_copy,
- [OP_COPY_NOTIFY] = (nfsd4_enc)nfsd4_encode_noop,
+ [OP_COPY_NOTIFY] = (nfsd4_enc)nfsd4_encode_copy_notify,
[OP_DEALLOCATE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_IO_ADVISE] = (nfsd4_enc)nfsd4_encode_noop,
[OP_LAYOUTERROR] = (nfsd4_enc)nfsd4_encode_noop,
@@ -4500,8 +4649,6 @@ nfsd4_encode_replay(struct xdr_stream *xdr, struct nfsd4_op *op)
__be32 *p;
struct nfs4_replay *rp = op->replay;
- BUG_ON(!rp);
-
p = xdr_reserve_space(xdr, 8 + rp->rp_buflen);
if (!p) {
WARN_ON_ONCE(1);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 11b42c523f04..e109a1007704 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -157,11 +157,11 @@ static int exports_proc_open(struct inode *inode, struct file *file)
return exports_net_open(current->nsproxy->net_ns, file);
}
-static const struct file_operations exports_proc_operations = {
- .open = exports_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
+static const struct proc_ops exports_proc_ops = {
+ .proc_open = exports_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
};
static int exports_nfsd_open(struct inode *inode, struct file *file)
@@ -956,7 +956,7 @@ static ssize_t write_maxconn(struct file *file, char *buf, size_t size)
#ifdef CONFIG_NFSD_V4
static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size,
- time_t *time, struct nfsd_net *nn)
+ time64_t *time, struct nfsd_net *nn)
{
char *mesg = buf;
int rv, i;
@@ -984,11 +984,11 @@ static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size,
*time = i;
}
- return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n", *time);
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%lld\n", *time);
}
static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size,
- time_t *time, struct nfsd_net *nn)
+ time64_t *time, struct nfsd_net *nn)
{
ssize_t rv;
@@ -1431,8 +1431,7 @@ static int create_proc_exports_entry(void)
entry = proc_mkdir("fs/nfs", NULL);
if (!entry)
return -ENOMEM;
- entry = proc_create("exports", 0, entry,
- &exports_proc_operations);
+ entry = proc_create("exports", 0, entry, &exports_proc_ops);
if (!entry) {
remove_proc_entry("fs/nfs", NULL);
return -ENOMEM;
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 57b93d95fa5c..2ab5569126b8 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -19,6 +19,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/addr.h>
#include <uapi/linux/nfsd/debug.h>
@@ -142,7 +143,6 @@ int nfs4_state_start(void);
int nfs4_state_start_net(struct net *net);
void nfs4_state_shutdown(void);
void nfs4_state_shutdown_net(struct net *net);
-void nfs4_reset_lease(time_t leasetime);
int nfs4_reset_recoverydir(char *recdir);
char * nfs4_recoverydir(void);
bool nfsd4_spo_must_allow(struct svc_rqst *rqstp);
@@ -153,7 +153,6 @@ static inline int nfs4_state_start(void) { return 0; }
static inline int nfs4_state_start_net(struct net *net) { return 0; }
static inline void nfs4_state_shutdown(void) { }
static inline void nfs4_state_shutdown_net(struct net *net) { }
-static inline void nfs4_reset_lease(time_t leasetime) { }
static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
static inline char * nfs4_recoverydir(void) {return NULL; }
static inline bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
@@ -387,6 +386,37 @@ void nfsd_lockd_shutdown(void);
extern const u32 nfsd_suppattrs[3][3];
+static inline __be32 nfsd4_set_netaddr(struct sockaddr *addr,
+ struct nfs42_netaddr *netaddr)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+ unsigned int port;
+ size_t ret_addr, ret_port;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ port = ntohs(sin->sin_port);
+ sprintf(netaddr->netid, "tcp");
+ netaddr->netid_len = 3;
+ break;
+ case AF_INET6:
+ port = ntohs(sin6->sin6_port);
+ sprintf(netaddr->netid, "tcp6");
+ netaddr->netid_len = 4;
+ break;
+ default:
+ return nfserr_inval;
+ }
+ ret_addr = rpc_ntop(addr, netaddr->addr, sizeof(netaddr->addr));
+ ret_port = snprintf(netaddr->addr + ret_addr,
+ RPCBIND_MAXUADDRLEN + 1 - ret_addr,
+ ".%u.%u", port >> 8, port & 0xff);
+ WARN_ON(ret_port >= RPCBIND_MAXUADDRLEN + 1 - ret_addr);
+ netaddr->addr_len = ret_addr + ret_port;
+ return 0;
+}
+
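
nfsd4_set_netaddr emits the rpcbind "universal address" form (RFC 5665): the presentation address with the port appended as two decimal octets. For IPv4 the construction looks like this (standalone sketch):

#include <arpa/inet.h>
#include <stdio.h>

/* Build an RFC 5665 uaddr for an IPv4 address, e.g. 192.0.2.7 port 2049
 * -> "192.0.2.7.8.1" (2049 = 8*256 + 1). */
static int make_uaddr4(struct in_addr addr, unsigned int port,
		       char *buf, size_t buflen)
{
	char ip[INET_ADDRSTRLEN];

	if (!inet_ntop(AF_INET, &addr, ip, sizeof(ip)))
		return -1;
	return snprintf(buf, buflen, "%s.%u.%u", ip, port >> 8, port & 0xff);
}

int main(void)
{
	struct in_addr a = { .s_addr = htonl(0xC0000207) };	/* 192.0.2.7 */
	char uaddr[64];

	make_uaddr4(a, 2049, uaddr, sizeof(uaddr));
	printf("%s\n", uaddr);	/* 192.0.2.7.8.1 */
	return 0;
}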
static inline bool bmval_is_subset(const u32 *bm1, const u32 *bm2)
{
return !((bm1[0] & ~bm2[0]) ||
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 755e256a9103..56cfbc361561 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -35,15 +35,15 @@ typedef struct svc_fh {
bool fh_locked; /* inode locked by us */
bool fh_want_write; /* remount protection taken */
-
+ int fh_flags; /* FH flags */
#ifdef CONFIG_NFSD_V3
bool fh_post_saved; /* post-op attrs saved */
bool fh_pre_saved; /* pre-op attrs saved */
/* Pre-op attributes saved during fh_lock */
__u64 fh_pre_size; /* size before operation */
- struct timespec fh_pre_mtime; /* mtime before oper */
- struct timespec fh_pre_ctime; /* ctime before oper */
+ struct timespec64 fh_pre_mtime; /* mtime before oper */
+ struct timespec64 fh_pre_ctime; /* ctime before oper */
/*
* pre-op nfsv4 change attr: note must check IS_I_VERSION(inode)
* to find out if it is valid.
@@ -56,6 +56,9 @@ typedef struct svc_fh {
#endif /* CONFIG_NFSD_V3 */
} svc_fh;
+#define NFSD4_FH_FOREIGN (1<<0)
+#define SET_FH_FLAG(c, f) ((c)->fh_flags |= (f))
+#define HAS_FH_FLAG(c, f) ((c)->fh_flags & (f))
enum nfsd_fsid {
FSID_DEV = 0,
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index c83ddac22f38..543bbe0a556e 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -94,7 +94,7 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
* Solaris, at least, doesn't seem to care what the time
* request is. We require it be within 30 minutes of now.
*/
- time_t delta = iap->ia_atime.tv_sec - get_seconds();
+ time64_t delta = iap->ia_atime.tv_sec - ktime_get_real_seconds();
nfserr = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
if (nfserr)
@@ -113,7 +113,7 @@ nfsd_proc_setattr(struct svc_rqst *rqstp)
}
}
- nfserr = nfsd_setattr(rqstp, fhp, iap, 0, (time_t)0);
+ nfserr = nfsd_setattr(rqstp, fhp, iap, 0, (time64_t)0);
done:
return nfsd_return_attrs(nfserr, resp);
}
@@ -226,7 +226,7 @@ nfsd_proc_write(struct svc_rqst *rqstp)
return nfserr_io;
nfserr = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh),
argp->offset, rqstp->rq_vec, nvecs,
- &cnt, NFS_DATA_SYNC);
+ &cnt, NFS_DATA_SYNC, NULL);
return nfsd_return_attrs(nfserr, resp);
}
@@ -380,7 +380,7 @@ nfsd_proc_create(struct svc_rqst *rqstp)
*/
attr->ia_valid &= ATTR_SIZE;
if (attr->ia_valid)
- nfserr = nfsd_setattr(rqstp, newfhp, attr, 0, (time_t)0);
+ nfserr = nfsd_setattr(rqstp, newfhp, attr, 0, (time64_t)0);
}
out_unlock:
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index e8bee8ff30c5..3b77b904212d 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -31,6 +31,12 @@
#define NFSDDBG_FACILITY NFSDDBG_SVC
+bool inter_copy_offload_enable;
+EXPORT_SYMBOL_GPL(inter_copy_offload_enable);
+module_param(inter_copy_offload_enable, bool, 0644);
+MODULE_PARM_DESC(inter_copy_offload_enable,
+ "Enable inter server to server copy offload. Default: false");
+
extern struct svc_program nfsd_program;
static int nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
@@ -391,20 +397,25 @@ static int nfsd_startup_net(int nrservs, struct net *net, const struct cred *cre
ret = lockd_up(net, cred);
if (ret)
goto out_socks;
- nn->lockd_up = 1;
+ nn->lockd_up = true;
}
- ret = nfs4_state_start_net(net);
+ ret = nfsd_file_cache_start_net(net);
if (ret)
goto out_lockd;
+ ret = nfs4_state_start_net(net);
+ if (ret)
+ goto out_filecache;
nn->nfsd_net_up = true;
return 0;
+out_filecache:
+ nfsd_file_cache_shutdown_net(net);
out_lockd:
if (nn->lockd_up) {
lockd_down(net);
- nn->lockd_up = 0;
+ nn->lockd_up = false;
}
out_socks:
nfsd_shutdown_generic();
@@ -415,11 +426,11 @@ static void nfsd_shutdown_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- nfsd_file_cache_purge(net);
+ nfsd_file_cache_shutdown_net(net);
nfs4_state_shutdown_net(net);
if (nn->lockd_up) {
lockd_down(net);
- nn->lockd_up = 0;
+ nn->lockd_up = false;
}
nn->nfsd_net_up = false;
nfsd_shutdown_generic();
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index d61b83b9654c..68d3f30ee760 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -56,6 +56,14 @@ typedef struct {
stateid_opaque_t si_opaque;
} stateid_t;
+typedef struct {
+ stateid_t stid;
+#define NFS4_COPY_STID 1
+#define NFS4_COPYNOTIFY_STID 2
+ unsigned char sc_type;
+ refcount_t sc_count;
+} copy_stateid_t;
+
#define STATEID_FMT "(%08x/%08x/%08x/%08x)"
#define STATEID_VAL(s) \
(s)->si_opaque.so_clid.cl_boot, \
@@ -96,6 +104,7 @@ struct nfs4_stid {
#define NFS4_REVOKED_DELEG_STID 16
#define NFS4_CLOSED_DELEG_STID 32
#define NFS4_LAYOUT_STID 64
+ struct list_head sc_cp_list;
unsigned char sc_type;
stateid_t sc_stateid;
spinlock_t sc_lock;
@@ -104,6 +113,17 @@ struct nfs4_stid {
void (*sc_free)(struct nfs4_stid *);
};
+/* Keep a list of stateids issued by COPY_NOTIFY and associate each with the
+ * parent OPEN/LOCK/DELEG stateid.
+ */
+struct nfs4_cpntf_state {
+ copy_stateid_t cp_stateid;
+ struct list_head cp_list; /* per parent nfs4_stid */
+ stateid_t cp_p_stateid; /* copy of parent's stateid */
+ clientid_t cp_p_clid; /* copy of parent's clid */
+ time64_t cpntf_time; /* last time stateid used */
+};
+
/*
* Represents a delegation stateid. The nfs4_client holds references to these
* and they are put when it is being destroyed or when the delegation is
@@ -132,7 +152,7 @@ struct nfs4_delegation {
struct list_head dl_recall_lru; /* delegation recalled */
struct nfs4_clnt_odstate *dl_clnt_odstate;
u32 dl_type;
- time_t dl_time;
+ time64_t dl_time;
/* For recall: */
int dl_retries;
struct nfsd4_callback dl_recall;
@@ -310,7 +330,7 @@ struct nfs4_client {
#endif
struct xdr_netobj cl_name; /* id generated by client */
nfs4_verifier cl_verifier; /* generated by client */
- time_t cl_time; /* time of last lease renewal */
+ time64_t cl_time; /* time of last lease renewal */
struct sockaddr_storage cl_addr; /* client ipaddress */
bool cl_mach_cred; /* SP4_MACH_CRED in force */
struct svc_cred cl_cred; /* setclientid principal */
@@ -320,7 +340,7 @@ struct nfs4_client {
/* NFSv4.1 client implementation id: */
struct xdr_netobj cl_nii_domain;
struct xdr_netobj cl_nii_name;
- struct timespec cl_nii_time;
+ struct timespec64 cl_nii_time;
/* for v4.0 and v4.1 callbacks: */
struct nfs4_cb_conn cl_cb_conn;
@@ -449,7 +469,7 @@ struct nfs4_openowner {
*/
struct list_head oo_close_lru;
struct nfs4_ol_stateid *oo_last_closed_stid;
- time_t oo_time; /* time of placement on so_close_lru */
+ time64_t oo_time; /* time of placement on so_close_lru */
#define NFS4_OO_CONFIRMED 1
unsigned char oo_flags;
};
@@ -606,7 +626,7 @@ static inline bool nfsd4_stateid_generation_after(stateid_t *a, stateid_t *b)
struct nfsd4_blocked_lock {
struct list_head nbl_list;
struct list_head nbl_lru;
- unsigned long nbl_time;
+ time64_t nbl_time;
struct file_lock nbl_lock;
struct knfsd_fh nbl_fh;
struct nfsd4_callback nbl_cb;
@@ -618,14 +638,17 @@ struct nfsd4_copy;
extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
- stateid_t *stateid, int flags, struct nfsd_file **filp);
+ stateid_t *stateid, int flags, struct nfsd_file **filp,
+ struct nfs4_stid **cstid);
__be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
stateid_t *stateid, unsigned char typemask,
struct nfs4_stid **s, struct nfsd_net *nn);
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
void (*sc_free)(struct nfs4_stid *));
-int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy);
-void nfs4_free_cp_state(struct nfsd4_copy *copy);
+int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy);
+void nfs4_free_copy_state(struct nfsd4_copy *copy);
+struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
+ struct nfs4_stid *p_stid);
void nfs4_unhash_stid(struct nfs4_stid *s);
void nfs4_put_stid(struct nfs4_stid *s);
void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
@@ -655,6 +678,11 @@ void put_nfs4_file(struct nfs4_file *fi);
extern void nfs4_put_copy(struct nfsd4_copy *copy);
extern struct nfsd4_copy *
find_async_copy(struct nfs4_client *clp, stateid_t *stateid);
+extern void nfs4_put_cpntf_state(struct nfsd_net *nn,
+ struct nfs4_cpntf_state *cps);
+extern __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
+ struct nfs4_client *clp,
+ struct nfs4_cpntf_state **cps);
static inline void get_nfs4_file(struct nfs4_file *fi)
{
refcount_inc(&fi->fi_ref);
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index 9bce3b913189..b1bc582b0493 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -84,17 +84,17 @@ static int nfsd_proc_open(struct inode *inode, struct file *file)
return single_open(file, nfsd_proc_show, NULL);
}
-static const struct file_operations nfsd_proc_fops = {
- .open = nfsd_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct proc_ops nfsd_proc_ops = {
+ .proc_open = nfsd_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
};
void
nfsd_stat_init(void)
{
- svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_fops);
+ svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
}
void
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index ffc78a0e28b2..06dd0d337049 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -166,6 +166,12 @@ DEFINE_STATEID_EVENT(layout_recall_done);
DEFINE_STATEID_EVENT(layout_recall_fail);
DEFINE_STATEID_EVENT(layout_recall_release);
+TRACE_DEFINE_ENUM(NFSD_FILE_HASHED);
+TRACE_DEFINE_ENUM(NFSD_FILE_PENDING);
+TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_READ);
+TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_WRITE);
+TRACE_DEFINE_ENUM(NFSD_FILE_REFERENCED);
+
#define show_nf_flags(val) \
__print_flags(val, "|", \
{ 1 << NFSD_FILE_HASHED, "HASHED" }, \
@@ -195,7 +201,7 @@ DECLARE_EVENT_CLASS(nfsd_file_class,
TP_fast_assign(
__entry->nf_hashval = nf->nf_hashval;
__entry->nf_inode = nf->nf_inode;
- __entry->nf_ref = atomic_read(&nf->nf_ref);
+ __entry->nf_ref = refcount_read(&nf->nf_ref);
__entry->nf_flags = nf->nf_flags;
__entry->nf_may = nf->nf_may;
__entry->nf_file = nf->nf_file;
@@ -228,7 +234,7 @@ TRACE_EVENT(nfsd_file_acquire,
TP_ARGS(rqstp, hash, inode, may_flags, nf, status),
TP_STRUCT__entry(
- __field(__be32, xid)
+ __field(u32, xid)
__field(unsigned int, hash)
__field(void *, inode)
__field(unsigned int, may_flags)
@@ -236,27 +242,27 @@ TRACE_EVENT(nfsd_file_acquire,
__field(unsigned long, nf_flags)
__field(unsigned char, nf_may)
__field(struct file *, nf_file)
- __field(__be32, status)
+ __field(u32, status)
),
TP_fast_assign(
- __entry->xid = rqstp->rq_xid;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->hash = hash;
__entry->inode = inode;
__entry->may_flags = may_flags;
- __entry->nf_ref = nf ? atomic_read(&nf->nf_ref) : 0;
+ __entry->nf_ref = nf ? refcount_read(&nf->nf_ref) : 0;
__entry->nf_flags = nf ? nf->nf_flags : 0;
__entry->nf_may = nf ? nf->nf_may : 0;
__entry->nf_file = nf ? nf->nf_file : NULL;
- __entry->status = status;
+ __entry->status = be32_to_cpu(status);
),
TP_printk("xid=0x%x hash=0x%x inode=0x%p may_flags=%s ref=%d nf_flags=%s nf_may=%s nf_file=0x%p status=%u",
- be32_to_cpu(__entry->xid), __entry->hash, __entry->inode,
+ __entry->xid, __entry->hash, __entry->inode,
show_nf_may(__entry->may_flags), __entry->nf_ref,
show_nf_flags(__entry->nf_flags),
show_nf_may(__entry->nf_may), __entry->nf_file,
- be32_to_cpu(__entry->status))
+ __entry->status)
);
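
The tracepoint change stores xid and status in host byte order at assignment time instead of converting in TP_printk; the conversion itself is just a byte swap:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xid_be = htonl(0x12345678);	/* on-the-wire (__be32) form */

	/* store host-endian, as the updated TP_fast_assign does */
	uint32_t xid = ntohl(xid_be);

	printf("xid=0x%x\n", xid);		/* xid=0x12345678 */
	return 0;
}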
DECLARE_EVENT_CLASS(nfsd_file_search_class,
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c0dc491537a6..0aa02eb18bd3 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -280,19 +280,25 @@ out:
* Commit metadata changes to stable storage.
*/
static int
-commit_metadata(struct svc_fh *fhp)
+commit_inode_metadata(struct inode *inode)
{
- struct inode *inode = d_inode(fhp->fh_dentry);
const struct export_operations *export_ops = inode->i_sb->s_export_op;
- if (!EX_ISSYNC(fhp->fh_export))
- return 0;
-
if (export_ops->commit_metadata)
return export_ops->commit_metadata(inode);
return sync_inode_metadata(inode, 1);
}
+static int
+commit_metadata(struct svc_fh *fhp)
+{
+ struct inode *inode = d_inode(fhp->fh_dentry);
+
+ if (!EX_ISSYNC(fhp->fh_export))
+ return 0;
+ return commit_inode_metadata(inode);
+}
+
/*
* Go over the attributes and take care of the small differences between
* NFS semantics and what Linux expects.
@@ -358,7 +364,7 @@ out_nfserrno:
*/
__be32
nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
- int check_guard, time_t guardtime)
+ int check_guard, time64_t guardtime)
{
struct dentry *dentry;
struct inode *inode;
@@ -524,23 +530,39 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
#endif
-__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
- u64 dst_pos, u64 count, bool sync)
+__be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos,
+ struct nfsd_file *nf_dst, u64 dst_pos, u64 count, bool sync)
{
+ struct file *src = nf_src->nf_file;
+ struct file *dst = nf_dst->nf_file;
loff_t cloned;
+ __be32 ret = 0;
+ down_write(&nf_dst->nf_rwsem);
cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
- if (cloned < 0)
- return nfserrno(cloned);
- if (count && cloned != count)
- return nfserrno(-EINVAL);
+ if (cloned < 0) {
+ ret = nfserrno(cloned);
+ goto out_err;
+ }
+ if (count && cloned != count) {
+ ret = nfserrno(-EINVAL);
+ goto out_err;
+ }
if (sync) {
loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
- if (status < 0)
- return nfserrno(status);
+
+ if (!status)
+ status = commit_inode_metadata(file_inode(src));
+ if (status < 0) {
+ nfsd_reset_boot_verifier(net_generic(nf_dst->nf_net,
+ nfsd_net_id));
+ ret = nfserrno(status);
+ }
}
- return 0;
+out_err:
+ up_write(&nf_dst->nf_rwsem);
+ return ret;
}
ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
@@ -938,10 +960,12 @@ static int wait_for_concurrent_writes(struct file *file)
}
__be32
-nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
loff_t offset, struct kvec *vec, int vlen,
- unsigned long *cnt, int stable)
+ unsigned long *cnt, int stable,
+ __be32 *verf)
{
+ struct file *file = nf->nf_file;
struct svc_export *exp;
struct iov_iter iter;
__be32 nfserr;
@@ -972,9 +996,28 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
flags |= RWF_SYNC;
iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt);
- host_err = vfs_iter_write(file, &iter, &pos, flags);
- if (host_err < 0)
+ if (flags & RWF_SYNC) {
+ down_write(&nf->nf_rwsem);
+ host_err = vfs_iter_write(file, &iter, &pos, flags);
+ if (host_err < 0)
+ nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
+ nfsd_net_id));
+ up_write(&nf->nf_rwsem);
+ } else {
+ down_read(&nf->nf_rwsem);
+ if (verf)
+ nfsd_copy_boot_verifier(verf,
+ net_generic(SVC_NET(rqstp),
+ nfsd_net_id));
+ host_err = vfs_iter_write(file, &iter, &pos, flags);
+ up_read(&nf->nf_rwsem);
+ }
+ if (host_err < 0) {
+ nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
+ nfsd_net_id));
goto out_nfserr;
+ }
+ *cnt = host_err;
nfsdstats.io_write += *cnt;
fsnotify_modify(file);
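
The new nf_rwsem usage gives stable (RWF_SYNC) writes exclusive access, since a failed sync resets the boot verifier, while unstable writes share the lock and merely snapshot the verifier. The locking shape, sketched with a pthread rwlock and stub helpers (hypothetical names, not the kernel API):

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t nf_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int boot_verf[2] = { 0x5e0, 0x1 };	/* stand-in verifier */

static void copy_boot_verifier(unsigned int verf[2])
{
	verf[0] = boot_verf[0];
	verf[1] = boot_verf[1];
}

static void reset_boot_verifier(void) { boot_verf[1]++; }

static long do_write(bool sync) { (void)sync; return 0; }	/* stub */

static long write_sketch(bool stable, unsigned int verf[2])
{
	long err;

	if (stable) {
		/* exclusive: a failed sync may reset the verifier */
		pthread_rwlock_wrlock(&nf_rwsem);
		err = do_write(true);
		if (err < 0)
			reset_boot_verifier();
		pthread_rwlock_unlock(&nf_rwsem);
	} else {
		/* shared: many unstable writers, verifier only copied */
		pthread_rwlock_rdlock(&nf_rwsem);
		copy_boot_verifier(verf);
		err = do_write(false);
		pthread_rwlock_unlock(&nf_rwsem);
	}
	return err;
}

int main(void)
{
	unsigned int verf[2];

	write_sketch(false, verf);		/* UNSTABLE write path */
	return (int)write_sketch(true, verf);	/* FILE_SYNC write path */
}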
@@ -1036,7 +1079,8 @@ __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
*/
__be32
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
- struct kvec *vec, int vlen, unsigned long *cnt, int stable)
+ struct kvec *vec, int vlen, unsigned long *cnt, int stable,
+ __be32 *verf)
{
struct nfsd_file *nf;
__be32 err;
@@ -1047,8 +1091,8 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
if (err)
goto out;
- err = nfsd_vfs_write(rqstp, fhp, nf->nf_file, offset, vec,
- vlen, cnt, stable);
+ err = nfsd_vfs_write(rqstp, fhp, nf, offset, vec,
+ vlen, cnt, stable, verf);
nfsd_file_put(nf);
out:
trace_nfsd_write_done(rqstp, fhp, offset, *cnt);
@@ -1067,7 +1111,7 @@ out:
*/
__be32
nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
- loff_t offset, unsigned long count)
+ loff_t offset, unsigned long count, __be32 *verf)
{
struct nfsd_file *nf;
loff_t end = LLONG_MAX;
@@ -1086,10 +1130,14 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (err)
goto out;
if (EX_ISSYNC(fhp->fh_export)) {
- int err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
+ int err2;
+ down_write(&nf->nf_rwsem);
+ err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
switch (err2) {
case 0:
+ nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net,
+ nfsd_net_id));
break;
case -EINVAL:
err = nfserr_notsupp;
@@ -1099,7 +1147,10 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
nfsd_reset_boot_verifier(net_generic(nf->nf_net,
nfsd_net_id));
}
- }
+ up_write(&nf->nf_rwsem);
+ } else
+ nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net,
+ nfsd_net_id));
nfsd_file_put(nf);
out:
@@ -1123,7 +1174,7 @@ nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *resfhp,
if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
iap->ia_valid &= ~(ATTR_UID|ATTR_GID);
if (iap->ia_valid)
- return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
+ return nfsd_setattr(rqstp, resfhp, iap, 0, (time64_t)0);
/* Callers expect file metadata to be committed here */
return nfserrno(commit_metadata(resfhp));
}
@@ -1386,7 +1437,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
&& d_inode(dchild)->i_atime.tv_sec == v_atime
&& d_inode(dchild)->i_size == 0 ) {
if (created)
- *created = 1;
+ *created = true;
break;
}
/* fall through */
@@ -1395,7 +1446,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
&& d_inode(dchild)->i_atime.tv_sec == v_atime
&& d_inode(dchild)->i_size == 0 ) {
if (created)
- *created = 1;
+ *created = true;
goto set_attr;
}
/* fall through */
@@ -1412,7 +1463,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out_nfserr;
}
if (created)
- *created = 1;
+ *created = true;
nfsd_check_ignore_resizing(iap);
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index cc110a10bfe8..3eb660ad80d1 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -34,6 +34,8 @@
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
+struct nfsd_file;
+
/*
* Callback function for readdir
*/
@@ -48,15 +50,16 @@ __be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
const char *, unsigned int,
struct svc_export **, struct dentry **);
__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
- struct iattr *, int, time_t);
+ struct iattr *, int, time64_t);
int nfsd_mountpoint(struct dentry *, struct svc_export *);
#ifdef CONFIG_NFSD_V4
__be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
struct xdr_netobj *);
__be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
struct file *, loff_t, loff_t, int);
-__be32 nfsd4_clone_file_range(struct file *, u64, struct file *,
- u64, u64, bool);
+__be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos,
+ struct nfsd_file *nf_dst, u64 dst_pos,
+ u64 count, bool sync);
#endif /* CONFIG_NFSD_V4 */
__be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
@@ -71,7 +74,7 @@ __be32 do_nfsd_create(struct svc_rqst *, struct svc_fh *,
struct svc_fh *res, int createmode,
u32 *verifier, bool *truncp, bool *created);
__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
- loff_t, unsigned long);
+ loff_t, unsigned long, __be32 *verf);
#endif /* CONFIG_NFSD_V3 */
int nfsd_open_break_lease(struct inode *, int);
__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
@@ -91,11 +94,12 @@ __be32 nfsd_read(struct svc_rqst *, struct svc_fh *,
loff_t, struct kvec *, int, unsigned long *,
u32 *eof);
__be32 nfsd_write(struct svc_rqst *, struct svc_fh *, loff_t,
- struct kvec *, int, unsigned long *, int);
+ struct kvec *, int, unsigned long *,
+ int stable, __be32 *verf);
__be32 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
- struct file *file, loff_t offset,
+ struct nfsd_file *nf, loff_t offset,
struct kvec *vec, int vlen, unsigned long *cnt,
- int stable);
+ int stable, __be32 *verf);
__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
char *, int *);
__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
diff --git a/fs/nfsd/xdr3.h b/fs/nfsd/xdr3.h
index 99ff9f403ff1..4155fd71714c 100644
--- a/fs/nfsd/xdr3.h
+++ b/fs/nfsd/xdr3.h
@@ -14,7 +14,7 @@ struct nfsd3_sattrargs {
struct svc_fh fh;
struct iattr attrs;
int check_guard;
- time_t guardtime;
+ time64_t guardtime;
};
struct nfsd3_diropargs {
@@ -159,6 +159,7 @@ struct nfsd3_writeres {
struct svc_fh fh;
unsigned long count;
int committed;
+ __be32 verf[2];
};
struct nfsd3_renameres {
@@ -223,6 +224,7 @@ struct nfsd3_pathconfres {
struct nfsd3_commitres {
__be32 status;
struct svc_fh fh;
+ __be32 verf[2];
};
struct nfsd3_getaclres {
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index f4737d66ee98..db63d39b1507 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -46,9 +46,9 @@
#define CURRENT_STATE_ID_FLAG (1<<0)
#define SAVED_STATE_ID_FLAG (1<<1)
-#define SET_STATE_ID(c, f) ((c)->sid_flags |= (f))
-#define HAS_STATE_ID(c, f) ((c)->sid_flags & (f))
-#define CLEAR_STATE_ID(c, f) ((c)->sid_flags &= ~(f))
+#define SET_CSTATE_FLAG(c, f) ((c)->sid_flags |= (f))
+#define HAS_CSTATE_FLAG(c, f) ((c)->sid_flags & (f))
+#define CLEAR_CSTATE_FLAG(c, f) ((c)->sid_flags &= ~(f))
struct nfsd4_compound_state {
struct svc_fh current_fh;
@@ -221,6 +221,7 @@ struct nfsd4_lookup {
struct nfsd4_putfh {
u32 pf_fhlen; /* request */
char *pf_fhval; /* request */
+ bool no_verify; /* represents foreign fh */
};
struct nfsd4_open {
@@ -518,11 +519,13 @@ struct nfsd42_write_res {
struct nfsd4_copy {
/* request */
- stateid_t cp_src_stateid;
- stateid_t cp_dst_stateid;
- u64 cp_src_pos;
- u64 cp_dst_pos;
- u64 cp_count;
+ stateid_t cp_src_stateid;
+ stateid_t cp_dst_stateid;
+ u64 cp_src_pos;
+ u64 cp_dst_pos;
+ u64 cp_count;
+ struct nl4_server cp_src;
+ bool cp_intra;
/* both */
bool cp_synchronous;
@@ -540,13 +543,18 @@ struct nfsd4_copy {
struct nfsd_file *nf_src;
struct nfsd_file *nf_dst;
- stateid_t cp_stateid;
+ copy_stateid_t cp_stateid;
struct list_head copies;
struct task_struct *copy_task;
refcount_t refcount;
bool stopped;
+
+ struct vfsmount *ss_mnt;
+ struct nfs_fh c_fh;
+ nfs4_stateid stateid;
};
+extern bool inter_copy_offload_enable;
struct nfsd4_seek {
/* request */
@@ -568,6 +576,18 @@ struct nfsd4_offload_status {
u32 status;
};
+struct nfsd4_copy_notify {
+ /* request */
+ stateid_t cpn_src_stateid;
+ struct nl4_server cpn_dst;
+
+ /* response */
+ stateid_t cpn_cnr_stateid;
+ u64 cpn_sec;
+ u32 cpn_nsec;
+ struct nl4_server cpn_src;
+};
+
struct nfsd4_op {
int opnum;
const struct nfsd4_operation * opdesc;
@@ -627,6 +647,7 @@ struct nfsd4_op {
struct nfsd4_clone clone;
struct nfsd4_copy copy;
struct nfsd4_offload_status offload_status;
+ struct nfsd4_copy_notify copy_notify;
struct nfsd4_seek seek;
} u;
struct nfs4_replay * replay;
diff --git a/fs/nsfs.c b/fs/nsfs.c
index f75767bd623a..b13bfd406820 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -55,7 +55,7 @@ static void nsfs_evict(struct inode *inode)
ns->ops->put(ns);
}
-static void *__ns_get_path(struct path *path, struct ns_common *ns)
+static int __ns_get_path(struct path *path, struct ns_common *ns)
{
struct vfsmount *mnt = nsfs_mnt;
struct dentry *dentry;
@@ -74,13 +74,13 @@ static void *__ns_get_path(struct path *path, struct ns_common *ns)
got_it:
path->mnt = mntget(mnt);
path->dentry = dentry;
- return NULL;
+ return 0;
slow:
rcu_read_unlock();
inode = new_inode_pseudo(mnt->mnt_sb);
if (!inode) {
ns->ops->put(ns);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
inode->i_ino = ns->inum;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
@@ -92,7 +92,7 @@ slow:
dentry = d_alloc_anon(mnt->mnt_sb);
if (!dentry) {
iput(inode);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
d_instantiate(dentry, inode);
dentry->d_fsdata = (void *)ns->ops;
@@ -101,23 +101,22 @@ slow:
d_delete(dentry); /* make sure ->d_prune() does nothing */
dput(dentry);
cpu_relax();
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
}
goto got_it;
}
-void *ns_get_path_cb(struct path *path, ns_get_path_helper_t *ns_get_cb,
+int ns_get_path_cb(struct path *path, ns_get_path_helper_t *ns_get_cb,
void *private_data)
{
- void *ret;
+ int ret;
do {
struct ns_common *ns = ns_get_cb(private_data);
if (!ns)
- return ERR_PTR(-ENOENT);
-
+ return -ENOENT;
ret = __ns_get_path(path, ns);
- } while (ret == ERR_PTR(-EAGAIN));
+ } while (ret == -EAGAIN);
return ret;
}
@@ -134,7 +133,7 @@ static struct ns_common *ns_get_path_task(void *private_data)
return args->ns_ops->get(args->task);
}
-void *ns_get_path(struct path *path, struct task_struct *task,
+int ns_get_path(struct path *path, struct task_struct *task,
const struct proc_ns_operations *ns_ops)
{
struct ns_get_path_task_args args = {
@@ -150,7 +149,7 @@ int open_related_ns(struct ns_common *ns,
{
struct path path = {};
struct file *f;
- void *err;
+ int err;
int fd;
fd = get_unused_fd_flags(O_CLOEXEC);
@@ -167,11 +166,11 @@ int open_related_ns(struct ns_common *ns,
}
err = __ns_get_path(&path, relative);
- } while (err == ERR_PTR(-EAGAIN));
+ } while (err == -EAGAIN);
- if (IS_ERR(err)) {
+ if (err) {
put_unused_fd(fd);
- return PTR_ERR(err);
+ return err;
}
f = dentry_open(&path, O_RDONLY, current_cred());
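
The nsfs conversion drops ERR_PTR()/PTR_ERR() in favor of plain ints because __ns_get_path never yields a real pointer, only NULL-or-errno. For reference, the error-pointer idiom being retired encodes errnos in the top page of the address space (userspace re-creation of the kernel macros):

#include <stdio.h>

/* Kernel-style error pointers: errnos encoded as near-top addresses. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *p = ERR_PTR(-11);			/* -EAGAIN */

	if (IS_ERR(p))
		printf("err = %ld\n", PTR_ERR(p));	/* err = -11 */
	return 0;
}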
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 6c7388430ad3..d4359a1df3d5 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2899,18 +2899,12 @@ int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
ia_valid |= ATTR_MTIME | ATTR_CTIME;
}
}
- if (ia_valid & ATTR_ATIME) {
- vi->i_atime = timestamp_truncate(attr->ia_atime,
- vi);
- }
- if (ia_valid & ATTR_MTIME) {
- vi->i_mtime = timestamp_truncate(attr->ia_mtime,
- vi);
- }
- if (ia_valid & ATTR_CTIME) {
- vi->i_ctime = timestamp_truncate(attr->ia_ctime,
- vi);
- }
+ if (ia_valid & ATTR_ATIME)
+ vi->i_atime = attr->ia_atime;
+ if (ia_valid & ATTR_MTIME)
+ vi->i_mtime = attr->ia_mtime;
+ if (ia_valid & ATTR_CTIME)
+ vi->i_ctime = attr->ia_ctime;
mark_inode_dirty(vi);
out:
return err;
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 5c424a099280..1ef24574f481 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -73,7 +73,7 @@ static void o2quo_fence_self(void)
"system by restarting ***\n");
emergency_restart();
break;
- };
+ }
}
/* Indicate that a timeout occurred on a heartbeat region write. The
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index 38b224372776..5e700b45d32d 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/..
-
obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 4de89af96abf..6abaded3ff6b 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -23,15 +23,15 @@
#include <linux/spinlock.h>
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#define MLOG_MASK_PREFIX ML_DLM
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
struct dlm_lock *lock);
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index aaf24548b02a..0463dce65bb2 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -688,10 +688,6 @@ struct dlm_begin_reco
__be32 pad2;
};
-
-#define BITS_PER_BYTE 8
-#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
-
struct dlm_query_join_request
{
u8 node_idx;
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index 965f45dbe17b..6051edc33aef 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -23,9 +23,9 @@
#include <linux/spinlock.h>
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
@@ -33,7 +33,7 @@
#include "dlmconvert.h"
#define MLOG_MASK_PREFIX ML_DLM
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
/* NOTE: __dlmconvert_master is the only function in here that
* needs a spinlock held on entry (res->spinlock) and it is the
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 4d0b452012b2..c5c6efba7b5e 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -17,9 +17,9 @@
#include <linux/debugfs.h>
#include <linux/export.h>
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
@@ -27,7 +27,7 @@
#include "dlmdebug.h"
#define MLOG_MASK_PREFIX ML_DLM
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
static int stringify_lockname(const char *lockname, int locklen, char *buf,
int len);
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index ee6f459f9770..357cfc702ce3 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -20,9 +20,9 @@
#include <linux/debugfs.h>
#include <linux/sched/signal.h>
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
@@ -30,7 +30,7 @@
#include "dlmdebug.h"
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
/*
* ocfs2 node maps are array of long int, which limits to send them freely
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index baff087f3863..83f0760e4fba 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -25,9 +25,9 @@
#include <linux/delay.h>
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
@@ -35,7 +35,7 @@
#include "dlmconvert.h"
#define MLOG_MASK_PREFIX ML_DLM
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
static struct kmem_cache *dlm_lock_cache;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 74b768ca1cd8..900f7e466d11 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -25,9 +25,9 @@
#include <linux/delay.h>
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
@@ -35,7 +35,7 @@
#include "dlmdebug.h"
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
struct dlm_master_list_entry *mle,
@@ -2554,8 +2554,6 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
if (!dlm_grab(dlm))
return -EINVAL;
- BUG_ON(target == O2NM_MAX_NODES);
-
name = res->lockname.name;
namelen = res->lockname.len;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 064ce5bbc3f6..4b566e88582f 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -26,16 +26,16 @@
#include <linux/delay.h>
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
@@ -1668,7 +1668,7 @@ static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
u8 nodenum, u8 *real_master)
{
- int ret = -EINVAL;
+ int ret;
struct dlm_master_requery req;
int status = DLM_LOCK_RES_OWNER_UNKNOWN;
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 61c51c268460..fd40c17cd022 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -25,16 +25,16 @@
#include <linux/delay.h>
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 3883633e82eb..dcb17ca8ae74 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -23,15 +23,15 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
-#include "cluster/heartbeat.h"
-#include "cluster/nodemanager.h"
-#include "cluster/tcp.h"
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#define MLOG_MASK_PREFIX ML_DLM
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
#define DLM_UNLOCK_FREE_LOCK 0x00000001
#define DLM_UNLOCK_CALL_AST 0x00000002
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
index a9874e441bd4..c7895f65be0e 100644
--- a/fs/ocfs2/dlmfs/Makefile
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/..
-
obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
ocfs2_dlmfs-objs := userdlm.o dlmfs.o
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 4f1668c81e1f..8e4f1ace467c 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -33,11 +33,11 @@
#include <linux/uaccess.h>
-#include "stackglue.h"
+#include "../stackglue.h"
#include "userdlm.h"
#define MLOG_MASK_PREFIX ML_DLMFS
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
static const struct super_operations dlmfs_ops;
diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
index 525b14ddfba5..3df5be25bfb1 100644
--- a/fs/ocfs2/dlmfs/userdlm.c
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -21,12 +21,12 @@
#include <linux/types.h>
#include <linux/crc32.h>
-#include "ocfs2_lockingver.h"
-#include "stackglue.h"
+#include "../ocfs2_lockingver.h"
+#include "../stackglue.h"
#include "userdlm.h"
#define MLOG_MASK_PREFIX ML_DLMFS
-#include "cluster/masklog.h"
+#include "../cluster/masklog.h"
static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index cda1027d0819..cb9e6a73bea9 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -570,7 +570,7 @@ void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
mlog_bug_on_msg(1, "type: %d\n", type);
ops = NULL; /* thanks, gcc */
break;
- };
+ }
ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
generation, res->l_name);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 9876db52913a..6cd5e4924e4d 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2101,17 +2101,15 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
struct buffer_head **di_bh,
int meta_level,
- int overwrite_io,
int write_sem,
int wait)
{
int ret = 0;
if (wait)
- ret = ocfs2_inode_lock(inode, NULL, meta_level);
+ ret = ocfs2_inode_lock(inode, di_bh, meta_level);
else
- ret = ocfs2_try_inode_lock(inode,
- overwrite_io ? NULL : di_bh, meta_level);
+ ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
if (ret < 0)
goto out;
@@ -2136,6 +2134,7 @@ static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
out_unlock:
brelse(*di_bh);
+ *di_bh = NULL;
ocfs2_inode_unlock(inode, meta_level);
out:
return ret;
@@ -2177,7 +2176,6 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
ret = ocfs2_inode_lock_for_extent_tree(inode,
&di_bh,
meta_level,
- overwrite_io,
write_sem,
wait);
if (ret < 0) {
@@ -2233,13 +2231,13 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
&di_bh,
meta_level,
write_sem);
+ meta_level = 1;
+ write_sem = 1;
ret = ocfs2_inode_lock_for_extent_tree(inode,
&di_bh,
meta_level,
- overwrite_io,
- 1,
+ write_sem,
wait);
- write_sem = 1;
if (ret < 0) {
if (ret != -EAGAIN)
mlog_errno(ret);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 3103ba7f97a2..bfe611ed1b1d 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -597,9 +597,11 @@ static inline void ocfs2_update_inode_fsync_trans(handle_t *handle,
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- oi->i_sync_tid = handle->h_transaction->t_tid;
- if (datasync)
- oi->i_datasync_tid = handle->h_transaction->t_tid;
+ if (!is_handle_aborted(handle)) {
+ oi->i_sync_tid = handle->h_transaction->t_tid;
+ if (datasync)
+ oi->i_datasync_tid = handle->h_transaction->t_tid;
+ }
}
#endif /* OCFS2_JOURNAL_H */
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 8ea51cf27b97..da65251ef815 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -586,8 +586,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
mlog_errno(status);
}
- oi->i_sync_tid = handle->h_transaction->t_tid;
- oi->i_datasync_tid = handle->h_transaction->t_tid;
+ ocfs2_update_inode_fsync_trans(handle, inode, 1);
leave:
if (status < 0) {
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 4180c3ef0a68..939df99d2dec 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -696,7 +696,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
ac, cl);
- if (IS_ERR(bg_bh) && (PTR_ERR(bg_bh) == -ENOSPC))
+ if (PTR_ERR(bg_bh) == -ENOSPC)
bg_bh = ocfs2_block_group_alloc_discontig(handle,
alloc_inode,
ac, cl);
diff --git a/fs/open.c b/fs/open.c
index b62f5c0923a8..0788b3715731 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -955,48 +955,83 @@ struct file *open_with_fake_path(const struct path *path, int flags,
}
EXPORT_SYMBOL(open_with_fake_path);
-static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
+#define WILL_CREATE(flags) (flags & (O_CREAT | __O_TMPFILE))
+#define O_PATH_FLAGS (O_DIRECTORY | O_NOFOLLOW | O_PATH | O_CLOEXEC)
+
+inline struct open_how build_open_how(int flags, umode_t mode)
+{
+ struct open_how how = {
+ .flags = flags & VALID_OPEN_FLAGS,
+ .mode = mode & S_IALLUGO,
+ };
+
+ /* O_PATH beats everything else. */
+ if (how.flags & O_PATH)
+ how.flags &= O_PATH_FLAGS;
+ /* Modes should only be set for create-like flags. */
+ if (!WILL_CREATE(how.flags))
+ how.mode = 0;
+ return how;
+}
+
+inline int build_open_flags(const struct open_how *how, struct open_flags *op)
{
+ int flags = how->flags;
int lookup_flags = 0;
int acc_mode = ACC_MODE(flags);
+ /* Must never be set by userspace */
+ flags &= ~(FMODE_NONOTIFY | O_CLOEXEC);
+
/*
- * Clear out all open flags we don't know about so that we don't report
- * them in fcntl(F_GETFD) or similar interfaces.
+ * Older syscalls implicitly clear all of the invalid flags or argument
+ * values before calling build_open_flags(), but openat2(2) checks all
+ * of its arguments.
*/
- flags &= VALID_OPEN_FLAGS;
+ if (flags & ~VALID_OPEN_FLAGS)
+ return -EINVAL;
+ if (how->resolve & ~VALID_RESOLVE_FLAGS)
+ return -EINVAL;
- if (flags & (O_CREAT | __O_TMPFILE))
- op->mode = (mode & S_IALLUGO) | S_IFREG;
- else
+ /* Deal with the mode. */
+ if (WILL_CREATE(flags)) {
+ if (how->mode & ~S_IALLUGO)
+ return -EINVAL;
+ op->mode = how->mode | S_IFREG;
+ } else {
+ if (how->mode != 0)
+ return -EINVAL;
op->mode = 0;
-
- /* Must never be set by userspace */
- flags &= ~FMODE_NONOTIFY & ~O_CLOEXEC;
+ }
/*
- * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
- * check for O_DSYNC if the need any syncing at all we enforce it's
- * always set instead of having to deal with possibly weird behaviour
- * for malicious applications setting only __O_SYNC.
+ * In order to ensure programs get explicit errors when trying to use
+ * O_TMPFILE on old kernels, O_TMPFILE is implemented such that it
+ * looks like (O_DIRECTORY|O_RDWR & ~O_CREAT) to old kernels. But we
+ * have to require userspace to explicitly set it.
*/
- if (flags & __O_SYNC)
- flags |= O_DSYNC;
-
if (flags & __O_TMPFILE) {
if ((flags & O_TMPFILE_MASK) != O_TMPFILE)
return -EINVAL;
if (!(acc_mode & MAY_WRITE))
return -EINVAL;
- } else if (flags & O_PATH) {
- /*
- * If we have O_PATH in the open flag. Then we
- * cannot have anything other than the below set of flags
- */
- flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH;
+ }
+ if (flags & O_PATH) {
+ /* O_PATH only permits certain other flags to be set. */
+ if (flags & ~O_PATH_FLAGS)
+ return -EINVAL;
acc_mode = 0;
}
+ /*
+ * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
+ * check for O_DSYNC if they need any syncing at all, we enforce that
+ * it is always set instead of having to deal with possibly weird
+ * behaviour for malicious applications setting only __O_SYNC.
+ */
+ if (flags & __O_SYNC)
+ flags |= O_DSYNC;
+
op->open_flag = flags;
/* O_TRUNC implies we need access checks for write permissions */
@@ -1022,6 +1057,18 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
lookup_flags |= LOOKUP_DIRECTORY;
if (!(flags & O_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
+
+ if (how->resolve & RESOLVE_NO_XDEV)
+ lookup_flags |= LOOKUP_NO_XDEV;
+ if (how->resolve & RESOLVE_NO_MAGICLINKS)
+ lookup_flags |= LOOKUP_NO_MAGICLINKS;
+ if (how->resolve & RESOLVE_NO_SYMLINKS)
+ lookup_flags |= LOOKUP_NO_SYMLINKS;
+ if (how->resolve & RESOLVE_BENEATH)
+ lookup_flags |= LOOKUP_BENEATH;
+ if (how->resolve & RESOLVE_IN_ROOT)
+ lookup_flags |= LOOKUP_IN_ROOT;
+
op->lookup_flags = lookup_flags;
return 0;
}
@@ -1040,8 +1087,11 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
struct file *file_open_name(struct filename *name, int flags, umode_t mode)
{
struct open_flags op;
- int err = build_open_flags(flags, mode, &op);
- return err ? ERR_PTR(err) : do_filp_open(AT_FDCWD, name, &op);
+ struct open_how how = build_open_how(flags, mode);
+ int err = build_open_flags(&how, &op);
+ if (err)
+ return ERR_PTR(err);
+ return do_filp_open(AT_FDCWD, name, &op);
}
/**
@@ -1072,17 +1122,19 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
const char *filename, int flags, umode_t mode)
{
struct open_flags op;
- int err = build_open_flags(flags, mode, &op);
+ struct open_how how = build_open_how(flags, mode);
+ int err = build_open_flags(&how, &op);
if (err)
return ERR_PTR(err);
return do_file_open_root(dentry, mnt, filename, &op);
}
EXPORT_SYMBOL(file_open_root);
-long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
+static long do_sys_openat2(int dfd, const char __user *filename,
+ struct open_how *how)
{
struct open_flags op;
- int fd = build_open_flags(flags, mode, &op);
+ int fd = build_open_flags(how, &op);
struct filename *tmp;
if (fd)
@@ -1092,7 +1144,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
if (IS_ERR(tmp))
return PTR_ERR(tmp);
- fd = get_unused_fd_flags(flags);
+ fd = get_unused_fd_flags(how->flags);
if (fd >= 0) {
struct file *f = do_filp_open(dfd, tmp, &op);
if (IS_ERR(f)) {
@@ -1107,12 +1159,16 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
return fd;
}
-SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode)
+long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
- if (force_o_largefile())
- flags |= O_LARGEFILE;
+ struct open_how how = build_open_how(flags, mode);
+ return do_sys_openat2(dfd, filename, &how);
+}
- return do_sys_open(AT_FDCWD, filename, flags, mode);
+
+SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode)
+{
+ return ksys_open(filename, flags, mode);
}
SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
@@ -1120,10 +1176,32 @@ SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
{
if (force_o_largefile())
flags |= O_LARGEFILE;
-
return do_sys_open(dfd, filename, flags, mode);
}
+SYSCALL_DEFINE4(openat2, int, dfd, const char __user *, filename,
+ struct open_how __user *, how, size_t, usize)
+{
+ int err;
+ struct open_how tmp;
+
+ BUILD_BUG_ON(sizeof(struct open_how) < OPEN_HOW_SIZE_VER0);
+ BUILD_BUG_ON(sizeof(struct open_how) != OPEN_HOW_SIZE_LATEST);
+
+ if (unlikely(usize < OPEN_HOW_SIZE_VER0))
+ return -EINVAL;
+
+ err = copy_struct_from_user(&tmp, sizeof(tmp), how, usize);
+ if (err)
+ return err;
+
+ /* O_LARGEFILE is only allowed for non-O_PATH. */
+ if (!(tmp.flags & O_PATH) && force_o_largefile())
+ tmp.flags |= O_LARGEFILE;
+
+ return do_sys_openat2(dfd, filename, &tmp);
+}
+
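The openat2(2) syscall above takes an extensible struct open_how: userspace passes sizeof(struct open_how), and copy_struct_from_user() accepts older (smaller) or newer (larger, zero-tailed) layouts. Below is a hedged userspace sketch of calling it, assuming kernel headers that ship <linux/openat2.h> and a libc that defines SYS_openat2; the dirfd and path are purely illustrative:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>

int main(void)
{
	struct open_how how;

	memset(&how, 0, sizeof(how));	/* unknown fields must be zero */
	how.flags = O_RDONLY;
	/* Treat dirfd as the root and refuse symlinks during resolution. */
	how.resolve = RESOLVE_IN_ROOT | RESOLVE_NO_SYMLINKS;

	int dirfd = open("/", O_PATH | O_DIRECTORY);
	if (dirfd < 0)
		return 1;

	int fd = syscall(SYS_openat2, dirfd, "etc/passwd", &how, sizeof(how));
	if (fd < 0)
		perror("openat2");
	else
		close(fd);
	close(dirfd);
	return 0;
}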
#ifdef CONFIG_COMPAT
/*
* Exactly like sys_open(), except that it doesn't set the
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 25543a966c48..29eaa4544372 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -273,6 +273,7 @@ static void *help_start(struct seq_file *m, loff_t *pos)
static void *help_next(struct seq_file *m, void *v, loff_t *pos)
{
+ (*pos)++;
gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n");
return NULL;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 6220642fe113..9fc47c2e078d 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -24,7 +24,7 @@
static int ovl_ccup_set(const char *buf, const struct kernel_param *param)
{
- pr_warn("overlayfs: \"check_copy_up\" module option is obsolete\n");
+ pr_warn("\"check_copy_up\" module option is obsolete\n");
return 0;
}
@@ -123,6 +123,9 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
loff_t old_pos = 0;
loff_t new_pos = 0;
loff_t cloned;
+ loff_t data_pos = -1;
+ loff_t hole_len;
+ bool skip_hole = false;
int error = 0;
if (len == 0)
@@ -144,7 +147,11 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
goto out;
/* Couldn't clone, so now we try to copy the data */
- /* FIXME: copy up sparse files efficiently */
+ /* Check if lower fs supports seek operation */
+ if (old_file->f_mode & FMODE_LSEEK &&
+ old_file->f_op->llseek)
+ skip_hole = true;
+
while (len) {
size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
long bytes;
@@ -157,6 +164,36 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
break;
}
+ /*
+ * Filling zeroes for a hole wastes disk space and slows
+ * down the copy-up, so we optimize holes away during
+ * copy-up. This relies on the SEEK_DATA implementation
+ * in the lower fs; if the lower fs does not support it,
+ * copy-up behaves as before.
+ *
+ * The hole detection logic works as follows:
+ * when the next data position is larger than the current
+ * position, we skip the intervening hole; otherwise we
+ * copy data in chunks of OVL_COPY_UP_CHUNK_SIZE. This may
+ * not recognize every kind of hole and sometimes skips
+ * only part of a hole, but it is good enough for most
+ * use cases.
+ */
+
+ if (skip_hole && data_pos < old_pos) {
+ data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA);
+ if (data_pos > old_pos) {
+ hole_len = data_pos - old_pos;
+ len -= hole_len;
+ old_pos = new_pos = data_pos;
+ continue;
+ } else if (data_pos == -ENXIO) {
+ break;
+ } else if (data_pos < 0) {
+ skip_hole = false;
+ }
+ }
+
bytes = do_splice_direct(old_file, &old_pos,
new_file, &new_pos,
this_len, SPLICE_F_MOVE);
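A hedged userspace analogue of the hole-skipping loop above may make the flow clearer. The helper name and buffer size are illustrative, and the kernel code uses vfs_llseek(SEEK_DATA) plus do_splice_direct() rather than lseek()/read()/write():

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

static int copy_skip_holes(int in_fd, int out_fd)
{
	char buf[64 * 1024];
	off_t pos = 0;

	for (;;) {
		off_t data = lseek(in_fd, pos, SEEK_DATA);

		if (data == (off_t)-1) {
			if (errno == ENXIO)	/* hole runs to EOF: done */
				return 0;
			data = pos;	/* SEEK_DATA unsupported: plain copy */
		}
		/* Recreate the skipped hole by seeking the output forward. */
		if (lseek(in_fd, data, SEEK_SET) == (off_t)-1 ||
		    lseek(out_fd, data, SEEK_SET) == (off_t)-1)
			return -1;

		ssize_t n = read(in_fd, buf, sizeof(buf));
		if (n < 0)
			return -1;
		if (n == 0)	/* end of file */
			return 0;
		if (write(out_fd, buf, (size_t)n) != n)
			return -1;
		pos = data + n;
	}
}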
@@ -480,7 +517,7 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
}
inode_lock(temp->d_inode);
- if (c->metacopy)
+ if (S_ISREG(c->stat.mode))
err = ovl_set_size(temp, &c->stat);
if (!err)
err = ovl_set_attr(temp, &c->stat);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 29abdb1d3b5c..8e57d5372b8f 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -35,7 +35,7 @@ int ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
dput(wdentry);
if (err) {
- pr_err("overlayfs: cleanup of '%pd2' failed (%i)\n",
+ pr_err("cleanup of '%pd2' failed (%i)\n",
wdentry, err);
}
@@ -53,7 +53,7 @@ static struct dentry *ovl_lookup_temp(struct dentry *workdir)
temp = lookup_one_len(name, workdir, strlen(name));
if (!IS_ERR(temp) && temp->d_inode) {
- pr_err("overlayfs: workdir/%s already exists\n", name);
+ pr_err("workdir/%s already exists\n", name);
dput(temp);
temp = ERR_PTR(-EIO);
}
@@ -134,7 +134,7 @@ static int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry,
d = lookup_one_len(dentry->d_name.name, dentry->d_parent,
dentry->d_name.len);
if (IS_ERR(d)) {
- pr_warn("overlayfs: failed lookup after mkdir (%pd2, err=%i).\n",
+ pr_warn("failed lookup after mkdir (%pd2, err=%i).\n",
dentry, err);
return PTR_ERR(d);
}
@@ -267,7 +267,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
d_instantiate(dentry, inode);
if (inode != oip.newinode) {
- pr_warn_ratelimited("overlayfs: newly created inode found in cache (%pd2)\n",
+ pr_warn_ratelimited("newly created inode found in cache (%pd2)\n",
dentry);
}
@@ -1009,7 +1009,7 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
spin_unlock(&dentry->d_lock);
} else {
kfree(redirect);
- pr_warn_ratelimited("overlayfs: failed to set redirect (%i)\n",
+ pr_warn_ratelimited("failed to set redirect (%i)\n",
err);
/* Fall back to userspace copy-up */
err = -EXDEV;
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 70e55588aedc..6f54d70cef27 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -30,7 +30,7 @@ static int ovl_encode_maybe_copy_up(struct dentry *dentry)
}
if (err) {
- pr_warn_ratelimited("overlayfs: failed to copy up on encode (%pd2, err=%i)\n",
+ pr_warn_ratelimited("failed to copy up on encode (%pd2, err=%i)\n",
dentry, err);
}
@@ -244,7 +244,7 @@ out:
return err;
fail:
- pr_warn_ratelimited("overlayfs: failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n",
+ pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n",
dentry, err, buflen, fh ? (int)fh->fb.len : 0,
fh ? fh->fb.type : 0);
goto out;
@@ -358,7 +358,7 @@ static struct dentry *ovl_dentry_real_at(struct dentry *dentry, int idx)
*/
static struct dentry *ovl_lookup_real_one(struct dentry *connected,
struct dentry *real,
- struct ovl_layer *layer)
+ const struct ovl_layer *layer)
{
struct inode *dir = d_inode(connected);
struct dentry *this, *parent = NULL;
@@ -406,7 +406,7 @@ out:
return this;
fail:
- pr_warn_ratelimited("overlayfs: failed to lookup one by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
+ pr_warn_ratelimited("failed to lookup one by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
real, layer->idx, connected, err);
this = ERR_PTR(err);
goto out;
@@ -414,17 +414,16 @@ fail:
static struct dentry *ovl_lookup_real(struct super_block *sb,
struct dentry *real,
- struct ovl_layer *layer);
+ const struct ovl_layer *layer);
/*
* Lookup an indexed or hashed overlay dentry by real inode.
*/
static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
struct dentry *real,
- struct ovl_layer *layer)
+ const struct ovl_layer *layer)
{
struct ovl_fs *ofs = sb->s_fs_info;
- struct ovl_layer upper_layer = { .mnt = ofs->upper_mnt };
struct dentry *index = NULL;
struct dentry *this = NULL;
struct inode *inode;
@@ -466,7 +465,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
* recursive call walks back from indexed upper to the topmost
* connected/hashed upper parent (or up to root).
*/
- this = ovl_lookup_real(sb, upper, &upper_layer);
+ this = ovl_lookup_real(sb, upper, &ofs->layers[0]);
dput(upper);
}
@@ -487,7 +486,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
*/
static struct dentry *ovl_lookup_real_ancestor(struct super_block *sb,
struct dentry *real,
- struct ovl_layer *layer)
+ const struct ovl_layer *layer)
{
struct dentry *next, *parent = NULL;
struct dentry *ancestor = ERR_PTR(-EIO);
@@ -540,7 +539,7 @@ static struct dentry *ovl_lookup_real_ancestor(struct super_block *sb,
*/
static struct dentry *ovl_lookup_real(struct super_block *sb,
struct dentry *real,
- struct ovl_layer *layer)
+ const struct ovl_layer *layer)
{
struct dentry *connected;
int err = 0;
@@ -631,7 +630,7 @@ static struct dentry *ovl_lookup_real(struct super_block *sb,
return connected;
fail:
- pr_warn_ratelimited("overlayfs: failed to lookup by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
+ pr_warn_ratelimited("failed to lookup by real (%pd2, layer=%d, connected=%pd2, err=%i)\n",
real, layer->idx, connected, err);
dput(connected);
return ERR_PTR(err);
@@ -646,8 +645,7 @@ static struct dentry *ovl_get_dentry(struct super_block *sb,
struct dentry *index)
{
struct ovl_fs *ofs = sb->s_fs_info;
- struct ovl_layer upper_layer = { .mnt = ofs->upper_mnt };
- struct ovl_layer *layer = upper ? &upper_layer : lowerpath->layer;
+ const struct ovl_layer *layer = upper ? &ofs->layers[0] : lowerpath->layer;
struct dentry *real = upper ?: (index ?: lowerpath->dentry);
/*
@@ -822,7 +820,7 @@ out:
return dentry;
out_err:
- pr_warn_ratelimited("overlayfs: failed to decode file handle (len=%d, type=%d, flags=%x, err=%i)\n",
+ pr_warn_ratelimited("failed to decode file handle (len=%d, type=%d, flags=%x, err=%i)\n",
fh_len, fh_type, flags, err);
dentry = ERR_PTR(err);
goto out;
@@ -831,7 +829,7 @@ out_err:
static struct dentry *ovl_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
- pr_warn_ratelimited("overlayfs: connectable file handles not supported; use 'no_subtree_check' exportfs option.\n");
+ pr_warn_ratelimited("connectable file handles not supported; use 'no_subtree_check' exportfs option.\n");
return ERR_PTR(-EACCES);
}
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index e235a635d9ec..a5317216de73 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -9,8 +9,19 @@
#include <linux/xattr.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
+#include <linux/splice.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
#include "overlayfs.h"
+struct ovl_aio_req {
+ struct kiocb iocb;
+ struct kiocb *orig_iocb;
+ struct fd fd;
+};
+
+static struct kmem_cache *ovl_aio_request_cachep;
+
static char ovl_whatisit(struct inode *inode, struct inode *realinode)
{
if (realinode != ovl_inode_upper(inode))
@@ -146,7 +157,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
struct inode *inode = file_inode(file);
struct fd real;
const struct cred *old_cred;
- ssize_t ret;
+ loff_t ret;
/*
* The two special cases below do not need to involve real fs,
@@ -171,7 +182,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
* limitations that are more strict than ->s_maxbytes for specific
* files, so we use the real file to perform seeks.
*/
- inode_lock(inode);
+ ovl_inode_lock(inode);
real.file->f_pos = file->f_pos;
old_cred = ovl_override_creds(inode->i_sb);
@@ -179,7 +190,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
revert_creds(old_cred);
file->f_pos = real.file->f_pos;
- inode_unlock(inode);
+ ovl_inode_unlock(inode);
fdput(real);
@@ -225,6 +236,33 @@ static rwf_t ovl_iocb_to_rwf(struct kiocb *iocb)
return flags;
}
+static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
+{
+ struct kiocb *iocb = &aio_req->iocb;
+ struct kiocb *orig_iocb = aio_req->orig_iocb;
+
+ if (iocb->ki_flags & IOCB_WRITE) {
+ struct inode *inode = file_inode(orig_iocb->ki_filp);
+
+ file_end_write(iocb->ki_filp);
+ ovl_copyattr(ovl_inode_real(inode), inode);
+ }
+
+ orig_iocb->ki_pos = iocb->ki_pos;
+ fdput(aio_req->fd);
+ kmem_cache_free(ovl_aio_request_cachep, aio_req);
+}
+
+static void ovl_aio_rw_complete(struct kiocb *iocb, long res, long res2)
+{
+ struct ovl_aio_req *aio_req = container_of(iocb,
+ struct ovl_aio_req, iocb);
+ struct kiocb *orig_iocb = aio_req->orig_iocb;
+
+ ovl_aio_cleanup_handler(aio_req);
+ orig_iocb->ki_complete(orig_iocb, res, res2);
+}
+
static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
@@ -240,10 +278,28 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
return ret;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
- ovl_iocb_to_rwf(iocb));
+ if (is_sync_kiocb(iocb)) {
+ ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
+ ovl_iocb_to_rwf(iocb));
+ } else {
+ struct ovl_aio_req *aio_req;
+
+ ret = -ENOMEM;
+ aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
+ if (!aio_req)
+ goto out;
+
+ aio_req->fd = real;
+ real.flags = 0;
+ aio_req->orig_iocb = iocb;
+ kiocb_clone(&aio_req->iocb, iocb, real.file);
+ aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+ ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
+ if (ret != -EIOCBQUEUED)
+ ovl_aio_cleanup_handler(aio_req);
+ }
+out:
revert_creds(old_cred);
-
ovl_file_accessed(file);
fdput(real);
@@ -274,15 +330,33 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
goto out_unlock;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- file_start_write(real.file);
- ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
- ovl_iocb_to_rwf(iocb));
- file_end_write(real.file);
+ if (is_sync_kiocb(iocb)) {
+ file_start_write(real.file);
+ ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
+ ovl_iocb_to_rwf(iocb));
+ file_end_write(real.file);
+ /* Update size */
+ ovl_copyattr(ovl_inode_real(inode), inode);
+ } else {
+ struct ovl_aio_req *aio_req;
+
+ ret = -ENOMEM;
+ aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
+ if (!aio_req)
+ goto out;
+
+ file_start_write(real.file);
+ aio_req->fd = real;
+ real.flags = 0;
+ aio_req->orig_iocb = iocb;
+ kiocb_clone(&aio_req->iocb, iocb, real.file);
+ aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+ ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);
+ if (ret != -EIOCBQUEUED)
+ ovl_aio_cleanup_handler(aio_req);
+ }
+out:
revert_creds(old_cred);
-
- /* Update size */
- ovl_copyattr(ovl_inode_real(inode), inode);
-
fdput(real);
out_unlock:
@@ -291,6 +365,48 @@ out_unlock:
return ret;
}
+static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ ssize_t ret;
+ struct fd real;
+ const struct cred *old_cred;
+
+ ret = ovl_real_fdget(in, &real);
+ if (ret)
+ return ret;
+
+ old_cred = ovl_override_creds(file_inode(in)->i_sb);
+ ret = generic_file_splice_read(real.file, ppos, pipe, len, flags);
+ revert_creds(old_cred);
+
+ ovl_file_accessed(in);
+ fdput(real);
+ return ret;
+}
+
+static ssize_t
+ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
+{
+ struct fd real;
+ const struct cred *old_cred;
+ ssize_t ret;
+
+ ret = ovl_real_fdget(out, &real);
+ if (ret)
+ return ret;
+
+ old_cred = ovl_override_creds(file_inode(out)->i_sb);
+ ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
+ revert_creds(old_cred);
+
+ ovl_file_accessed(out);
+ fdput(real);
+ return ret;
+}
+
static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct fd real;
@@ -647,7 +763,25 @@ const struct file_operations ovl_file_operations = {
.fadvise = ovl_fadvise,
.unlocked_ioctl = ovl_ioctl,
.compat_ioctl = ovl_compat_ioctl,
+ .splice_read = ovl_splice_read,
+ .splice_write = ovl_splice_write,
.copy_file_range = ovl_copy_file_range,
.remap_file_range = ovl_remap_file_range,
};
+
+int __init ovl_aio_request_cache_init(void)
+{
+ ovl_aio_request_cachep = kmem_cache_create("ovl_aio_req",
+ sizeof(struct ovl_aio_req),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!ovl_aio_request_cachep)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void ovl_aio_request_cache_destroy(void)
+{
+ kmem_cache_destroy(ovl_aio_request_cachep);
+}
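These init/destroy helpers need call sites in the module init/exit path; a hedged sketch of how they are presumably wired up in fs/overlayfs/super.c (the actual call sites are outside this hunk, and the error handling there may differ):

static int __init ovl_init(void)
{
	int err;

	err = ovl_aio_request_cache_init();	/* create "ovl_aio_req" slab */
	if (err)
		return err;

	err = register_filesystem(&ovl_fs_type);
	if (err)
		ovl_aio_request_cache_destroy();	/* undo on failure */

	return err;
}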
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index b045cf1826fc..79e8994e3bc1 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -75,10 +75,9 @@ out:
return err;
}
-static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
- struct ovl_layer *lower_layer)
+static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
{
- bool samefs = ovl_same_sb(dentry->d_sb);
+ bool samefs = ovl_same_fs(dentry->d_sb);
unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
if (samefs) {
@@ -100,12 +99,10 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
* persistent for a given layer configuration.
*/
if (stat->ino >> shift) {
- pr_warn_ratelimited("overlayfs: inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
+ pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
dentry, stat->ino, xinobits);
} else {
- if (lower_layer)
- stat->ino |= ((u64)lower_layer->fsid) << shift;
-
+ stat->ino |= ((u64)fsid) << shift;
stat->dev = dentry->d_sb->s_dev;
return 0;
}
@@ -124,15 +121,14 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
*/
stat->dev = dentry->d_sb->s_dev;
stat->ino = dentry->d_inode->i_ino;
- } else if (lower_layer && lower_layer->fsid) {
+ } else {
/*
* For non-samefs setup, if we cannot map all layers st_ino
* to a unified address space, we need to make sure that st_dev
- * is unique per lower fs. Upper layer uses real st_dev and
- * lower layers use the unique anonymous bdev assigned to the
- * lower fs.
+ * is unique per underlying fs, so we use the unique anonymous
+ * bdev assigned to the underlying fs.
*/
- stat->dev = lower_layer->fs->pseudo_dev;
+ stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
}
return 0;
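The fsid-in-high-bits packing used here and in ovl_fill_inode() is easy to see in isolation; a standalone sketch with illustrative names and values:

#include <stdint.h>
#include <stdio.h>

/* Store fsid in the high bits that the real inode number leaves unused. */
static uint64_t xino_pack(uint64_t real_ino, int fsid, int xinobits)
{
	int shift = 64 - xinobits;

	if (real_ino >> shift)	/* ino too big: fall back to the raw ino */
		return real_ino;
	return real_ino | ((uint64_t)fsid << shift);
}

int main(void)
{
	/* e.g. a lower fs with 32-bit inode numbers leaves 32 xino bits */
	printf("%#llx\n", (unsigned long long)xino_pack(12345, 2, 32));
	return 0;
}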
@@ -146,8 +142,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
struct path realpath;
const struct cred *old_cred;
bool is_dir = S_ISDIR(dentry->d_inode->i_mode);
- bool samefs = ovl_same_sb(dentry->d_sb);
- struct ovl_layer *lower_layer = NULL;
+ int fsid = 0;
int err;
bool metacopy_blocks = false;
@@ -168,9 +163,9 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
* If lower filesystem supports NFS file handles, this also guaranties
* persistent st_ino across mount cycle.
*/
- if (!is_dir || samefs || ovl_xino_bits(dentry->d_sb)) {
+ if (!is_dir || ovl_same_dev(dentry->d_sb)) {
if (!OVL_TYPE_UPPER(type)) {
- lower_layer = ovl_layer_lower(dentry);
+ fsid = ovl_layer_lower(dentry)->fsid;
} else if (OVL_TYPE_ORIGIN(type)) {
struct kstat lowerstat;
u32 lowermask = STATX_INO | STATX_BLOCKS |
@@ -200,14 +195,8 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
(!ovl_verify_lower(dentry->d_sb) &&
(is_dir || lowerstat.nlink == 1))) {
- lower_layer = ovl_layer_lower(dentry);
- /*
- * Cannot use origin st_dev;st_ino because
- * origin inode content may differ from overlay
- * inode content.
- */
- if (samefs || lower_layer->fsid)
- stat->ino = lowerstat.ino;
+ fsid = ovl_layer_lower(dentry)->fsid;
+ stat->ino = lowerstat.ino;
}
/*
@@ -241,7 +230,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
}
}
- err = ovl_map_dev_ino(dentry, stat, lower_layer);
+ err = ovl_map_dev_ino(dentry, stat, fsid);
if (err)
goto out;
@@ -527,6 +516,27 @@ static const struct address_space_operations ovl_aops = {
* [...] &ovl_i_mutex_dir_key[depth] (stack_depth=2)
* [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1)
* [...] &type->i_mutex_dir_key (stack_depth=0)
+ *
+ * Locking order w.r.t ovl_want_write() is important for nested overlayfs.
+ *
+ * This chain is valid:
+ * - inode->i_rwsem (inode_lock[2])
+ * - upper_mnt->mnt_sb->s_writers (ovl_want_write[0])
+ * - OVL_I(inode)->lock (ovl_inode_lock[2])
+ * - OVL_I(lowerinode)->lock (ovl_inode_lock[1])
+ *
+ * And this chain is valid:
+ * - inode->i_rwsem (inode_lock[2])
+ * - OVL_I(inode)->lock (ovl_inode_lock[2])
+ * - lowerinode->i_rwsem (inode_lock[1])
+ * - OVL_I(lowerinode)->lock (ovl_inode_lock[1])
+ *
+ * But lowerinode->i_rwsem SHOULD NOT be acquired while ovl_want_write() is
+ * held, because it is in reverse order of the non-nested case using the same
+ * upper fs:
+ * - inode->i_rwsem (inode_lock[1])
+ * - upper_mnt->mnt_sb->s_writers (ovl_want_write[0])
+ * - OVL_I(inode)->lock (ovl_inode_lock[1])
*/
#define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH
@@ -565,7 +575,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
* ovl_new_inode(), ino arg is 0, so i_ino will be updated to real
* upper inode i_ino on ovl_inode_init() or ovl_inode_update().
*/
- if (ovl_same_sb(inode->i_sb) || xinobits) {
+ if (ovl_same_dev(inode->i_sb)) {
inode->i_ino = ino;
if (xinobits && fsid && !(ino >> (64 - xinobits)))
inode->i_ino |= (unsigned long)fsid << (64 - xinobits);
@@ -698,7 +708,7 @@ unsigned int ovl_get_nlink(struct dentry *lowerdentry,
return nlink;
fail:
- pr_warn_ratelimited("overlayfs: failed to get index nlink (%pd2, err=%i)\n",
+ pr_warn_ratelimited("failed to get index nlink (%pd2, err=%i)\n",
upperdentry, err);
return fallback;
}
@@ -969,7 +979,7 @@ out:
return inode;
out_err:
- pr_warn_ratelimited("overlayfs: failed to get inode (%i)\n", err);
+ pr_warn_ratelimited("failed to get inode (%i)\n", err);
inode = ERR_PTR(err);
goto out;
}
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 76ff66339173..ed9e129fae04 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -141,10 +141,10 @@ out:
return NULL;
fail:
- pr_warn_ratelimited("overlayfs: failed to get origin (%i)\n", res);
+ pr_warn_ratelimited("failed to get origin (%i)\n", res);
goto out;
invalid:
- pr_warn_ratelimited("overlayfs: invalid origin (%*phN)\n", res, fh);
+ pr_warn_ratelimited("invalid origin (%*phN)\n", res, fh);
goto out;
}
@@ -322,16 +322,16 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
struct dentry *origin = NULL;
int i;
- for (i = 0; i < ofs->numlower; i++) {
+ for (i = 1; i < ofs->numlayer; i++) {
/*
* If lower fs uuid is not unique among lower fs we cannot match
* fh->uuid to layer.
*/
- if (ofs->lower_layers[i].fsid &&
- ofs->lower_layers[i].fs->bad_uuid)
+ if (ofs->layers[i].fsid &&
+ ofs->layers[i].fs->bad_uuid)
continue;
- origin = ovl_decode_real_fh(fh, ofs->lower_layers[i].mnt,
+ origin = ovl_decode_real_fh(fh, ofs->layers[i].mnt,
connected);
if (origin)
break;
@@ -354,13 +354,13 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
}
**stackp = (struct ovl_path){
.dentry = origin,
- .layer = &ofs->lower_layers[i]
+ .layer = &ofs->layers[i]
};
return 0;
invalid:
- pr_warn_ratelimited("overlayfs: invalid origin (%pd2, ftype=%x, origin ftype=%x).\n",
+ pr_warn_ratelimited("invalid origin (%pd2, ftype=%x, origin ftype=%x).\n",
upperdentry, d_inode(upperdentry)->i_mode & S_IFMT,
d_inode(origin)->i_mode & S_IFMT);
dput(origin);
@@ -449,7 +449,7 @@ out:
fail:
inode = d_inode(real);
- pr_warn_ratelimited("overlayfs: failed to verify %s (%pd2, ino=%lu, err=%i)\n",
+ pr_warn_ratelimited("failed to verify %s (%pd2, ino=%lu, err=%i)\n",
is_upper ? "upper" : "origin", real,
inode ? inode->i_ino : 0, err);
goto out;
@@ -475,7 +475,7 @@ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index)
return upper ?: ERR_PTR(-ESTALE);
if (!d_is_dir(upper)) {
- pr_warn_ratelimited("overlayfs: invalid index upper (%pd2, upper=%pd2).\n",
+ pr_warn_ratelimited("invalid index upper (%pd2, upper=%pd2).\n",
index, upper);
dput(upper);
return ERR_PTR(-EIO);
@@ -589,12 +589,12 @@ out:
return err;
fail:
- pr_warn_ratelimited("overlayfs: failed to verify index (%pd2, ftype=%x, err=%i)\n",
+ pr_warn_ratelimited("failed to verify index (%pd2, ftype=%x, err=%i)\n",
index, d_inode(index)->i_mode & S_IFMT, err);
goto out;
orphan:
- pr_warn_ratelimited("overlayfs: orphan index entry (%pd2, ftype=%x, nlink=%u)\n",
+ pr_warn_ratelimited("orphan index entry (%pd2, ftype=%x, nlink=%u)\n",
index, d_inode(index)->i_mode & S_IFMT,
d_inode(index)->i_nlink);
err = -ENOENT;
@@ -696,7 +696,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
index = NULL;
goto out;
}
- pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
+ pr_warn_ratelimited("failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
"overlayfs: mount with '-o index=off' to disable inodes index.\n",
d_inode(origin)->i_ino, name.len, name.name,
err);
@@ -723,13 +723,13 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
* unlinked, which means that finding a lower origin on lookup
* whose index is a whiteout should be treated as an error.
*/
- pr_warn_ratelimited("overlayfs: bad index found (index=%pd2, ftype=%x, origin ftype=%x).\n",
+ pr_warn_ratelimited("bad index found (index=%pd2, ftype=%x, origin ftype=%x).\n",
index, d_inode(index)->i_mode & S_IFMT,
d_inode(origin)->i_mode & S_IFMT);
goto fail;
} else if (is_dir && verify) {
if (!upper) {
- pr_warn_ratelimited("overlayfs: suspected uncovered redirected dir found (origin=%pd2, index=%pd2).\n",
+ pr_warn_ratelimited("suspected uncovered redirected dir found (origin=%pd2, index=%pd2).\n",
origin, index);
goto fail;
}
@@ -738,7 +738,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
err = ovl_verify_upper(index, upper, false);
if (err) {
if (err == -ESTALE) {
- pr_warn_ratelimited("overlayfs: suspected multiply redirected dir found (upper=%pd2, origin=%pd2, index=%pd2).\n",
+ pr_warn_ratelimited("suspected multiply redirected dir found (upper=%pd2, origin=%pd2, index=%pd2).\n",
upper, origin, index);
}
goto fail;
@@ -885,7 +885,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
if (!d.stop && poe->numlower) {
err = -ENOMEM;
- stack = kcalloc(ofs->numlower, sizeof(struct ovl_path),
+ stack = kcalloc(ofs->numlayer - 1, sizeof(struct ovl_path),
GFP_KERNEL);
if (!stack)
goto out_put_upper;
@@ -967,7 +967,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
*/
err = -EPERM;
if (d.redirect && !ofs->config.redirect_follow) {
- pr_warn_ratelimited("overlayfs: refusing to follow redirect for (%pd2)\n",
+ pr_warn_ratelimited("refusing to follow redirect for (%pd2)\n",
dentry);
goto out_put;
}
@@ -994,7 +994,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
err = -EPERM;
if (!ofs->config.metacopy) {
- pr_warn_ratelimited("overlay: refusing to follow metacopy origin for (%pd2)\n",
+ pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n",
dentry);
goto out_put;
}
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index f283b1d69a9e..3623d28aa4fa 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -9,6 +9,9 @@
#include <linux/fs.h>
#include "ovl_entry.h"
+#undef pr_fmt
+#define pr_fmt(fmt) "overlayfs: " fmt
+
enum ovl_path_type {
__OVL_PATH_UPPER = (1 << 0),
__OVL_PATH_MERGE = (1 << 1),
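The pr_fmt definition added in this hunk is what lets all of the "overlayfs: " literals elsewhere in these patches be dropped: the kernel's pr_* macros paste pr_fmt(fmt) into the format string at compile time. A userspace sketch of the mechanism, with fprintf standing in for printk:

#include <stdio.h>

#define pr_fmt(fmt) "overlayfs: " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints: overlayfs: cleanup of 'foo' failed (-5) */
	pr_err("cleanup of '%s' failed (%i)\n", "foo", -5);
	return 0;
}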
@@ -221,7 +224,6 @@ int ovl_want_write(struct dentry *dentry);
void ovl_drop_write(struct dentry *dentry);
struct dentry *ovl_workdir(struct dentry *dentry);
const struct cred *ovl_override_creds(struct super_block *sb);
-struct super_block *ovl_same_sb(struct super_block *sb);
int ovl_can_decode_fh(struct super_block *sb);
struct dentry *ovl_indexdir(struct super_block *sb);
bool ovl_index_all(struct super_block *sb);
@@ -237,7 +239,7 @@ enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
struct dentry *ovl_dentry_upper(struct dentry *dentry);
struct dentry *ovl_dentry_lower(struct dentry *dentry);
struct dentry *ovl_dentry_lowerdata(struct dentry *dentry);
-struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
+const struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
struct dentry *ovl_dentry_real(struct dentry *dentry);
struct dentry *ovl_i_dentry_upper(struct inode *inode);
struct inode *ovl_inode_upper(struct inode *inode);
@@ -299,11 +301,21 @@ static inline bool ovl_is_impuredir(struct dentry *dentry)
return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE);
}
-static inline unsigned int ovl_xino_bits(struct super_block *sb)
+/* All layers on same fs? */
+static inline bool ovl_same_fs(struct super_block *sb)
+{
+ return OVL_FS(sb)->xino_mode == 0;
+}
+
+/* All overlay inodes have same st_dev? */
+static inline bool ovl_same_dev(struct super_block *sb)
{
- struct ovl_fs *ofs = sb->s_fs_info;
+ return OVL_FS(sb)->xino_mode >= 0;
+}
- return ofs->xino_bits;
+static inline unsigned int ovl_xino_bits(struct super_block *sb)
+{
+ return ovl_same_dev(sb) ? OVL_FS(sb)->xino_mode : 0;
}
static inline int ovl_inode_lock(struct inode *inode)
@@ -438,6 +450,8 @@ struct dentry *ovl_create_temp(struct dentry *workdir, struct ovl_cattr *attr);
/* file.c */
extern const struct file_operations ovl_file_operations;
+int __init ovl_aio_request_cache_init(void);
+void ovl_aio_request_cache_destroy(void);
/* copy_up.c */
int ovl_copy_up(struct dentry *dentry);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 28348c44ea5b..89015ea822e7 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -24,6 +24,8 @@ struct ovl_sb {
dev_t pseudo_dev;
/* Unusable (conflicting) uuid */
bool bad_uuid;
+ /* Used as a lower layer (but maybe also as upper) */
+ bool is_lower;
};
struct ovl_layer {
@@ -38,18 +40,18 @@ struct ovl_layer {
};
struct ovl_path {
- struct ovl_layer *layer;
+ const struct ovl_layer *layer;
struct dentry *dentry;
};
/* private information held for overlayfs's superblock */
struct ovl_fs {
struct vfsmount *upper_mnt;
- unsigned int numlower;
- /* Number of unique lower sb that differ from upper sb */
- unsigned int numlowerfs;
- struct ovl_layer *lower_layers;
- struct ovl_sb *lower_fs;
+ unsigned int numlayer;
+ /* Number of unique fs among layers including upper fs */
+ unsigned int numfs;
+ const struct ovl_layer *layers;
+ struct ovl_sb *fs;
/* workbasedir is the path at workdir= mount option */
struct dentry *workbasedir;
/* workdir is the 'work' directory under workbasedir */
@@ -71,10 +73,15 @@ struct ovl_fs {
struct inode *workbasedir_trap;
struct inode *workdir_trap;
struct inode *indexdir_trap;
- /* Inode numbers in all layers do not use the high xino_bits */
- unsigned int xino_bits;
+ /* -1: disabled, 0: same fs, 1..32: number of unused ino bits */
+ int xino_mode;
};
+static inline struct ovl_fs *OVL_FS(struct super_block *sb)
+{
+ return (struct ovl_fs *)sb->s_fs_info;
+}
+
/* private information held for every overlayfs dentry */
struct ovl_entry {
union {
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 47a91c9733a5..40ac9ce2465a 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -441,7 +441,7 @@ static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
const char *name, int namelen)
{
if (ino >> (64 - xinobits)) {
- pr_warn_ratelimited("overlayfs: d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
+ pr_warn_ratelimited("d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
namelen, name, ino, xinobits);
return ino;
}
@@ -469,7 +469,7 @@ static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
int xinobits = ovl_xino_bits(dir->d_sb);
int err = 0;
- if (!ovl_same_sb(dir->d_sb) && !xinobits)
+ if (!ovl_same_dev(dir->d_sb))
goto out;
if (p->name[0] == '.') {
@@ -504,7 +504,13 @@ get:
if (err)
goto fail;
- WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
+ /*
+ * Directory inode is always on overlay st_dev.
+ * Non-dir with ovl_same_dev() could be on pseudo st_dev in case
+ * of xino bits overflow.
+ */
+ WARN_ON_ONCE(S_ISDIR(stat.mode) &&
+ dir->d_sb->s_dev != stat.dev);
ino = stat.ino;
} else if (xinobits && !OVL_TYPE_UPPER(type)) {
ino = ovl_remap_lower_ino(ino, xinobits,
@@ -518,7 +524,7 @@ out:
return err;
fail:
- pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n",
+ pr_warn_ratelimited("failed to look up (%s) for ino (%i)\n",
p->name, err);
goto out;
}
@@ -685,7 +691,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
int err;
struct ovl_dir_file *od = file->private_data;
struct dentry *dir = file->f_path.dentry;
- struct ovl_layer *lower_layer = ovl_layer_lower(dir);
+ const struct ovl_layer *lower_layer = ovl_layer_lower(dir);
struct ovl_readdir_translate rdt = {
.ctx.actor = ovl_fill_real,
.orig_ctx = ctx,
@@ -738,7 +744,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
* entries.
*/
if (ovl_xino_bits(dentry->d_sb) ||
- (ovl_same_sb(dentry->d_sb) &&
+ (ovl_same_fs(dentry->d_sb) &&
(ovl_is_impure_dir(file) ||
OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
return ovl_iterate_real(file, ctx);
@@ -965,7 +971,7 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
dentry = lookup_one_len(p->name, upper, p->len);
if (IS_ERR(dentry)) {
- pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
+ pr_err("lookup '%s/%.*s' failed (%i)\n",
upper->d_name.name, p->len, p->name,
(int) PTR_ERR(dentry));
continue;
@@ -1147,6 +1153,6 @@ next:
out:
ovl_cache_free(&list);
if (err)
- pr_err("overlayfs: failed index dir cleanup (%i)\n", err);
+ pr_err("failed index dir cleanup (%i)\n", err);
return err;
}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 7621ff176d15..319fe0d355b0 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -224,14 +224,14 @@ static void ovl_free_fs(struct ovl_fs *ofs)
if (ofs->upperdir_locked)
ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
mntput(ofs->upper_mnt);
- for (i = 0; i < ofs->numlower; i++) {
- iput(ofs->lower_layers[i].trap);
- mntput(ofs->lower_layers[i].mnt);
+ for (i = 1; i < ofs->numlayer; i++) {
+ iput(ofs->layers[i].trap);
+ mntput(ofs->layers[i].mnt);
}
- for (i = 0; i < ofs->numlowerfs; i++)
- free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
- kfree(ofs->lower_layers);
- kfree(ofs->lower_fs);
+ kfree(ofs->layers);
+ for (i = 0; i < ofs->numfs; i++)
+ free_anon_bdev(ofs->fs[i].pseudo_dev);
+ kfree(ofs->fs);
kfree(ofs->config.lowerdir);
kfree(ofs->config.upperdir);
@@ -358,7 +358,7 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
if (ofs->config.nfs_export != ovl_nfs_export_def)
seq_printf(m, ",nfs_export=%s", ofs->config.nfs_export ?
"on" : "off");
- if (ofs->config.xino != ovl_xino_def())
+ if (ofs->config.xino != ovl_xino_def() && !ovl_same_fs(sb))
seq_printf(m, ",xino=%s", ovl_xino_str[ofs->config.xino]);
if (ofs->config.metacopy != ovl_metacopy_def)
seq_printf(m, ",metacopy=%s",
@@ -462,7 +462,7 @@ static int ovl_parse_redirect_mode(struct ovl_config *config, const char *mode)
if (ovl_redirect_always_follow)
config->redirect_follow = true;
} else if (strcmp(mode, "nofollow") != 0) {
- pr_err("overlayfs: bad mount option \"redirect_dir=%s\"\n",
+ pr_err("bad mount option \"redirect_dir=%s\"\n",
mode);
return -EINVAL;
}
@@ -560,14 +560,15 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
break;
default:
- pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
+ pr_err("unrecognized mount option \"%s\" or missing value\n",
+ p);
return -EINVAL;
}
}
/* Workdir is useless in non-upper mount */
if (!config->upperdir && config->workdir) {
- pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
+ pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n",
config->workdir);
kfree(config->workdir);
config->workdir = NULL;
@@ -587,7 +588,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
/* Resolve metacopy -> redirect_dir dependency */
if (config->metacopy && !config->redirect_dir) {
if (metacopy_opt && redirect_opt) {
- pr_err("overlayfs: conflicting options: metacopy=on,redirect_dir=%s\n",
+ pr_err("conflicting options: metacopy=on,redirect_dir=%s\n",
config->redirect_mode);
return -EINVAL;
}
@@ -596,7 +597,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
* There was an explicit redirect_dir=... that resulted
* in this conflict.
*/
- pr_info("overlayfs: disabling metacopy due to redirect_dir=%s\n",
+ pr_info("disabling metacopy due to redirect_dir=%s\n",
config->redirect_mode);
config->metacopy = false;
} else {
@@ -692,7 +693,7 @@ out_unlock:
out_dput:
dput(work);
out_err:
- pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n",
+ pr_warn("failed to create directory %s/%s (errno: %i); mounting read-only\n",
ofs->config.workdir, name, -err);
work = NULL;
goto out_unlock;
@@ -716,21 +717,21 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
int err = -EINVAL;
if (!*name) {
- pr_err("overlayfs: empty lowerdir\n");
+ pr_err("empty lowerdir\n");
goto out;
}
err = kern_path(name, LOOKUP_FOLLOW, path);
if (err) {
- pr_err("overlayfs: failed to resolve '%s': %i\n", name, err);
+ pr_err("failed to resolve '%s': %i\n", name, err);
goto out;
}
err = -EINVAL;
if (ovl_dentry_weird(path->dentry)) {
- pr_err("overlayfs: filesystem on '%s' not supported\n", name);
+ pr_err("filesystem on '%s' not supported\n", name);
goto out_put;
}
if (!d_is_dir(path->dentry)) {
- pr_err("overlayfs: '%s' not a directory\n", name);
+ pr_err("'%s' not a directory\n", name);
goto out_put;
}
return 0;
@@ -752,7 +753,7 @@ static int ovl_mount_dir(const char *name, struct path *path)
if (!err)
if (ovl_dentry_remote(path->dentry)) {
- pr_err("overlayfs: filesystem on '%s' not supported as upperdir\n",
+ pr_err("filesystem on '%s' not supported as upperdir\n",
tmp);
path_put_init(path);
err = -EINVAL;
@@ -769,7 +770,7 @@ static int ovl_check_namelen(struct path *path, struct ovl_fs *ofs,
int err = vfs_statfs(path, &statfs);
if (err)
- pr_err("overlayfs: statfs failed on '%s'\n", name);
+ pr_err("statfs failed on '%s'\n", name);
else
ofs->namelen = max(ofs->namelen, statfs.f_namelen);
@@ -804,13 +805,13 @@ static int ovl_lower_dir(const char *name, struct path *path,
(ofs->config.index && ofs->config.upperdir)) && !fh_type) {
ofs->config.index = false;
ofs->config.nfs_export = false;
- pr_warn("overlayfs: fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n",
+ pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n",
name);
}
/* Check if lower fs has 32bit inode numbers */
if (fh_type != FILEID_INO32_GEN)
- ofs->xino_bits = 0;
+ ofs->xino_mode = -1;
return 0;
@@ -996,7 +997,7 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
err = PTR_ERR_OR_ZERO(trap);
if (err) {
if (err == -ELOOP)
- pr_err("overlayfs: conflicting %s path\n", name);
+ pr_err("conflicting %s path\n", name);
return err;
}
@@ -1013,11 +1014,11 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
{
if (ofs->config.index) {
- pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
+ pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
name);
return -EBUSY;
} else {
- pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
+ pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
name);
return 0;
}
@@ -1035,7 +1036,7 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
/* Upper fs should not be r/o */
if (sb_rdonly(upperpath->mnt->mnt_sb)) {
- pr_err("overlayfs: upper fs is r/o, try multi-lower layers mount\n");
+ pr_err("upper fs is r/o, try multi-lower layers mount\n");
err = -EINVAL;
goto out;
}
@@ -1052,7 +1053,7 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
upper_mnt = clone_private_mount(upperpath);
err = PTR_ERR(upper_mnt);
if (IS_ERR(upper_mnt)) {
- pr_err("overlayfs: failed to clone upperpath\n");
+ pr_err("failed to clone upperpath\n");
goto out;
}
@@ -1108,7 +1109,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
* kernel upgrade. So warn instead of erroring out.
*/
if (!err)
- pr_warn("overlayfs: upper fs needs to support d_type.\n");
+ pr_warn("upper fs needs to support d_type.\n");
/* Check if upper/work fs supports O_TMPFILE */
temp = ovl_do_tmpfile(ofs->workdir, S_IFREG | 0);
@@ -1116,7 +1117,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
if (ofs->tmpfile)
dput(temp);
else
- pr_warn("overlayfs: upper fs does not support tmpfile.\n");
+ pr_warn("upper fs does not support tmpfile.\n");
/*
* Check if upper/work fs supports trusted.overlay.* xattr
@@ -1126,7 +1127,7 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
ofs->noxattr = true;
ofs->config.index = false;
ofs->config.metacopy = false;
- pr_warn("overlayfs: upper fs does not support xattr, falling back to index=off and metacopy=off.\n");
+ pr_warn("upper fs does not support xattr, falling back to index=off and metacopy=off.\n");
err = 0;
} else {
vfs_removexattr(ofs->workdir, OVL_XATTR_OPAQUE);
@@ -1136,16 +1137,16 @@ static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
fh_type = ovl_can_decode_fh(ofs->workdir->d_sb);
if (ofs->config.index && !fh_type) {
ofs->config.index = false;
- pr_warn("overlayfs: upper fs does not support file handles, falling back to index=off.\n");
+ pr_warn("upper fs does not support file handles, falling back to index=off.\n");
}
/* Check if upper fs has 32bit inode numbers */
if (fh_type != FILEID_INO32_GEN)
- ofs->xino_bits = 0;
+ ofs->xino_mode = -1;
/* NFS export of r/w mount depends on index */
if (ofs->config.nfs_export && !ofs->config.index) {
- pr_warn("overlayfs: NFS export requires \"index=on\", falling back to nfs_export=off.\n");
+ pr_warn("NFS export requires \"index=on\", falling back to nfs_export=off.\n");
ofs->config.nfs_export = false;
}
out:
@@ -1165,11 +1166,11 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
err = -EINVAL;
if (upperpath->mnt != workpath.mnt) {
- pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
+ pr_err("workdir and upperdir must reside under the same mount\n");
goto out;
}
if (!ovl_workdir_ok(workpath.dentry, upperpath->dentry)) {
- pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
+ pr_err("workdir and upperdir must be separate subtrees\n");
goto out;
}
@@ -1210,7 +1211,7 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
err = ovl_verify_origin(upperpath->dentry, oe->lowerstack[0].dentry,
true);
if (err) {
- pr_err("overlayfs: failed to verify upper root origin\n");
+ pr_err("failed to verify upper root origin\n");
goto out;
}
@@ -1233,18 +1234,18 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
err = ovl_verify_set_fh(ofs->indexdir, OVL_XATTR_ORIGIN,
upperpath->dentry, true, false);
if (err)
- pr_err("overlayfs: failed to verify index dir 'origin' xattr\n");
+ pr_err("failed to verify index dir 'origin' xattr\n");
}
err = ovl_verify_upper(ofs->indexdir, upperpath->dentry, true);
if (err)
- pr_err("overlayfs: failed to verify index dir 'upper' xattr\n");
+ pr_err("failed to verify index dir 'upper' xattr\n");
/* Cleanup bad/stale/orphan index entries */
if (!err)
err = ovl_indexdir_cleanup(ofs);
}
if (err || !ofs->indexdir)
- pr_warn("overlayfs: try deleting index dir or mounting with '-o index=off' to disable inodes index.\n");
+ pr_warn("try deleting index dir or mounting with '-o index=off' to disable inodes index.\n");
out:
mnt_drop_write(mnt);
@@ -1258,7 +1259,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
if (!ofs->config.nfs_export && !ofs->upper_mnt)
return true;
- for (i = 0; i < ofs->numlowerfs; i++) {
+ for (i = 0; i < ofs->numfs; i++) {
/*
* We use uuid to associate an overlay lower file handle with a
* lower layer, so we can accept lower fs with null uuid as long
@@ -1266,8 +1267,9 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
* if we detect multiple lower fs with the same uuid, we
* disable lower file handle decoding on all of them.
*/
- if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) {
- ofs->lower_fs[i].bad_uuid = true;
+ if (ofs->fs[i].is_lower &&
+ uuid_equal(&ofs->fs[i].sb->s_uuid, uuid)) {
+ ofs->fs[i].bad_uuid = true;
return false;
}
}
@@ -1283,13 +1285,9 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
int err;
bool bad_uuid = false;
- /* fsid 0 is reserved for upper fs even with non upper overlay */
- if (ofs->upper_mnt && ofs->upper_mnt->mnt_sb == sb)
- return 0;
-
- for (i = 0; i < ofs->numlowerfs; i++) {
- if (ofs->lower_fs[i].sb == sb)
- return i + 1;
+ for (i = 0; i < ofs->numfs; i++) {
+ if (ofs->fs[i].sb == sb)
+ return i;
}
if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
@@ -1297,7 +1295,7 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
if (ofs->config.index || ofs->config.nfs_export) {
ofs->config.index = false;
ofs->config.nfs_export = false;
- pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
+ pr_warn("%s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
uuid_is_null(&sb->s_uuid) ? "null" :
"conflicting",
path->dentry);
@@ -1306,35 +1304,59 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
err = get_anon_bdev(&dev);
if (err) {
- pr_err("overlayfs: failed to get anonymous bdev for lowerpath\n");
+ pr_err("failed to get anonymous bdev for lowerpath\n");
return err;
}
- ofs->lower_fs[ofs->numlowerfs].sb = sb;
- ofs->lower_fs[ofs->numlowerfs].pseudo_dev = dev;
- ofs->lower_fs[ofs->numlowerfs].bad_uuid = bad_uuid;
- ofs->numlowerfs++;
+ ofs->fs[ofs->numfs].sb = sb;
+ ofs->fs[ofs->numfs].pseudo_dev = dev;
+ ofs->fs[ofs->numfs].bad_uuid = bad_uuid;
- return ofs->numlowerfs;
+ return ofs->numfs++;
}
-static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
- struct path *stack, unsigned int numlower)
+static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
+ struct path *stack, unsigned int numlower)
{
int err;
unsigned int i;
+ struct ovl_layer *layers;
err = -ENOMEM;
- ofs->lower_layers = kcalloc(numlower, sizeof(struct ovl_layer),
- GFP_KERNEL);
- if (ofs->lower_layers == NULL)
+ layers = kcalloc(numlower + 1, sizeof(struct ovl_layer), GFP_KERNEL);
+ if (!layers)
goto out;
+ ofs->layers = layers;
- ofs->lower_fs = kcalloc(numlower, sizeof(struct ovl_sb),
- GFP_KERNEL);
- if (ofs->lower_fs == NULL)
+ ofs->fs = kcalloc(numlower + 1, sizeof(struct ovl_sb), GFP_KERNEL);
+ if (ofs->fs == NULL)
goto out;
+ /* idx/fsid 0 are reserved for upper fs even with lower only overlay */
+ ofs->numfs++;
+
+ layers[0].mnt = ofs->upper_mnt;
+ layers[0].idx = 0;
+ layers[0].fsid = 0;
+ ofs->numlayer = 1;
+
+ /*
+ * All lower layers that share the same fs as upper layer, use the same
+ * pseudo_dev as upper layer. Allocate fs[0].pseudo_dev even for lower
+ * only overlay to simplify ovl_fs_free().
+ * is_lower will be set if upper fs is shared with a lower layer.
+ */
+ err = get_anon_bdev(&ofs->fs[0].pseudo_dev);
+ if (err) {
+ pr_err("failed to get anonymous bdev for upper fs\n");
+ goto out;
+ }
+
+ if (ofs->upper_mnt) {
+ ofs->fs[0].sb = ofs->upper_mnt->mnt_sb;
+ ofs->fs[0].is_lower = false;
+ }
+
for (i = 0; i < numlower; i++) {
struct vfsmount *mnt;
struct inode *trap;
@@ -1357,7 +1379,7 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
mnt = clone_private_mount(&stack[i]);
err = PTR_ERR(mnt);
if (IS_ERR(mnt)) {
- pr_err("overlayfs: failed to clone lowerpath\n");
+ pr_err("failed to clone lowerpath\n");
iput(trap);
goto out;
}
@@ -1368,15 +1390,13 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
*/
mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
- ofs->lower_layers[ofs->numlower].trap = trap;
- ofs->lower_layers[ofs->numlower].mnt = mnt;
- ofs->lower_layers[ofs->numlower].idx = i + 1;
- ofs->lower_layers[ofs->numlower].fsid = fsid;
- if (fsid) {
- ofs->lower_layers[ofs->numlower].fs =
- &ofs->lower_fs[fsid - 1];
- }
- ofs->numlower++;
+ layers[ofs->numlayer].trap = trap;
+ layers[ofs->numlayer].mnt = mnt;
+ layers[ofs->numlayer].idx = ofs->numlayer;
+ layers[ofs->numlayer].fsid = fsid;
+ layers[ofs->numlayer].fs = &ofs->fs[fsid];
+ ofs->numlayer++;
+ ofs->fs[fsid].is_lower = true;
}
/*
@@ -1387,22 +1407,23 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
* bits reserved for fsid, it emits a warning and uses the original
* inode number.
*/
- if (!ofs->numlowerfs || (ofs->numlowerfs == 1 && !ofs->upper_mnt)) {
- ofs->xino_bits = 0;
- ofs->config.xino = OVL_XINO_OFF;
- } else if (ofs->config.xino == OVL_XINO_ON && !ofs->xino_bits) {
+ if (ofs->numfs - !ofs->upper_mnt == 1) {
+ if (ofs->config.xino == OVL_XINO_ON)
+ pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
+ ofs->xino_mode = 0;
+ } else if (ofs->config.xino == OVL_XINO_ON && ofs->xino_mode < 0) {
/*
- * This is a roundup of number of bits needed for numlowerfs+1
- * (i.e. ilog2(numlowerfs+1 - 1) + 1). fsid 0 is reserved for
- * upper fs even with non upper overlay.
+ * This is a roundup of number of bits needed for encoding
+ * fsid, where fsid 0 is reserved for upper fs even with
+ * lower only overlay.
*/
BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 31);
- ofs->xino_bits = ilog2(ofs->numlowerfs) + 1;
+ ofs->xino_mode = ilog2(ofs->numfs - 1) + 1;
}
- if (ofs->xino_bits) {
- pr_info("overlayfs: \"xino\" feature enabled using %d upper inode bits.\n",
- ofs->xino_bits);
+ if (ofs->xino_mode > 0) {
+ pr_info("\"xino\" feature enabled using %d upper inode bits.\n",
+ ofs->xino_mode);
}
err = 0;
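The xino computation above reserves the high bits of each inode number for an fsid: with ofs->numfs distinct filesystems (fsid 0 being the upper fs), ilog2(numfs - 1) + 1 bits are enough to encode every fsid. A minimal userspace sketch of that sizing and packing, assuming 64-bit inode numbers and hypothetical helper names; only the bit math mirrors the hunk:

#include <stdint.h>
#include <stdio.h>

/* smallest number of bits that can encode values 0 .. n-1 */
static unsigned int fsid_bits(unsigned int numfs)
{
	unsigned int bits = 0;

	while ((1u << bits) < numfs)
		bits++;
	return bits;	/* matches ilog2(numfs - 1) + 1 for numfs > 1 */
}

int main(void)
{
	unsigned int numfs = 4;	/* upper fs (fsid 0) plus three lower fs */
	unsigned int xino_mode = fsid_bits(numfs);	/* -> 2 bits */
	uint64_t ino = 12345, fsid = 2;
	uint64_t xino = (fsid << (64 - xino_mode)) | ino;

	printf("fsid bits: %u, xino: %#jx\n", xino_mode, (uintmax_t)xino);
	return 0;
}

With four filesystems, two high bits carry the fsid and the remaining bits carry the real inode number, which is why the hunk warns and falls back when an underlying inode overflows the reserved range.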
@@ -1428,15 +1449,15 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
err = -EINVAL;
stacklen = ovl_split_lowerdirs(lowertmp);
if (stacklen > OVL_MAX_STACK) {
- pr_err("overlayfs: too many lower directories, limit is %d\n",
+ pr_err("too many lower directories, limit is %d\n",
OVL_MAX_STACK);
goto out_err;
} else if (!ofs->config.upperdir && stacklen == 1) {
- pr_err("overlayfs: at least 2 lowerdir are needed while upperdir nonexistent\n");
+ pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n");
goto out_err;
} else if (!ofs->config.upperdir && ofs->config.nfs_export &&
ofs->config.redirect_follow) {
- pr_warn("overlayfs: NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
+ pr_warn("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
ofs->config.nfs_export = false;
}
@@ -1459,11 +1480,11 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
err = -EINVAL;
sb->s_stack_depth++;
if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
- pr_err("overlayfs: maximum fs stacking depth exceeded\n");
+ pr_err("maximum fs stacking depth exceeded\n");
goto out_err;
}
- err = ovl_get_lower_layers(sb, ofs, stack, numlower);
+ err = ovl_get_layers(sb, ofs, stack, numlower);
if (err)
goto out_err;
@@ -1474,7 +1495,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
for (i = 0; i < numlower; i++) {
oe->lowerstack[i].dentry = dget(stack[i].dentry);
- oe->lowerstack[i].layer = &ofs->lower_layers[i];
+ oe->lowerstack[i].layer = &ofs->layers[i+1];
}
if (remote)
@@ -1515,7 +1536,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
while (!err && parent != next) {
if (ovl_lookup_trap_inode(sb, parent)) {
err = -ELOOP;
- pr_err("overlayfs: overlapping %s path\n", name);
+ pr_err("overlapping %s path\n", name);
} else if (ovl_is_inuse(parent)) {
err = ovl_report_in_use(ofs, name);
}
@@ -1555,9 +1576,9 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
return err;
}
- for (i = 0; i < ofs->numlower; i++) {
+ for (i = 1; i < ofs->numlayer; i++) {
err = ovl_check_layer(sb, ofs,
- ofs->lower_layers[i].mnt->mnt_root,
+ ofs->layers[i].mnt->mnt_root,
"lowerdir");
if (err)
return err;
@@ -1595,7 +1616,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
err = -EINVAL;
if (!ofs->config.lowerdir) {
if (!silent)
- pr_err("overlayfs: missing 'lowerdir'\n");
+ pr_err("missing 'lowerdir'\n");
goto out_err;
}
@@ -1603,14 +1624,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_maxbytes = MAX_LFS_FILESIZE;
/* Assume underlaying fs uses 32bit inodes unless proven otherwise */
if (ofs->config.xino != OVL_XINO_OFF)
- ofs->xino_bits = BITS_PER_LONG - 32;
+ ofs->xino_mode = BITS_PER_LONG - 32;
/* alloc/destroy_inode needed for setting up traps in inode cache */
sb->s_op = &ovl_super_operations;
if (ofs->config.upperdir) {
if (!ofs->config.workdir) {
- pr_err("overlayfs: missing 'workdir'\n");
+ pr_err("missing 'workdir'\n");
goto out_err;
}
@@ -1660,13 +1681,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (!ofs->indexdir) {
ofs->config.index = false;
if (ofs->upper_mnt && ofs->config.nfs_export) {
- pr_warn("overlayfs: NFS export requires an index dir, falling back to nfs_export=off.\n");
+ pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n");
ofs->config.nfs_export = false;
}
}
if (ofs->config.metacopy && ofs->config.nfs_export) {
- pr_warn("overlayfs: NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n");
+ pr_warn("NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n");
ofs->config.nfs_export = false;
}
@@ -1749,9 +1770,15 @@ static int __init ovl_init(void)
if (ovl_inode_cachep == NULL)
return -ENOMEM;
- err = register_filesystem(&ovl_fs_type);
- if (err)
- kmem_cache_destroy(ovl_inode_cachep);
+ err = ovl_aio_request_cache_init();
+ if (!err) {
+ err = register_filesystem(&ovl_fs_type);
+ if (!err)
+ return 0;
+
+ ovl_aio_request_cache_destroy();
+ }
+ kmem_cache_destroy(ovl_inode_cachep);
return err;
}
@@ -1766,7 +1793,7 @@ static void __exit ovl_exit(void)
*/
rcu_barrier();
kmem_cache_destroy(ovl_inode_cachep);
-
+ ovl_aio_request_cache_destroy();
}
module_init(ovl_init);
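The reworked ovl_init above adds a second cache to module init and unwinds in strict reverse order of setup when a later step fails. A compilable sketch of that shape with stubbed-out steps; the names are placeholders, only the unwind ordering mirrors the hunk:

#include <errno.h>
#include <stdio.h>

/* stand-ins for kmem_cache creation and filesystem registration */
static int setup_inode_cache(void)    { return 0; }
static int setup_aio_cache(void)      { return 0; }
static int register_fs(void)          { return 0; }
static void destroy_aio_cache(void)   { puts("undo step 2"); }
static void destroy_inode_cache(void) { puts("undo step 1"); }

static int example_init(void)
{
	int err;

	if (setup_inode_cache())
		return -ENOMEM;

	err = setup_aio_cache();
	if (!err) {
		err = register_fs();
		if (!err)
			return 0;		/* fully initialized */

		destroy_aio_cache();		/* unwind in reverse order */
	}
	destroy_inode_cache();
	return err;
}

int main(void)
{
	return example_init() ? 1 : 0;
}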
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index f5678a3f8350..ea005085803f 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -40,18 +40,6 @@ const struct cred *ovl_override_creds(struct super_block *sb)
return override_creds(ofs->creator_cred);
}
-struct super_block *ovl_same_sb(struct super_block *sb)
-{
- struct ovl_fs *ofs = sb->s_fs_info;
-
- if (!ofs->numlowerfs)
- return ofs->upper_mnt->mnt_sb;
- else if (ofs->numlowerfs == 1 && !ofs->upper_mnt)
- return ofs->lower_fs[0].sb;
- else
- return NULL;
-}
-
/*
* Check if underlying fs supports file handles and try to determine encoding
* type, in order to deduce maximum inode number used by fs.
@@ -198,7 +186,7 @@ struct dentry *ovl_dentry_lower(struct dentry *dentry)
return oe->numlower ? oe->lowerstack[0].dentry : NULL;
}
-struct ovl_layer *ovl_layer_lower(struct dentry *dentry)
+const struct ovl_layer *ovl_layer_lower(struct dentry *dentry)
{
struct ovl_entry *oe = dentry->d_fsdata;
@@ -576,7 +564,7 @@ int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
err = ovl_do_setxattr(upperdentry, name, value, size, 0);
if (err == -EOPNOTSUPP) {
- pr_warn("overlayfs: cannot set %s xattr on upper\n", name);
+ pr_warn("cannot set %s xattr on upper\n", name);
ofs->noxattr = true;
return xerr;
}
@@ -700,7 +688,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
inode = d_inode(upperdentry);
if (!S_ISDIR(inode->i_mode) && inode->i_nlink != 1) {
- pr_warn_ratelimited("overlayfs: cleanup linked index (%pd2, ino=%lu, nlink=%u)\n",
+ pr_warn_ratelimited("cleanup linked index (%pd2, ino=%lu, nlink=%u)\n",
upperdentry, inode->i_ino, inode->i_nlink);
/*
* We either have a bug with persistent union nlink or a lower
@@ -739,7 +727,7 @@ out:
return;
fail:
- pr_err("overlayfs: cleanup index of '%pd2' failed (%i)\n", dentry, err);
+ pr_err("cleanup index of '%pd2' failed (%i)\n", dentry, err);
goto out;
}
@@ -830,7 +818,7 @@ int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
err_unlock:
unlock_rename(workdir, upperdir);
err:
- pr_err("overlayfs: failed to lock workdir+upperdir\n");
+ pr_err("failed to lock workdir+upperdir\n");
return -EIO;
}
@@ -852,7 +840,7 @@ int ovl_check_metacopy_xattr(struct dentry *dentry)
return 1;
out:
- pr_warn_ratelimited("overlayfs: failed to get metacopy (%i)\n", res);
+ pr_warn_ratelimited("failed to get metacopy (%i)\n", res);
return res;
}
@@ -899,7 +887,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
return res;
fail:
- pr_warn_ratelimited("overlayfs: failed to get xattr %s: err=%zi)\n",
+ pr_warn_ratelimited("failed to get xattr %s: err=%zi)\n",
name, res);
kfree(buf);
return res;
@@ -931,7 +919,7 @@ char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
return buf;
invalid:
- pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
+ pr_warn_ratelimited("invalid redirect (%s)\n", buf);
res = -EINVAL;
kfree(buf);
return ERR_PTR(res);
diff --git a/fs/pipe.c b/fs/pipe.c
index 57502c3c0fba..5a34d6c22d4c 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -108,16 +108,19 @@ void pipe_double_lock(struct pipe_inode_info *pipe1,
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
- DEFINE_WAIT(wait);
+ DEFINE_WAIT(rdwait);
+ DEFINE_WAIT(wrwait);
/*
* Pipes are system-local resources, so sleeping on them
* is considered a noninteractive wait:
*/
- prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(&pipe->wr_wait, &wrwait, TASK_INTERRUPTIBLE);
pipe_unlock(pipe);
schedule();
- finish_wait(&pipe->wait, &wait);
+ finish_wait(&pipe->rd_wait, &rdwait);
+ finish_wait(&pipe->wr_wait, &wrwait);
pipe_lock(pipe);
}
@@ -286,7 +289,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
size_t total_len = iov_iter_count(to);
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
- bool was_full;
+ bool was_full, wake_next_reader = false;
ssize_t ret;
/* Null read succeeds. */
@@ -344,10 +347,10 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
if (!buf->len) {
pipe_buf_release(pipe, buf);
- spin_lock_irq(&pipe->wait.lock);
+ spin_lock_irq(&pipe->rd_wait.lock);
tail++;
pipe->tail = tail;
- spin_unlock_irq(&pipe->wait.lock);
+ spin_unlock_irq(&pipe->rd_wait.lock);
}
total_len -= chars;
if (!total_len)
@@ -384,7 +387,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
* no data.
*/
if (unlikely(was_full)) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
+ wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
@@ -394,18 +397,23 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
* since we've done any required wakeups and there's no need
* to mark anything accessed. And we've dropped the lock.
*/
- if (wait_event_interruptible(pipe->wait, pipe_readable(pipe)) < 0)
+ if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
return -ERESTARTSYS;
__pipe_lock(pipe);
was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
+ wake_next_reader = true;
}
+ if (pipe_empty(pipe->head, pipe->tail))
+ wake_next_reader = false;
__pipe_unlock(pipe);
if (was_full) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
+ wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
+ if (wake_next_reader)
+ wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
if (ret > 0)
file_accessed(filp);
return ret;
@@ -437,6 +445,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
size_t total_len = iov_iter_count(from);
ssize_t chars;
bool was_empty = false;
+ bool wake_next_writer = false;
/* Null write succeeds. */
if (unlikely(total_len == 0))
@@ -515,16 +524,16 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
* it, either the reader will consume it or it'll still
* be there for the next write.
*/
- spin_lock_irq(&pipe->wait.lock);
+ spin_lock_irq(&pipe->rd_wait.lock);
head = pipe->head;
if (pipe_full(head, pipe->tail, pipe->max_usage)) {
- spin_unlock_irq(&pipe->wait.lock);
+ spin_unlock_irq(&pipe->rd_wait.lock);
continue;
}
pipe->head = head + 1;
- spin_unlock_irq(&pipe->wait.lock);
+ spin_unlock_irq(&pipe->rd_wait.lock);
/* Insert it into the buffer array */
buf = &pipe->bufs[head & mask];
@@ -576,14 +585,17 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
*/
__pipe_unlock(pipe);
if (was_empty) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
+ wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
- wait_event_interruptible(pipe->wait, pipe_writable(pipe));
+ wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
__pipe_lock(pipe);
was_empty = pipe_empty(pipe->head, pipe->tail);
+ wake_next_writer = true;
}
out:
+ if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+ wake_next_writer = false;
__pipe_unlock(pipe);
/*
@@ -596,9 +608,11 @@ out:
* wake up pending jobs
*/
if (was_empty) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
+ wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
+ if (wake_next_writer)
+ wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
int err = file_update_time(filp);
if (err)
@@ -642,12 +656,15 @@ pipe_poll(struct file *filp, poll_table *wait)
unsigned int head, tail;
/*
- * Reading only -- no need for acquiring the semaphore.
+ * Reading pipe state only -- no need to acquire the semaphore.
*
* But because this is racy, the code has to add the
* entry to the poll table _first_ ..
*/
- poll_wait(filp, &pipe->wait, wait);
+ if (filp->f_mode & FMODE_READ)
+ poll_wait(filp, &pipe->rd_wait, wait);
+ if (filp->f_mode & FMODE_WRITE)
+ poll_wait(filp, &pipe->wr_wait, wait);
/*
* .. and only then can you do the racy tests. That way,
@@ -706,7 +723,8 @@ pipe_release(struct inode *inode, struct file *file)
pipe->writers--;
if (pipe->readers || pipe->writers) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
+ wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLHUP);
+ wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
@@ -789,7 +807,8 @@ struct pipe_inode_info *alloc_pipe_info(void)
GFP_KERNEL_ACCOUNT);
if (pipe->bufs) {
- init_waitqueue_head(&pipe->wait);
+ init_waitqueue_head(&pipe->rd_wait);
+ init_waitqueue_head(&pipe->wr_wait);
pipe->r_counter = pipe->w_counter = 1;
pipe->max_usage = pipe_bufs;
pipe->ring_size = pipe_bufs;
@@ -1007,7 +1026,8 @@ static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
static void wake_up_partner(struct pipe_inode_info *pipe)
{
- wake_up_interruptible(&pipe->wait);
+ wake_up_interruptible(&pipe->rd_wait);
+ wake_up_interruptible(&pipe->wr_wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
@@ -1118,13 +1138,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
err_rd:
if (!--pipe->readers)
- wake_up_interruptible(&pipe->wait);
+ wake_up_interruptible(&pipe->wr_wait);
ret = -ERESTARTSYS;
goto err;
err_wr:
if (!--pipe->writers)
- wake_up_interruptible(&pipe->wait);
+ wake_up_interruptible(&pipe->rd_wait);
ret = -ERESTARTSYS;
goto err;
@@ -1251,7 +1271,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
pipe->max_usage = nr_slots;
pipe->tail = tail;
pipe->head = head;
- wake_up_interruptible_all(&pipe->wait);
+ wake_up_interruptible_all(&pipe->rd_wait);
+ wake_up_interruptible_all(&pipe->wr_wait);
return pipe->max_usage * PAGE_SIZE;
out_revert_acct:
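The pipe rework above splits the shared wait queue into rd_wait and wr_wait, makes sleepers exclusive so only one waiter wakes per event, and adds wake_next_reader/wake_next_writer so a woken task passes the wakeup along only while data or space remains. A hedged pthreads analogy of that pass-the-baton pattern; the kernel primitives are wait_event_interruptible_exclusive() and wake_up_interruptible_sync_poll(), everything below is a userspace stand-in:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t nonempty = PTHREAD_COND_INITIALIZER;
static int items;

static void *reader(void *arg)
{
	pthread_mutex_lock(&lock);
	while (items == 0)
		pthread_cond_wait(&nonempty, &lock); /* exclusive-style wait */
	items--;
	if (items > 0)			/* "wake_next_reader" */
		pthread_cond_signal(&nonempty);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	int i;

	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, reader, NULL);

	pthread_mutex_lock(&lock);
	items = 3;			/* writer queues three items ... */
	pthread_cond_signal(&nonempty);	/* ... but signals exactly once */
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	printf("all readers done, items=%d\n", items);
	return 0;
}

Each reader that still sees work signals exactly one more reader, mirroring how pipe_read only re-wakes rd_wait when the pipe is not yet empty.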
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 733881a6387b..27ef84d99f59 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -103,3 +103,7 @@ config PROC_CHILDREN
config PROC_PID_ARCH_STATUS
def_bool n
depends on PROC_FS
+
+config PROC_CPU_RESCTRL
+ def_bool n
+ depends on PROC_FS
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index ead487e80510..bd08616ed8ba 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -33,3 +33,4 @@ proc-$(CONFIG_PROC_KCORE) += kcore.o
proc-$(CONFIG_PROC_VMCORE) += vmcore.o
proc-$(CONFIG_PRINTK) += kmsg.o
proc-$(CONFIG_PROC_PAGE_MONITOR) += page.o
+proc-$(CONFIG_BOOT_CONFIG) += bootconfig.o
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ebea9501afb8..c7c64272b0fa 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -94,6 +94,8 @@
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/posix-timers.h>
+#include <linux/time_namespace.h>
+#include <linux/resctrl.h>
#include <trace/events/oom.h>
#include "internal.h"
#include "fd.h"
@@ -1533,6 +1535,96 @@ static const struct file_operations proc_pid_sched_autogroup_operations = {
#endif /* CONFIG_SCHED_AUTOGROUP */
+#ifdef CONFIG_TIME_NS
+static int timens_offsets_show(struct seq_file *m, void *v)
+{
+ struct task_struct *p;
+
+ p = get_proc_task(file_inode(m->file));
+ if (!p)
+ return -ESRCH;
+ proc_timens_show_offsets(p, m);
+
+ put_task_struct(p);
+
+ return 0;
+}
+
+static ssize_t timens_offsets_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct inode *inode = file_inode(file);
+ struct proc_timens_offset offsets[2];
+ char *kbuf = NULL, *pos, *next_line;
+ struct task_struct *p;
+ int ret, noffsets;
+
+ /* Only allow < page size writes at the beginning of the file */
+ if ((*ppos != 0) || (count >= PAGE_SIZE))
+ return -EINVAL;
+
+ /* Slurp in the user data */
+ kbuf = memdup_user_nul(buf, count);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+
+ /* Parse the user data */
+ ret = -EINVAL;
+ noffsets = 0;
+ for (pos = kbuf; pos; pos = next_line) {
+ struct proc_timens_offset *off = &offsets[noffsets];
+ int err;
+
+ /* Find the end of line and ensure we don't look past it */
+ next_line = strchr(pos, '\n');
+ if (next_line) {
+ *next_line = '\0';
+ next_line++;
+ if (*next_line == '\0')
+ next_line = NULL;
+ }
+
+ err = sscanf(pos, "%u %lld %lu", &off->clockid,
+ &off->val.tv_sec, &off->val.tv_nsec);
+ if (err != 3 || off->val.tv_nsec >= NSEC_PER_SEC)
+ goto out;
+ noffsets++;
+ if (noffsets == ARRAY_SIZE(offsets)) {
+ if (next_line)
+ count = next_line - kbuf;
+ break;
+ }
+ }
+
+ ret = -ESRCH;
+ p = get_proc_task(inode);
+ if (!p)
+ goto out;
+ ret = proc_timens_set_offset(file, p, offsets, noffsets);
+ put_task_struct(p);
+ if (ret)
+ goto out;
+
+ ret = count;
+out:
+ kfree(kbuf);
+ return ret;
+}
+
+static int timens_offsets_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, timens_offsets_show, inode);
+}
+
+static const struct file_operations proc_timens_offsets_operations = {
+ .open = timens_offsets_open,
+ .read = seq_read,
+ .write = timens_offsets_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_TIME_NS */
+
static ssize_t comm_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset)
{
@@ -1626,8 +1718,7 @@ static const char *proc_pid_get_link(struct dentry *dentry,
if (error)
goto out;
- nd_jump_link(&path);
- return NULL;
+ error = nd_jump_link(&path);
out:
return ERR_PTR(error);
}
@@ -3016,6 +3107,9 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_SCHED_AUTOGROUP
REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
#endif
+#ifdef CONFIG_TIME_NS
+ REG("timens_offsets", S_IRUGO|S_IWUSR, proc_timens_offsets_operations),
+#endif
REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
ONE("syscall", S_IRUSR, proc_pid_syscall),
@@ -3061,6 +3155,9 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_CGROUPS
ONE("cgroup", S_IRUGO, proc_cgroup_show),
#endif
+#ifdef CONFIG_PROC_CPU_RESCTRL
+ ONE("cpu_resctrl_groups", S_IRUGO, proc_resctrl_show),
+#endif
ONE("oom_score", S_IRUGO, proc_oom_score),
REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
@@ -3461,6 +3558,9 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_CGROUPS
ONE("cgroup", S_IRUGO, proc_cgroup_show),
#endif
+#ifdef CONFIG_PROC_CPU_RESCTRL
+ ONE("cpu_resctrl_groups", S_IRUGO, proc_resctrl_show),
+#endif
ONE("oom_score", S_IRUGO, proc_oom_score),
REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c
new file mode 100644
index 000000000000..9955d75c0585
--- /dev/null
+++ b/fs/proc/bootconfig.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * /proc/bootconfig - Extra boot configuration
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/printk.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/bootconfig.h>
+#include <linux/slab.h>
+
+static char *saved_boot_config;
+
+static int boot_config_proc_show(struct seq_file *m, void *v)
+{
+ if (saved_boot_config)
+ seq_puts(m, saved_boot_config);
+ return 0;
+}
+
+/* Remaining size of the buffer */
+#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
+
+/* Return the needed total length if @size is 0 */
+static int __init copy_xbc_key_value_list(char *dst, size_t size)
+{
+ struct xbc_node *leaf, *vnode;
+ const char *val;
+ char *key, *end = dst + size;
+ int ret = 0;
+
+ key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL);
+
+ xbc_for_each_key_value(leaf, val) {
+ ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
+ if (ret < 0)
+ break;
+ ret = snprintf(dst, rest(dst, end), "%s = ", key);
+ if (ret < 0)
+ break;
+ dst += ret;
+ vnode = xbc_node_get_child(leaf);
+ if (vnode && xbc_node_is_array(vnode)) {
+ xbc_array_for_each_value(vnode, val) {
+ ret = snprintf(dst, rest(dst, end), "\"%s\"%s",
+ val, vnode->next ? ", " : "\n");
+ if (ret < 0)
+ goto out;
+ dst += ret;
+ }
+ } else {
+ ret = snprintf(dst, rest(dst, end), "\"%s\"\n", val);
+ if (ret < 0)
+ break;
+ dst += ret;
+ }
+ }
+out:
+ kfree(key);
+
+ return ret < 0 ? ret : dst - (end - size);
+}
+
+static int __init proc_boot_config_init(void)
+{
+ int len;
+
+ len = copy_xbc_key_value_list(NULL, 0);
+ if (len < 0)
+ return len;
+
+ if (len > 0) {
+ saved_boot_config = kzalloc(len + 1, GFP_KERNEL);
+ if (!saved_boot_config)
+ return -ENOMEM;
+
+ len = copy_xbc_key_value_list(saved_boot_config, len + 1);
+ if (len < 0) {
+ kfree(saved_boot_config);
+ return len;
+ }
+ }
+
+ proc_create_single("bootconfig", 0, NULL, boot_config_proc_show);
+
+ return 0;
+}
+fs_initcall(proc_boot_config_init);
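proc_boot_config_init above calls copy_xbc_key_value_list() twice: first with a NULL buffer and size 0, where rest() always yields 0 so every snprintf only measures, then again with a buffer of len + 1. A hedged userspace sketch of the same two-pass "measure, allocate, fill" idiom, with hypothetical names; the NULL-based pointer advance mirrors the kernel helper's measuring trick:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)

static int render(char *dst, size_t size, const char **keys, int n)
{
	char *start = dst, *end = dst + size;
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = snprintf(dst, rest(dst, end), "%s = \"on\"\n", keys[i]);
		if (ret < 0)
			return ret;
		dst += ret;	/* with size 0 this only counts bytes */
	}
	return dst - start;	/* total length needed (or written) */
}

int main(void)
{
	const char *keys[] = { "ftrace.event", "kernel.trace" };
	int len = render(NULL, 0, keys, 2);	/* pass 1: measure */
	char *buf;

	if (len < 0)
		return 1;
	buf = calloc(1, len + 1);
	if (!buf)
		return 1;
	render(buf, len + 1, keys, 2);		/* pass 2: fill */
	fputs(buf, stdout);
	free(buf);
	return 0;
}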
diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c
index 96f1087e372c..c1dea9b8222e 100644
--- a/fs/proc/cpuinfo.c
+++ b/fs/proc/cpuinfo.c
@@ -16,16 +16,16 @@ static int cpuinfo_open(struct inode *inode, struct file *file)
return seq_open(file, &cpuinfo_op);
}
-static const struct file_operations proc_cpuinfo_operations = {
- .open = cpuinfo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
+static const struct proc_ops cpuinfo_proc_ops = {
+ .proc_open = cpuinfo_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
};
static int __init proc_cpuinfo_init(void)
{
- proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations);
+ proc_create("cpuinfo", 0, NULL, &cpuinfo_proc_ops);
return 0;
}
fs_initcall(proc_cpuinfo_init);
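The cpuinfo conversion above is the template for the rest of this patch: procfs entries register a struct proc_ops instead of file_operations. A minimal out-of-tree module using the new interface; "hello" is an arbitrary entry name, everything else follows the hunks shown here:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int hello_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from proc_ops\n");
	return 0;
}

static int hello_open(struct inode *inode, struct file *file)
{
	return single_open(file, hello_show, NULL);
}

static const struct proc_ops hello_proc_ops = {
	.proc_open	= hello_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init hello_init(void)
{
	if (!proc_create("hello", 0444, NULL, &hello_proc_ops))
		return -ENOMEM;
	return 0;
}

static void __exit hello_exit(void)
{
	remove_proc_entry("hello", NULL);
}

module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("GPL");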
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 074e9585c699..3faed94e4b65 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -473,7 +473,7 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
if (ent) {
ent->data = data;
- ent->proc_fops = &proc_dir_operations;
+ ent->proc_dir_ops = &proc_dir_operations;
ent->proc_iops = &proc_dir_inode_operations;
ent = proc_register(parent, ent);
}
@@ -503,7 +503,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
ent = __proc_create(&parent, name, mode, 2);
if (ent) {
ent->data = NULL;
- ent->proc_fops = NULL;
+ ent->proc_dir_ops = NULL;
ent->proc_iops = NULL;
ent = proc_register(parent, ent);
}
@@ -533,25 +533,23 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
struct proc_dir_entry *parent,
- const struct file_operations *proc_fops, void *data)
+ const struct proc_ops *proc_ops, void *data)
{
struct proc_dir_entry *p;
- BUG_ON(proc_fops == NULL);
-
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
- p->proc_fops = proc_fops;
+ p->proc_ops = proc_ops;
return proc_register(parent, p);
}
EXPORT_SYMBOL(proc_create_data);
struct proc_dir_entry *proc_create(const char *name, umode_t mode,
struct proc_dir_entry *parent,
- const struct file_operations *proc_fops)
+ const struct proc_ops *proc_ops)
{
- return proc_create_data(name, mode, parent, proc_fops, NULL);
+ return proc_create_data(name, mode, parent, proc_ops, NULL);
}
EXPORT_SYMBOL(proc_create);
@@ -573,11 +571,11 @@ static int proc_seq_release(struct inode *inode, struct file *file)
return seq_release(inode, file);
}
-static const struct file_operations proc_seq_fops = {
- .open = proc_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = proc_seq_release,
+static const struct proc_ops proc_seq_ops = {
+ .proc_open = proc_seq_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = proc_seq_release,
};
struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
@@ -589,7 +587,7 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
- p->proc_fops = &proc_seq_fops;
+ p->proc_ops = &proc_seq_ops;
p->seq_ops = ops;
p->state_size = state_size;
return proc_register(parent, p);
@@ -603,11 +601,11 @@ static int proc_single_open(struct inode *inode, struct file *file)
return single_open(file, de->single_show, de->data);
}
-static const struct file_operations proc_single_fops = {
- .open = proc_single_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct proc_ops proc_single_ops = {
+ .proc_open = proc_single_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
};
struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
@@ -619,7 +617,7 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
- p->proc_fops = &proc_single_fops;
+ p->proc_ops = &proc_single_ops;
p->single_show = show;
return proc_register(parent, p);
}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index dbe43a50caf2..6da18316d209 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -163,7 +163,7 @@ static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
pdeo->closing = true;
spin_unlock(&pde->pde_unload_lock);
file = pdeo->file;
- pde->proc_fops->release(file_inode(file), file);
+ pde->proc_ops->proc_release(file_inode(file), file);
spin_lock(&pde->pde_unload_lock);
/* After ->release. */
list_del(&pdeo->lh);
@@ -200,12 +200,12 @@ static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
struct proc_dir_entry *pde = PDE(file_inode(file));
loff_t rv = -EINVAL;
if (use_pde(pde)) {
- typeof_member(struct file_operations, llseek) llseek;
+ typeof_member(struct proc_ops, proc_lseek) lseek;
- llseek = pde->proc_fops->llseek;
- if (!llseek)
- llseek = default_llseek;
- rv = llseek(file, offset, whence);
+ lseek = pde->proc_ops->proc_lseek;
+ if (!lseek)
+ lseek = default_llseek;
+ rv = lseek(file, offset, whence);
unuse_pde(pde);
}
return rv;
@@ -216,9 +216,9 @@ static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count,
struct proc_dir_entry *pde = PDE(file_inode(file));
ssize_t rv = -EIO;
if (use_pde(pde)) {
- typeof_member(struct file_operations, read) read;
+ typeof_member(struct proc_ops, proc_read) read;
- read = pde->proc_fops->read;
+ read = pde->proc_ops->proc_read;
if (read)
rv = read(file, buf, count, ppos);
unuse_pde(pde);
@@ -231,9 +231,9 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
struct proc_dir_entry *pde = PDE(file_inode(file));
ssize_t rv = -EIO;
if (use_pde(pde)) {
- typeof_member(struct file_operations, write) write;
+ typeof_member(struct proc_ops, proc_write) write;
- write = pde->proc_fops->write;
+ write = pde->proc_ops->proc_write;
if (write)
rv = write(file, buf, count, ppos);
unuse_pde(pde);
@@ -246,9 +246,9 @@ static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
struct proc_dir_entry *pde = PDE(file_inode(file));
__poll_t rv = DEFAULT_POLLMASK;
if (use_pde(pde)) {
- typeof_member(struct file_operations, poll) poll;
+ typeof_member(struct proc_ops, proc_poll) poll;
- poll = pde->proc_fops->poll;
+ poll = pde->proc_ops->proc_poll;
if (poll)
rv = poll(file, pts);
unuse_pde(pde);
@@ -261,9 +261,9 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
struct proc_dir_entry *pde = PDE(file_inode(file));
long rv = -ENOTTY;
if (use_pde(pde)) {
- typeof_member(struct file_operations, unlocked_ioctl) ioctl;
+ typeof_member(struct proc_ops, proc_ioctl) ioctl;
- ioctl = pde->proc_fops->unlocked_ioctl;
+ ioctl = pde->proc_ops->proc_ioctl;
if (ioctl)
rv = ioctl(file, cmd, arg);
unuse_pde(pde);
@@ -277,9 +277,9 @@ static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned
struct proc_dir_entry *pde = PDE(file_inode(file));
long rv = -ENOTTY;
if (use_pde(pde)) {
- typeof_member(struct file_operations, compat_ioctl) compat_ioctl;
+ typeof_member(struct proc_ops, proc_compat_ioctl) compat_ioctl;
- compat_ioctl = pde->proc_fops->compat_ioctl;
+ compat_ioctl = pde->proc_ops->proc_compat_ioctl;
if (compat_ioctl)
rv = compat_ioctl(file, cmd, arg);
unuse_pde(pde);
@@ -293,9 +293,9 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
struct proc_dir_entry *pde = PDE(file_inode(file));
int rv = -EIO;
if (use_pde(pde)) {
- typeof_member(struct file_operations, mmap) mmap;
+ typeof_member(struct proc_ops, proc_mmap) mmap;
- mmap = pde->proc_fops->mmap;
+ mmap = pde->proc_ops->proc_mmap;
if (mmap)
rv = mmap(file, vma);
unuse_pde(pde);
@@ -312,9 +312,9 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
unsigned long rv = -EIO;
if (use_pde(pde)) {
- typeof_member(struct file_operations, get_unmapped_area) get_area;
+ typeof_member(struct proc_ops, proc_get_unmapped_area) get_area;
- get_area = pde->proc_fops->get_unmapped_area;
+ get_area = pde->proc_ops->proc_get_unmapped_area;
#ifdef CONFIG_MMU
if (!get_area)
get_area = current->mm->get_unmapped_area;
@@ -333,8 +333,8 @@ static int proc_reg_open(struct inode *inode, struct file *file)
{
struct proc_dir_entry *pde = PDE(inode);
int rv = 0;
- typeof_member(struct file_operations, open) open;
- typeof_member(struct file_operations, release) release;
+ typeof_member(struct proc_ops, proc_open) open;
+ typeof_member(struct proc_ops, proc_release) release;
struct pde_opener *pdeo;
/*
@@ -351,7 +351,7 @@ static int proc_reg_open(struct inode *inode, struct file *file)
if (!use_pde(pde))
return -ENOENT;
- release = pde->proc_fops->release;
+ release = pde->proc_ops->proc_release;
if (release) {
pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
if (!pdeo) {
@@ -360,7 +360,7 @@ static int proc_reg_open(struct inode *inode, struct file *file)
}
}
- open = pde->proc_fops->open;
+ open = pde->proc_ops->proc_open;
if (open)
rv = open(inode, file);
@@ -468,21 +468,23 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
inode->i_size = de->size;
if (de->nlink)
set_nlink(inode, de->nlink);
- WARN_ON(!de->proc_iops);
- inode->i_op = de->proc_iops;
- if (de->proc_fops) {
- if (S_ISREG(inode->i_mode)) {
+
+ if (S_ISREG(inode->i_mode)) {
+ inode->i_op = de->proc_iops;
+ inode->i_fop = &proc_reg_file_ops;
#ifdef CONFIG_COMPAT
- if (!de->proc_fops->compat_ioctl)
- inode->i_fop =
- &proc_reg_file_ops_no_compat;
- else
-#endif
- inode->i_fop = &proc_reg_file_ops;
- } else {
- inode->i_fop = de->proc_fops;
+ if (!de->proc_ops->proc_compat_ioctl) {
+ inode->i_fop = &proc_reg_file_ops_no_compat;
}
- }
+#endif
+ } else if (S_ISDIR(inode->i_mode)) {
+ inode->i_op = de->proc_iops;
+ inode->i_fop = de->proc_dir_ops;
+ } else if (S_ISLNK(inode->i_mode)) {
+ inode->i_op = de->proc_iops;
+ inode->i_fop = NULL;
+ } else
+ BUG();
} else
pde_put(de);
return inode;
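The proc_reg_* wrappers above load each handler through typeof_member(), which declares a local whose type is exactly that of a struct member without spelling the function-pointer type out twice. A small sketch of how that works; the macro below matches the kernel's definition, and typeof is a GCC/Clang extension:

#include <stdio.h>

#define typeof_member(T, m)	typeof(((T *)0)->m)

struct ops {
	long (*seek)(long off, int whence);
};

static long my_seek(long off, int whence)
{
	return off;		/* toy implementation */
}

int main(void)
{
	struct ops o = { .seek = my_seek };
	/* same as: long (*seek)(long, int) = o.seek; */
	typeof_member(struct ops, seek) seek = o.seek;

	printf("%ld\n", seek(42, 0));
	return 0;
}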
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 0f3b557c9b77..41587276798e 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -39,7 +39,10 @@ struct proc_dir_entry {
spinlock_t pde_unload_lock;
struct completion *pde_unload_completion;
const struct inode_operations *proc_iops;
- const struct file_operations *proc_fops;
+ union {
+ const struct proc_ops *proc_ops;
+ const struct file_operations *proc_dir_ops;
+ };
const struct dentry_operations *proc_dops;
union {
const struct seq_operations *seq_ops;
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e2ed8e08cc7a..8ba492d44e68 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -574,11 +574,11 @@ static int release_kcore(struct inode *inode, struct file *file)
return 0;
}
-static const struct file_operations proc_kcore_operations = {
- .read = read_kcore,
- .open = open_kcore,
- .release = release_kcore,
- .llseek = default_llseek,
+static const struct proc_ops kcore_proc_ops = {
+ .proc_read = read_kcore,
+ .proc_open = open_kcore,
+ .proc_release = release_kcore,
+ .proc_lseek = default_llseek,
};
/* just remember that we have to update kcore */
@@ -637,8 +637,7 @@ static void __init add_modules_range(void)
static int __init proc_kcore_init(void)
{
- proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
- &proc_kcore_operations);
+ proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
if (!proc_root_kcore) {
pr_err("couldn't create /proc/kcore\n");
return 0; /* Always returns 0. */
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index 4f4a2abb225e..ec1b7d2fb773 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -49,17 +49,17 @@ static __poll_t kmsg_poll(struct file *file, poll_table *wait)
}
-static const struct file_operations proc_kmsg_operations = {
- .read = kmsg_read,
- .poll = kmsg_poll,
- .open = kmsg_open,
- .release = kmsg_release,
- .llseek = generic_file_llseek,
+static const struct proc_ops kmsg_proc_ops = {
+ .proc_read = kmsg_read,
+ .proc_poll = kmsg_poll,
+ .proc_open = kmsg_open,
+ .proc_release = kmsg_release,
+ .proc_lseek = generic_file_llseek,
};
static int __init proc_kmsg_init(void)
{
- proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations);
+ proc_create("kmsg", S_IRUSR, NULL, &kmsg_proc_ops);
return 0;
}
fs_initcall(proc_kmsg_init);
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index dd2b35f78b09..8e159fc78c0a 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -33,6 +33,10 @@ static const struct proc_ns_operations *ns_entries[] = {
#ifdef CONFIG_CGROUPS
&cgroupns_operations,
#endif
+#ifdef CONFIG_TIME_NS
+ &timens_operations,
+ &timens_for_children_operations,
+#endif
};
static const char *proc_ns_get_link(struct dentry *dentry,
@@ -42,22 +46,26 @@ static const char *proc_ns_get_link(struct dentry *dentry,
const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
struct task_struct *task;
struct path ns_path;
- void *error = ERR_PTR(-EACCES);
+ int error = -EACCES;
if (!dentry)
return ERR_PTR(-ECHILD);
task = get_proc_task(inode);
if (!task)
- return error;
+ return ERR_PTR(-EACCES);
- if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
- error = ns_get_path(&ns_path, task, ns_ops);
- if (!error)
- nd_jump_link(&ns_path);
- }
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+ goto out;
+
+ error = ns_get_path(&ns_path, task, ns_ops);
+ if (error)
+ goto out;
+
+ error = nd_jump_link(&ns_path);
+out:
put_task_struct(task);
- return error;
+ return ERR_PTR(error);
}
static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen)
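proc_ns_get_link above now tracks a plain int error and converts once at the end with ERR_PTR(), since nd_jump_link() returns int in this series. A hedged userspace re-creation of the ERR_PTR/IS_ERR/PTR_ERR convention the hunk relies on, where small negative errnos are folded into the pointer value itself:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static const char *lookup(int ok)
{
	int error = -EACCES;

	if (!ok)
		return ERR_PTR(error);	/* single conversion at the end */
	return "link-target";
}

int main(void)
{
	const char *p = lookup(0);

	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));	/* -13 (EACCES) */
	return 0;
}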
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 7c952ee732e6..f909243d4a66 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -21,6 +21,21 @@
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)
+static inline unsigned long get_max_dump_pfn(void)
+{
+#ifdef CONFIG_SPARSEMEM
+ /*
+ * The memmap of early sections is completely populated and marked
+ * online even if max_pfn does not fall on a section boundary -
+ * pfn_to_online_page() will succeed on all pages. Allow inspecting
+ * these memmaps.
+ */
+ return round_up(max_pfn, PAGES_PER_SECTION);
+#else
+ return max_pfn;
+#endif
+}
+
/* /proc/kpagecount - an array exposing page counts
*
* Each entry is a u64 representing the corresponding
@@ -29,6 +44,7 @@
static ssize_t kpagecount_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ const unsigned long max_dump_pfn = get_max_dump_pfn();
u64 __user *out = (u64 __user *)buf;
struct page *ppage;
unsigned long src = *ppos;
@@ -37,9 +53,11 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
u64 pcount;
pfn = src / KPMSIZE;
- count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
if (src & KPMMASK || count & KPMMASK)
return -EINVAL;
+ if (src >= max_dump_pfn * KPMSIZE)
+ return 0;
+ count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
while (count > 0) {
/*
@@ -71,9 +89,9 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
return ret;
}
-static const struct file_operations proc_kpagecount_operations = {
- .llseek = mem_lseek,
- .read = kpagecount_read,
+static const struct proc_ops kpagecount_proc_ops = {
+ .proc_lseek = mem_lseek,
+ .proc_read = kpagecount_read,
};
/* /proc/kpageflags - an array exposing page flags
@@ -206,6 +224,7 @@ u64 stable_page_flags(struct page *page)
static ssize_t kpageflags_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ const unsigned long max_dump_pfn = get_max_dump_pfn();
u64 __user *out = (u64 __user *)buf;
struct page *ppage;
unsigned long src = *ppos;
@@ -213,9 +232,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
ssize_t ret = 0;
pfn = src / KPMSIZE;
- count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
if (src & KPMMASK || count & KPMMASK)
return -EINVAL;
+ if (src >= max_dump_pfn * KPMSIZE)
+ return 0;
+ count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
while (count > 0) {
/*
@@ -242,15 +263,16 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
return ret;
}
-static const struct file_operations proc_kpageflags_operations = {
- .llseek = mem_lseek,
- .read = kpageflags_read,
+static const struct proc_ops kpageflags_proc_ops = {
+ .proc_lseek = mem_lseek,
+ .proc_read = kpageflags_read,
};
#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ const unsigned long max_dump_pfn = get_max_dump_pfn();
u64 __user *out = (u64 __user *)buf;
struct page *ppage;
unsigned long src = *ppos;
@@ -259,9 +281,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
u64 ino;
pfn = src / KPMSIZE;
- count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
if (src & KPMMASK || count & KPMMASK)
return -EINVAL;
+ if (src >= max_dump_pfn * KPMSIZE)
+ return 0;
+ count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
while (count > 0) {
/*
@@ -293,18 +317,18 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
return ret;
}
-static const struct file_operations proc_kpagecgroup_operations = {
- .llseek = mem_lseek,
- .read = kpagecgroup_read,
+static const struct proc_ops kpagecgroup_proc_ops = {
+ .proc_lseek = mem_lseek,
+ .proc_read = kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */
static int __init proc_page_init(void)
{
- proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
- proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
+ proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
+ proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
- proc_create("kpagecgroup", S_IRUSR, NULL, &proc_kpagecgroup_operations);
+ proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
return 0;
}
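Each kpage* reader above now clamps against max_dump_pfn the same way: reject misaligned requests, return 0 (EOF) once the offset passes the limit, and otherwise shrink count to what remains. A small sketch of that window-clamping shape with hypothetical names:

#include <stdio.h>

#define KPMSIZE	sizeof(unsigned long long)
#define KPMMASK	(KPMSIZE - 1)

static long clamp_read(unsigned long src, unsigned long count,
		       unsigned long max_dump_pfn)
{
	if (src & KPMMASK || count & KPMMASK)
		return -1;			/* -EINVAL in the kernel */
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;			/* EOF past the limit */
	if (count > max_dump_pfn * KPMSIZE - src)
		count = max_dump_pfn * KPMSIZE - src;
	return (long)count;
}

int main(void)
{
	/* 100 pfns; a reader asks for 64 bytes starting at entry 98 */
	printf("%ld\n", clamp_read(98 * KPMSIZE, 64, 100));	/* -> 16 */
	return 0;
}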
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 76ae278df1c4..4888c5224442 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -90,12 +90,12 @@ static int seq_release_net(struct inode *ino, struct file *f)
return 0;
}
-static const struct file_operations proc_net_seq_fops = {
- .open = seq_open_net,
- .read = seq_read,
- .write = proc_simple_write,
- .llseek = seq_lseek,
- .release = seq_release_net,
+static const struct proc_ops proc_net_seq_ops = {
+ .proc_open = seq_open_net,
+ .proc_read = seq_read,
+ .proc_write = proc_simple_write,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release_net,
};
struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
@@ -108,7 +108,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
if (!p)
return NULL;
pde_force_lookup(p);
- p->proc_fops = &proc_net_seq_fops;
+ p->proc_ops = &proc_net_seq_ops;
p->seq_ops = ops;
p->state_size = state_size;
return proc_register(parent, p);
@@ -152,7 +152,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
if (!p)
return NULL;
pde_force_lookup(p);
- p->proc_fops = &proc_net_seq_fops;
+ p->proc_ops = &proc_net_seq_ops;
p->seq_ops = ops;
p->state_size = state_size;
p->write = write;
@@ -183,12 +183,12 @@ static int single_release_net(struct inode *ino, struct file *f)
return single_release(ino, f);
}
-static const struct file_operations proc_net_single_fops = {
- .open = single_open_net,
- .read = seq_read,
- .write = proc_simple_write,
- .llseek = seq_lseek,
- .release = single_release_net,
+static const struct proc_ops proc_net_single_ops = {
+ .proc_open = single_open_net,
+ .proc_read = seq_read,
+ .proc_write = proc_simple_write,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release_net,
};
struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
@@ -201,7 +201,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
if (!p)
return NULL;
pde_force_lookup(p);
- p->proc_fops = &proc_net_single_fops;
+ p->proc_ops = &proc_net_single_ops;
p->single_show = show;
return proc_register(parent, p);
}
@@ -244,7 +244,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
if (!p)
return NULL;
pde_force_lookup(p);
- p->proc_fops = &proc_net_single_fops;
+ p->proc_ops = &proc_net_single_ops;
p->single_show = show;
p->write = write;
return proc_register(parent, p);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d80989b6c344..c75bb4632ed1 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1720,7 +1720,7 @@ int __init proc_sys_init(void)
proc_sys_root = proc_mkdir("sys", NULL);
proc_sys_root->proc_iops = &proc_sys_dir_operations;
- proc_sys_root->proc_fops = &proc_sys_dir_file_operations;
+ proc_sys_root->proc_dir_ops = &proc_sys_dir_file_operations;
proc_sys_root->nlink = 0;
return sysctl_init();
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 0b7c8dffc9ae..608233dfd29c 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -41,24 +41,19 @@ enum proc_param {
Opt_hidepid,
};
-static const struct fs_parameter_spec proc_param_specs[] = {
+static const struct fs_parameter_spec proc_fs_parameters[] = {
fsparam_u32("gid", Opt_gid),
fsparam_u32("hidepid", Opt_hidepid),
{}
};
-static const struct fs_parameter_description proc_fs_parameters = {
- .name = "proc",
- .specs = proc_param_specs,
-};
-
static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct proc_fs_context *ctx = fc->fs_private;
struct fs_parse_result result;
int opt;
- opt = fs_parse(fc, &proc_fs_parameters, param, &result);
+ opt = fs_parse(fc, proc_fs_parameters, param, &result);
if (opt < 0)
return opt;
@@ -71,7 +66,7 @@ static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
ctx->hidepid = result.uint_32;
if (ctx->hidepid < HIDEPID_OFF ||
ctx->hidepid > HIDEPID_INVISIBLE)
- return invalf(fc, "proc: hidepid value must be between 0 and 2.\n");
+ return invalfc(fc, "hidepid value must be between 0 and 2.\n");
break;
default:
@@ -207,7 +202,7 @@ static void proc_kill_sb(struct super_block *sb)
static struct file_system_type proc_fs_type = {
.name = "proc",
.init_fs_context = proc_init_fs_context,
- .parameters = &proc_fs_parameters,
+ .parameters = proc_fs_parameters,
.kill_sb = proc_kill_sb,
.fs_flags = FS_USERNS_MOUNT | FS_DISALLOW_NOTIFY_PERM,
};
@@ -292,7 +287,7 @@ struct proc_dir_entry proc_root = {
.nlink = 2,
.refcnt = REFCOUNT_INIT(1),
.proc_iops = &proc_root_inode_operations,
- .proc_fops = &proc_root_operations,
+ .proc_dir_ops = &proc_root_operations,
.parent = &proc_root,
.subdir = RB_ROOT,
.name = "/proc",
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index fd931d3e77be..0449edf460f5 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -223,16 +223,16 @@ static int stat_open(struct inode *inode, struct file *file)
return single_open_size(file, show_stat, NULL, size);
}
-static const struct file_operations proc_stat_operations = {
- .open = stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct proc_ops stat_proc_ops = {
+ .proc_open = stat_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
};
static int __init proc_stat_init(void)
{
- proc_create("stat", 0, NULL, &proc_stat_operations);
+ proc_create("stat", 0, NULL, &stat_proc_ops);
return 0;
}
fs_initcall(proc_stat_init);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9442631fd4af..3ba9ae83bff5 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -505,7 +505,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
- struct mm_walk *walk)
+ __always_unused int depth, struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
@@ -1282,7 +1282,7 @@ static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
- struct mm_walk *walk)
+ __always_unused int depth, struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
unsigned long addr = start;
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
index a4c2791ab70b..5a1b228964fb 100644
--- a/fs/proc/uptime.c
+++ b/fs/proc/uptime.c
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/time.h>
+#include <linux/time_namespace.h>
#include <linux/kernel_stat.h>
static int uptime_proc_show(struct seq_file *m, void *v)
@@ -20,6 +21,8 @@ static int uptime_proc_show(struct seq_file *m, void *v)
nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
ktime_get_boottime_ts64(&uptime);
+ timens_add_boottime(&uptime);
+
idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
idle.tv_nsec = rem;
seq_printf(m, "%lu.%02lu %lu.%02lu\n",
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 7b13988796e1..7dc800cce354 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -667,10 +667,10 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
}
#endif
-static const struct file_operations proc_vmcore_operations = {
- .read = read_vmcore,
- .llseek = default_llseek,
- .mmap = mmap_vmcore,
+static const struct proc_ops vmcore_proc_ops = {
+ .proc_read = read_vmcore,
+ .proc_lseek = default_llseek,
+ .proc_mmap = mmap_vmcore,
};
static struct vmcore* __init get_new_element(void)
@@ -1555,7 +1555,7 @@ static int __init vmcore_init(void)
elfcorehdr_free(elfcorehdr_addr);
elfcorehdr_addr = ELFCORE_ADDR_ERR;
- proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
+ proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
if (proc_vmcore)
proc_vmcore->size = vmcore_size;
return 0;
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 53429c29c784..58fc2a7c7fd1 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -22,8 +22,6 @@ MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota format v2 support");
MODULE_LICENSE("GPL");
-#define __QUOTA_V2_PARANOIA
-
static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot);
static void v2r0_disk2memdqb(struct dquot *dquot, void *dp);
static int v2r0_is_id(void *dp, struct dquot *dquot);
diff --git a/fs/quota/quotaio_v1.h b/fs/quota/quotaio_v1.h
index bd11e2c08119..31dca9a89176 100644
--- a/fs/quota/quotaio_v1.h
+++ b/fs/quota/quotaio_v1.h
@@ -25,8 +25,10 @@ struct v1_disk_dqblk {
__u32 dqb_ihardlimit; /* absolute limit on allocated inodes */
__u32 dqb_isoftlimit; /* preferred inode limit */
__u32 dqb_curinodes; /* current # allocated inodes */
- time_t dqb_btime; /* time limit for excessive disk use */
- time_t dqb_itime; /* time limit for excessive inode use */
+
+ /* below fields differ in length on 32-bit vs 64-bit architectures */
+ unsigned long dqb_btime; /* time limit for excessive disk use */
+ unsigned long dqb_itime; /* time limit for excessive inode use */
};
#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk)))
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index d82636e8eb65..ee179a81b3da 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -181,23 +181,18 @@ enum ramfs_param {
Opt_mode,
};
-static const struct fs_parameter_spec ramfs_param_specs[] = {
+const struct fs_parameter_spec ramfs_fs_parameters[] = {
fsparam_u32oct("mode", Opt_mode),
{}
};
-const struct fs_parameter_description ramfs_fs_parameters = {
- .name = "ramfs",
- .specs = ramfs_param_specs,
-};
-
static int ramfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct fs_parse_result result;
struct ramfs_fs_info *fsi = fc->s_fs_info;
int opt;
- opt = fs_parse(fc, &ramfs_fs_parameters, param, &result);
+ opt = fs_parse(fc, ramfs_fs_parameters, param, &result);
if (opt < 0) {
/*
* We might like to report bad mount options here;
@@ -278,7 +273,7 @@ static void ramfs_kill_sb(struct super_block *sb)
static struct file_system_type ramfs_fs_type = {
.name = "ramfs",
.init_fs_context = ramfs_init_fs_context,
- .parameters = &ramfs_fs_parameters,
+ .parameters = ramfs_fs_parameters,
.kill_sb = ramfs_kill_sb,
.fs_flags = FS_USERNS_MOUNT,
};
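Both the proc and ramfs hunks above drop the fs_parameter_description wrapper: file_system_type.parameters now points straight at the fs_parameter_spec array, and fs_parse()/invalfc() take the array or the context directly. A hedged kernel-style fragment of the flattened pattern; "examplefs" and its options are hypothetical, and this compiles only against kernel headers that have the new API:

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum examplefs_param { Opt_gid, Opt_mode };

static const struct fs_parameter_spec examplefs_fs_parameters[] = {
	fsparam_u32("gid", Opt_gid),
	fsparam_u32oct("mode", Opt_mode),
	{}
};

static int examplefs_parse_param(struct fs_context *fc,
				 struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	/* the spec array is passed directly, no wrapper struct */
	opt = fs_parse(fc, examplefs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_gid:
		/* result.uint_32 holds the parsed value */
		break;
	case Opt_mode:
		if (result.uint_32 & ~0777)
			return invalfc(fc, "invalid mode");
		break;
	}
	return 0;
}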
diff --git a/fs/read_write.c b/fs/read_write.c
index 5bbf587f5bc1..59d819c5b92e 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -939,6 +939,34 @@ out:
return ret;
}
+ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
+ struct iov_iter *iter)
+{
+ size_t tot_len;
+ ssize_t ret = 0;
+
+ if (!file->f_op->read_iter)
+ return -EINVAL;
+ if (!(file->f_mode & FMODE_READ))
+ return -EBADF;
+ if (!(file->f_mode & FMODE_CAN_READ))
+ return -EINVAL;
+
+ tot_len = iov_iter_count(iter);
+ if (!tot_len)
+ goto out;
+ ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len);
+ if (ret < 0)
+ return ret;
+
+ ret = call_read_iter(file, iocb, iter);
+out:
+ if (ret >= 0)
+ fsnotify_access(file);
+ return ret;
+}
+EXPORT_SYMBOL(vfs_iocb_iter_read);
+
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags)
{
@@ -975,6 +1003,34 @@ static ssize_t do_iter_write(struct file *file, struct iov_iter *iter,
return ret;
}
+ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
+ struct iov_iter *iter)
+{
+ size_t tot_len;
+ ssize_t ret = 0;
+
+ if (!file->f_op->write_iter)
+ return -EINVAL;
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EBADF;
+ if (!(file->f_mode & FMODE_CAN_WRITE))
+ return -EINVAL;
+
+ tot_len = iov_iter_count(iter);
+ if (!tot_len)
+ return 0;
+ ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len);
+ if (ret < 0)
+ return ret;
+
+ ret = call_write_iter(file, iocb, iter);
+ if (ret > 0)
+ fsnotify_modify(file);
+
+ return ret;
+}
+EXPORT_SYMBOL(vfs_iocb_iter_write);
+
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags)
{
@@ -1777,10 +1833,9 @@ static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
* else. Assume that the offsets have already been checked for block
* alignment.
*
- * For deduplication we always scale down to the previous block because we
- * can't meaningfully compare post-EOF contents.
- *
- * For clone we only link a partial EOF block above the destination file's EOF.
+ * For clone we only link a partial EOF block above or at the destination file's
+ * EOF. For deduplication we accept a partial EOF block only if it ends at the
+ * destination file's EOF (cannot link it into the middle of a file).
*
* Shorten the request if possible.
*/
@@ -1796,8 +1851,7 @@ static int generic_remap_check_len(struct inode *inode_in,
if ((*len & blkmask) == 0)
return 0;
- if ((remap_flags & REMAP_FILE_DEDUP) ||
- pos_out + *len < i_size_read(inode_out))
+ if (pos_out + *len < i_size_read(inode_out))
new_len &= ~blkmask;
if (new_len == *len)
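vfs_iocb_iter_read()/vfs_iocb_iter_write() added above let a stacking filesystem hand the underlying file its own kiocb, so position and completion travel with the request instead of a bare loff_t. A hedged kernel-style sketch of a caller; "stackfs" is hypothetical, and init_sync_kiocb() makes this particular variant synchronous:

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t stackfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *realfile = iocb->ki_filp->private_data;
	struct kiocb rocb;
	ssize_t ret;

	/* build a kiocb aimed at the real file, at the caller's offset */
	init_sync_kiocb(&rocb, realfile);
	rocb.ki_pos = iocb->ki_pos;
	ret = vfs_iocb_iter_read(realfile, &rocb, iter);
	iocb->ki_pos = rocb.ki_pos;	/* propagate the new position */
	return ret;
}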
diff --git a/fs/readdir.c b/fs/readdir.c
index d26d5ea4de7b..de2eceffdee8 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -102,10 +102,14 @@ EXPORT_SYMBOL(iterate_dir);
* filename length, and the above "soft error" worry means
* that it's probably better left alone until we have that
* issue clarified.
+ *
+ * Note the PATH_MAX check - it's arbitrary but the real
+ * kernel limit on a possible path component, not NAME_MAX,
+ * which is the technical standard limit.
*/
static int verify_dirent_name(const char *name, int len)
{
- if (!len)
+ if (len <= 0 || len >= PATH_MAX)
return -EIO;
if (memchr(name, '/', len))
return -EIO;
@@ -206,7 +210,7 @@ struct linux_dirent {
struct getdents_callback {
struct dir_context ctx;
struct linux_dirent __user * current_dir;
- struct linux_dirent __user * previous;
+ int prev_reclen;
int count;
int error;
};
@@ -214,12 +218,13 @@ struct getdents_callback {
static int filldir(struct dir_context *ctx, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
- struct linux_dirent __user * dirent;
+ struct linux_dirent __user *dirent, *prev;
struct getdents_callback *buf =
container_of(ctx, struct getdents_callback, ctx);
unsigned long d_ino;
int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
sizeof(long));
+ int prev_reclen;
buf->error = verify_dirent_name(name, namlen);
if (unlikely(buf->error))
@@ -232,28 +237,24 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
buf->error = -EOVERFLOW;
return -EOVERFLOW;
}
- dirent = buf->previous;
- if (dirent && signal_pending(current))
+ prev_reclen = buf->prev_reclen;
+ if (prev_reclen && signal_pending(current))
return -EINTR;
-
- /*
- * Note! This range-checks 'previous' (which may be NULL).
- * The real range was checked in getdents
- */
- if (!user_access_begin(dirent, sizeof(*dirent)))
- goto efault;
- if (dirent)
- unsafe_put_user(offset, &dirent->d_off, efault_end);
dirent = buf->current_dir;
+ prev = (void __user *) dirent - prev_reclen;
+ if (!user_access_begin(prev, reclen + prev_reclen))
+ goto efault;
+
+ /* This might be 'dirent->d_off', but if so it will get overwritten */
+ unsafe_put_user(offset, &prev->d_off, efault_end);
unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
user_access_end();
- buf->previous = dirent;
- dirent = (void __user *)dirent + reclen;
- buf->current_dir = dirent;
+ buf->current_dir = (void __user *)dirent + reclen;
+ buf->prev_reclen = reclen;
buf->count -= reclen;
return 0;
efault_end:
@@ -267,7 +268,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
struct linux_dirent __user *, dirent, unsigned int, count)
{
struct fd f;
- struct linux_dirent __user * lastdirent;
struct getdents_callback buf = {
.ctx.actor = filldir,
.count = count,
@@ -285,8 +285,10 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
error = iterate_dir(f.file, &buf.ctx);
if (error >= 0)
error = buf.error;
- lastdirent = buf.previous;
- if (lastdirent) {
+ if (buf.prev_reclen) {
+ struct linux_dirent __user * lastdirent;
+ lastdirent = (void __user *)buf.current_dir - buf.prev_reclen;
+
if (put_user(buf.ctx.pos, &lastdirent->d_off))
error = -EFAULT;
else
@@ -299,7 +301,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
struct getdents_callback64 {
struct dir_context ctx;
struct linux_dirent64 __user * current_dir;
- struct linux_dirent64 __user * previous;
+ int prev_reclen;
int count;
int error;
};
@@ -307,11 +309,12 @@ struct getdents_callback64 {
static int filldir64(struct dir_context *ctx, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
- struct linux_dirent64 __user *dirent;
+ struct linux_dirent64 __user *dirent, *prev;
struct getdents_callback64 *buf =
container_of(ctx, struct getdents_callback64, ctx);
int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
sizeof(u64));
+ int prev_reclen;
buf->error = verify_dirent_name(name, namlen);
if (unlikely(buf->error))
@@ -319,30 +322,27 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
- dirent = buf->previous;
- if (dirent && signal_pending(current))
+ prev_reclen = buf->prev_reclen;
+ if (prev_reclen && signal_pending(current))
return -EINTR;
-
- /*
- * Note! This range-checks 'previous' (which may be NULL).
- * The real range was checked in getdents
- */
- if (!user_access_begin(dirent, sizeof(*dirent)))
- goto efault;
- if (dirent)
- unsafe_put_user(offset, &dirent->d_off, efault_end);
dirent = buf->current_dir;
+ prev = (void __user *)dirent - prev_reclen;
+ if (!user_access_begin(prev, reclen + prev_reclen))
+ goto efault;
+
+ /* This might be 'dirent->d_off', but if so it will get overwritten */
+ unsafe_put_user(offset, &prev->d_off, efault_end);
unsafe_put_user(ino, &dirent->d_ino, efault_end);
unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
unsafe_put_user(d_type, &dirent->d_type, efault_end);
unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
user_access_end();
- buf->previous = dirent;
- dirent = (void __user *)dirent + reclen;
- buf->current_dir = dirent;
+ buf->prev_reclen = reclen;
+ buf->current_dir = (void __user *)dirent + reclen;
buf->count -= reclen;
return 0;
+
efault_end:
user_access_end();
efault:
@@ -354,7 +354,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
unsigned int count)
{
struct fd f;
- struct linux_dirent64 __user * lastdirent;
struct getdents_callback64 buf = {
.ctx.actor = filldir64,
.count = count,
@@ -372,9 +371,11 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
error = iterate_dir(f.file, &buf.ctx);
if (error >= 0)
error = buf.error;
- lastdirent = buf.previous;
- if (lastdirent) {
+ if (buf.prev_reclen) {
+ struct linux_dirent64 __user * lastdirent;
typeof(lastdirent->d_off) d_off = buf.ctx.pos;
+
+ lastdirent = (void __user *) buf.current_dir - buf.prev_reclen;
if (__put_user(d_off, &lastdirent->d_off))
error = -EFAULT;
else
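
The user-visible record format is unchanged by the prev_reclen conversion; d_reclen still chains records, and d_off in each record (back-patched as above) is the seek position of the next one. A self-contained walker using the raw syscall, for illustration:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct linux_dirent64 {             /* matches the kernel layout */
        unsigned long long d_ino;
        long long          d_off;       /* back-patched by filldir64() */
        unsigned short     d_reclen;
        unsigned char      d_type;
        char               d_name[];
    };

    int main(void)
    {
        char buf[4096];
        long nread, pos;
        int fd = open(".", O_RDONLY | O_DIRECTORY);

        if (fd < 0)
            return 1;
        while ((nread = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
            for (pos = 0; pos < nread; ) {
                struct linux_dirent64 *d =
                    (struct linux_dirent64 *)(buf + pos);

                printf("%llu\t%s\n", d->d_ino, d->d_name);
                pos += d->d_reclen;     /* records chained by d_reclen */
            }
        }
        close(fd);
        return 0;
    }
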
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 4b3e3e73b512..072156c4f895 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -56,8 +56,6 @@
/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
j_list))
-#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
- j_working_list))
/* must be correct to keep the desc and commit structs at 4k */
#define JOURNAL_TRANS_HALF 1018
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index f2cf3441fdfc..ff336513c254 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -63,7 +63,6 @@ static int show_version(struct seq_file *m, void *unused)
#define MAP( i ) D4C( objectid_map( sb, rs )[ i ] )
#define DJF( x ) le32_to_cpu( rs -> x )
-#define DJV( x ) le32_to_cpu( s_v1 -> x )
#define DJP( x ) le32_to_cpu( jp -> x )
#define JF( x ) ( r -> s_journal -> x )
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index da9ebe33882b..8bf88d690729 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -918,12 +918,6 @@ int comp_items(const struct item_head *stored_ih, const struct treepath *path)
return memcmp(stored_ih, ih, IH_SIZE);
}
-/* unformatted nodes are not logged anymore, ever. This is safe now */
-#define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)
-
-/* block can not be forgotten as it is in I/O or held by someone */
-#define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))
-
/* prepare for delete or cut of direct item */
static inline int prepare_for_direct_item(struct treepath *path,
struct item_head *le_ih,
@@ -2246,7 +2240,8 @@ error_out:
/* also releases the path */
unfix_nodes(&s_ins_balance);
#ifdef REISERQUOTA_DEBUG
- reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
+ if (inode)
+ reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
"reiserquota insert_item(): freeing %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 3244037b1286..a6bce5b1fb1d 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -629,6 +629,7 @@ static void reiserfs_put_super(struct super_block *s)
reiserfs_write_unlock(s);
mutex_destroy(&REISERFS_SB(s)->lock);
destroy_workqueue(REISERFS_SB(s)->commit_wq);
+ kfree(REISERFS_SB(s)->s_jdev);
kfree(s->s_fs_info);
s->s_fs_info = NULL;
}
@@ -1947,7 +1948,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if (!sbi->s_jdev) {
SWARN(silent, s, "", "Cannot allocate memory for "
"journal device name");
- goto error;
+ goto error_unlocked;
}
}
#ifdef CONFIG_QUOTA
@@ -2240,6 +2241,7 @@ error_unlocked:
kfree(qf_names[j]);
}
#endif
+ kfree(sbi->s_jdev);
kfree(sbi);
s->s_fs_info = NULL;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 62b40df36c98..28b241cd6987 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -319,8 +319,12 @@ static int reiserfs_for_each_xattr(struct inode *inode,
out_dir:
dput(dir);
out:
- /* -ENODATA isn't an error */
- if (err == -ENODATA)
+ /*
+ * -ENODATA: this object doesn't have any xattrs
+ * -EOPNOTSUPP: this file system doesn't have xattrs enabled on disk.
+ * Neither is an error.
+ */
+ if (err == -ENODATA || err == -EOPNOTSUPP)
err = 0;
return err;
}
diff --git a/fs/splice.c b/fs/splice.c
index 3009652a41c8..d671936d0aad 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -165,8 +165,8 @@ static const struct pipe_buf_operations user_page_pipe_buf_ops = {
static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
{
smp_mb();
- if (waitqueue_active(&pipe->wait))
- wake_up_interruptible(&pipe->wait);
+ if (waitqueue_active(&pipe->rd_wait))
+ wake_up_interruptible(&pipe->rd_wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
@@ -462,8 +462,8 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
{
smp_mb();
- if (waitqueue_active(&pipe->wait))
- wake_up_interruptible(&pipe->wait);
+ if (waitqueue_active(&pipe->wr_wait))
+ wake_up_interruptible(&pipe->wr_wait);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
diff --git a/fs/stack.c b/fs/stack.c
index 4ef2c056269d..c9830924eb12 100644
--- a/fs/stack.c
+++ b/fs/stack.c
@@ -23,7 +23,7 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
/*
* But on 32-bit, we ought to make an effort to keep the two halves of
- * i_blocks in sync despite SMP or PREEMPT - though stat's
+ * i_blocks in sync despite SMP or PREEMPTION - though stat's
* generic_fillattr() doesn't bother, and we won't be applying quotas
* (where i_blocks does become important) at the upper level.
*
@@ -38,14 +38,14 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
spin_unlock(&src->i_lock);
/*
- * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for
+ * If CONFIG_SMP or CONFIG_PREEMPTION on 32-bit, it's vital for
* fsstack_copy_inode_size() to hold some lock around
* i_size_write(), otherwise i_size_read() may spin forever (see
* include/linux/fs.h). We don't necessarily hold i_mutex when this
* is called, so take i_lock for that case.
*
* And if on 32-bit, continue our effort to keep the two halves of
- * i_blocks in sync despite SMP or PREEMPT: use i_lock for that case
+ * i_blocks in sync despite SMP or PREEMPTION: use i_lock for that case
* too, and do both at once by combining the tests.
*
* There is none of this locking overhead in the 64-bit case.
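
A sketch of the pattern this comment defends, 32-bit branch only (variable names follow the surrounding fsstack_copy_inode_size()):

    spin_lock(&dst->i_lock);
    i_size_write(dst, i_size);      /* seqcount write; an unserialized
                                     * i_size_read() could otherwise
                                     * spin forever */
    dst->i_blocks = i_blocks;       /* keep both 32-bit halves coherent */
    spin_unlock(&dst->i_lock);
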
diff --git a/fs/stat.c b/fs/stat.c
index c38e4c2e1221..030008796479 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -21,6 +21,8 @@
#include <linux/uaccess.h>
#include <asm/unistd.h>
+#include "internal.h"
+
/**
* generic_fillattr - Fill in the basic attributes from the inode struct
* @inode: Inode to use as the source
@@ -150,6 +152,23 @@ int vfs_statx_fd(unsigned int fd, struct kstat *stat,
}
EXPORT_SYMBOL(vfs_statx_fd);
+inline unsigned vfs_stat_set_lookup_flags(unsigned *lookup_flags, int flags)
+{
+ if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
+ AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
+ return -EINVAL;
+
+ *lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;
+ if (flags & AT_SYMLINK_NOFOLLOW)
+ *lookup_flags &= ~LOOKUP_FOLLOW;
+ if (flags & AT_NO_AUTOMOUNT)
+ *lookup_flags &= ~LOOKUP_AUTOMOUNT;
+ if (flags & AT_EMPTY_PATH)
+ *lookup_flags |= LOOKUP_EMPTY;
+
+ return 0;
+}
+
/**
* vfs_statx - Get basic and extra attributes by filename
* @dfd: A file descriptor representing the base dir for a relative filename
@@ -170,19 +189,10 @@ int vfs_statx(int dfd, const char __user *filename, int flags,
{
struct path path;
int error = -EINVAL;
- unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;
+ unsigned lookup_flags;
- if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
- AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
+ if (vfs_stat_set_lookup_flags(&lookup_flags, flags))
return -EINVAL;
-
- if (flags & AT_SYMLINK_NOFOLLOW)
- lookup_flags &= ~LOOKUP_FOLLOW;
- if (flags & AT_NO_AUTOMOUNT)
- lookup_flags &= ~LOOKUP_AUTOMOUNT;
- if (flags & AT_EMPTY_PATH)
- lookup_flags |= LOOKUP_EMPTY;
-
retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (error)
@@ -523,7 +533,7 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
-static noinline_for_stack int
+noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
struct statx tmp;
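
The AT_* flags that vfs_stat_set_lookup_flags() translates are the ones statx(2) callers pass from user space; a minimal example (glibc 2.28+ provides the wrapper; error handling trimmed):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(int argc, char **argv)
    {
        struct statx stx;

        if (argc < 2)
            return 1;
        /* AT_SYMLINK_NOFOLLOW clears LOOKUP_FOLLOW in the helper above */
        if (statx(AT_FDCWD, argv[1], AT_SYMLINK_NOFOLLOW,
                  STATX_BASIC_STATS, &stx) != 0) {
            perror("statx");
            return 1;
        }
        printf("size=%llu\n", (unsigned long long)stx.stx_size);
        return 0;
    }
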
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index d41c21fef138..c4ab045926b7 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -449,7 +449,7 @@ int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
}
link = kernfs_create_link(kobj->sd, target_name, entry);
- if (IS_ERR(link) && PTR_ERR(link) == -EEXIST)
+ if (PTR_ERR(link) == -EEXIST)
sysfs_warn_dup(kobj->sd, target_name);
kernfs_put(entry);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index ac7f59a58f94..c5509d2448e3 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -26,6 +26,7 @@
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/rcupdate.h>
+#include <linux/time_namespace.h>
struct timerfd_ctx {
union {
@@ -196,6 +197,8 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
}
if (texp != 0) {
+ if (flags & TFD_TIMER_ABSTIME)
+ texp = timens_ktime_to_host(clockid, texp);
if (isalarm(ctx)) {
if (flags & TFD_TIMER_ABSTIME)
alarm_start(&ctx->t.alarm, texp);
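
The timens conversion applies only to absolute expiry values, i.e. exactly the TFD_TIMER_ABSTIME case shown above. A user-space illustration of that path:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/timerfd.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
        struct itimerspec its = { 0 };
        struct timespec now;
        uint64_t expirations;
        int fd = timerfd_create(CLOCK_MONOTONIC, 0);

        if (fd < 0)
            return 1;
        clock_gettime(CLOCK_MONOTONIC, &now);
        its.it_value.tv_sec = now.tv_sec + 2;   /* absolute: now + 2s */

        /* with TFD_TIMER_ABSTIME this is the value that
         * timens_ktime_to_host() now maps to the host clock */
        if (timerfd_settime(fd, TFD_TIMER_ABSTIME, &its, NULL) != 0)
            return 1;
        if (read(fd, &expirations, sizeof(expirations)) != sizeof(expirations))
            return 1;
        printf("expired %llu time(s)\n", (unsigned long long)expirations);
        close(fd);
        return 0;
    }
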
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 0caa151cae4e..0ee8c6dfb036 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -330,7 +330,10 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
parent = tracefs_mount->mnt_root;
inode_lock(parent->d_inode);
- dentry = lookup_one_len(name, parent, strlen(name));
+ if (unlikely(IS_DEADDIR(parent->d_inode)))
+ dentry = ERR_PTR(-ENOENT);
+ else
+ dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry) && dentry->d_inode) {
dput(dentry);
dentry = ERR_PTR(-EEXIST);
@@ -499,122 +502,27 @@ __init struct dentry *tracefs_create_instance_dir(const char *name,
return dentry;
}
-static int __tracefs_remove(struct dentry *dentry, struct dentry *parent)
+static void remove_one(struct dentry *victim)
{
- int ret = 0;
-
- if (simple_positive(dentry)) {
- if (dentry->d_inode) {
- dget(dentry);
- switch (dentry->d_inode->i_mode & S_IFMT) {
- case S_IFDIR:
- ret = simple_rmdir(parent->d_inode, dentry);
- if (!ret)
- fsnotify_rmdir(parent->d_inode, dentry);
- break;
- default:
- simple_unlink(parent->d_inode, dentry);
- fsnotify_unlink(parent->d_inode, dentry);
- break;
- }
- if (!ret)
- d_delete(dentry);
- dput(dentry);
- }
- }
- return ret;
-}
-
-/**
- * tracefs_remove - removes a file or directory from the tracefs filesystem
- * @dentry: a pointer to a the dentry of the file or directory to be
- * removed.
- *
- * This function removes a file or directory in tracefs that was previously
- * created with a call to another tracefs function (like
- * tracefs_create_file() or variants thereof.)
- */
-void tracefs_remove(struct dentry *dentry)
-{
- struct dentry *parent;
- int ret;
-
- if (IS_ERR_OR_NULL(dentry))
- return;
-
- parent = dentry->d_parent;
- inode_lock(parent->d_inode);
- ret = __tracefs_remove(dentry, parent);
- inode_unlock(parent->d_inode);
- if (!ret)
- simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+ simple_release_fs(&tracefs_mount, &tracefs_mount_count);
}
/**
- * tracefs_remove_recursive - recursively removes a directory
+ * tracefs_remove - recursively removes a directory
* @dentry: a pointer to a the dentry of the directory to be removed.
*
* This function recursively removes a directory tree in tracefs that
* was previously created with a call to another tracefs function
* (like tracefs_create_file() or variants thereof.)
*/
-void tracefs_remove_recursive(struct dentry *dentry)
+void tracefs_remove(struct dentry *dentry)
{
- struct dentry *child, *parent;
-
if (IS_ERR_OR_NULL(dentry))
return;
- parent = dentry;
- down:
- inode_lock(parent->d_inode);
- loop:
- /*
- * The parent->d_subdirs is protected by the d_lock. Outside that
- * lock, the child can be unlinked and set to be freed which can
- * use the d_u.d_child as the rcu head and corrupt this list.
- */
- spin_lock(&parent->d_lock);
- list_for_each_entry(child, &parent->d_subdirs, d_child) {
- if (!simple_positive(child))
- continue;
-
- /* perhaps simple_empty(child) makes more sense */
- if (!list_empty(&child->d_subdirs)) {
- spin_unlock(&parent->d_lock);
- inode_unlock(parent->d_inode);
- parent = child;
- goto down;
- }
-
- spin_unlock(&parent->d_lock);
-
- if (!__tracefs_remove(child, parent))
- simple_release_fs(&tracefs_mount, &tracefs_mount_count);
-
- /*
- * The parent->d_lock protects agaist child from unlinking
- * from d_subdirs. When releasing the parent->d_lock we can
- * no longer trust that the next pointer is valid.
- * Restart the loop. We'll skip this one with the
- * simple_positive() check.
- */
- goto loop;
- }
- spin_unlock(&parent->d_lock);
-
- inode_unlock(parent->d_inode);
- child = parent;
- parent = parent->d_parent;
- inode_lock(parent->d_inode);
-
- if (child != dentry)
- /* go up */
- goto loop;
-
- if (!__tracefs_remove(child, parent))
- simple_release_fs(&tracefs_mount, &tracefs_mount_count);
- inode_unlock(parent->d_inode);
+ simple_pin_fs(&trace_fs_type, &tracefs_mount, &tracefs_mount_count);
+ simple_recursive_removal(dentry, remove_one);
+ simple_release_fs(&tracefs_mount, &tracefs_mount_count);
}
/**
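
Caller-side, the rename means the recursive variant is now the only API; a hypothetical user looks like:

    struct dentry *dir = tracefs_create_dir("my_feature", NULL);

    /* ... create files below 'dir' ... */

    tracefs_remove(dir);    /* removes 'dir' and everything under it */
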
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 69932bcfa920..45d3d207fb99 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -12,6 +12,7 @@ config UBIFS_FS
select CRYPTO_ZSTD if UBIFS_FS_ZSTD
select CRYPTO_HASH_INFO
select UBIFS_FS_XATTR if FS_ENCRYPTION
+ select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
depends on MTD_UBI
help
UBIFS is a file system for flash devices which works on top of UBI.
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 0b98e3c8b461..ef85ec167a84 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -81,7 +81,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
struct ubifs_inode *ui;
bool encrypted = false;
- if (ubifs_crypt_is_encrypted(dir)) {
+ if (IS_ENCRYPTED(dir)) {
err = fscrypt_get_encryption_info(dir);
if (err) {
ubifs_err(c, "fscrypt_get_encryption_info failed: %i", err);
@@ -225,9 +225,9 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
goto done;
}
- if (nm.hash) {
- ubifs_assert(c, fname_len(&nm) == 0);
- ubifs_assert(c, fname_name(&nm) == NULL);
+ if (fname_name(&nm) == NULL) {
+ if (nm.hash & ~UBIFS_S_KEY_HASH_MASK)
+ goto done; /* ENOENT */
dent_key_init_hash(c, &key, dir->i_ino, nm.hash);
err = ubifs_tnc_lookup_dh(c, &key, dent, nm.minor_hash);
} else {
@@ -261,7 +261,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
goto done;
}
- if (ubifs_crypt_is_encrypted(dir) &&
+ if (IS_ENCRYPTED(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
ubifs_warn(c, "Inconsistent encryption contexts: %lu/%lu",
@@ -499,7 +499,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
struct ubifs_dent_node *dent;
struct inode *dir = file_inode(file);
struct ubifs_info *c = dir->i_sb->s_fs_info;
- bool encrypted = ubifs_crypt_is_encrypted(dir);
+ bool encrypted = IS_ENCRYPTED(dir);
dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, ctx->pos);
@@ -512,7 +512,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
if (encrypted) {
err = fscrypt_get_encryption_info(dir);
- if (err && err != -ENOKEY)
+ if (err)
return err;
err = fscrypt_fname_alloc_buffer(dir, UBIFS_MAX_NLEN, &fstr);
@@ -1618,7 +1618,7 @@ int ubifs_getattr(const struct path *path, struct kstat *stat,
static int ubifs_dir_open(struct inode *dir, struct file *file)
{
- if (ubifs_crypt_is_encrypted(dir))
+ if (IS_ENCRYPTED(dir))
return fscrypt_get_encryption_info(dir) ? -EACCES : 0;
return 0;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index cd52585c8f4f..743928efffc1 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -67,7 +67,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
- if (ubifs_crypt_is_encrypted(inode)) {
+ if (IS_ENCRYPTED(inode)) {
err = ubifs_decrypt(inode, dn, &dlen, block);
if (err)
goto dump;
@@ -647,7 +647,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
out_len = UBIFS_BLOCK_SIZE;
- if (ubifs_crypt_is_encrypted(inode)) {
+ if (IS_ENCRYPTED(inode)) {
err = ubifs_decrypt(inode, dn, &dlen, page_block);
if (err)
goto out_err;
@@ -786,7 +786,9 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
if (page_offset > end_index)
break;
- page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
+ page = pagecache_get_page(mapping, page_offset,
+ FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
+ ra_gfp_mask);
if (!page)
break;
if (!PageUptodate(page))
@@ -1078,18 +1080,12 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
- if (attr->ia_valid & ATTR_ATIME) {
- inode->i_atime = timestamp_truncate(attr->ia_atime,
- inode);
- }
- if (attr->ia_valid & ATTR_MTIME) {
- inode->i_mtime = timestamp_truncate(attr->ia_mtime,
- inode);
- }
- if (attr->ia_valid & ATTR_CTIME) {
- inode->i_ctime = timestamp_truncate(attr->ia_ctime,
- inode);
- }
+ if (attr->ia_valid & ATTR_ATIME)
+ inode->i_atime = attr->ia_atime;
+ if (attr->ia_valid & ATTR_MTIME)
+ inode->i_mtime = attr->ia_mtime;
+ if (attr->ia_valid & ATTR_CTIME)
+ inode->i_ctime = attr->ia_ctime;
if (attr->ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
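
On the bulk-read hunk above: find_or_create_page() is pagecache_get_page() without FGP_NOWAIT, so it would sleep on a contended page lock; with FGP_NOWAIT the lookup backs off instead, which is what opportunistic readahead wants. In context:

    page = pagecache_get_page(mapping, page_offset,
                              FGP_LOCK | FGP_ACCESSED | FGP_CREAT | FGP_NOWAIT,
                              ra_gfp_mask);
    if (!page)
        break;  /* lock contention or no memory: stop readahead early */
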
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 5dc5abca11c7..d49fc04f2d7d 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -17,10 +17,14 @@
#include "ubifs.h"
/* Need to be kept consistent with checked flags in ioctl2ubifs() */
-#define UBIFS_SUPPORTED_IOCTL_FLAGS \
+#define UBIFS_SETTABLE_IOCTL_FLAGS \
(FS_COMPR_FL | FS_SYNC_FL | FS_APPEND_FL | \
FS_IMMUTABLE_FL | FS_DIRSYNC_FL)
+/* Need to be kept consistent with checked flags in ubifs2ioctl() */
+#define UBIFS_GETTABLE_IOCTL_FLAGS \
+ (UBIFS_SETTABLE_IOCTL_FLAGS | FS_ENCRYPT_FL)
+
/**
* ubifs_set_inode_flags - set VFS inode flags.
* @inode: VFS inode to set flags for
@@ -91,6 +95,8 @@ static int ubifs2ioctl(int ubifs_flags)
ioctl_flags |= FS_IMMUTABLE_FL;
if (ubifs_flags & UBIFS_DIRSYNC_FL)
ioctl_flags |= FS_DIRSYNC_FL;
+ if (ubifs_flags & UBIFS_CRYPT_FL)
+ ioctl_flags |= FS_ENCRYPT_FL;
return ioctl_flags;
}
@@ -113,7 +119,8 @@ static int setflags(struct inode *inode, int flags)
if (err)
goto out_unlock;
- ui->flags = ioctl2ubifs(flags);
+ ui->flags &= ~ioctl2ubifs(UBIFS_SETTABLE_IOCTL_FLAGS);
+ ui->flags |= ioctl2ubifs(flags);
ubifs_set_inode_flags(inode);
inode->i_ctime = current_time(inode);
release = ui->dirty;
@@ -155,8 +162,9 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (get_user(flags, (int __user *) arg))
return -EFAULT;
- if (flags & ~UBIFS_SUPPORTED_IOCTL_FLAGS)
+ if (flags & ~UBIFS_GETTABLE_IOCTL_FLAGS)
return -EOPNOTSUPP;
+ flags &= UBIFS_SETTABLE_IOCTL_FLAGS;
if (!S_ISDIR(inode->i_mode))
flags &= ~FS_DIRSYNC_FL;
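
The settable/gettable split exists so that the usual get-modify-set sequence keeps working on encrypted files: FS_IOC_GETFLAGS may now report FS_ENCRYPT_FL, and FS_IOC_SETFLAGS masks it off rather than failing. An illustrative round trip (per ioctl_iflags(2), the argument is an int):

    #include <fcntl.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        int fd, flags;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
            return 1;
        if (ioctl(fd, FS_IOC_GETFLAGS, &flags) != 0)
            return 1;
        /* FS_ENCRYPT_FL may now be reported, but it is read-only here */
        printf("encrypted: %s\n", (flags & FS_ENCRYPT_FL) ? "yes" : "no");

        flags |= FS_SYNC_FL;            /* one of the settable flags */
        if (ioctl(fd, FS_IOC_SETFLAGS, &flags) != 0)
            perror("FS_IOC_SETFLAGS");
        close(fd);
        return 0;
    }
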
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 388fe8f5dc51..3bf8b1fda9d7 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -588,7 +588,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
if (!xent) {
dent->ch.node_type = UBIFS_DENT_NODE;
- if (nm->hash)
+ if (fname_name(nm) == NULL)
dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash);
else
dent_key_init(c, &dent_key, dir->i_ino, nm);
@@ -646,7 +646,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
ubifs_add_auth_dirt(c, lnum);
if (deletion) {
- if (nm->hash)
+ if (fname_name(nm) == NULL)
err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash);
else
err = ubifs_tnc_remove_nm(c, &dent_key, nm);
@@ -727,7 +727,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
int write_len;
struct ubifs_inode *ui = ubifs_inode(inode);
- bool encrypted = ubifs_crypt_is_encrypted(inode);
+ bool encrypted = IS_ENCRYPTED(inode);
u8 hash[UBIFS_HASH_ARR_SZ];
dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
@@ -1449,7 +1449,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
dlen = old_dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
compr_type = le16_to_cpu(dn->compr_type);
- if (ubifs_crypt_is_encrypted(inode)) {
+ if (IS_ENCRYPTED(inode)) {
err = ubifs_decrypt(inode, dn, &dlen, block);
if (err)
goto out;
@@ -1465,7 +1465,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
}
- if (ubifs_crypt_is_encrypted(inode)) {
+ if (IS_ENCRYPTED(inode)) {
err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block);
if (err)
goto out;
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h
index afa704ff5ca0..8142d9d6fe5d 100644
--- a/fs/ubifs/key.h
+++ b/fs/ubifs/key.h
@@ -150,7 +150,6 @@ static inline void dent_key_init(const struct ubifs_info *c,
uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm));
ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK));
- ubifs_assert(c, !nm->hash && !nm->minor_hash);
key->u32[0] = inum;
key->u32[1] = hash | (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS);
}
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 54d6db61106f..edf43ddd7dce 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -129,7 +129,7 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
{
if (orph->del) {
- dbg_gen("deleted twice ino %lu", orph->inum);
+ dbg_gen("deleted twice ino %lu", (unsigned long)orph->inum);
return;
}
@@ -137,7 +137,7 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
orph->del = 1;
orph->dnext = c->orph_dnext;
c->orph_dnext = orph;
- dbg_gen("delete later ino %lu", orph->inum);
+ dbg_gen("delete later ino %lu", (unsigned long)orph->inum);
return;
}
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 2b7c04bf8983..4b4b65b48c57 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -84,7 +84,6 @@ static int create_default_filesystem(struct ubifs_info *c)
int idx_node_size;
long long tmp64, main_bytes;
__le64 tmp_le64;
- __le32 tmp_le32;
struct timespec64 ts;
u8 hash[UBIFS_HASH_ARR_SZ];
u8 hash_lpt[UBIFS_HASH_ARR_SZ];
@@ -161,7 +160,7 @@ static int create_default_filesystem(struct ubifs_info *c)
sup = kzalloc(ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size), GFP_KERNEL);
mst = kzalloc(c->mst_node_alsz, GFP_KERNEL);
idx_node_size = ubifs_idx_node_sz(c, 1);
- idx = kzalloc(ALIGN(tmp, c->min_io_size), GFP_KERNEL);
+ idx = kzalloc(ALIGN(idx_node_size, c->min_io_size), GFP_KERNEL);
ino = kzalloc(ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size), GFP_KERNEL);
cs = kzalloc(ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size), GFP_KERNEL);
@@ -291,16 +290,14 @@ static int create_default_filesystem(struct ubifs_info *c)
ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
ino->nlink = cpu_to_le32(2);
- ktime_get_real_ts64(&ts);
- ts = timespec64_trunc(ts, DEFAULT_TIME_GRAN);
+ ktime_get_coarse_real_ts64(&ts);
tmp_le64 = cpu_to_le64(ts.tv_sec);
ino->atime_sec = tmp_le64;
ino->ctime_sec = tmp_le64;
ino->mtime_sec = tmp_le64;
- tmp_le32 = cpu_to_le32(ts.tv_nsec);
- ino->atime_nsec = tmp_le32;
- ino->ctime_nsec = tmp_le32;
- ino->mtime_nsec = tmp_le32;
+ ino->atime_nsec = 0;
+ ino->ctime_nsec = 0;
+ ino->mtime_nsec = 0;
ino->mode = cpu_to_le32(S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
ino->size = cpu_to_le64(UBIFS_INO_NODE_SZ);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 5e1e8ec0589e..7fc2f3f07c16 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1599,6 +1599,7 @@ out_free:
vfree(c->ileb_buf);
vfree(c->sbuf);
kfree(c->bottom_up_buf);
+ kfree(c->sup_node);
ubifs_debugging_exit(c);
return err;
}
@@ -1641,6 +1642,7 @@ static void ubifs_umount(struct ubifs_info *c)
vfree(c->ileb_buf);
vfree(c->sbuf);
kfree(c->bottom_up_buf);
+ kfree(c->sup_node);
ubifs_debugging_exit(c);
}
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index c55f212dcb75..bff682309fbe 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -2095,13 +2095,6 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
extern const struct fscrypt_operations ubifs_crypt_operations;
-static inline bool ubifs_crypt_is_encrypted(const struct inode *inode)
-{
- const struct ubifs_inode *ui = ubifs_inode(inode);
-
- return ui->flags & UBIFS_CRYPT_FL;
-}
-
/* Normal UBIFS messages */
__printf(2, 3)
void ubifs_msg(const struct ubifs_info *c, const char *fmt, ...);
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
index fb7f2c7bec9c..3fd85464abd5 100644
--- a/fs/udf/ecma_167.h
+++ b/fs/udf/ecma_167.h
@@ -4,7 +4,8 @@
* This file is based on ECMA-167 3rd edition (June 1997)
* http://www.ecma.ch
*
- * Copyright (c) 2001-2002 Ben Fennema <bfennema@falcon.csc.calpoly.edu>
+ * Copyright (c) 2001-2002 Ben Fennema
+ * Copyright (c) 2017-2019 Pali Rohár <pali.rohar@gmail.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,11 +33,19 @@
* SUCH DAMAGE.
*/
+/**
+ * @file
+ * ECMA-167r3 defines and structure definitions
+ */
+
#include <linux/types.h>
#ifndef _ECMA_167_H
#define _ECMA_167_H 1
+/* Character sets and coding - d-characters (ECMA 167r3 1/7.2) */
+typedef uint8_t dchars;
+
/* Character set specification (ECMA 167r3 1/7.2.1) */
struct charspec {
uint8_t charSetType;
@@ -54,6 +63,7 @@ struct charspec {
#define CHARSPEC_TYPE_CS7 0x07 /* (1/7.2.9) */
#define CHARSPEC_TYPE_CS8 0x08 /* (1/7.2.10) */
+/* Fixed-length character fields - d-string (ECMA 167r3 1/7.2.12) */
typedef uint8_t dstring;
/* Timestamp (ECMA 167r3 1/7.3) */
@@ -85,22 +95,8 @@ struct regid {
} __packed;
/* Flags (ECMA 167r3 1/7.4.1) */
-#define ENTITYID_FLAGS_DIRTY 0x00
-#define ENTITYID_FLAGS_PROTECTED 0x01
-
-/* OSTA UDF 2.1.5.2 */
-#define UDF_ID_COMPLIANT "*OSTA UDF Compliant"
-
-/* OSTA UDF 2.1.5.3 */
-struct domainEntityIDSuffix {
- uint16_t revision;
- uint8_t flags;
- uint8_t reserved[5];
-};
-
-/* OSTA UDF 2.1.5.3 */
-#define ENTITYIDSUFFIX_FLAGS_HARDWRITEPROTECT 0
-#define ENTITYIDSUFFIX_FLAGS_SOFTWRITEPROTECT 1
+#define ENTITYID_FLAGS_DIRTY 0x01
+#define ENTITYID_FLAGS_PROTECTED 0x02
/* Volume Structure Descriptor (ECMA 167r3 2/9.1) */
#define VSD_STD_ID_LEN 5
@@ -202,6 +198,13 @@ struct NSRDesc {
uint8_t structData[2040];
} __packed;
+/* Generic Descriptor */
+struct genericDesc {
+ struct tag descTag;
+ __le32 volDescSeqNum;
+ uint8_t reserved[492];
+} __packed;
+
/* Primary Volume Descriptor (ECMA 167r3 3/10.1) */
struct primaryVolDesc {
struct tag descTag;
@@ -316,7 +319,7 @@ struct genericPartitionMap {
/* Partition Map Type (ECMA 167r3 3/10.7.1.1) */
#define GP_PARTITION_MAP_TYPE_UNDEF 0x00
-#define GP_PARTIITON_MAP_TYPE_1 0x01
+#define GP_PARTITION_MAP_TYPE_1 0x01
#define GP_PARTITION_MAP_TYPE_2 0x02
/* Type 1 Partition Map (ECMA 167r3 3/10.7.2) */
@@ -723,6 +726,7 @@ struct appUseExtAttr {
#define EXTATTR_DEV_SPEC 12
#define EXTATTR_IMP_USE 2048
#define EXTATTR_APP_USE 65536
+#define EXTATTR_SUBTYPE 1
/* Unallocated Space Entry (ECMA 167r3 4/14.11) */
struct unallocSpaceEntry {
@@ -754,10 +758,12 @@ struct partitionIntegrityEntry {
/* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
/* Extent Length (ECMA 167r3 4/14.14.1.1) */
+#define EXT_LENGTH_MASK 0x3FFFFFFF
+#define EXT_TYPE_MASK 0xC0000000
#define EXT_RECORDED_ALLOCATED 0x00000000
#define EXT_NOT_RECORDED_ALLOCATED 0x40000000
#define EXT_NOT_RECORDED_NOT_ALLOCATED 0x80000000
-#define EXT_NEXT_EXTENT_ALLOCDECS 0xC0000000
+#define EXT_NEXT_EXTENT_ALLOCDESCS 0xC0000000
/* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */
@@ -774,7 +780,7 @@ struct pathComponent {
uint8_t componentType;
uint8_t lengthComponentIdent;
__le16 componentFileVersionNum;
- dstring componentIdent[0];
+ dchars componentIdent[0];
} __packed;
/* File Entry (ECMA 167r3 4/14.17) */
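
A sketch of the decoding the two new masks make explicit ('sad' stands in for a struct short_ad and is illustrative):

    u32 extlen = le32_to_cpu(sad->extLength);
    u32 bytes  = extlen & EXT_LENGTH_MASK;  /* low 30 bits: extent length */
    u32 etype  = extlen & EXT_TYPE_MASK;    /* top 2 bits: extent type */

    if (etype == EXT_NEXT_EXTENT_ALLOCDESCS) {
        /* not data: chain to the next block of allocation descriptors */
    }
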
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index ea80036d7897..e875bc5668ee 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1981,10 +1981,10 @@ int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
__udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
udf_write_aext(inode, epos, &nepos.block,
- sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDECS, 0);
+ sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
} else {
__udf_add_aext(inode, epos, &nepos.block,
- sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDECS, 0);
+ sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
}
brelse(epos->bh);
@@ -2143,7 +2143,7 @@ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
unsigned int indirections = 0;
while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
- (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
+ (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
udf_pblk_t block;
if (++indirections > UDF_MAX_INDIR_EXTS) {
diff --git a/fs/udf/osta_udf.h b/fs/udf/osta_udf.h
index a4da59e38b7f..35e61b2cacfe 100644
--- a/fs/udf/osta_udf.h
+++ b/fs/udf/osta_udf.h
@@ -1,10 +1,11 @@
/*
* osta_udf.h
*
- * This file is based on OSTA UDF(tm) 2.50 (April 30, 2003)
+ * This file is based on OSTA UDF(tm) 2.60 (March 1, 2005)
* http://www.osta.org
*
- * Copyright (c) 2001-2004 Ben Fennema <bfennema@falcon.csc.calpoly.edu>
+ * Copyright (c) 2001-2004 Ben Fennema
+ * Copyright (c) 2017-2019 Pali Rohár <pali.rohar@gmail.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,38 +33,57 @@
* SUCH DAMAGE.
*/
+/**
+ * @file
+ * OSTA-UDF defines and structure definitions
+ */
+
#include "ecma_167.h"
#ifndef _OSTA_UDF_H
#define _OSTA_UDF_H 1
-/* OSTA CS0 Charspec (UDF 2.50 2.1.2) */
+/* OSTA CS0 Charspec (UDF 2.60 2.1.2) */
#define UDF_CHAR_SET_TYPE 0
#define UDF_CHAR_SET_INFO "OSTA Compressed Unicode"
-/* Entity Identifier (UDF 2.50 2.1.5) */
-/* Identifiers (UDF 2.50 2.1.5.2) */
+/* Entity Identifier (UDF 2.60 2.1.5) */
+/* Identifiers (UDF 2.60 2.1.5.2) */
+/* Implementation Use Extended Attribute (UDF 2.60 3.3.4.5) */
+/* Virtual Allocation Table (UDF 1.50 2.2.10) */
+/* Logical Volume Extended Information (UDF 1.50 Errata, DCN 5003, 3.3.4.5.1.3) */
+/* OS2EA (UDF 1.50 3.3.4.5.3.1) */
+/* MacUniqueIDTable (UDF 1.50 3.3.4.5.4.3) */
+/* MacResourceFork (UDF 1.50 3.3.4.5.4.4) */
#define UDF_ID_DEVELOPER "*Linux UDFFS"
#define UDF_ID_COMPLIANT "*OSTA UDF Compliant"
#define UDF_ID_LV_INFO "*UDF LV Info"
#define UDF_ID_FREE_EA "*UDF FreeEASpace"
#define UDF_ID_FREE_APP_EA "*UDF FreeAppEASpace"
#define UDF_ID_DVD_CGMS "*UDF DVD CGMS Info"
+#define UDF_ID_VAT_LVEXTENSION "*UDF VAT LVExtension"
#define UDF_ID_OS2_EA "*UDF OS/2 EA"
#define UDF_ID_OS2_EA_LENGTH "*UDF OS/2 EALength"
#define UDF_ID_MAC_VOLUME "*UDF Mac VolumeInfo"
#define UDF_ID_MAC_FINDER "*UDF Mac FinderInfo"
#define UDF_ID_MAC_UNIQUE "*UDF Mac UniqueIDTable"
#define UDF_ID_MAC_RESOURCE "*UDF Mac ResourceFork"
+#define UDF_ID_OS400_DIRINFO "*UDF OS/400 DirInfo"
#define UDF_ID_VIRTUAL "*UDF Virtual Partition"
#define UDF_ID_SPARABLE "*UDF Sparable Partition"
#define UDF_ID_ALLOC "*UDF Virtual Alloc Tbl"
#define UDF_ID_SPARING "*UDF Sparing Table"
#define UDF_ID_METADATA "*UDF Metadata Partition"
-/* Identifier Suffix (UDF 2.50 2.1.5.3) */
-#define IS_DF_HARD_WRITE_PROTECT 0x01
-#define IS_DF_SOFT_WRITE_PROTECT 0x02
+/* Identifier Suffix (UDF 2.60 2.1.5.3) */
+#define DOMAIN_FLAGS_HARD_WRITE_PROTECT 0x01
+#define DOMAIN_FLAGS_SOFT_WRITE_PROTECT 0x02
+
+struct domainIdentSuffix {
+ __le16 UDFRevision;
+ uint8_t domainFlags;
+ uint8_t reserved[5];
+} __packed;
struct UDFIdentSuffix {
__le16 UDFRevision;
@@ -75,15 +95,15 @@ struct UDFIdentSuffix {
struct impIdentSuffix {
uint8_t OSClass;
uint8_t OSIdentifier;
- uint8_t reserved[6];
+ uint8_t impUse[6];
} __packed;
struct appIdentSuffix {
uint8_t impUse[8];
} __packed;
-/* Logical Volume Integrity Descriptor (UDF 2.50 2.2.6) */
-/* Implementation Use (UDF 2.50 2.2.6.4) */
+/* Logical Volume Integrity Descriptor (UDF 2.60 2.2.6) */
+/* Implementation Use (UDF 2.60 2.2.6.4) */
struct logicalVolIntegrityDescImpUse {
struct regid impIdent;
__le32 numFiles;
@@ -94,8 +114,8 @@ struct logicalVolIntegrityDescImpUse {
uint8_t impUse[0];
} __packed;
-/* Implementation Use Volume Descriptor (UDF 2.50 2.2.7) */
-/* Implementation Use (UDF 2.50 2.2.7.2) */
+/* Implementation Use Volume Descriptor (UDF 2.60 2.2.7) */
+/* Implementation Use (UDF 2.60 2.2.7.2) */
struct impUseVolDescImpUse {
struct charspec LVICharset;
dstring logicalVolIdent[128];
@@ -115,7 +135,7 @@ struct udfPartitionMap2 {
__le16 partitionNum;
} __packed;
-/* Virtual Partition Map (UDF 2.50 2.2.8) */
+/* Virtual Partition Map (UDF 2.60 2.2.8) */
struct virtualPartitionMap {
uint8_t partitionMapType;
uint8_t partitionMapLength;
@@ -126,7 +146,7 @@ struct virtualPartitionMap {
uint8_t reserved2[24];
} __packed;
-/* Sparable Partition Map (UDF 2.50 2.2.9) */
+/* Sparable Partition Map (UDF 2.60 2.2.9) */
struct sparablePartitionMap {
uint8_t partitionMapType;
uint8_t partitionMapLength;
@@ -141,7 +161,7 @@ struct sparablePartitionMap {
__le32 locSparingTable[4];
} __packed;
-/* Metadata Partition Map (UDF 2.4.0 2.2.10) */
+/* Metadata Partition Map (UDF 2.60 2.2.10) */
struct metadataPartitionMap {
uint8_t partitionMapType;
uint8_t partitionMapLength;
@@ -160,14 +180,14 @@ struct metadataPartitionMap {
/* Virtual Allocation Table (UDF 1.5 2.2.10) */
struct virtualAllocationTable15 {
- __le32 VirtualSector[0];
+ __le32 vatEntry[0];
struct regid vatIdent;
__le32 previousVATICBLoc;
} __packed;
#define ICBTAG_FILE_TYPE_VAT15 0x00U
-/* Virtual Allocation Table (UDF 2.50 2.2.11) */
+/* Virtual Allocation Table (UDF 2.60 2.2.11) */
struct virtualAllocationTable20 {
__le16 lengthHeader;
__le16 lengthImpUse;
@@ -175,9 +195,9 @@ struct virtualAllocationTable20 {
__le32 previousVATICBLoc;
__le32 numFiles;
__le32 numDirs;
- __le16 minReadRevision;
- __le16 minWriteRevision;
- __le16 maxWriteRevision;
+ __le16 minUDFReadRev;
+ __le16 minUDFWriteRev;
+ __le16 maxUDFWriteRev;
__le16 reserved;
uint8_t impUse[0];
__le32 vatEntry[0];
@@ -185,7 +205,7 @@ struct virtualAllocationTable20 {
#define ICBTAG_FILE_TYPE_VAT20 0xF8U
-/* Sparing Table (UDF 2.50 2.2.12) */
+/* Sparing Table (UDF 2.60 2.2.12) */
struct sparingEntry {
__le32 origLocation;
__le32 mappedLocation;
@@ -201,12 +221,12 @@ struct sparingTable {
mapEntry[0];
} __packed;
-/* Metadata File (and Metadata Mirror File) (UDF 2.50 2.2.13.1) */
+/* Metadata File (and Metadata Mirror File) (UDF 2.60 2.2.13.1) */
#define ICBTAG_FILE_TYPE_MAIN 0xFA
#define ICBTAG_FILE_TYPE_MIRROR 0xFB
#define ICBTAG_FILE_TYPE_BITMAP 0xFC
-/* struct struct long_ad ICB - ADImpUse (UDF 2.50 2.2.4.3) */
+/* struct long_ad ICB - ADImpUse (UDF 2.60 2.2.4.3) */
struct allocDescImpUse {
__le16 flags;
uint8_t impUse[4];
@@ -214,17 +234,17 @@ struct allocDescImpUse {
#define AD_IU_EXT_ERASED 0x0001
-/* Real-Time Files (UDF 2.50 6.11) */
+/* Real-Time Files (UDF 2.60 6.11) */
#define ICBTAG_FILE_TYPE_REALTIME 0xF9U
-/* Implementation Use Extended Attribute (UDF 2.50 3.3.4.5) */
-/* FreeEASpace (UDF 2.50 3.3.4.5.1.1) */
+/* Implementation Use Extended Attribute (UDF 2.60 3.3.4.5) */
+/* FreeEASpace (UDF 2.60 3.3.4.5.1.1) */
struct freeEaSpace {
__le16 headerChecksum;
uint8_t freeEASpace[0];
} __packed;
-/* DVD Copyright Management Information (UDF 2.50 3.3.4.5.1.2) */
+/* DVD Copyright Management Information (UDF 2.60 3.3.4.5.1.2) */
struct DVDCopyrightImpUse {
__le16 headerChecksum;
uint8_t CGMSInfo;
@@ -232,20 +252,35 @@ struct DVDCopyrightImpUse {
uint8_t protectionSystemInfo[4];
} __packed;
-/* Application Use Extended Attribute (UDF 2.50 3.3.4.6) */
-/* FreeAppEASpace (UDF 2.50 3.3.4.6.1) */
+/* Logical Volume Extended Information (UDF 1.50 Errata, DCN 5003, 3.3.4.5.1.3) */
+struct LVExtensionEA {
+ __le16 headerChecksum;
+ __le64 verificationID;
+ __le32 numFiles;
+ __le32 numDirs;
+ dstring logicalVolIdent[128];
+} __packed;
+
+/* Application Use Extended Attribute (UDF 2.60 3.3.4.6) */
+/* FreeAppEASpace (UDF 2.60 3.3.4.6.1) */
struct freeAppEASpace {
__le16 headerChecksum;
uint8_t freeEASpace[0];
} __packed;
-/* UDF Defined System Stream (UDF 2.50 3.3.7) */
+/* UDF Defined System Stream (UDF 2.60 3.3.7) */
#define UDF_ID_UNIQUE_ID "*UDF Unique ID Mapping Data"
#define UDF_ID_NON_ALLOC "*UDF Non-Allocatable Space"
#define UDF_ID_POWER_CAL "*UDF Power Cal Table"
#define UDF_ID_BACKUP "*UDF Backup"
-/* Operating System Identifiers (UDF 2.50 6.3) */
+/* UDF Defined Non-System Streams (UDF 2.60 3.3.8) */
+#define UDF_ID_MAC_RESOURCE_FORK_STREAM "*UDF Macintosh Resource Fork"
+/* #define UDF_ID_OS2_EA "*UDF OS/2 EA" */
+#define UDF_ID_NT_ACL "*UDF NT ACL"
+#define UDF_ID_UNIX_ACL "*UDF UNIX ACL"
+
+/* Operating System Identifiers (UDF 2.60 6.3) */
#define UDF_OS_CLASS_UNDEF 0x00U
#define UDF_OS_CLASS_DOS 0x01U
#define UDF_OS_CLASS_OS2 0x02U
@@ -270,6 +305,7 @@ struct freeAppEASpace {
#define UDF_OS_ID_LINUX 0x05U
#define UDF_OS_ID_MKLINUX 0x06U
#define UDF_OS_ID_FREEBSD 0x07U
+#define UDF_OS_ID_NETBSD 0x08U
#define UDF_OS_ID_WIN9X 0x00U
#define UDF_OS_ID_WINNT 0x00U
#define UDF_OS_ID_OS400 0x00U
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 8c28e93e9b73..f747bf72edbe 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -767,20 +767,20 @@ static int udf_check_vsd(struct super_block *sb)
static int udf_verify_domain_identifier(struct super_block *sb,
struct regid *ident, char *dname)
{
- struct domainEntityIDSuffix *suffix;
+ struct domainIdentSuffix *suffix;
if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) {
udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname);
goto force_ro;
}
- if (ident->flags & (1 << ENTITYID_FLAGS_DIRTY)) {
+ if (ident->flags & ENTITYID_FLAGS_DIRTY) {
udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n",
dname);
goto force_ro;
}
- suffix = (struct domainEntityIDSuffix *)ident->identSuffix;
- if (suffix->flags & (1 << ENTITYIDSUFFIX_FLAGS_HARDWRITEPROTECT) ||
- suffix->flags & (1 << ENTITYIDSUFFIX_FLAGS_SOFTWRITEPROTECT)) {
+ suffix = (struct domainIdentSuffix *)ident->identSuffix;
+ if ((suffix->domainFlags & DOMAIN_FLAGS_HARD_WRITE_PROTECT) ||
+ (suffix->domainFlags & DOMAIN_FLAGS_SOFT_WRITE_PROTECT)) {
if (!sb_rdonly(sb)) {
udf_warn(sb, "Descriptor for %s marked write protected."
" Forcing read only mount.\n", dname);
@@ -1035,7 +1035,6 @@ static int check_partition_desc(struct super_block *sb,
switch (le32_to_cpu(p->accessType)) {
case PD_ACCESS_TYPE_READ_ONLY:
case PD_ACCESS_TYPE_WRITE_ONCE:
- case PD_ACCESS_TYPE_REWRITABLE:
case PD_ACCESS_TYPE_NONE:
goto force_ro;
}
@@ -1063,7 +1062,8 @@ static int check_partition_desc(struct super_block *sb,
goto force_ro;
if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
- map->s_partition_type == UDF_VIRTUAL_MAP20)
+ map->s_partition_type == UDF_VIRTUAL_MAP20 ||
+ map->s_partition_type == UDF_METADATA_MAP25)
goto force_ro;
return 0;
@@ -2402,6 +2402,10 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
buf->f_bfree = udf_count_free(sb);
buf->f_bavail = buf->f_bfree;
+ /*
+ * Let's pretend each free block is also a free 'inode' since UDF does
+ * not have a separate preallocated table of inodes.
+ */
buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
le32_to_cpu(lvidiu->numDirs)) : 0)
+ buf->f_bfree;
@@ -2492,17 +2496,29 @@ static unsigned int udf_count_free_table(struct super_block *sb,
static unsigned int udf_count_free(struct super_block *sb)
{
unsigned int accum = 0;
- struct udf_sb_info *sbi;
+ struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map;
+ unsigned int part = sbi->s_partition;
+ int ptype = sbi->s_partmaps[part].s_partition_type;
+
+ if (ptype == UDF_METADATA_MAP25) {
+ part = sbi->s_partmaps[part].s_type_specific.s_metadata.
+ s_phys_partition_ref;
+ } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
+ /*
+ * Filesystems with VAT are append-only and we cannot write to
+ * them. Let's just report 0 here.
+ */
+ return 0;
+ }
- sbi = UDF_SB(sb);
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDesc *lvid =
(struct logicalVolIntegrityDesc *)
sbi->s_lvid_bh->b_data;
- if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
+ if (le32_to_cpu(lvid->numOfPartitions) > part) {
accum = le32_to_cpu(
- lvid->freeSpaceTable[sbi->s_partition]);
+ lvid->freeSpaceTable[part]);
if (accum == 0xFFFFFFFF)
accum = 0;
}
@@ -2511,7 +2527,7 @@ static unsigned int udf_count_free(struct super_block *sb)
if (accum)
return accum;
- map = &sbi->s_partmaps[sbi->s_partition];
+ map = &sbi->s_partmaps[part];
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
accum += udf_count_free_bitmap(sb,
map->s_uspace.s_bitmap);
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 63a47f1e1d52..532cda99644e 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -241,7 +241,7 @@ int udf_truncate_extents(struct inode *inode)
while ((etype = udf_current_aext(inode, &epos, &eloc,
&elen, 0)) != -1) {
- if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
+ if (etype == (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
udf_write_aext(inode, &epos, &neloc, nelen, 0);
if (indirect_ext_len) {
/* We managed to free all extents in the
diff --git a/fs/unicode/Makefile b/fs/unicode/Makefile
index d46e9baee285..b88aecc86550 100644
--- a/fs/unicode/Makefile
+++ b/fs/unicode/Makefile
@@ -35,4 +35,4 @@ $(obj)/utf8data.h: $(src)/utf8data.h_shipped FORCE
endif
targets += utf8data.h
-hostprogs-y += mkutf8data
+hostprogs += mkutf8data
diff --git a/fs/utimes.c b/fs/utimes.c
index c952b6b3d8a0..1d17ce98cb80 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -36,14 +36,14 @@ static int utimes_common(const struct path *path, struct timespec64 *times)
if (times[0].tv_nsec == UTIME_OMIT)
newattrs.ia_valid &= ~ATTR_ATIME;
else if (times[0].tv_nsec != UTIME_NOW) {
- newattrs.ia_atime = timestamp_truncate(times[0], inode);
+ newattrs.ia_atime = times[0];
newattrs.ia_valid |= ATTR_ATIME_SET;
}
if (times[1].tv_nsec == UTIME_OMIT)
newattrs.ia_valid &= ~ATTR_MTIME;
else if (times[1].tv_nsec != UTIME_NOW) {
- newattrs.ia_mtime = timestamp_truncate(times[1], inode);
+ newattrs.ia_mtime = times[1];
newattrs.ia_valid |= ATTR_MTIME_SET;
}
/*
diff --git a/fs/vboxsf/Kconfig b/fs/vboxsf/Kconfig
new file mode 100644
index 000000000000..b84586ae08b3
--- /dev/null
+++ b/fs/vboxsf/Kconfig
@@ -0,0 +1,10 @@
+config VBOXSF_FS
+ tristate "VirtualBox guest shared folder (vboxsf) support"
+ depends on X86 && VBOXGUEST
+ select NLS
+ help
+ VirtualBox hosts can share folders with guests; this driver
+ implements the Linux-guest side of this, allowing folders exported
+ by the host to be mounted under Linux.
+
+ If you want to use shared folders in VirtualBox guests, answer Y or M.
diff --git a/fs/vboxsf/Makefile b/fs/vboxsf/Makefile
new file mode 100644
index 000000000000..9e4328e79623
--- /dev/null
+++ b/fs/vboxsf/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+
+obj-$(CONFIG_VBOXSF_FS) += vboxsf.o
+
+vboxsf-y := dir.o file.o utils.o vboxsf_wrappers.o super.o
diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c
new file mode 100644
index 000000000000..dd147b490982
--- /dev/null
+++ b/fs/vboxsf/dir.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: MIT
+/*
+ * VirtualBox Guest Shared Folders support: Directory inode and file operations
+ *
+ * Copyright (C) 2006-2018 Oracle Corporation
+ */
+
+#include <linux/namei.h>
+#include <linux/vbox_utils.h>
+#include "vfsmod.h"
+
+static int vboxsf_dir_open(struct inode *inode, struct file *file)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
+ struct shfl_createparms params = {};
+ struct vboxsf_dir_info *sf_d;
+ int err;
+
+ sf_d = vboxsf_dir_info_alloc();
+ if (!sf_d)
+ return -ENOMEM;
+
+ params.handle = SHFL_HANDLE_NIL;
+ params.create_flags = SHFL_CF_DIRECTORY | SHFL_CF_ACT_OPEN_IF_EXISTS |
+ SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_READ;
+
+ err = vboxsf_create_at_dentry(file_dentry(file), &params);
+ if (err)
+ goto err_free_dir_info;
+
+ if (params.result != SHFL_FILE_EXISTS) {
+ err = -ENOENT;
+ goto err_close;
+ }
+
+ err = vboxsf_dir_read_all(sbi, sf_d, params.handle);
+ if (err)
+ goto err_close;
+
+ vboxsf_close(sbi->root, params.handle);
+ file->private_data = sf_d;
+ return 0;
+
+err_close:
+ vboxsf_close(sbi->root, params.handle);
+err_free_dir_info:
+ vboxsf_dir_info_free(sf_d);
+ return err;
+}
+
+static int vboxsf_dir_release(struct inode *inode, struct file *file)
+{
+ if (file->private_data)
+ vboxsf_dir_info_free(file->private_data);
+
+ return 0;
+}
+
+static unsigned int vboxsf_get_d_type(u32 mode)
+{
+ unsigned int d_type;
+
+ switch (mode & SHFL_TYPE_MASK) {
+ case SHFL_TYPE_FIFO:
+ d_type = DT_FIFO;
+ break;
+ case SHFL_TYPE_DEV_CHAR:
+ d_type = DT_CHR;
+ break;
+ case SHFL_TYPE_DIRECTORY:
+ d_type = DT_DIR;
+ break;
+ case SHFL_TYPE_DEV_BLOCK:
+ d_type = DT_BLK;
+ break;
+ case SHFL_TYPE_FILE:
+ d_type = DT_REG;
+ break;
+ case SHFL_TYPE_SYMLINK:
+ d_type = DT_LNK;
+ break;
+ case SHFL_TYPE_SOCKET:
+ d_type = DT_SOCK;
+ break;
+ case SHFL_TYPE_WHITEOUT:
+ d_type = DT_WHT;
+ break;
+ default:
+ d_type = DT_UNKNOWN;
+ break;
+ }
+ return d_type;
+}
+
+static bool vboxsf_dir_emit(struct file *dir, struct dir_context *ctx)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(file_inode(dir)->i_sb);
+ struct vboxsf_dir_info *sf_d = dir->private_data;
+ struct shfl_dirinfo *info;
+ struct vboxsf_dir_buf *b;
+ unsigned int d_type;
+ loff_t i, cur = 0;
+ ino_t fake_ino;
+ void *end;
+ int err;
+
+ list_for_each_entry(b, &sf_d->info_list, head) {
+try_next_entry:
+ if (ctx->pos >= cur + b->entries) {
+ cur += b->entries;
+ continue;
+ }
+
+ /*
+ * Note the vboxsf_dir_info objects we are iterating over here
+ * are variable sized, so the info pointer may end up being
+ * unaligned. This is how we get the data from the host.
+ * Since vboxsf is only supported on x86 machines this is not
+ * a problem.
+ */
+ for (i = 0, info = b->buf; i < ctx->pos - cur; i++) {
+ end = &info->name.string.utf8[info->name.size];
+ /* Only happens if the host gives us corrupt data */
+ if (WARN_ON(end > (b->buf + b->used)))
+ return false;
+ info = end;
+ }
+
+ end = &info->name.string.utf8[info->name.size];
+ if (WARN_ON(end > (b->buf + b->used)))
+ return false;
+
+ /* Info now points to the right entry, emit it. */
+ d_type = vboxsf_get_d_type(info->info.attr.mode);
+
+ /*
+ * On 32-bit systems pos is 64-bit signed, while ino is 32-bit
+ * unsigned, so fake_ino may overflow; check for this.
+ */
+ if ((ino_t)(ctx->pos + 1) != (u64)(ctx->pos + 1)) {
+ vbg_err("vboxsf: fake ino overflow, truncating dir\n");
+ return false;
+ }
+ fake_ino = ctx->pos + 1;
+
+ if (sbi->nls) {
+ char d_name[NAME_MAX];
+
+ err = vboxsf_nlscpy(sbi, d_name, NAME_MAX,
+ info->name.string.utf8,
+ info->name.length);
+ if (err) {
+ /* skip erroneous entry and proceed */
+ ctx->pos += 1;
+ goto try_next_entry;
+ }
+
+ return dir_emit(ctx, d_name, strlen(d_name),
+ fake_ino, d_type);
+ }
+
+ return dir_emit(ctx, info->name.string.utf8, info->name.length,
+ fake_ino, d_type);
+ }
+
+ return false;
+}
+
+static int vboxsf_dir_iterate(struct file *dir, struct dir_context *ctx)
+{
+ bool emitted;
+
+ do {
+ emitted = vboxsf_dir_emit(dir, ctx);
+ if (emitted)
+ ctx->pos += 1;
+ } while (emitted);
+
+ return 0;
+}
+
+const struct file_operations vboxsf_dir_fops = {
+ .open = vboxsf_dir_open,
+ .iterate = vboxsf_dir_iterate,
+ .release = vboxsf_dir_release,
+ .read = generic_read_dir,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * This is called during name resolution/lookup to check if the @dentry in
+ * the cache is still valid. The job is handled by vboxsf_inode_revalidate.
+ */
+static int vboxsf_dentry_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ if (d_really_is_positive(dentry))
+ return vboxsf_inode_revalidate(dentry) == 0;
+ else
+ return vboxsf_stat_dentry(dentry, NULL) == -ENOENT;
+}
+
+const struct dentry_operations vboxsf_dentry_ops = {
+ .d_revalidate = vboxsf_dentry_revalidate
+};
+
+/* iops */
+
+static struct dentry *vboxsf_dir_lookup(struct inode *parent,
+ struct dentry *dentry,
+ unsigned int flags)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+ struct shfl_fsobjinfo fsinfo;
+ struct inode *inode;
+ int err;
+
+ dentry->d_time = jiffies;
+
+ err = vboxsf_stat_dentry(dentry, &fsinfo);
+ if (err) {
+ inode = (err == -ENOENT) ? NULL : ERR_PTR(err);
+ } else {
+ inode = vboxsf_new_inode(parent->i_sb);
+ if (!IS_ERR(inode))
+ vboxsf_init_inode(sbi, inode, &fsinfo);
+ }
+
+ return d_splice_alias(inode, dentry);
+}
+
+static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry,
+ struct shfl_fsobjinfo *info)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+ struct vboxsf_inode *sf_i;
+ struct inode *inode;
+
+ inode = vboxsf_new_inode(parent->i_sb);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ sf_i = VBOXSF_I(inode);
+ /* The host may have given us different attributes than requested */
+ sf_i->force_restat = 1;
+ vboxsf_init_inode(sbi, inode, info);
+
+ d_instantiate(dentry, inode);
+
+ return 0;
+}
+
+static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
+ umode_t mode, int is_dir)
+{
+ struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+ struct shfl_createparms params = {};
+ int err;
+
+ params.handle = SHFL_HANDLE_NIL;
+ params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW |
+ SHFL_CF_ACT_FAIL_IF_EXISTS |
+ SHFL_CF_ACCESS_READWRITE |
+ (is_dir ? SHFL_CF_DIRECTORY : 0);
+ params.info.attr.mode = (mode & 0777) |
+ (is_dir ? SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE);
+ params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING;
+
+ err = vboxsf_create_at_dentry(dentry, &params);
+ if (err)
+ return err;
+
+ if (params.result != SHFL_FILE_CREATED)
+ return -EPERM;
+
+ vboxsf_close(sbi->root, params.handle);
+
+ err = vboxsf_dir_instantiate(parent, dentry, &params.info);
+ if (err)
+ return err;
+
+ /* parent directory access/change time changed */
+ sf_parent_i->force_restat = 1;
+
+ return 0;
+}
+
+static int vboxsf_dir_mkfile(struct inode *parent, struct dentry *dentry,
+ umode_t mode, bool excl)
+{
+ return vboxsf_dir_create(parent, dentry, mode, 0);
+}
+
+static int vboxsf_dir_mkdir(struct inode *parent, struct dentry *dentry,
+ umode_t mode)
+{
+ return vboxsf_dir_create(parent, dentry, mode, 1);
+}
+
+static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+ struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
+ struct inode *inode = d_inode(dentry);
+ struct shfl_string *path;
+ u32 flags;
+ int err;
+
+ if (S_ISDIR(inode->i_mode))
+ flags = SHFL_REMOVE_DIR;
+ else
+ flags = SHFL_REMOVE_FILE;
+
+ if (S_ISLNK(inode->i_mode))
+ flags |= SHFL_REMOVE_SYMLINK;
+
+ path = vboxsf_path_from_dentry(sbi, dentry);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ err = vboxsf_remove(sbi->root, path, flags);
+ __putname(path);
+ if (err)
+ return err;
+
+ /* parent directory access/change time changed */
+ sf_parent_i->force_restat = 1;
+
+ return 0;
+}
+
+static int vboxsf_dir_rename(struct inode *old_parent,
+ struct dentry *old_dentry,
+ struct inode *new_parent,
+ struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(old_parent->i_sb);
+ struct vboxsf_inode *sf_old_parent_i = VBOXSF_I(old_parent);
+ struct vboxsf_inode *sf_new_parent_i = VBOXSF_I(new_parent);
+ u32 shfl_flags = SHFL_RENAME_FILE | SHFL_RENAME_REPLACE_IF_EXISTS;
+ struct shfl_string *old_path, *new_path;
+ int err;
+
+ if (flags)
+ return -EINVAL;
+
+ old_path = vboxsf_path_from_dentry(sbi, old_dentry);
+ if (IS_ERR(old_path))
+ return PTR_ERR(old_path);
+
+ new_path = vboxsf_path_from_dentry(sbi, new_dentry);
+ if (IS_ERR(new_path)) {
+ err = PTR_ERR(new_path);
+ goto err_put_old_path;
+ }
+
+ if (d_inode(old_dentry)->i_mode & S_IFDIR)
+ shfl_flags = 0;
+
+ err = vboxsf_rename(sbi->root, old_path, new_path, shfl_flags);
+ if (err == 0) {
+ /* parent directories access/change time changed */
+ sf_new_parent_i->force_restat = 1;
+ sf_old_parent_i->force_restat = 1;
+ }
+
+ __putname(new_path);
+err_put_old_path:
+ __putname(old_path);
+ return err;
+}
+
+static int vboxsf_dir_symlink(struct inode *parent, struct dentry *dentry,
+ const char *symname)
+{
+ struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+ int symname_size = strlen(symname) + 1;
+ struct shfl_string *path, *ssymname;
+ struct shfl_fsobjinfo info;
+ int err;
+
+ path = vboxsf_path_from_dentry(sbi, dentry);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ ssymname = kmalloc(SHFLSTRING_HEADER_SIZE + symname_size, GFP_KERNEL);
+ if (!ssymname) {
+ __putname(path);
+ return -ENOMEM;
+ }
+ ssymname->length = symname_size - 1;
+ ssymname->size = symname_size;
+ memcpy(ssymname->string.utf8, symname, symname_size);
+
+ err = vboxsf_symlink(sbi->root, path, ssymname, &info);
+ kfree(ssymname);
+ __putname(path);
+ if (err) {
+ /* -EROFS means symlinks are not supported -> -EPERM */
+ return (err == -EROFS) ? -EPERM : err;
+ }
+
+ err = vboxsf_dir_instantiate(parent, dentry, &info);
+ if (err)
+ return err;
+
+ /* parent directory access/change time changed */
+ sf_parent_i->force_restat = 1;
+ return 0;
+}
+
+const struct inode_operations vboxsf_dir_iops = {
+ .lookup = vboxsf_dir_lookup,
+ .create = vboxsf_dir_mkfile,
+ .mkdir = vboxsf_dir_mkdir,
+ .rmdir = vboxsf_dir_unlink,
+ .unlink = vboxsf_dir_unlink,
+ .rename = vboxsf_dir_rename,
+ .symlink = vboxsf_dir_symlink,
+ .getattr = vboxsf_getattr,
+ .setattr = vboxsf_setattr,
+};
diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c
new file mode 100644
index 000000000000..c4ab5996d97a
--- /dev/null
+++ b/fs/vboxsf/file.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: MIT
+/*
+ * VirtualBox Guest Shared Folders support: Regular file inode and file ops.
+ *
+ * Copyright (C) 2006-2018 Oracle Corporation
+ */
+
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/sizes.h>
+#include "vfsmod.h"
+
+struct vboxsf_handle {
+ u64 handle;
+ u32 root;
+ u32 access_flags;
+ struct kref refcount;
+ struct list_head head;
+};
+
+static int vboxsf_file_open(struct inode *inode, struct file *file)
+{
+ struct vboxsf_inode *sf_i = VBOXSF_I(inode);
+ struct shfl_createparms params = {};
+ struct vboxsf_handle *sf_handle;
+ u32 access_flags = 0;
+ int err;
+
+ sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
+ if (!sf_handle)
+ return -ENOMEM;
+
+ /*
+ * We check the value of params.handle afterwards to find out if
+ * the call succeeded or failed, as the API does not seem to cleanly
+ * distinguish error and informational messages.
+ *
+ * Furthermore, we must set params.handle to SHFL_HANDLE_NIL to
+ * make the shared folders host service use our mode parameter.
+ */
+ params.handle = SHFL_HANDLE_NIL;
+ if (file->f_flags & O_CREAT) {
+ params.create_flags |= SHFL_CF_ACT_CREATE_IF_NEW;
+ /*
+ * We ignore O_EXCL, as the Linux kernel seems to call create
+ * beforehand itself, so O_EXCL should always fail.
+ */
+ if (file->f_flags & O_TRUNC)
+ params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
+ else
+ params.create_flags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
+ } else {
+ params.create_flags |= SHFL_CF_ACT_FAIL_IF_NEW;
+ if (file->f_flags & O_TRUNC)
+ params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
+ }
+
+ switch (file->f_flags & O_ACCMODE) {
+ case O_RDONLY:
+ access_flags |= SHFL_CF_ACCESS_READ;
+ break;
+
+ case O_WRONLY:
+ access_flags |= SHFL_CF_ACCESS_WRITE;
+ break;
+
+ case O_RDWR:
+ access_flags |= SHFL_CF_ACCESS_READWRITE;
+ break;
+
+ default:
+ WARN_ON(1);
+ }
+
+ if (file->f_flags & O_APPEND)
+ access_flags |= SHFL_CF_ACCESS_APPEND;
+
+ params.create_flags |= access_flags;
+ params.info.attr.mode = inode->i_mode;
+
+ err = vboxsf_create_at_dentry(file_dentry(file), &params);
+ if (err == 0 && params.handle == SHFL_HANDLE_NIL)
+ err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
+ if (err) {
+ kfree(sf_handle);
+ return err;
+ }
+
+ /* the host may have given us different attr than requested */
+ sf_i->force_restat = 1;
+
+ /* init our handle struct and add it to the inode's handles list */
+ sf_handle->handle = params.handle;
+ sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
+ sf_handle->access_flags = access_flags;
+ kref_init(&sf_handle->refcount);
+
+ mutex_lock(&sf_i->handle_list_mutex);
+ list_add(&sf_handle->head, &sf_i->handle_list);
+ mutex_unlock(&sf_i->handle_list_mutex);
+
+ file->private_data = sf_handle;
+ return 0;
+}
+
+static void vboxsf_handle_release(struct kref *refcount)
+{
+ struct vboxsf_handle *sf_handle =
+ container_of(refcount, struct vboxsf_handle, refcount);
+
+ vboxsf_close(sf_handle->root, sf_handle->handle);
+ kfree(sf_handle);
+}
+
+static int vboxsf_file_release(struct inode *inode, struct file *file)
+{
+ struct vboxsf_inode *sf_i = VBOXSF_I(inode);
+ struct vboxsf_handle *sf_handle = file->private_data;
+
+ /*
+ * When a file is closed on our (the guest) side, we want any subsequent
+ * accesses done on the host side to see all changes done from our side.
+ */
+ filemap_write_and_wait(inode->i_mapping);
+
+ mutex_lock(&sf_i->handle_list_mutex);
+ list_del(&sf_handle->head);
+ mutex_unlock(&sf_i->handle_list_mutex);
+
+ kref_put(&sf_handle->refcount, vboxsf_handle_release);
+ return 0;
+}
+
+/*
+ * Write back dirty pages now, because there may not be any suitable
+ * open files later
+ */
+static void vboxsf_vma_close(struct vm_area_struct *vma)
+{
+ filemap_write_and_wait(vma->vm_file->f_mapping);
+}
+
+static const struct vm_operations_struct vboxsf_file_vm_ops = {
+ .close = vboxsf_vma_close,
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+};
+
+static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int err;
+
+ err = generic_file_mmap(file, vma);
+ if (!err)
+ vma->vm_ops = &vboxsf_file_vm_ops;
+
+ return err;
+}
+
+/*
+ * Note that since we are accessing files on the host's filesystem, files
+ * may always be changed underneath us by the host!
+ *
+ * The vboxsf API between the guest and the host does not offer any functions
+ * to deal with this. There is no inode-generation to check for changes, no
+ * events / callback on changes and no way to lock files.
+ *
+ * To avoid returning stale data when a file gets *opened* on our (the guest)
+ * side, we do a "stat" on the host side, then compare the mtime with the
+ * last known mtime and invalidate the page-cache if they differ.
+ * This is done from vboxsf_inode_revalidate().
+ *
+ * When reads are done through the read_iter fop, it is possible to do
+ * further cache revalidation at that point; there are 3 options to deal
+ * with this:
+ *
+ * 1) Rely solely on the revalidation done at open time
+ * 2) Do another "stat" and compare mtime again. Unfortunately the vboxsf
+ * host API does not allow stat on handles, so we would need to use
+ * file->f_path.dentry and the stat will then fail if the file was unlinked
+ * or renamed (and there is nothing like NFS' silly-rename). So we get:
+ * 2a) "stat" and compare mtime, on stat failure invalidate the cache
+ * 2b) "stat" and compare mtime, on stat failure do nothing
+ * 3) Simply always call invalidate_inode_pages2_range on the range of the read
+ *
+ * Currently we are keeping things KISS and using option 1. This allows
+ * us to use generic_file_read_iter directly without wrapping it (a sketch
+ * of what option 2a might look like follows vboxsf_reg_fops below).
+ *
+ * This means that only data written on the host side before open() on
+ * the guest side is guaranteed to be seen by the guest. If necessary
+ * we may provide other read-cache strategies in the future and make this
+ * configurable through a mount option.
+ */
+const struct file_operations vboxsf_reg_fops = {
+ .llseek = generic_file_llseek,
+ .read_iter = generic_file_read_iter,
+ .write_iter = generic_file_write_iter,
+ .mmap = vboxsf_file_mmap,
+ .open = vboxsf_file_open,
+ .release = vboxsf_file_release,
+ .fsync = noop_fsync,
+ .splice_read = generic_file_splice_read,
+};
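
Not part of the patch: a minimal sketch of what option 2a from the comment
above might look like, assuming a hypothetical vboxsf_file_read_iter wrapper
around generic_file_read_iter; vboxsf_inode_revalidate and
invalidate_inode_pages2 are the helpers this series already uses elsewhere.

	static ssize_t vboxsf_file_read_iter(struct kiocb *iocb,
					     struct iov_iter *to)
	{
		struct file *file = iocb->ki_filp;

		/* Option 2a: re-stat; on stat failure invalidate the cache */
		if (vboxsf_inode_revalidate(file_dentry(file)))
			invalidate_inode_pages2(file_inode(file)->i_mapping);

		return generic_file_read_iter(iocb, to);
	}

Such a wrapper would replace the generic_file_read_iter entry above, at the
cost of an extra host round-trip per read call.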
+
+const struct inode_operations vboxsf_reg_iops = {
+ .getattr = vboxsf_getattr,
+ .setattr = vboxsf_setattr
+};
+
+static int vboxsf_readpage(struct file *file, struct page *page)
+{
+ struct vboxsf_handle *sf_handle = file->private_data;
+ loff_t off = page_offset(page);
+ u32 nread = PAGE_SIZE;
+ u8 *buf;
+ int err;
+
+ buf = kmap(page);
+
+ err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf);
+ if (err == 0) {
+ memset(&buf[nread], 0, PAGE_SIZE - nread);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ } else {
+ SetPageError(page);
+ }
+
+ kunmap(page);
+ unlock_page(page);
+ return err;
+}
+
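+/*
+ * Find (and take a reference on) a handle which was opened with write
+ * access. Used by vboxsf_writepage, since writeback may run when the file
+ * the page was dirtied through is no longer open.
+ */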
+static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i)
+{
+ struct vboxsf_handle *h, *sf_handle = NULL;
+
+ mutex_lock(&sf_i->handle_list_mutex);
+ list_for_each_entry(h, &sf_i->handle_list, head) {
+ if (h->access_flags == SHFL_CF_ACCESS_WRITE ||
+ h->access_flags == SHFL_CF_ACCESS_READWRITE) {
+ kref_get(&h->refcount);
+ sf_handle = h;
+ break;
+ }
+ }
+ mutex_unlock(&sf_i->handle_list_mutex);
+
+ return sf_handle;
+}
+
+static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
+{
+ struct inode *inode = page->mapping->host;
+ struct vboxsf_inode *sf_i = VBOXSF_I(inode);
+ struct vboxsf_handle *sf_handle;
+ loff_t off = page_offset(page);
+ loff_t size = i_size_read(inode);
+ u32 nwrite = PAGE_SIZE;
+ u8 *buf;
+ int err;
+
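+ /* A partial tail page must only be written up to i_size */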
+ if (off + PAGE_SIZE > size)
+ nwrite = size & ~PAGE_MASK;
+
+ sf_handle = vboxsf_get_write_handle(sf_i);
+ if (!sf_handle)
+ return -EBADF;
+
+ buf = kmap(page);
+ err = vboxsf_write(sf_handle->root, sf_handle->handle,
+ off, &nwrite, buf);
+ kunmap(page);
+
+ kref_put(&sf_handle->refcount, vboxsf_handle_release);
+
+ if (err == 0) {
+ ClearPageError(page);
+ /* mtime changed */
+ sf_i->force_restat = 1;
+ } else {
+ ClearPageUptodate(page);
+ }
+
+ unlock_page(page);
+ return err;
+}
+
+static int vboxsf_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ struct vboxsf_handle *sf_handle = file->private_data;
+ unsigned int from = pos & ~PAGE_MASK;
+ u32 nwritten = len;
+ u8 *buf;
+ int err;
+
+ /* zero the stale part of the page if we did a short copy */
+ if (!PageUptodate(page) && copied < len)
+ zero_user(page, from + copied, len - copied);
+
+ buf = kmap(page);
+ err = vboxsf_write(sf_handle->root, sf_handle->handle,
+ pos, &nwritten, buf + from);
+ kunmap(page);
+
+ if (err) {
+ nwritten = 0;
+ goto out;
+ }
+
+ /* mtime changed */
+ VBOXSF_I(inode)->force_restat = 1;
+
+ if (!PageUptodate(page) && nwritten == PAGE_SIZE)
+ SetPageUptodate(page);
+
+ pos += nwritten;
+ if (pos > inode->i_size)
+ i_size_write(inode, pos);
+
+out:
+ unlock_page(page);
+ put_page(page);
+
+ return nwritten;
+}
+
+/*
+ * Note simple_write_begin does not read the page from disk on partial writes
+ * this is ok since vboxsf_write_end only writes the written parts of the
+ * page and it does not call SetPageUptodate for partial writes.
+ */
+const struct address_space_operations vboxsf_reg_aops = {
+ .readpage = vboxsf_readpage,
+ .writepage = vboxsf_writepage,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ .write_begin = simple_write_begin,
+ .write_end = vboxsf_write_end,
+};
+
+static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *done)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
+ struct shfl_string *path;
+ char *link;
+ int err;
+
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ path = vboxsf_path_from_dentry(sbi, dentry);
+ if (IS_ERR(path))
+ return ERR_CAST(path);
+
+ link = kzalloc(PATH_MAX, GFP_KERNEL);
+ if (!link) {
+ __putname(path);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = vboxsf_readlink(sbi->root, path, PATH_MAX, link);
+ __putname(path);
+ if (err) {
+ kfree(link);
+ return ERR_PTR(err);
+ }
+
+ set_delayed_call(done, kfree_link, link);
+ return link;
+}
+
+const struct inode_operations vboxsf_lnk_iops = {
+ .get_link = vboxsf_get_link
+};
diff --git a/fs/vboxsf/shfl_hostintf.h b/fs/vboxsf/shfl_hostintf.h
new file mode 100644
index 000000000000..aca829062c12
--- /dev/null
+++ b/fs/vboxsf/shfl_hostintf.h
@@ -0,0 +1,901 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * VirtualBox Shared Folders: host interface definition.
+ *
+ * Copyright (C) 2006-2018 Oracle Corporation
+ */
+
+#ifndef SHFL_HOSTINTF_H
+#define SHFL_HOSTINTF_H
+
+#include <linux/vbox_vmmdev_types.h>
+
+/* The max in/out buffer size for a FN_READ or FN_WRITE call */
+#define SHFL_MAX_RW_COUNT (16 * SZ_1M)
+
+/*
+ * Structures shared between guest and the service
+ * can be relocated and use offsets to point to variable
+ * length parts.
+ *
+ * Shared folders protocol works with handles.
+ * Before doing any action on a file system object,
+ * one has to obtain the object handle via a SHFL_FN_CREATE
+ * request. A handle must be closed with SHFL_FN_CLOSE (an
+ * illustrative open/read/close sketch follows the function list below).
+ */
+
+enum {
+ SHFL_FN_QUERY_MAPPINGS = 1, /* Query mappings changes. */
+ SHFL_FN_QUERY_MAP_NAME = 2, /* Query map name. */
+ SHFL_FN_CREATE = 3, /* Open/create object. */
+ SHFL_FN_CLOSE = 4, /* Close object handle. */
+ SHFL_FN_READ = 5, /* Read object content. */
+ SHFL_FN_WRITE = 6, /* Write new object content. */
+ SHFL_FN_LOCK = 7, /* Lock/unlock a range in the object. */
+ SHFL_FN_LIST = 8, /* List object content. */
+ SHFL_FN_INFORMATION = 9, /* Query/set object information. */
+ /* Note function number 10 is not used! */
+ SHFL_FN_REMOVE = 11, /* Remove object */
+ SHFL_FN_MAP_FOLDER_OLD = 12, /* Map folder (legacy) */
+ SHFL_FN_UNMAP_FOLDER = 13, /* Unmap folder */
+ SHFL_FN_RENAME = 14, /* Rename object */
+ SHFL_FN_FLUSH = 15, /* Flush file */
+ SHFL_FN_SET_UTF8 = 16, /* Select UTF8 filename encoding */
+ SHFL_FN_MAP_FOLDER = 17, /* Map folder */
+ SHFL_FN_READLINK = 18, /* Read symlink dest (as of VBox 4.0) */
+ SHFL_FN_SYMLINK = 19, /* Create symlink (as of VBox 4.0) */
+ SHFL_FN_SET_SYMLINKS = 20, /* Ask host to show symlinks (4.0+) */
+};
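
A sketch (not part of this patch) of the resulting handle lifecycle, using
the guest-side wrappers this series adds in vboxsf_wrappers.c; example_read
is a hypothetical function shown for illustration only:

	/* Read the first *len bytes of an existing file: every access is
	 * bracketed by a SHFL_FN_CREATE / SHFL_FN_CLOSE pair.
	 */
	static int example_read(struct vboxsf_sbi *sbi, struct shfl_string *path,
				u8 *buf, u32 *len)
	{
		struct shfl_createparms params = {};
		int err;

		params.handle = SHFL_HANDLE_NIL;
		params.create_flags = SHFL_CF_ACT_FAIL_IF_NEW |
				      SHFL_CF_ACCESS_READ;

		err = vboxsf_create(sbi->root, path, &params);	/* SHFL_FN_CREATE */
		if (err)
			return err;
		if (params.handle == SHFL_HANDLE_NIL)
			return -ENOENT;

		err = vboxsf_read(sbi->root, params.handle, 0, len, buf);
		vboxsf_close(sbi->root, params.handle);		/* SHFL_FN_CLOSE */
		return err;
	}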
+
+/* Root handles for a mapping are of type u32; root handles are unique. */
+#define SHFL_ROOT_NIL UINT_MAX
+
+/* Shared folders handles for opened objects are of type u64. */
+#define SHFL_HANDLE_NIL ULLONG_MAX
+
+/* Hardcoded maximum length (in chars) of a shared folder name. */
+#define SHFL_MAX_LEN (256)
+/* Hardcoded maximum number of shared folder mappings available to the guest. */
+#define SHFL_MAX_MAPPINGS (64)
+
+/** Shared folder string buffer structure. */
+struct shfl_string {
+ /** Allocated size of the string member in bytes. */
+ u16 size;
+
+ /** Length of string without trailing nul in bytes. */
+ u16 length;
+
+ /** UTF-8 or UTF-16 string. Nul terminated. */
+ union {
+ u8 utf8[2];
+ u16 utf16[1];
+ u16 ucs2[1]; /* misnomer, use utf16. */
+ } string;
+};
+VMMDEV_ASSERT_SIZE(shfl_string, 6);
+
+/* The size of shfl_string w/o the string part. */
+#define SHFLSTRING_HEADER_SIZE 4
+
+/* Calculate size of the string. */
+static inline u32 shfl_string_buf_size(const struct shfl_string *string)
+{
+ return string ? SHFLSTRING_HEADER_SIZE + string->size : 0;
+}
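
For illustration (not part of the patch), a shfl_string for a UTF-8 name is
built with the same pattern vboxsf_dir_symlink uses in dir.c;
shfl_string_from_utf8 is a hypothetical helper name:

	static struct shfl_string *shfl_string_from_utf8(const char *name)
	{
		u16 size = strlen(name) + 1;	/* including the trailing nul */
		struct shfl_string *str;

		str = kmalloc(SHFLSTRING_HEADER_SIZE + size, GFP_KERNEL);
		if (!str)
			return NULL;

		str->size = size;
		str->length = size - 1;
		memcpy(str->string.utf8, name, size);
		/* shfl_string_buf_size(str) now equals the allocated size */
		return str;
	}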
+
+/* Set user id on execution (S_ISUID). */
+#define SHFL_UNIX_ISUID 0004000U
+/* Set group id on execution (S_ISGID). */
+#define SHFL_UNIX_ISGID 0002000U
+/* Sticky bit (S_ISVTX / S_ISTXT). */
+#define SHFL_UNIX_ISTXT 0001000U
+
+/* Owner readable (S_IRUSR). */
+#define SHFL_UNIX_IRUSR 0000400U
+/* Owner writable (S_IWUSR). */
+#define SHFL_UNIX_IWUSR 0000200U
+/* Owner executable (S_IXUSR). */
+#define SHFL_UNIX_IXUSR 0000100U
+
+/* Group readable (S_IRGRP). */
+#define SHFL_UNIX_IRGRP 0000040U
+/* Group writable (S_IWGRP). */
+#define SHFL_UNIX_IWGRP 0000020U
+/* Group executable (S_IXGRP). */
+#define SHFL_UNIX_IXGRP 0000010U
+
+/* Other readable (S_IROTH). */
+#define SHFL_UNIX_IROTH 0000004U
+/* Other writable (S_IWOTH). */
+#define SHFL_UNIX_IWOTH 0000002U
+/* Other executable (S_IXOTH). */
+#define SHFL_UNIX_IXOTH 0000001U
+
+/* Named pipe (fifo) (S_IFIFO). */
+#define SHFL_TYPE_FIFO 0010000U
+/* Character device (S_IFCHR). */
+#define SHFL_TYPE_DEV_CHAR 0020000U
+/* Directory (S_IFDIR). */
+#define SHFL_TYPE_DIRECTORY 0040000U
+/* Block device (S_IFBLK). */
+#define SHFL_TYPE_DEV_BLOCK 0060000U
+/* Regular file (S_IFREG). */
+#define SHFL_TYPE_FILE 0100000U
+/* Symbolic link (S_IFLNK). */
+#define SHFL_TYPE_SYMLINK 0120000U
+/* Socket (S_IFSOCK). */
+#define SHFL_TYPE_SOCKET 0140000U
+/* Whiteout (S_IFWHT). */
+#define SHFL_TYPE_WHITEOUT 0160000U
+/* Type mask (S_IFMT). */
+#define SHFL_TYPE_MASK 0170000U
+
+/* Checks the mode flags indicate a directory (S_ISDIR). */
+#define SHFL_IS_DIRECTORY(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_DIRECTORY)
+/* Checks the mode flags indicate a symbolic link (S_ISLNK). */
+#define SHFL_IS_SYMLINK(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_SYMLINK)
+
+/** The available additional information in a shfl_fsobjattr object. */
+enum shfl_fsobjattr_add {
+ /** No additional information is available / requested. */
+ SHFLFSOBJATTRADD_NOTHING = 1,
+ /**
+ * The additional unix attributes (shfl_fsobjattr::u::unix_attr) are
+ * available / requested.
+ */
+ SHFLFSOBJATTRADD_UNIX,
+ /**
+ * The additional extended attribute size (shfl_fsobjattr::u::size) is
+ * available / requested.
+ */
+ SHFLFSOBJATTRADD_EASIZE,
+ /**
+ * The last valid item (inclusive).
+ * The valid range is SHFLFSOBJATTRADD_NOTHING thru
+ * SHFLFSOBJATTRADD_LAST.
+ */
+ SHFLFSOBJATTRADD_LAST = SHFLFSOBJATTRADD_EASIZE,
+
+ /** The usual 32-bit hack. */
+ SHFLFSOBJATTRADD_32BIT_SIZE_HACK = 0x7fffffff
+};
+
+/**
+ * Additional unix Attributes, these are available when
+ * shfl_fsobjattr.additional == SHFLFSOBJATTRADD_UNIX.
+ */
+struct shfl_fsobjattr_unix {
+ /**
+ * The user owning the filesystem object (st_uid).
+ * This field is ~0U if not supported.
+ */
+ u32 uid;
+
+ /**
+ * The group the filesystem object is assigned (st_gid).
+ * This field is ~0U if not supported.
+ */
+ u32 gid;
+
+ /**
+ * Number of hard links to this filesystem object (st_nlink).
+ * This field is 1 if the filesystem doesn't support hardlinking or
+ * the information isn't available.
+ */
+ u32 hardlinks;
+
+ /**
+ * The device number of the device which this filesystem object resides
+ * on (st_dev). This field is 0 if this information is not available.
+ */
+ u32 inode_id_device;
+
+ /**
+ * The unique identifier (within the filesystem) of this filesystem
+ * object (st_ino). Together with inode_id_device, this field can be
+ * used as an OS-wide unique id, when both their values are not 0.
+ * This field is 0 if the information is not available.
+ */
+ u64 inode_id;
+
+ /**
+ * User flags (st_flags).
+ * This field is 0 if this information is not available.
+ */
+ u32 flags;
+
+ /**
+ * The current generation number (st_gen).
+ * This field is 0 if this information is not available.
+ */
+ u32 generation_id;
+
+ /**
+ * The device number of a char. or block device type object (st_rdev).
+ * This field is 0 if the file isn't a char. or block device or when
+ * the OS doesn't use the major+minor device identification scheme.
+ */
+ u32 device;
+} __packed;
+
+/** Extended attribute size. */
+struct shfl_fsobjattr_easize {
+ /** Size of EAs. */
+ s64 cb;
+} __packed;
+
+/** Shared folder filesystem object attributes. */
+struct shfl_fsobjattr {
+ /** Mode flags (st_mode). SHFL_UNIX_*, SHFL_TYPE_*, and SHFL_DOS_*. */
+ u32 mode;
+
+ /** The additional attributes available. */
+ enum shfl_fsobjattr_add additional;
+
+ /**
+ * Additional attributes.
+ *
+ * Unless explicitly specified to an API, the API can provide additional
+ * data as it is provided by the underlying OS.
+ */
+ union {
+ struct shfl_fsobjattr_unix unix_attr;
+ struct shfl_fsobjattr_easize size;
+ } __packed u;
+} __packed;
+VMMDEV_ASSERT_SIZE(shfl_fsobjattr, 44);
+
+struct shfl_timespec {
+ s64 ns_relative_to_unix_epoch;
+};
+
+/** Filesystem object information structure. */
+struct shfl_fsobjinfo {
+ /**
+ * Logical size (st_size).
+ * For normal files this is the size of the file.
+ * For symbolic links, this is the length of the path name contained
+ * in the symbolic link.
+ * For other objects this field needs to be specified.
+ */
+ s64 size;
+
+ /** Disk allocation size (st_blocks * DEV_BSIZE). */
+ s64 allocated;
+
+ /** Time of last access (st_atime). */
+ struct shfl_timespec access_time;
+
+ /** Time of last data modification (st_mtime). */
+ struct shfl_timespec modification_time;
+
+ /**
+ * Time of last status change (st_ctime).
+ * If not available this is set to modification_time.
+ */
+ struct shfl_timespec change_time;
+
+ /**
+ * Time of file birth (st_birthtime).
+ * If not available this is set to change_time.
+ */
+ struct shfl_timespec birth_time;
+
+ /** Attributes. */
+ struct shfl_fsobjattr attr;
+
+} __packed;
+VMMDEV_ASSERT_SIZE(shfl_fsobjinfo, 92);
+
+/**
+ * Result of an open/create request.
+ * Along with the handle value, the result code
+ * identifies what has happened while
+ * trying to open the object.
+ */
+enum shfl_create_result {
+ SHFL_NO_RESULT,
+ /** Specified path does not exist. */
+ SHFL_PATH_NOT_FOUND,
+ /** Path to file exists, but the last component does not. */
+ SHFL_FILE_NOT_FOUND,
+ /** File already exists and either has been opened or not. */
+ SHFL_FILE_EXISTS,
+ /** New file was created. */
+ SHFL_FILE_CREATED,
+ /** Existing file was replaced or overwritten. */
+ SHFL_FILE_REPLACED
+};
+
+/* No flags. Initialization value. */
+#define SHFL_CF_NONE (0x00000000)
+
+/*
+ * Only lookup the object, do not return a handle. When this is set all other
+ * flags are ignored.
+ */
+#define SHFL_CF_LOOKUP (0x00000001)
+
+/*
+ * Open parent directory of specified object.
+ * Useful for the corresponding Windows FSD flag
+ * and for opening paths like \\dir\\*.* to search the 'dir'.
+ */
+#define SHFL_CF_OPEN_TARGET_DIRECTORY (0x00000002)
+
+/* Create/open a directory. */
+#define SHFL_CF_DIRECTORY (0x00000004)
+
+/*
+ * Open/create action to take if the object exists
+ * and if the object does not exist.
+ * REPLACE file means atomically DELETE and CREATE.
+ * OVERWRITE file means truncating the file to 0 and
+ * setting new size.
+ * When opening an existing directory, the REPLACE and OVERWRITE
+ * actions are considered invalid and cause FILE_EXISTS with a
+ * NIL handle to be returned.
+ */
+#define SHFL_CF_ACT_MASK_IF_EXISTS (0x000000f0)
+#define SHFL_CF_ACT_MASK_IF_NEW (0x00000f00)
+
+/* What to do if object exists. */
+#define SHFL_CF_ACT_OPEN_IF_EXISTS (0x00000000)
+#define SHFL_CF_ACT_FAIL_IF_EXISTS (0x00000010)
+#define SHFL_CF_ACT_REPLACE_IF_EXISTS (0x00000020)
+#define SHFL_CF_ACT_OVERWRITE_IF_EXISTS (0x00000030)
+
+/* What to do if object does not exist. */
+#define SHFL_CF_ACT_CREATE_IF_NEW (0x00000000)
+#define SHFL_CF_ACT_FAIL_IF_NEW (0x00000100)
+
+/* Read/write requested access for the object. */
+#define SHFL_CF_ACCESS_MASK_RW (0x00003000)
+
+/* No access requested. */
+#define SHFL_CF_ACCESS_NONE (0x00000000)
+/* Read access requested. */
+#define SHFL_CF_ACCESS_READ (0x00001000)
+/* Write access requested. */
+#define SHFL_CF_ACCESS_WRITE (0x00002000)
+/* Read/Write access requested. */
+#define SHFL_CF_ACCESS_READWRITE (0x00003000)
+
+/* Requested share access for the object. */
+#define SHFL_CF_ACCESS_MASK_DENY (0x0000c000)
+
+/* Allow any access. */
+#define SHFL_CF_ACCESS_DENYNONE (0x00000000)
+/* Do not allow read. */
+#define SHFL_CF_ACCESS_DENYREAD (0x00004000)
+/* Do not allow write. */
+#define SHFL_CF_ACCESS_DENYWRITE (0x00008000)
+/* Do not allow access. */
+#define SHFL_CF_ACCESS_DENYALL (0x0000c000)
+
+/* Requested access to attributes of the object. */
+#define SHFL_CF_ACCESS_MASK_ATTR (0x00030000)
+
+/* No access requested. */
+#define SHFL_CF_ACCESS_ATTR_NONE (0x00000000)
+/* Read access requested. */
+#define SHFL_CF_ACCESS_ATTR_READ (0x00010000)
+/* Write access requested. */
+#define SHFL_CF_ACCESS_ATTR_WRITE (0x00020000)
+/* Read/Write access requested. */
+#define SHFL_CF_ACCESS_ATTR_READWRITE (0x00030000)
+
+/*
+ * The file is opened in append mode.
+ * Ignored if SHFL_CF_ACCESS_WRITE is not set.
+ */
+#define SHFL_CF_ACCESS_APPEND (0x00040000)
+
+/** Create parameters buffer struct for SHFL_FN_CREATE call */
+struct shfl_createparms {
+ /** Returned handle of opened object. */
+ u64 handle;
+
+ /** Returned result of the operation */
+ enum shfl_create_result result;
+
+ /** SHFL_CF_* */
+ u32 create_flags;
+
+ /**
+ * Attributes of object to create and
+ * returned actual attributes of opened/created object.
+ */
+ struct shfl_fsobjinfo info;
+} __packed;
+
+/** Shared Folder directory information */
+struct shfl_dirinfo {
+ /** Full information about the object. */
+ struct shfl_fsobjinfo info;
+ /**
+ * The length of the short_name field (number of UTF16 chars).
+ * It is 16-bit for reasons of alignment.
+ */
+ u16 short_name_len;
+ /**
+ * The short name for 8.3 compatibility.
+ * Empty string if not available.
+ */
+ u16 short_name[14];
+ struct shfl_string name;
+};
+
+/** Shared folder filesystem properties. */
+struct shfl_fsproperties {
+ /**
+ * The maximum size of a filesystem object name.
+ * This does not include the '\\0'.
+ */
+ u32 max_component_len;
+
+ /**
+ * True if the filesystem is remote.
+ * False if the filesystem is local.
+ */
+ bool remote;
+
+ /**
+ * True if the filesystem is case sensitive.
+ * False if the filesystem is case insensitive.
+ */
+ bool case_sensitive;
+
+ /**
+ * True if the filesystem is mounted read only.
+ * False if the filesystem is mounted read write.
+ */
+ bool read_only;
+
+ /**
+ * True if the filesystem can encode unicode object names.
+ * False if it can't.
+ */
+ bool supports_unicode;
+
+ /**
+ * True if the filesystem is compressed.
+ * False if it isn't or we don't know.
+ */
+ bool compressed;
+
+ /**
+ * True if the filesystem supports compression of individual files.
+ * False if it doesn't or we don't know.
+ */
+ bool file_compression;
+};
+VMMDEV_ASSERT_SIZE(shfl_fsproperties, 12);
+
+struct shfl_volinfo {
+ s64 total_allocation_bytes;
+ s64 available_allocation_bytes;
+ u32 bytes_per_allocation_unit;
+ u32 bytes_per_sector;
+ u32 serial;
+ struct shfl_fsproperties properties;
+};
+
+
+/** SHFL_FN_MAP_FOLDER Parameters structure. */
+struct shfl_map_folder {
+ /**
+ * pointer, in:
+ * Points to struct shfl_string buffer.
+ */
+ struct vmmdev_hgcm_function_parameter path;
+
+ /**
+ * pointer, out: SHFLROOT (u32)
+ * Root handle of the mapping which name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * pointer, in: UTF16
+ * Path delimiter
+ */
+ struct vmmdev_hgcm_function_parameter delimiter;
+
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Case sensitive flag
+ */
+ struct vmmdev_hgcm_function_parameter case_sensitive;
+
+};
+
+/* Number of parameters */
+#define SHFL_CPARMS_MAP_FOLDER (4)
+
+
+/** SHFL_FN_UNMAP_FOLDER Parameters structure. */
+struct shfl_unmap_folder {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping which name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+};
+
+/* Number of parameters */
+#define SHFL_CPARMS_UNMAP_FOLDER (1)
+
+
+/** SHFL_FN_CREATE Parameters structure. */
+struct shfl_create {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping which name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * pointer, in:
+ * Points to struct shfl_string buffer.
+ */
+ struct vmmdev_hgcm_function_parameter path;
+
+ /**
+ * pointer, in/out:
+ * Points to struct shfl_createparms buffer.
+ */
+ struct vmmdev_hgcm_function_parameter parms;
+
+};
+
+/* Number of parameters */
+#define SHFL_CPARMS_CREATE (3)
+
+
+/** SHFL_FN_CLOSE Parameters structure. */
+struct shfl_close {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping which name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE (u64) of object to close.
+ */
+ struct vmmdev_hgcm_function_parameter handle;
+
+};
+
+/* Number of parameters */
+#define SHFL_CPARMS_CLOSE (2)
+
+
+/** SHFL_FN_READ Parameters structure. */
+struct shfl_read {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping which name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE (u64) of object to read from.
+ */
+ struct vmmdev_hgcm_function_parameter handle;
+
+ /**
+ * value64, in:
+ * Offset to read from.
+ */
+ struct vmmdev_hgcm_function_parameter offset;
+
+ /**
+ * value64, in/out:
+ * Bytes to read/How many were read.
+ */
+ struct vmmdev_hgcm_function_parameter cb;
+
+ /**
+ * pointer, out:
+ * Buffer to place data to.
+ */
+ struct vmmdev_hgcm_function_parameter buffer;
+
+};
+
+/* Number of parameters */
+#define SHFL_CPARMS_READ (5)
+
+
+/** SHFL_FN_WRITE Parameters structure. */
+struct shfl_write {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping which name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE (u64) of object to write to.
+ */
+ struct vmmdev_hgcm_function_parameter handle;
+
+ /**
+ * value64, in:
+ * Offset to write to.
+ */
+ struct vmmdev_hgcm_function_parameter offset;
+
+ /**
+ * value64, in/out:
+ * Bytes to write/How many were written.
+ */
+ struct vmmdev_hgcm_function_parameter cb;
+
+ /**
+ * pointer, in:
+ * Data to write.
+ */
+ struct vmmdev_hgcm_function_parameter buffer;
+
+};
+
+/* Number of parameters */
+#define SHFL_CPARMS_WRITE (5)
+
+
+/*
+ * SHFL_FN_LIST
+ * Listing information includes variable length RTDIRENTRY[EX] structures.
+ */
+
+#define SHFL_LIST_NONE 0
+#define SHFL_LIST_RETURN_ONE 1
+
+/** SHFL_FN_LIST Parameters structure. */
+struct shfl_list {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping which name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE (u64) of object to be listed.
+ */
+ struct vmmdev_hgcm_function_parameter handle;
+
+ /**
+ * value32, in:
+ * List flags SHFL_LIST_*.
+ */
+ struct vmmdev_hgcm_function_parameter flags;
+
+ /**
+ * value32, in/out:
+ * Bytes to be used for listing information/How many bytes were used.
+ */
+ struct vmmdev_hgcm_function_parameter cb;
+
+ /**
+ * pointer, in/optional
+ * Points to struct shfl_string buffer that specifies a search path.
+ */
+ struct vmmdev_hgcm_function_parameter path;
+
+ /**
+ * pointer, out:
+ * Buffer to place listing information to. (struct shfl_dirinfo)
+ */
+ struct vmmdev_hgcm_function_parameter buffer;
+
+ /**
+ * value32, in/out:
+ * Indicates a key where the listing must be resumed.
+ * in: 0 means start from the beginning of the object.
+ * out: 0 means listing completed.
+ */
+ struct vmmdev_hgcm_function_parameter resume_point;
+
+ /**
+ * pointer, out:
+ * Number of files returned
+ */
+ struct vmmdev_hgcm_function_parameter file_count;
+};
+
+/* Number of parameters */
+#define SHFL_CPARMS_LIST (8)
+
+
+/** SHFL_FN_READLINK Parameters structure. */
+struct shfl_readLink {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping which name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * pointer, in:
+ * Points to struct shfl_string buffer.
+ */
+ struct vmmdev_hgcm_function_parameter path;
+
+ /**
+ * pointer, out:
+ * Buffer to place data to.
+ */
+ struct vmmdev_hgcm_function_parameter buffer;
+
+};
+
+/* Number of parameters */
+#define SHFL_CPARMS_READLINK (3)
+
+
+/* SHFL_FN_INFORMATION */
+
+/* Mask of Set/Get bit. */
+#define SHFL_INFO_MODE_MASK (0x1)
+/* Get information */
+#define SHFL_INFO_GET (0x0)
+/* Set information */
+#define SHFL_INFO_SET (0x1)
+
+/* Get name of the object. */
+#define SHFL_INFO_NAME (0x2)
+/* Set size of object (extend/truncate); only applies to file objects */
+#define SHFL_INFO_SIZE (0x4)
+/* Get/Set file object info. */
+#define SHFL_INFO_FILE (0x8)
+/* Get volume information. */
+#define SHFL_INFO_VOLUME (0x10)
+
+/** SHFL_FN_INFORMATION Parameters structure. */
+struct shfl_information {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping whose name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * value64, in:
+ * SHFLHANDLE (u64) of object to be listed.
+ */
+ struct vmmdev_hgcm_function_parameter handle;
+
+ /**
+ * value32, in:
+ * SHFL_INFO_*
+ */
+ struct vmmdev_hgcm_function_parameter flags;
+
+ /**
+ * value32, in/out:
+ * Bytes to be used for information/How many bytes were used.
+ */
+ struct vmmdev_hgcm_function_parameter cb;
+
+ /**
+ * pointer, in/out:
+ * Information to be set/get (shfl_fsobjinfo or shfl_string). Do not
+ * forget to set the shfl_fsobjinfo::attr::additional for a get
+ * operation as well.
+ */
+ struct vmmdev_hgcm_function_parameter info;
+
+};
+
+/* Number of parameters */
+#define SHFL_CPARMS_INFORMATION (5)
+
+
+/* SHFL_FN_REMOVE */
+
+#define SHFL_REMOVE_FILE (0x1)
+#define SHFL_REMOVE_DIR (0x2)
+#define SHFL_REMOVE_SYMLINK (0x4)
+
+/** SHFL_FN_REMOVE Parameters structure. */
+struct shfl_remove {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping whose name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * pointer, in:
+ * Points to struct shfl_string buffer.
+ */
+ struct vmmdev_hgcm_function_parameter path;
+
+ /**
+ * value32, in:
+ * remove flags (file/directory)
+ */
+ struct vmmdev_hgcm_function_parameter flags;
+
+};
+
+#define SHFL_CPARMS_REMOVE (3)
+
+
+/* SHFL_FN_RENAME */
+
+#define SHFL_RENAME_FILE (0x1)
+#define SHFL_RENAME_DIR (0x2)
+#define SHFL_RENAME_REPLACE_IF_EXISTS (0x4)
+
+/** SHFL_FN_RENAME Parameters structure. */
+struct shfl_rename {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping which name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * pointer, in:
+ * Points to struct shfl_string src.
+ */
+ struct vmmdev_hgcm_function_parameter src;
+
+ /**
+ * pointer, in:
+ * Points to struct shfl_string dest.
+ */
+ struct vmmdev_hgcm_function_parameter dest;
+
+ /**
+ * value32, in:
+ * rename flags (file/directory)
+ */
+ struct vmmdev_hgcm_function_parameter flags;
+
+};
+
+#define SHFL_CPARMS_RENAME (4)
+
+
+/** SHFL_FN_SYMLINK Parameters structure. */
+struct shfl_symlink {
+ /**
+ * pointer, in: SHFLROOT (u32)
+ * Root handle of the mapping whose name is queried.
+ */
+ struct vmmdev_hgcm_function_parameter root;
+
+ /**
+ * pointer, in:
+ * Points to struct shfl_string of path for the new symlink.
+ */
+ struct vmmdev_hgcm_function_parameter new_path;
+
+ /**
+ * pointer, in:
+ * Points to struct shfl_string of destination for symlink.
+ */
+ struct vmmdev_hgcm_function_parameter old_path;
+
+ /**
+ * pointer, out:
+ * Information about created symlink.
+ */
+ struct vmmdev_hgcm_function_parameter info;
+
+};
+
+#define SHFL_CPARMS_SYMLINK (4)
+
+#endif
diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
new file mode 100644
index 000000000000..675e26989376
--- /dev/null
+++ b/fs/vboxsf/super.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: MIT
+/*
+ * VirtualBox Guest Shared Folders support: Virtual File System.
+ *
+ * Module initialization/finalization
+ * File system registration/deregistration
+ * Superblock reading
+ * A few utility functions
+ *
+ * Copyright (C) 2006-2018 Oracle Corporation
+ */
+
+#include <linux/idr.h>
+#include <linux/fs_parser.h>
+#include <linux/magic.h>
+#include <linux/module.h>
+#include <linux/nls.h>
+#include <linux/statfs.h>
+#include <linux/vbox_utils.h>
+#include "vfsmod.h"
+
+#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
+
+#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
+#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
+#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
+#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
+
+static int follow_symlinks;
+module_param(follow_symlinks, int, 0444);
+MODULE_PARM_DESC(follow_symlinks,
+ "Let host resolve symlinks rather than showing them");
+
+static DEFINE_IDA(vboxsf_bdi_ida);
+static DEFINE_MUTEX(vboxsf_setup_mutex);
+static bool vboxsf_setup_done;
+static struct super_operations vboxsf_super_ops; /* forward declaration */
+static struct kmem_cache *vboxsf_inode_cachep;
+
+static char * const vboxsf_default_nls = CONFIG_NLS_DEFAULT;
+
+enum { opt_nls, opt_uid, opt_gid, opt_ttl, opt_dmode, opt_fmode,
+ opt_dmask, opt_fmask };
+
+static const struct fs_parameter_spec vboxsf_fs_parameters[] = {
+ fsparam_string ("nls", opt_nls),
+ fsparam_u32 ("uid", opt_uid),
+ fsparam_u32 ("gid", opt_gid),
+ fsparam_u32 ("ttl", opt_ttl),
+ fsparam_u32oct ("dmode", opt_dmode),
+ fsparam_u32oct ("fmode", opt_fmode),
+ fsparam_u32oct ("dmask", opt_dmask),
+ fsparam_u32oct ("fmask", opt_fmask),
+ {}
+};
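
For reference (not part of the patch itself), these options surface in a
mount invocation such as:

	mount -t vboxsf -o uid=1000,gid=1000,dmode=0755,fmode=0644,ttl=200 \
		myshare /mnt/shared

where myshare is the shared-folder name configured on the host and ttl is
given in milliseconds (see the msecs_to_jiffies() conversion in
vboxsf_parse_param() below).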
+
+static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct vboxsf_fs_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
+ kuid_t uid;
+ kgid_t gid;
+ int opt;
+
+ opt = fs_parse(fc, vboxsf_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case opt_nls:
+ if (ctx->nls_name || fc->purpose != FS_CONTEXT_FOR_MOUNT) {
+ vbg_err("vboxsf: Cannot reconfigure nls option\n");
+ return -EINVAL;
+ }
+ ctx->nls_name = param->string;
+ param->string = NULL;
+ break;
+ case opt_uid:
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ return -EINVAL;
+ ctx->o.uid = uid;
+ break;
+ case opt_gid:
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+ return -EINVAL;
+ ctx->o.gid = gid;
+ break;
+ case opt_ttl:
+ ctx->o.ttl = msecs_to_jiffies(result.uint_32);
+ break;
+ case opt_dmode:
+ if (result.uint_32 & ~0777)
+ return -EINVAL;
+ ctx->o.dmode = result.uint_32;
+ ctx->o.dmode_set = true;
+ break;
+ case opt_fmode:
+ if (result.uint_32 & ~0777)
+ return -EINVAL;
+ ctx->o.fmode = result.uint_32;
+ ctx->o.fmode_set = true;
+ break;
+ case opt_dmask:
+ if (result.uint_32 & ~07777)
+ return -EINVAL;
+ ctx->o.dmask = result.uint_32;
+ break;
+ case opt_fmask:
+ if (result.uint_32 & ~07777)
+ return -EINVAL;
+ ctx->o.fmask = result.uint_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ struct vboxsf_fs_context *ctx = fc->fs_private;
+ struct shfl_string *folder_name, root_path;
+ struct vboxsf_sbi *sbi;
+ struct dentry *droot;
+ struct inode *iroot;
+ char *nls_name;
+ size_t size;
+ int err;
+
+ if (!fc->source)
+ return -EINVAL;
+
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+
+ sbi->o = ctx->o;
+ idr_init(&sbi->ino_idr);
+ spin_lock_init(&sbi->ino_idr_lock);
+ sbi->next_generation = 1;
+ sbi->bdi_id = -1;
+
+ /* Load nls if not utf8 */
+ nls_name = ctx->nls_name ? ctx->nls_name : vboxsf_default_nls;
+ if (strcmp(nls_name, "utf8") != 0) {
+ if (nls_name == vboxsf_default_nls)
+ sbi->nls = load_nls_default();
+ else
+ sbi->nls = load_nls(nls_name);
+
+ if (!sbi->nls) {
+ vbg_err("vboxsf: Count not load '%s' nls\n", nls_name);
+ err = -EINVAL;
+ goto fail_free;
+ }
+ }
+
+ sbi->bdi_id = ida_simple_get(&vboxsf_bdi_ida, 0, 0, GFP_KERNEL);
+ if (sbi->bdi_id < 0) {
+ err = sbi->bdi_id;
+ goto fail_free;
+ }
+
+ err = super_setup_bdi_name(sb, "vboxsf-%s.%d", fc->source, sbi->bdi_id);
+ if (err)
+ goto fail_free;
+
+ /* Turn source into a shfl_string and map the folder */
+ size = strlen(fc->source) + 1;
+ folder_name = kmalloc(SHFLSTRING_HEADER_SIZE + size, GFP_KERNEL);
+ if (!folder_name) {
+ err = -ENOMEM;
+ goto fail_free;
+ }
+ folder_name->size = size;
+ folder_name->length = size - 1;
+ strlcpy(folder_name->string.utf8, fc->source, size);
+ err = vboxsf_map_folder(folder_name, &sbi->root);
+ kfree(folder_name);
+ if (err) {
+ vbg_err("vboxsf: Host rejected mount of '%s' with error %d\n",
+ fc->source, err);
+ goto fail_free;
+ }
+
+ root_path.length = 1;
+ root_path.size = 2;
+ root_path.string.utf8[0] = '/';
+ root_path.string.utf8[1] = 0;
+ err = vboxsf_stat(sbi, &root_path, &sbi->root_info);
+ if (err)
+ goto fail_unmap;
+
+ sb->s_magic = VBOXSF_SUPER_MAGIC;
+ sb->s_blocksize = 1024;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_op = &vboxsf_super_ops;
+ sb->s_d_op = &vboxsf_dentry_ops;
+
+ iroot = iget_locked(sb, 0);
+ if (!iroot) {
+ err = -ENOMEM;
+ goto fail_unmap;
+ }
+ vboxsf_init_inode(sbi, iroot, &sbi->root_info);
+ unlock_new_inode(iroot);
+
+ droot = d_make_root(iroot);
+ if (!droot) {
+ err = -ENOMEM;
+ goto fail_unmap;
+ }
+
+ sb->s_root = droot;
+ sb->s_fs_info = sbi;
+ return 0;
+
+fail_unmap:
+ vboxsf_unmap_folder(sbi->root);
+fail_free:
+ if (sbi->bdi_id >= 0)
+ ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
+ if (sbi->nls)
+ unload_nls(sbi->nls);
+ idr_destroy(&sbi->ino_idr);
+ kfree(sbi);
+ return err;
+}
+
+static void vboxsf_inode_init_once(void *data)
+{
+ struct vboxsf_inode *sf_i = data;
+
+ mutex_init(&sf_i->handle_list_mutex);
+ inode_init_once(&sf_i->vfs_inode);
+}
+
+static struct inode *vboxsf_alloc_inode(struct super_block *sb)
+{
+ struct vboxsf_inode *sf_i;
+
+ sf_i = kmem_cache_alloc(vboxsf_inode_cachep, GFP_NOFS);
+ if (!sf_i)
+ return NULL;
+
+ sf_i->force_restat = 0;
+ INIT_LIST_HEAD(&sf_i->handle_list);
+
+ return &sf_i->vfs_inode;
+}
+
+static void vboxsf_free_inode(struct inode *inode)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->ino_idr_lock, flags);
+ idr_remove(&sbi->ino_idr, inode->i_ino);
+ spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
+ kmem_cache_free(vboxsf_inode_cachep, VBOXSF_I(inode));
+}
+
+static void vboxsf_put_super(struct super_block *sb)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);
+
+ vboxsf_unmap_folder(sbi->root);
+ if (sbi->bdi_id >= 0)
+ ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
+ if (sbi->nls)
+ unload_nls(sbi->nls);
+
+ /*
+ * vboxsf_free_inode uses the idr, so make sure all delayed rcu free
+ * inodes are flushed before the idr is destroyed.
+ */
+ rcu_barrier();
+ idr_destroy(&sbi->ino_idr);
+ kfree(sbi);
+}
+
+static int vboxsf_statfs(struct dentry *dentry, struct kstatfs *stat)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct shfl_volinfo shfl_volinfo;
+ struct vboxsf_sbi *sbi;
+ u32 buf_len;
+ int err;
+
+ sbi = VBOXSF_SBI(sb);
+ buf_len = sizeof(shfl_volinfo);
+ err = vboxsf_fsinfo(sbi->root, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME,
+ &buf_len, &shfl_volinfo);
+ if (err)
+ return err;
+
+ stat->f_type = VBOXSF_SUPER_MAGIC;
+ stat->f_bsize = shfl_volinfo.bytes_per_allocation_unit;
+
+ do_div(shfl_volinfo.total_allocation_bytes,
+ shfl_volinfo.bytes_per_allocation_unit);
+ stat->f_blocks = shfl_volinfo.total_allocation_bytes;
+
+ do_div(shfl_volinfo.available_allocation_bytes,
+ shfl_volinfo.bytes_per_allocation_unit);
+ stat->f_bfree = shfl_volinfo.available_allocation_bytes;
+ stat->f_bavail = shfl_volinfo.available_allocation_bytes;
+
+ stat->f_files = 1000;
+ /*
+ * Don't return 0 here since the guest may then think that it is not
+ * possible to create any more files.
+ */
+ stat->f_ffree = 1000000;
+ stat->f_fsid.val[0] = 0;
+ stat->f_fsid.val[1] = 0;
+ stat->f_namelen = 255;
+ return 0;
+}
+
+static struct super_operations vboxsf_super_ops = {
+ .alloc_inode = vboxsf_alloc_inode,
+ .free_inode = vboxsf_free_inode,
+ .put_super = vboxsf_put_super,
+ .statfs = vboxsf_statfs,
+};
+
+static int vboxsf_setup(void)
+{
+ int err;
+
+ mutex_lock(&vboxsf_setup_mutex);
+
+ if (vboxsf_setup_done)
+ goto success;
+
+ vboxsf_inode_cachep =
+ kmem_cache_create("vboxsf_inode_cache",
+ sizeof(struct vboxsf_inode), 0,
+ (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
+ SLAB_ACCOUNT),
+ vboxsf_inode_init_once);
+ if (!vboxsf_inode_cachep) {
+ err = -ENOMEM;
+ goto fail_nomem;
+ }
+
+ err = vboxsf_connect();
+ if (err) {
+ vbg_err("vboxsf: err %d connecting to guest PCI-device\n", err);
+ vbg_err("vboxsf: make sure you are inside a VirtualBox VM\n");
+ vbg_err("vboxsf: and check dmesg for vboxguest errors\n");
+ goto fail_free_cache;
+ }
+
+ err = vboxsf_set_utf8();
+ if (err) {
+ vbg_err("vboxsf_setutf8 error %d\n", err);
+ goto fail_disconnect;
+ }
+
+ if (!follow_symlinks) {
+ err = vboxsf_set_symlinks();
+ if (err)
+ vbg_warn("vboxsf: Unable to show symlinks: %d\n", err);
+ }
+
+ vboxsf_setup_done = true;
+success:
+ mutex_unlock(&vboxsf_setup_mutex);
+ return 0;
+
+fail_disconnect:
+ vboxsf_disconnect();
+fail_free_cache:
+ kmem_cache_destroy(vboxsf_inode_cachep);
+fail_nomem:
+ mutex_unlock(&vboxsf_setup_mutex);
+ return err;
+}
+
+static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
+{
+ char *options = data;
+
+ if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
+ options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
+ options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
+ options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
+ vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
+ return -EINVAL;
+ }
+
+ return generic_parse_monolithic(fc, data);
+}
+
+static int vboxsf_get_tree(struct fs_context *fc)
+{
+ int err;
+
+ err = vboxsf_setup();
+ if (err)
+ return err;
+
+ return get_tree_nodev(fc, vboxsf_fill_super);
+}
+
+static int vboxsf_reconfigure(struct fs_context *fc)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(fc->root->d_sb);
+ struct vboxsf_fs_context *ctx = fc->fs_private;
+ struct inode *iroot = fc->root->d_sb->s_root->d_inode;
+
+ /* Apply changed options to the root inode */
+ sbi->o = ctx->o;
+ vboxsf_init_inode(sbi, iroot, &sbi->root_info);
+
+ return 0;
+}
+
+static void vboxsf_free_fc(struct fs_context *fc)
+{
+ struct vboxsf_fs_context *ctx = fc->fs_private;
+
+ kfree(ctx->nls_name);
+ kfree(ctx);
+}
+
+static const struct fs_context_operations vboxsf_context_ops = {
+ .free = vboxsf_free_fc,
+ .parse_param = vboxsf_parse_param,
+ .parse_monolithic = vboxsf_parse_monolithic,
+ .get_tree = vboxsf_get_tree,
+ .reconfigure = vboxsf_reconfigure,
+};
+
+static int vboxsf_init_fs_context(struct fs_context *fc)
+{
+ struct vboxsf_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ current_uid_gid(&ctx->o.uid, &ctx->o.gid);
+
+ fc->fs_private = ctx;
+ fc->ops = &vboxsf_context_ops;
+ return 0;
+}
+
+static struct file_system_type vboxsf_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "vboxsf",
+ .init_fs_context = vboxsf_init_fs_context,
+ .kill_sb = kill_anon_super
+};
+
+/* Module initialization/finalization handlers */
+static int __init vboxsf_init(void)
+{
+ return register_filesystem(&vboxsf_fs_type);
+}
+
+static void __exit vboxsf_fini(void)
+{
+ unregister_filesystem(&vboxsf_fs_type);
+
+ mutex_lock(&vboxsf_setup_mutex);
+ if (vboxsf_setup_done) {
+ vboxsf_disconnect();
+ /*
+ * Make sure all delayed rcu free inodes are flushed
+ * before we destroy the cache.
+ */
+ rcu_barrier();
+ kmem_cache_destroy(vboxsf_inode_cachep);
+ }
+ mutex_unlock(&vboxsf_setup_mutex);
+}
+
+module_init(vboxsf_init);
+module_exit(vboxsf_fini);
+
+MODULE_DESCRIPTION("Oracle VM VirtualBox Module for Host File System Access");
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_FS("vboxsf");
diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c
new file mode 100644
index 000000000000..96bd160da48b
--- /dev/null
+++ b/fs/vboxsf/utils.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: MIT
+/*
+ * VirtualBox Guest Shared Folders support: Utility functions.
+ * Mainly conversion from/to VirtualBox/Linux data structures.
+ *
+ * Copyright (C) 2006-2018 Oracle Corporation
+ */
+
+#include <linux/namei.h>
+#include <linux/nls.h>
+#include <linux/sizes.h>
+#include <linux/vfs.h>
+#include "vfsmod.h"
+
+struct inode *vboxsf_new_inode(struct super_block *sb)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);
+ struct inode *inode;
+ unsigned long flags;
+ int cursor, ret;
+ u32 gen;
+
+ inode = new_inode(sb);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&sbi->ino_idr_lock, flags);
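+ /*
+ * Allocate inode numbers cyclically: if the new id is below the old
+ * cursor the idr has wrapped around, so bump the generation to keep
+ * (i_ino, i_generation) pairs unique across reuse.
+ */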
+ cursor = idr_get_cursor(&sbi->ino_idr);
+ ret = idr_alloc_cyclic(&sbi->ino_idr, inode, 1, 0, GFP_ATOMIC);
+ if (ret >= 0 && ret < cursor)
+ sbi->next_generation++;
+ gen = sbi->next_generation;
+ spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
+ idr_preload_end();
+
+ if (ret < 0) {
+ iput(inode);
+ return ERR_PTR(ret);
+ }
+
+ inode->i_ino = ret;
+ inode->i_generation = gen;
+ return inode;
+}
+
+/* set [inode] attributes based on [info], uid/gid based on [sbi] */
+void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
+ const struct shfl_fsobjinfo *info)
+{
+ const struct shfl_fsobjattr *attr;
+ s64 allocated;
+ int mode;
+
+ attr = &info->attr;
+
+#define mode_set(r) ((attr->mode & (SHFL_UNIX_##r)) ? (S_##r) : 0)
+
+ mode = mode_set(IRUSR);
+ mode |= mode_set(IWUSR);
+ mode |= mode_set(IXUSR);
+
+ mode |= mode_set(IRGRP);
+ mode |= mode_set(IWGRP);
+ mode |= mode_set(IXGRP);
+
+ mode |= mode_set(IROTH);
+ mode |= mode_set(IWOTH);
+ mode |= mode_set(IXOTH);
+
+#undef mode_set
+
+ /* We use the host-side values for these */
+ inode->i_flags |= S_NOATIME | S_NOCMTIME;
+ inode->i_mapping->a_ops = &vboxsf_reg_aops;
+
+ if (SHFL_IS_DIRECTORY(attr->mode)) {
+ inode->i_mode = sbi->o.dmode_set ? sbi->o.dmode : mode;
+ inode->i_mode &= ~sbi->o.dmask;
+ inode->i_mode |= S_IFDIR;
+ inode->i_op = &vboxsf_dir_iops;
+ inode->i_fop = &vboxsf_dir_fops;
+ /*
+ * XXX: this probably should be set to the number of entries
+ * in the directory plus two (. ..)
+ */
+ set_nlink(inode, 1);
+ } else if (SHFL_IS_SYMLINK(attr->mode)) {
+ inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode;
+ inode->i_mode &= ~sbi->o.fmask;
+ inode->i_mode |= S_IFLNK;
+ inode->i_op = &vboxsf_lnk_iops;
+ set_nlink(inode, 1);
+ } else {
+ inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode;
+ inode->i_mode &= ~sbi->o.fmask;
+ inode->i_mode |= S_IFREG;
+ inode->i_op = &vboxsf_reg_iops;
+ inode->i_fop = &vboxsf_reg_fops;
+ set_nlink(inode, 1);
+ }
+
+ inode->i_uid = sbi->o.uid;
+ inode->i_gid = sbi->o.gid;
+
+ inode->i_size = info->size;
+ inode->i_blkbits = 12;
+ /* i_blocks always in units of 512 bytes! */
+ allocated = info->allocated + 511;
+ do_div(allocated, 512);
+ inode->i_blocks = allocated;
+
+ inode->i_atime = ns_to_timespec64(
+ info->access_time.ns_relative_to_unix_epoch);
+ inode->i_ctime = ns_to_timespec64(
+ info->change_time.ns_relative_to_unix_epoch);
+ inode->i_mtime = ns_to_timespec64(
+ info->modification_time.ns_relative_to_unix_epoch);
+}
+
+int vboxsf_create_at_dentry(struct dentry *dentry,
+ struct shfl_createparms *params)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
+ struct shfl_string *path;
+ int err;
+
+ path = vboxsf_path_from_dentry(sbi, dentry);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ err = vboxsf_create(sbi->root, path, params);
+ __putname(path);
+
+ return err;
+}
+
+int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
+ struct shfl_fsobjinfo *info)
+{
+ struct shfl_createparms params = {};
+ int err;
+
+ params.handle = SHFL_HANDLE_NIL;
+ params.create_flags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW;
+
+ err = vboxsf_create(sbi->root, path, &params);
+ if (err)
+ return err;
+
+ if (params.result != SHFL_FILE_EXISTS)
+ return -ENOENT;
+
+ if (info)
+ *info = params.info;
+
+ return 0;
+}
+
+int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info)
+{
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
+ struct shfl_string *path;
+ int err;
+
+ path = vboxsf_path_from_dentry(sbi, dentry);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ err = vboxsf_stat(sbi, path, info);
+ __putname(path);
+ return err;
+}
+
+int vboxsf_inode_revalidate(struct dentry *dentry)
+{
+ struct vboxsf_sbi *sbi;
+ struct vboxsf_inode *sf_i;
+ struct shfl_fsobjinfo info;
+ struct timespec64 prev_mtime;
+ struct inode *inode;
+ int err;
+
+ if (!dentry || !d_really_is_positive(dentry))
+ return -EINVAL;
+
+ inode = d_inode(dentry);
+ prev_mtime = inode->i_mtime;
+ sf_i = VBOXSF_I(inode);
+ sbi = VBOXSF_SBI(dentry->d_sb);
+ if (!sf_i->force_restat) {
+ if (time_before(jiffies, dentry->d_time + sbi->o.ttl))
+ return 0;
+ }
+
+ err = vboxsf_stat_dentry(dentry, &info);
+ if (err)
+ return err;
+
+ dentry->d_time = jiffies;
+ sf_i->force_restat = 0;
+ vboxsf_init_inode(sbi, inode, &info);
+
+ /*
+ * If the file was changed on the host side we need to invalidate the
+ * page-cache for it. Note this also gets triggered by our own writes,
+ * this is unavoidable.
+ */
+ if (timespec64_compare(&inode->i_mtime, &prev_mtime) > 0)
+ invalidate_inode_pages2(inode->i_mapping);
+
+ return 0;
+}
+
+int vboxsf_getattr(const struct path *path, struct kstat *kstat,
+ u32 request_mask, unsigned int flags)
+{
+ int err;
+ struct dentry *dentry = path->dentry;
+ struct inode *inode = d_inode(dentry);
+ struct vboxsf_inode *sf_i = VBOXSF_I(inode);
+
+ switch (flags & AT_STATX_SYNC_TYPE) {
+ case AT_STATX_DONT_SYNC:
+ err = 0;
+ break;
+ case AT_STATX_FORCE_SYNC:
+ sf_i->force_restat = 1;
+ /* fall-through */
+ default:
+ err = vboxsf_inode_revalidate(dentry);
+ }
+ if (err)
+ return err;
+
+ generic_fillattr(d_inode(dentry), kstat);
+ return 0;
+}
+
+int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ struct vboxsf_inode *sf_i = VBOXSF_I(d_inode(dentry));
+ struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
+ struct shfl_createparms params = {};
+ struct shfl_fsobjinfo info = {};
+ u32 buf_len;
+ int err;
+
+ params.handle = SHFL_HANDLE_NIL;
+ params.create_flags = SHFL_CF_ACT_OPEN_IF_EXISTS |
+ SHFL_CF_ACT_FAIL_IF_NEW |
+ SHFL_CF_ACCESS_ATTR_WRITE;
+
+ /* This is at least required for POSIX hosts */
+ if (iattr->ia_valid & ATTR_SIZE)
+ params.create_flags |= SHFL_CF_ACCESS_WRITE;
+
+ err = vboxsf_create_at_dentry(dentry, &params);
+ if (err || params.result != SHFL_FILE_EXISTS)
+ return err ? err : -ENOENT;
+
+#define mode_set(r) ((iattr->ia_mode & (S_##r)) ? SHFL_UNIX_##r : 0)
+
+ /*
+ * Setting the file size and setting the other attributes has to
+ * be handled separately.
+ */
+ if (iattr->ia_valid & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME)) {
+ if (iattr->ia_valid & ATTR_MODE) {
+ info.attr.mode = mode_set(IRUSR);
+ info.attr.mode |= mode_set(IWUSR);
+ info.attr.mode |= mode_set(IXUSR);
+ info.attr.mode |= mode_set(IRGRP);
+ info.attr.mode |= mode_set(IWGRP);
+ info.attr.mode |= mode_set(IXGRP);
+ info.attr.mode |= mode_set(IROTH);
+ info.attr.mode |= mode_set(IWOTH);
+ info.attr.mode |= mode_set(IXOTH);
+
+ if (iattr->ia_mode & S_IFDIR)
+ info.attr.mode |= SHFL_TYPE_DIRECTORY;
+ else
+ info.attr.mode |= SHFL_TYPE_FILE;
+ }
+
+ if (iattr->ia_valid & ATTR_ATIME)
+ info.access_time.ns_relative_to_unix_epoch =
+ timespec64_to_ns(&iattr->ia_atime);
+
+ if (iattr->ia_valid & ATTR_MTIME)
+ info.modification_time.ns_relative_to_unix_epoch =
+ timespec64_to_ns(&iattr->ia_mtime);
+
+ /*
+ * Ignore ctime (inode change time) as it can't be set
+ * from userland anyway.
+ */
+
+ buf_len = sizeof(info);
+ err = vboxsf_fsinfo(sbi->root, params.handle,
+ SHFL_INFO_SET | SHFL_INFO_FILE, &buf_len,
+ &info);
+ if (err) {
+ vboxsf_close(sbi->root, params.handle);
+ return err;
+ }
+
+ /* the host may have given us different attr than requested */
+ sf_i->force_restat = 1;
+ }
+
+#undef mode_set
+
+ if (iattr->ia_valid & ATTR_SIZE) {
+ memset(&info, 0, sizeof(info));
+ info.size = iattr->ia_size;
+ buf_len = sizeof(info);
+ err = vboxsf_fsinfo(sbi->root, params.handle,
+ SHFL_INFO_SET | SHFL_INFO_SIZE, &buf_len,
+ &info);
+ if (err) {
+ vboxsf_close(sbi->root, params.handle);
+ return err;
+ }
+
+ /* the host may have given us different attr than requested */
+ sf_i->force_restat = 1;
+ }
+
+ vboxsf_close(sbi->root, params.handle);
+
+ /* Update the inode with what the host has actually given us. */
+ if (sf_i->force_restat)
+ vboxsf_inode_revalidate(dentry);
+
+ return 0;
+}
+
+/*
+ * [dentry] contains a string encoded in the coding system that corresponds
+ * to [sbi]->nls; we must convert it to UTF8 here.
+ * Returns a shfl_string allocated through __getname (must be freed using
+ * __putname), or an ERR_PTR on error.
+ */
+struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
+ struct dentry *dentry)
+{
+ struct shfl_string *shfl_path;
+ int path_len, out_len, nb;
+ char *buf, *path;
+ wchar_t uni;
+ u8 *out;
+
+ buf = __getname();
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ path = dentry_path_raw(dentry, buf, PATH_MAX);
+ if (IS_ERR(path)) {
+ __putname(buf);
+ return ERR_CAST(path);
+ }
+ path_len = strlen(path);
+
+ if (sbi->nls) {
+ shfl_path = __getname();
+ if (!shfl_path) {
+ __putname(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ out = shfl_path->string.utf8;
+ out_len = PATH_MAX - SHFLSTRING_HEADER_SIZE - 1;
+
+ while (path_len) {
+ nb = sbi->nls->char2uni(path, path_len, &uni);
+ if (nb < 0) {
+ __putname(shfl_path);
+ __putname(buf);
+ return ERR_PTR(-EINVAL);
+ }
+ path += nb;
+ path_len -= nb;
+
+ nb = utf32_to_utf8(uni, out, out_len);
+ if (nb < 0) {
+ __putname(shfl_path);
+ __putname(buf);
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+ out += nb;
+ out_len -= nb;
+ }
+ *out = 0;
+ shfl_path->length = out - shfl_path->string.utf8;
+ shfl_path->size = shfl_path->length + 1;
+ __putname(buf);
+ } else {
+ if ((SHFLSTRING_HEADER_SIZE + path_len + 1) > PATH_MAX) {
+ __putname(buf);
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+ /*
+ * dentry_path_raw() stores the name at the end of buf, but the
+ * shfl_string we return must be properly aligned.
+ */
+ shfl_path = (struct shfl_string *)buf;
+ memmove(shfl_path->string.utf8, path, path_len);
+ shfl_path->string.utf8[path_len] = 0;
+ shfl_path->length = path_len;
+ shfl_path->size = path_len + 1;
+ }
+
+ return shfl_path;
+}
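+
+/*
+ * A minimal usage sketch (illustrative only): every successful call must be
+ * paired with __putname() on the returned string, e.g.:
+ *
+ * path = vboxsf_path_from_dentry(sbi, dentry);
+ * if (IS_ERR(path))
+ * return PTR_ERR(path);
+ * err = vboxsf_stat(sbi, path, &info);
+ * __putname(path);
+ */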
+
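+/*
+ * Convert the UTF-8 encoded filename in @utf8_name to the coding system
+ * described by @sbi->nls, storing at most @name_bound_len bytes (including
+ * the terminating 0) in @name. Returns 0 on success or a negative errno.
+ */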
+int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
+ const unsigned char *utf8_name, size_t utf8_len)
+{
+ const char *in;
+ char *out;
+ size_t out_len;
+ size_t out_bound_len;
+ size_t in_bound_len;
+
+ in = utf8_name;
+ in_bound_len = utf8_len;
+
+ out = name;
+ out_len = 0;
+ /* Reserve space for terminating 0 */
+ out_bound_len = name_bound_len - 1;
+
+ while (in_bound_len) {
+ int nb;
+ unicode_t uni;
+
+ nb = utf8_to_utf32(in, in_bound_len, &uni);
+ if (nb < 0)
+ return -EINVAL;
+
+ in += nb;
+ in_bound_len -= nb;
+
+ nb = sbi->nls->uni2char(uni, out, out_bound_len);
+ if (nb < 0)
+ return nb;
+
+ out += nb;
+ out_bound_len -= nb;
+ out_len += nb;
+ }
+
+ *out = 0;
+
+ return 0;
+}
+
+static struct vboxsf_dir_buf *vboxsf_dir_buf_alloc(struct list_head *list)
+{
+ struct vboxsf_dir_buf *b;
+
+ b = kmalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return NULL;
+
+ b->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL);
+ if (!b->buf) {
+ kfree(b);
+ return NULL;
+ }
+
+ b->entries = 0;
+ b->used = 0;
+ b->free = DIR_BUFFER_SIZE;
+ list_add(&b->head, list);
+
+ return b;
+}
+
+static void vboxsf_dir_buf_free(struct vboxsf_dir_buf *b)
+{
+ list_del(&b->head);
+ kfree(b->buf);
+ kfree(b);
+}
+
+struct vboxsf_dir_info *vboxsf_dir_info_alloc(void)
+{
+ struct vboxsf_dir_info *p;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ INIT_LIST_HEAD(&p->info_list);
+ return p;
+}
+
+void vboxsf_dir_info_free(struct vboxsf_dir_info *p)
+{
+ struct list_head *list, *pos, *tmp;
+
+ list = &p->info_list;
+ list_for_each_safe(pos, tmp, list) {
+ struct vboxsf_dir_buf *b;
+
+ b = list_entry(pos, struct vboxsf_dir_buf, head);
+ vboxsf_dir_buf_free(b);
+ }
+ kfree(p);
+}
+
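+/*
+ * Read all entries of the directory identified by @handle into the buffer
+ * list of @sf_d, growing the list one DIR_BUFFER_SIZE buffer at a time.
+ * Returns 0 on success (end of dir reached) or a negative errno.
+ */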
+int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
+ u64 handle)
+{
+ struct vboxsf_dir_buf *b;
+ u32 entries, size;
+ int err = 0;
+ void *buf;
+
+ /* vboxsf_dirinfo returns 1 on end of dir */
+ while (err == 0) {
+ b = vboxsf_dir_buf_alloc(&sf_d->info_list);
+ if (!b) {
+ err = -ENOMEM;
+ break;
+ }
+
+ buf = b->buf;
+ size = b->free;
+
+ err = vboxsf_dirinfo(sbi->root, handle, NULL, 0, 0,
+ &size, buf, &entries);
+ if (err < 0)
+ break;
+
+ b->entries += entries;
+ b->free -= size;
+ b->used += size;
+ }
+
+ if (b && b->used == 0)
+ vboxsf_dir_buf_free(b);
+
+ /* -EILSEQ means the host could not translate a filename, ignore */
+ if (err > 0 || err == -EILSEQ)
+ err = 0;
+
+ return err;
+}
diff --git a/fs/vboxsf/vboxsf_wrappers.c b/fs/vboxsf/vboxsf_wrappers.c
new file mode 100644
index 000000000000..bfc78a097dae
--- /dev/null
+++ b/fs/vboxsf/vboxsf_wrappers.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Wrapper functions for the shfl host calls.
+ *
+ * Copyright (C) 2006-2018 Oracle Corporation
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include "vfsmod.h"
+
+#define SHFL_REQUEST \
+ (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER | \
+ VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
+
+static u32 vboxsf_client_id;
+
+int vboxsf_connect(void)
+{
+ struct vbg_dev *gdev;
+ struct vmmdev_hgcm_service_location loc;
+ int err, vbox_status;
+
+ loc.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING;
+ strcpy(loc.u.localhost.service_name, "VBoxSharedFolders");
+
+ gdev = vbg_get_gdev();
+ if (IS_ERR(gdev))
+ return -ENODEV; /* No guest-device */
+
+ err = vbg_hgcm_connect(gdev, SHFL_REQUEST, &loc,
+ &vboxsf_client_id, &vbox_status);
+ vbg_put_gdev(gdev);
+
+ return err ? err : vbg_status_code_to_errno(vbox_status);
+}
+
+void vboxsf_disconnect(void)
+{
+ struct vbg_dev *gdev;
+ int vbox_status;
+
+ gdev = vbg_get_gdev();
+ if (IS_ERR(gdev))
+ return; /* guest-device is gone, already disconnected */
+
+ vbg_hgcm_disconnect(gdev, SHFL_REQUEST, vboxsf_client_id, &vbox_status);
+ vbg_put_gdev(gdev);
+}
+
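+/*
+ * Issue a shared-folders call to the host for the connected client.
+ * Returns a negative errno on transport errors; otherwise the vbox status
+ * code is converted to an errno and, if @status is non-NULL, also passed
+ * back raw so that callers can check for specific VERR_* codes.
+ */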
+static int vboxsf_call(u32 function, void *parms, u32 parm_count, int *status)
+{
+ struct vbg_dev *gdev;
+ int err, vbox_status;
+
+ gdev = vbg_get_gdev();
+ if (IS_ERR(gdev))
+ return -ESHUTDOWN; /* guest-dev removed underneath us */
+
+ err = vbg_hgcm_call(gdev, SHFL_REQUEST, vboxsf_client_id, function,
+ U32_MAX, parms, parm_count, &vbox_status);
+ vbg_put_gdev(gdev);
+
+ if (err < 0)
+ return err;
+
+ if (status)
+ *status = vbox_status;
+
+ return vbg_status_code_to_errno(vbox_status);
+}
+
+int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root)
+{
+ struct shfl_map_folder parms;
+ int err, status;
+
+ parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
+ parms.path.u.pointer.size = shfl_string_buf_size(folder_name);
+ parms.path.u.pointer.u.linear_addr = (uintptr_t)folder_name;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = 0;
+
+ parms.delimiter.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.delimiter.u.value32 = '/';
+
+ parms.case_sensitive.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.case_sensitive.u.value32 = 1;
+
+ err = vboxsf_call(SHFL_FN_MAP_FOLDER, &parms, SHFL_CPARMS_MAP_FOLDER,
+ &status);
+ if (err == -ENOSYS && status == VERR_NOT_IMPLEMENTED)
+ vbg_err("%s: Error: host is too old\n", __func__);
+
+ *root = parms.root.u.value32;
+ return err;
+}
+
+int vboxsf_unmap_folder(u32 root)
+{
+ struct shfl_unmap_folder parms;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ return vboxsf_call(SHFL_FN_UNMAP_FOLDER, &parms,
+ SHFL_CPARMS_UNMAP_FOLDER, NULL);
+}
+
+/**
+ * vboxsf_create - Create a new file or folder
+ * @root: Root of the shared folder in which to create the file
+ * @parsed_path: The path of the file or folder relative to the shared folder
+ * @create_parms: Parameters for file/folder creation.
+ *
+ * Create a new file or folder or open an existing one in a shared folder.
+ * Note this function always returns 0 / success unless an exceptional condition
+ * occurs - out of memory, invalid arguments, etc. If the file or folder could
+ * not be opened or created, create_parms->handle will be set to
+ * SHFL_HANDLE_NIL on return. In this case the value in create_parms->result
+ * provides information as to why (e.g. SHFL_FILE_EXISTS); create_parms->result
+ * is also set on success as additional information.
+ *
+ * Returns:
+ * 0 or negative errno value.
+ */
+int vboxsf_create(u32 root, struct shfl_string *parsed_path,
+ struct shfl_createparms *create_parms)
+{
+ struct shfl_create parms;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
+ parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
+ parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
+
+ parms.parms.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
+ parms.parms.u.pointer.size = sizeof(struct shfl_createparms);
+ parms.parms.u.pointer.u.linear_addr = (uintptr_t)create_parms;
+
+ return vboxsf_call(SHFL_FN_CREATE, &parms, SHFL_CPARMS_CREATE, NULL);
+}
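+
+/*
+ * A minimal usage sketch (illustrative only): since a failed open/create is
+ * not reflected in the return value, callers must inspect create_parms, e.g.:
+ *
+ * err = vboxsf_create(sbi->root, path, &params);
+ * if (err)
+ * return err;
+ * if (params.handle == SHFL_HANDLE_NIL)
+ * return -ENOENT; (check params.result for the reason)
+ */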
+
+int vboxsf_close(u32 root, u64 handle)
+{
+ struct shfl_close parms;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
+ parms.handle.u.value64 = handle;
+
+ return vboxsf_call(SHFL_FN_CLOSE, &parms, SHFL_CPARMS_CLOSE, NULL);
+}
+
+int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags)
+{
+ struct shfl_remove parms;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
+ parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
+ parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
+
+ parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.flags.u.value32 = flags;
+
+ return vboxsf_call(SHFL_FN_REMOVE, &parms, SHFL_CPARMS_REMOVE, NULL);
+}
+
+int vboxsf_rename(u32 root, struct shfl_string *src_path,
+ struct shfl_string *dest_path, u32 flags)
+{
+ struct shfl_rename parms;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ parms.src.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
+ parms.src.u.pointer.size = shfl_string_buf_size(src_path);
+ parms.src.u.pointer.u.linear_addr = (uintptr_t)src_path;
+
+ parms.dest.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
+ parms.dest.u.pointer.size = shfl_string_buf_size(dest_path);
+ parms.dest.u.pointer.u.linear_addr = (uintptr_t)dest_path;
+
+ parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.flags.u.value32 = flags;
+
+ return vboxsf_call(SHFL_FN_RENAME, &parms, SHFL_CPARMS_RENAME, NULL);
+}
+
+int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf)
+{
+ struct shfl_read parms;
+ int err;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
+ parms.handle.u.value64 = handle;
+ parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
+ parms.offset.u.value64 = offset;
+ parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.cb.u.value32 = *buf_len;
+ parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
+ parms.buffer.u.pointer.size = *buf_len;
+ parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
+
+ err = vboxsf_call(SHFL_FN_READ, &parms, SHFL_CPARMS_READ, NULL);
+
+ *buf_len = parms.cb.u.value32;
+ return err;
+}
+
+int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf)
+{
+ struct shfl_write parms;
+ int err;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
+ parms.handle.u.value64 = handle;
+ parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
+ parms.offset.u.value64 = offset;
+ parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.cb.u.value32 = *buf_len;
+ parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
+ parms.buffer.u.pointer.size = *buf_len;
+ parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
+
+ err = vboxsf_call(SHFL_FN_WRITE, &parms, SHFL_CPARMS_WRITE, NULL);
+
+ *buf_len = parms.cb.u.value32;
+ return err;
+}
+
+/* Returns 0 on success, 1 on end-of-dir, negative errno otherwise */
+int vboxsf_dirinfo(u32 root, u64 handle,
+ struct shfl_string *parsed_path, u32 flags, u32 index,
+ u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count)
+{
+ struct shfl_list parms;
+ int err, status;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
+ parms.handle.u.value64 = handle;
+ parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.flags.u.value32 = flags;
+ parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.cb.u.value32 = *buf_len;
+ if (parsed_path) {
+ parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
+ parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
+ parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
+ } else {
+ parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_IN;
+ parms.path.u.pointer.size = 0;
+ parms.path.u.pointer.u.linear_addr = 0;
+ }
+
+ parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
+ parms.buffer.u.pointer.size = *buf_len;
+ parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
+
+ parms.resume_point.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.resume_point.u.value32 = index;
+ parms.file_count.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.file_count.u.value32 = 0; /* out parameter only */
+
+ err = vboxsf_call(SHFL_FN_LIST, &parms, SHFL_CPARMS_LIST, &status);
+ if (err == -ENODATA && status == VERR_NO_MORE_FILES)
+ err = 1;
+
+ *buf_len = parms.cb.u.value32;
+ *file_count = parms.file_count.u.value32;
+ return err;
+}
+
+int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
+ u32 *buf_len, void *buf)
+{
+ struct shfl_information parms;
+ int err;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
+ parms.handle.u.value64 = handle;
+ parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.flags.u.value32 = flags;
+ parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.cb.u.value32 = *buf_len;
+ parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
+ parms.info.u.pointer.size = *buf_len;
+ parms.info.u.pointer.u.linear_addr = (uintptr_t)buf;
+
+ err = vboxsf_call(SHFL_FN_INFORMATION, &parms, SHFL_CPARMS_INFORMATION,
+ NULL);
+
+ *buf_len = parms.cb.u.value32;
+ return err;
+}
+
+int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
+ u32 buf_len, u8 *buf)
+{
+ struct shfl_readLink parms;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
+ parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
+ parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
+
+ parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
+ parms.buffer.u.pointer.size = buf_len;
+ parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
+
+ return vboxsf_call(SHFL_FN_READLINK, &parms, SHFL_CPARMS_READLINK,
+ NULL);
+}
+
+int vboxsf_symlink(u32 root, struct shfl_string *new_path,
+ struct shfl_string *old_path, struct shfl_fsobjinfo *buf)
+{
+ struct shfl_symlink parms;
+
+ parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
+ parms.root.u.value32 = root;
+
+ parms.new_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
+ parms.new_path.u.pointer.size = shfl_string_buf_size(new_path);
+ parms.new_path.u.pointer.u.linear_addr = (uintptr_t)new_path;
+
+ parms.old_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
+ parms.old_path.u.pointer.size = shfl_string_buf_size(old_path);
+ parms.old_path.u.pointer.u.linear_addr = (uintptr_t)old_path;
+
+ parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
+ parms.info.u.pointer.size = sizeof(struct shfl_fsobjinfo);
+ parms.info.u.pointer.u.linear_addr = (uintptr_t)buf;
+
+ return vboxsf_call(SHFL_FN_SYMLINK, &parms, SHFL_CPARMS_SYMLINK, NULL);
+}
+
+int vboxsf_set_utf8(void)
+{
+ return vboxsf_call(SHFL_FN_SET_UTF8, NULL, 0, NULL);
+}
+
+int vboxsf_set_symlinks(void)
+{
+ return vboxsf_call(SHFL_FN_SET_SYMLINKS, NULL, 0, NULL);
+}
diff --git a/fs/vboxsf/vfsmod.h b/fs/vboxsf/vfsmod.h
new file mode 100644
index 000000000000..18f95b00fc33
--- /dev/null
+++ b/fs/vboxsf/vfsmod.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * VirtualBox Guest Shared Folders support: module header.
+ *
+ * Copyright (C) 2006-2018 Oracle Corporation
+ */
+
+#ifndef VFSMOD_H
+#define VFSMOD_H
+
+#include <linux/backing-dev.h>
+#include <linux/idr.h>
+#include "shfl_hostintf.h"
+
+#define DIR_BUFFER_SIZE SZ_16K
+
+/* The cast is to prevent assignment of void * to pointers of arbitrary type */
+#define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info)
+#define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode)
+
+struct vboxsf_options {
+ unsigned long ttl;
+ kuid_t uid;
+ kgid_t gid;
+ bool dmode_set;
+ bool fmode_set;
+ umode_t dmode;
+ umode_t fmode;
+ umode_t dmask;
+ umode_t fmask;
+};
+
+struct vboxsf_fs_context {
+ struct vboxsf_options o;
+ char *nls_name;
+};
+
+/* per-shared folder information */
+struct vboxsf_sbi {
+ struct vboxsf_options o;
+ struct shfl_fsobjinfo root_info;
+ struct idr ino_idr;
+ spinlock_t ino_idr_lock; /* This protects ino_idr */
+ struct nls_table *nls;
+ u32 next_generation;
+ u32 root;
+ int bdi_id;
+};
+
+/* per-inode information */
+struct vboxsf_inode {
+ /* some information was changed, update data on next revalidate */
+ int force_restat;
+ /* list of open handles for this inode + lock protecting it */
+ struct list_head handle_list;
+ /* This mutex protects handle_list accesses */
+ struct mutex handle_list_mutex;
+ /* The VFS inode struct */
+ struct inode vfs_inode;
+};
+
+struct vboxsf_dir_info {
+ struct list_head info_list;
+};
+
+struct vboxsf_dir_buf {
+ size_t entries;
+ size_t free;
+ size_t used;
+ void *buf;
+ struct list_head head;
+};
+
+/* globals */
+extern const struct inode_operations vboxsf_dir_iops;
+extern const struct inode_operations vboxsf_lnk_iops;
+extern const struct inode_operations vboxsf_reg_iops;
+extern const struct file_operations vboxsf_dir_fops;
+extern const struct file_operations vboxsf_reg_fops;
+extern const struct address_space_operations vboxsf_reg_aops;
+extern const struct dentry_operations vboxsf_dentry_ops;
+
+/* from utils.c */
+struct inode *vboxsf_new_inode(struct super_block *sb);
+void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
+ const struct shfl_fsobjinfo *info);
+int vboxsf_create_at_dentry(struct dentry *dentry,
+ struct shfl_createparms *params);
+int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
+ struct shfl_fsobjinfo *info);
+int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info);
+int vboxsf_inode_revalidate(struct dentry *dentry);
+int vboxsf_getattr(const struct path *path, struct kstat *kstat,
+ u32 request_mask, unsigned int query_flags);
+int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr);
+struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
+ struct dentry *dentry);
+int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
+ const unsigned char *utf8_name, size_t utf8_len);
+struct vboxsf_dir_info *vboxsf_dir_info_alloc(void);
+void vboxsf_dir_info_free(struct vboxsf_dir_info *p);
+int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
+ u64 handle);
+
+/* from vboxsf_wrappers.c */
+int vboxsf_connect(void);
+void vboxsf_disconnect(void);
+
+int vboxsf_create(u32 root, struct shfl_string *parsed_path,
+ struct shfl_createparms *create_parms);
+
+int vboxsf_close(u32 root, u64 handle);
+int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags);
+int vboxsf_rename(u32 root, struct shfl_string *src_path,
+ struct shfl_string *dest_path, u32 flags);
+
+int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
+int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
+
+int vboxsf_dirinfo(u32 root, u64 handle,
+ struct shfl_string *parsed_path, u32 flags, u32 index,
+ u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count);
+int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
+ u32 *buf_len, void *buf);
+
+int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root);
+int vboxsf_unmap_folder(u32 root);
+
+int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
+ u32 buf_len, u8 *buf);
+int vboxsf_symlink(u32 root, struct shfl_string *new_path,
+ struct shfl_string *old_path, struct shfl_fsobjinfo *buf);
+
+int vboxsf_set_utf8(void);
+int vboxsf_set_symlinks(void);
+
+#endif /* VFSMOD_H */
diff --git a/fs/verity/enable.c b/fs/verity/enable.c
index b79e3fd19d11..d98bea308fd7 100644
--- a/fs/verity/enable.c
+++ b/fs/verity/enable.c
@@ -8,18 +8,48 @@
#include "fsverity_private.h"
#include <crypto/hash.h>
+#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
-static int build_merkle_tree_level(struct inode *inode, unsigned int level,
+/*
+ * Read a file data page for Merkle tree construction. Do aggressive readahead,
+ * since we're sequentially reading the entire file.
+ */
+static struct page *read_file_data_page(struct file *filp, pgoff_t index,
+ struct file_ra_state *ra,
+ unsigned long remaining_pages)
+{
+ struct page *page;
+
+ page = find_get_page_flags(filp->f_mapping, index, FGP_ACCESSED);
+ if (!page || !PageUptodate(page)) {
+ if (page)
+ put_page(page);
+ else
+ page_cache_sync_readahead(filp->f_mapping, ra, filp,
+ index, remaining_pages);
+ page = read_mapping_page(filp->f_mapping, index, NULL);
+ if (IS_ERR(page))
+ return page;
+ }
+ if (PageReadahead(page))
+ page_cache_async_readahead(filp->f_mapping, ra, filp, page,
+ index, remaining_pages);
+ return page;
+}
+
+static int build_merkle_tree_level(struct file *filp, unsigned int level,
u64 num_blocks_to_hash,
const struct merkle_tree_params *params,
u8 *pending_hashes,
struct ahash_request *req)
{
+ struct inode *inode = file_inode(filp);
const struct fsverity_operations *vops = inode->i_sb->s_vop;
+ struct file_ra_state ra = { 0 };
unsigned int pending_size = 0;
u64 dst_block_num;
u64 i;
@@ -36,6 +66,8 @@ static int build_merkle_tree_level(struct inode *inode, unsigned int level,
dst_block_num = 0; /* unused */
}
+ file_ra_state_init(&ra, filp->f_mapping);
+
for (i = 0; i < num_blocks_to_hash; i++) {
struct page *src_page;
@@ -45,7 +77,8 @@ static int build_merkle_tree_level(struct inode *inode, unsigned int level,
if (level == 0) {
/* Leaf: hashing a data block */
- src_page = read_mapping_page(inode->i_mapping, i, NULL);
+ src_page = read_file_data_page(filp, i, &ra,
+ num_blocks_to_hash - i);
if (IS_ERR(src_page)) {
err = PTR_ERR(src_page);
fsverity_err(inode,
@@ -54,9 +87,14 @@ static int build_merkle_tree_level(struct inode *inode, unsigned int level,
return err;
}
} else {
+ unsigned long num_ra_pages =
+ min_t(unsigned long, num_blocks_to_hash - i,
+ inode->i_sb->s_bdi->io_pages);
+
/* Non-leaf: hashing hash block from level below */
src_page = vops->read_merkle_tree_page(inode,
- params->level_start[level - 1] + i);
+ params->level_start[level - 1] + i,
+ num_ra_pages);
if (IS_ERR(src_page)) {
err = PTR_ERR(src_page);
fsverity_err(inode,
@@ -103,17 +141,18 @@ static int build_merkle_tree_level(struct inode *inode, unsigned int level,
}
/*
- * Build the Merkle tree for the given inode using the given parameters, and
+ * Build the Merkle tree for the given file using the given parameters, and
* return the root hash in @root_hash.
*
* The tree is written to a filesystem-specific location as determined by the
* ->write_merkle_tree_block() method. However, the blocks that comprise the
* tree are the same for all filesystems.
*/
-static int build_merkle_tree(struct inode *inode,
+static int build_merkle_tree(struct file *filp,
const struct merkle_tree_params *params,
u8 *root_hash)
{
+ struct inode *inode = file_inode(filp);
u8 *pending_hashes;
struct ahash_request *req;
u64 blocks;
@@ -126,9 +165,11 @@ static int build_merkle_tree(struct inode *inode,
return 0;
}
+ /* This allocation never fails, since it's mempool-backed. */
+ req = fsverity_alloc_hash_request(params->hash_alg, GFP_KERNEL);
+
pending_hashes = kmalloc(params->block_size, GFP_KERNEL);
- req = ahash_request_alloc(params->hash_alg->tfm, GFP_KERNEL);
- if (!pending_hashes || !req)
+ if (!pending_hashes)
goto out;
/*
@@ -139,7 +180,7 @@ static int build_merkle_tree(struct inode *inode,
blocks = (inode->i_size + params->block_size - 1) >>
params->log_blocksize;
for (level = 0; level <= params->num_levels; level++) {
- err = build_merkle_tree_level(inode, level, blocks, params,
+ err = build_merkle_tree_level(filp, level, blocks, params,
pending_hashes, req);
if (err)
goto out;
@@ -150,7 +191,7 @@ static int build_merkle_tree(struct inode *inode,
err = 0;
out:
kfree(pending_hashes);
- ahash_request_free(req);
+ fsverity_free_hash_request(params->hash_alg, req);
return err;
}
@@ -175,8 +216,7 @@ static int enable_verity(struct file *filp,
/* Get the salt if the user provided one */
if (arg->salt_size &&
- copy_from_user(desc->salt,
- (const u8 __user *)(uintptr_t)arg->salt_ptr,
+ copy_from_user(desc->salt, u64_to_user_ptr(arg->salt_ptr),
arg->salt_size)) {
err = -EFAULT;
goto out;
@@ -185,8 +225,7 @@ static int enable_verity(struct file *filp,
/* Get the signature if the user provided one */
if (arg->sig_size &&
- copy_from_user(desc->signature,
- (const u8 __user *)(uintptr_t)arg->sig_ptr,
+ copy_from_user(desc->signature, u64_to_user_ptr(arg->sig_ptr),
arg->sig_size)) {
err = -EFAULT;
goto out;
@@ -227,7 +266,7 @@ static int enable_verity(struct file *filp,
*/
pr_debug("Building Merkle tree...\n");
BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE);
- err = build_merkle_tree(inode, &params, desc->root_hash);
+ err = build_merkle_tree(filp, &params, desc->root_hash);
if (err) {
fsverity_err(inode, "Error %d building Merkle tree", err);
goto rollback;
diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h
index e74c79b64d88..74768cf539da 100644
--- a/fs/verity/fsverity_private.h
+++ b/fs/verity/fsverity_private.h
@@ -16,6 +16,7 @@
#include <crypto/sha.h>
#include <linux/fsverity.h>
+#include <linux/mempool.h>
struct ahash_request;
@@ -37,11 +38,12 @@ struct fsverity_hash_alg {
const char *name; /* crypto API name, e.g. sha256 */
unsigned int digest_size; /* digest size in bytes, e.g. 32 for SHA-256 */
unsigned int block_size; /* block size in bytes, e.g. 64 for SHA-256 */
+ mempool_t req_pool; /* mempool with a preallocated hash request */
};
/* Merkle tree parameters: hash algorithm, initial hash state, and topology */
struct merkle_tree_params {
- const struct fsverity_hash_alg *hash_alg; /* the hash algorithm */
+ struct fsverity_hash_alg *hash_alg; /* the hash algorithm */
const u8 *hashstate; /* initial hash state or NULL */
unsigned int digest_size; /* same as hash_alg->digest_size */
unsigned int block_size; /* size of data and tree blocks */
@@ -50,6 +52,7 @@ struct merkle_tree_params {
unsigned int log_arity; /* log2(hashes_per_block) */
unsigned int num_levels; /* number of levels in Merkle tree */
u64 tree_size; /* Merkle tree size in bytes */
+ unsigned long level0_blocks; /* number of blocks in tree level 0 */
/*
* Starting block index for each tree level, ordered from leaf level (0)
@@ -114,14 +117,18 @@ struct fsverity_signed_digest {
extern struct fsverity_hash_alg fsverity_hash_algs[];
-const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
- unsigned int num);
-const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg,
+struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
+ unsigned int num);
+struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
+ gfp_t gfp_flags);
+void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
+ struct ahash_request *req);
+const u8 *fsverity_prepare_hash_state(struct fsverity_hash_alg *alg,
const u8 *salt, size_t salt_size);
int fsverity_hash_page(const struct merkle_tree_params *params,
const struct inode *inode,
struct ahash_request *req, struct page *page, u8 *out);
-int fsverity_hash_buffer(const struct fsverity_hash_alg *alg,
+int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
const void *data, size_t size, u8 *out);
void __init fsverity_check_hash_algs(void);
diff --git a/fs/verity/hash_algs.c b/fs/verity/hash_algs.c
index 31e6d7d2389a..c37e186ebeb6 100644
--- a/fs/verity/hash_algs.c
+++ b/fs/verity/hash_algs.c
@@ -24,6 +24,8 @@ struct fsverity_hash_alg fsverity_hash_algs[] = {
},
};
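+
+/*
+ * Serializes one-time initialization of each fsverity_hash_alg. The fast path
+ * in fsverity_get_hash_alg() reads ->tfm with smp_load_acquire(), which pairs
+ * with the smp_store_release() done under this mutex once the tfm and request
+ * mempool are fully set up.
+ */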
+static DEFINE_MUTEX(fsverity_hash_alg_init_mutex);
+
/**
* fsverity_get_hash_alg() - validate and prepare a hash algorithm
* @inode: optional inode for logging purposes
@@ -36,8 +38,8 @@ struct fsverity_hash_alg fsverity_hash_algs[] = {
*
* Return: pointer to the hash alg on success, else an ERR_PTR()
*/
-const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
- unsigned int num)
+struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
+ unsigned int num)
{
struct fsverity_hash_alg *alg;
struct crypto_ahash *tfm;
@@ -50,10 +52,15 @@ const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
}
alg = &fsverity_hash_algs[num];
- /* pairs with cmpxchg() below */
- tfm = READ_ONCE(alg->tfm);
- if (likely(tfm != NULL))
+ /* pairs with smp_store_release() below */
+ if (likely(smp_load_acquire(&alg->tfm) != NULL))
return alg;
+
+ mutex_lock(&fsverity_hash_alg_init_mutex);
+
+ if (alg->tfm != NULL)
+ goto out_unlock;
+
/*
* Using the shash API would make things a bit simpler, but the ahash
* API is preferable as it allows the use of crypto accelerators.
@@ -64,12 +71,14 @@ const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
fsverity_warn(inode,
"Missing crypto API support for hash algorithm \"%s\"",
alg->name);
- return ERR_PTR(-ENOPKG);
+ alg = ERR_PTR(-ENOPKG);
+ goto out_unlock;
}
fsverity_err(inode,
"Error allocating hash algorithm \"%s\": %ld",
alg->name, PTR_ERR(tfm));
- return ERR_CAST(tfm);
+ alg = ERR_CAST(tfm);
+ goto out_unlock;
}
err = -EINVAL;
@@ -78,18 +87,61 @@ const struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
if (WARN_ON(alg->block_size != crypto_ahash_blocksize(tfm)))
goto err_free_tfm;
+ err = mempool_init_kmalloc_pool(&alg->req_pool, 1,
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(tfm));
+ if (err)
+ goto err_free_tfm;
+
pr_info("%s using implementation \"%s\"\n",
alg->name, crypto_ahash_driver_name(tfm));
- /* pairs with READ_ONCE() above */
- if (cmpxchg(&alg->tfm, NULL, tfm) != NULL)
- crypto_free_ahash(tfm);
-
- return alg;
+ /* pairs with smp_load_acquire() above */
+ smp_store_release(&alg->tfm, tfm);
+ goto out_unlock;
err_free_tfm:
crypto_free_ahash(tfm);
- return ERR_PTR(err);
+ alg = ERR_PTR(err);
+out_unlock:
+ mutex_unlock(&fsverity_hash_alg_init_mutex);
+ return alg;
+}
+
+/**
+ * fsverity_alloc_hash_request() - allocate a hash request object
+ * @alg: the hash algorithm for which to allocate the request
+ * @gfp_flags: memory allocation flags
+ *
+ * This is mempool-backed, so this never fails if __GFP_DIRECT_RECLAIM is set in
+ * @gfp_flags. However, in that case this might need to wait for all
+ * previously-allocated requests to be freed. So to avoid deadlocks, callers
+ * must never need multiple requests at a time to make forward progress.
+ *
+ * Return: the request object on success; NULL on failure (but see above)
+ */
+struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
+ gfp_t gfp_flags)
+{
+ struct ahash_request *req = mempool_alloc(&alg->req_pool, gfp_flags);
+
+ if (req)
+ ahash_request_set_tfm(req, alg->tfm);
+ return req;
+}
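+
+/*
+ * A minimal usage sketch (illustrative only); every request is paired with
+ * fsverity_free_hash_request(), as in the callers converted by this patch:
+ *
+ * req = fsverity_alloc_hash_request(alg, GFP_NOFS);
+ * err = fsverity_hash_page(params, inode, req, page, out);
+ * fsverity_free_hash_request(alg, req);
+ */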
+
+/**
+ * fsverity_free_hash_request() - free a hash request object
+ * @alg: the hash algorithm
+ * @req: the hash request object to free
+ */
+void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
+ struct ahash_request *req)
+{
+ if (req) {
+ ahash_request_zero(req);
+ mempool_free(req, &alg->req_pool);
+ }
}
/**
@@ -101,7 +153,7 @@ err_free_tfm:
* Return: NULL if the salt is empty, otherwise the kmalloc()'ed precomputed
* initial hash state on success or an ERR_PTR() on failure.
*/
-const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg,
+const u8 *fsverity_prepare_hash_state(struct fsverity_hash_alg *alg,
const u8 *salt, size_t salt_size)
{
u8 *hashstate = NULL;
@@ -119,11 +171,8 @@ const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg,
if (!hashstate)
return ERR_PTR(-ENOMEM);
- req = ahash_request_alloc(alg->tfm, GFP_KERNEL);
- if (!req) {
- err = -ENOMEM;
- goto err_free;
- }
+ /* This allocation never fails, since it's mempool-backed. */
+ req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
/*
* Zero-pad the salt to the next multiple of the input size of the hash
@@ -158,7 +207,7 @@ const u8 *fsverity_prepare_hash_state(const struct fsverity_hash_alg *alg,
if (err)
goto err_free;
out:
- ahash_request_free(req);
+ fsverity_free_hash_request(alg, req);
kfree(padded_salt);
return hashstate;
@@ -229,7 +278,7 @@ int fsverity_hash_page(const struct merkle_tree_params *params,
*
* Return: 0 on success, -errno on failure
*/
-int fsverity_hash_buffer(const struct fsverity_hash_alg *alg,
+int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
const void *data, size_t size, u8 *out)
{
struct ahash_request *req;
@@ -237,9 +286,8 @@ int fsverity_hash_buffer(const struct fsverity_hash_alg *alg,
DECLARE_CRYPTO_WAIT(wait);
int err;
- req = ahash_request_alloc(alg->tfm, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ /* This allocation never fails, since it's mempool-backed. */
+ req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
sg_init_one(&sg, data, size);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
@@ -249,7 +297,7 @@ int fsverity_hash_buffer(const struct fsverity_hash_alg *alg,
err = crypto_wait_req(crypto_ahash_digest(req), &wait);
- ahash_request_free(req);
+ fsverity_free_hash_request(alg, req);
return err;
}
diff --git a/fs/verity/open.c b/fs/verity/open.c
index 63d1004b688c..c5fe6948e262 100644
--- a/fs/verity/open.c
+++ b/fs/verity/open.c
@@ -31,7 +31,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
unsigned int log_blocksize,
const u8 *salt, size_t salt_size)
{
- const struct fsverity_hash_alg *hash_alg;
+ struct fsverity_hash_alg *hash_alg;
int err;
u64 blocks;
u64 offset;
@@ -102,6 +102,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
/* temporarily using level_start[] to store blocks in level */
params->level_start[params->num_levels++] = blocks;
}
+ params->level0_blocks = params->level_start[0];
/* Compute the starting block of each level */
offset = 0;
@@ -126,7 +127,7 @@ out_err:
* Compute the file measurement by hashing the fsverity_descriptor excluding the
* signature and with the sig_size field set to 0.
*/
-static int compute_file_measurement(const struct fsverity_hash_alg *hash_alg,
+static int compute_file_measurement(struct fsverity_hash_alg *hash_alg,
struct fsverity_descriptor *desc,
u8 *measurement)
{
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index 3e8f2de44667..e0cb62da3864 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -84,7 +84,8 @@ static inline int cmp_hashes(const struct fsverity_info *vi,
* Return: true if the page is valid, else false.
*/
static bool verify_page(struct inode *inode, const struct fsverity_info *vi,
- struct ahash_request *req, struct page *data_page)
+ struct ahash_request *req, struct page *data_page,
+ unsigned long level0_ra_pages)
{
const struct merkle_tree_params *params = &vi->tree_params;
const unsigned int hsize = params->digest_size;
@@ -117,8 +118,8 @@ static bool verify_page(struct inode *inode, const struct fsverity_info *vi,
pr_debug_ratelimited("Level %d: hindex=%lu, hoffset=%u\n",
level, hindex, hoffset);
- hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
- hindex);
+ hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode, hindex,
+ level == 0 ? level0_ra_pages : 0);
if (IS_ERR(hpage)) {
err = PTR_ERR(hpage);
fsverity_err(inode,
@@ -191,13 +192,12 @@ bool fsverity_verify_page(struct page *page)
struct ahash_request *req;
bool valid;
- req = ahash_request_alloc(vi->tree_params.hash_alg->tfm, GFP_NOFS);
- if (unlikely(!req))
- return false;
+ /* This allocation never fails, since it's mempool-backed. */
+ req = fsverity_alloc_hash_request(vi->tree_params.hash_alg, GFP_NOFS);
- valid = verify_page(inode, vi, req, page);
+ valid = verify_page(inode, vi, req, page, 0);
- ahash_request_free(req);
+ fsverity_free_hash_request(vi->tree_params.hash_alg, req);
return valid;
}
@@ -222,25 +222,42 @@ void fsverity_verify_bio(struct bio *bio)
{
struct inode *inode = bio_first_page_all(bio)->mapping->host;
const struct fsverity_info *vi = inode->i_verity_info;
+ const struct merkle_tree_params *params = &vi->tree_params;
struct ahash_request *req;
struct bio_vec *bv;
struct bvec_iter_all iter_all;
-
- req = ahash_request_alloc(vi->tree_params.hash_alg->tfm, GFP_NOFS);
- if (unlikely(!req)) {
+ unsigned long max_ra_pages = 0;
+
+ /* This allocation never fails, since it's mempool-backed. */
+ req = fsverity_alloc_hash_request(params->hash_alg, GFP_NOFS);
+
+ if (bio->bi_opf & REQ_RAHEAD) {
+ /*
+ * If this bio is for data readahead, then we also do readahead
+ * of the first (largest) level of the Merkle tree. Namely,
+ * when a Merkle tree page is read, we also try to piggy-back on
+ * some additional pages -- up to 1/4 the number of data pages.
+ *
+ * This improves sequential read performance, as it greatly
+ * reduces the number of I/O requests made to the Merkle tree.
+ */
bio_for_each_segment_all(bv, bio, iter_all)
- SetPageError(bv->bv_page);
- return;
+ max_ra_pages++;
+ max_ra_pages /= 4;
}
bio_for_each_segment_all(bv, bio, iter_all) {
struct page *page = bv->bv_page;
+ unsigned long level0_index = page->index >> params->log_arity;
+ unsigned long level0_ra_pages =
+ min(max_ra_pages, params->level0_blocks - level0_index);
- if (!PageError(page) && !verify_page(inode, vi, req, page))
+ if (!PageError(page) &&
+ !verify_page(inode, vi, req, page, level0_ra_pages))
SetPageError(page);
}
- ahash_request_free(req);
+ fsverity_free_hash_request(params->hash_alg, req);
}
EXPORT_SYMBOL_GPL(fsverity_verify_bio);
#endif /* CONFIG_BLOCK */
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 14fbdf22b7e7..08d6beb54f8c 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -23,25 +23,28 @@
#include "xfs_ag_resv.h"
#include "xfs_health.h"
-static struct xfs_buf *
+static int
xfs_get_aghdr_buf(
struct xfs_mount *mp,
xfs_daddr_t blkno,
size_t numblks,
+ struct xfs_buf **bpp,
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
+ int error;
- bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0);
- if (!bp)
- return NULL;
+ error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
+ if (error)
+ return error;
xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
bp->b_bn = blkno;
bp->b_maps[0].bm_bn = blkno;
bp->b_ops = ops;
- return bp;
+ *bpp = bp;
+ return 0;
}
static inline bool is_log_ag(struct xfs_mount *mp, struct aghdr_init_data *id)
@@ -340,13 +343,13 @@ xfs_ag_init_hdr(
struct aghdr_init_data *id,
aghdr_init_work_f work,
const struct xfs_buf_ops *ops)
-
{
struct xfs_buf *bp;
+ int error;
- bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, ops);
- if (!bp)
- return -ENOMEM;
+ error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
+ if (error)
+ return error;
(*work)(mp, bp, id);
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index fc93fd88ec89..d8053bc96c4d 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1070,11 +1070,11 @@ xfs_alloc_ag_vextent_small(
if (args->datatype & XFS_ALLOC_USERDATA) {
struct xfs_buf *bp;
- bp = xfs_btree_get_bufs(args->mp, args->tp, args->agno, fbno);
- if (XFS_IS_CORRUPT(args->mp, !bp)) {
- error = -EFSCORRUPTED;
+ error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
+ XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
+ args->mp->m_bsize, 0, &bp);
+ if (error)
goto error;
- }
xfs_trans_binval(args->tp, bp);
}
*fbnop = args->agbno = fbno;
@@ -2347,9 +2347,11 @@ xfs_free_agfl_block(
if (error)
return error;
- bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno);
- if (XFS_IS_CORRUPT(tp->t_mountp, !bp))
- return -EFSCORRUPTED;
+ error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
+ XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
+ tp->t_mountp->m_bsize, 0, &bp);
+ if (error)
+ return error;
xfs_trans_binval(tp, bp);
return 0;
@@ -2500,12 +2502,11 @@ xfs_alloc_fix_freelist(
if (!pag->pagf_init) {
error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
- if (error)
+ if (error) {
+ /* Couldn't lock the AGF so skip this AG. */
+ if (error == -EAGAIN)
+ error = 0;
goto out_no_agbp;
- if (!pag->pagf_init) {
- ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
- ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
- goto out_agbp_relse;
}
}
@@ -2531,11 +2532,10 @@ xfs_alloc_fix_freelist(
*/
if (!agbp) {
error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
- if (error)
- goto out_no_agbp;
- if (!agbp) {
- ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
- ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
+ if (error) {
+ /* Couldn't lock the AGF so skip this AG. */
+ if (error == -EAGAIN)
+ error = 0;
goto out_no_agbp;
}
}
@@ -2766,11 +2766,10 @@ xfs_alloc_pagf_init(
xfs_buf_t *bp;
int error;
- if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
- return error;
- if (bp)
+ error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp);
+ if (!error)
xfs_trans_brelse(tp, bp);
- return 0;
+ return error;
}
/*
@@ -2956,14 +2955,11 @@ xfs_read_agf(
trace_xfs_read_agf(mp, agno);
ASSERT(agno != NULLAGNUMBER);
- error = xfs_trans_read_buf(
- mp, tp, mp->m_ddev_targp,
+ error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
if (error)
return error;
- if (!*bpp)
- return 0;
ASSERT(!(*bpp)->b_error);
xfs_buf_set_ref(*bpp, XFS_AGF_REF);
@@ -2987,14 +2983,15 @@ xfs_alloc_read_agf(
trace_xfs_alloc_read_agf(mp, agno);
+ /* We don't support trylock when freeing. */
+ ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
+ (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
ASSERT(agno != NULLAGNUMBER);
error = xfs_read_agf(mp, tp, agno,
(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
bpp);
if (error)
return error;
- if (!*bpp)
- return 0;
ASSERT(!(*bpp)->b_error);
agf = XFS_BUF_TO_AGF(*bpp);
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 0d7fcc983b3d..e6149720ce02 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -62,6 +62,7 @@ xfs_attr_args_init(
struct xfs_da_args *args,
struct xfs_inode *dp,
const unsigned char *name,
+ size_t namelen,
int flags)
{
@@ -74,7 +75,7 @@ xfs_attr_args_init(
args->dp = dp;
args->flags = flags;
args->name = name;
- args->namelen = strlen((const char *)name);
+ args->namelen = namelen;
if (args->namelen >= MAXNAMELEN)
return -EFAULT; /* match IRIX behaviour */
@@ -139,6 +140,7 @@ int
xfs_attr_get(
struct xfs_inode *ip,
const unsigned char *name,
+ size_t namelen,
unsigned char **value,
int *valuelenp,
int flags)
@@ -154,7 +156,7 @@ xfs_attr_get(
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
- error = xfs_attr_args_init(&args, ip, name, flags);
+ error = xfs_attr_args_init(&args, ip, name, namelen, flags);
if (error)
return error;
@@ -338,6 +340,7 @@ int
xfs_attr_set(
struct xfs_inode *dp,
const unsigned char *name,
+ size_t namelen,
unsigned char *value,
int valuelen,
int flags)
@@ -353,7 +356,7 @@ xfs_attr_set(
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return -EIO;
- error = xfs_attr_args_init(&args, dp, name, flags);
+ error = xfs_attr_args_init(&args, dp, name, namelen, flags);
if (error)
return error;
@@ -442,6 +445,7 @@ int
xfs_attr_remove(
struct xfs_inode *dp,
const unsigned char *name,
+ size_t namelen,
int flags)
{
struct xfs_mount *mp = dp->i_mount;
@@ -453,7 +457,7 @@ xfs_attr_remove(
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return -EIO;
- error = xfs_attr_args_init(&args, dp, name, flags);
+ error = xfs_attr_args_init(&args, dp, name, namelen, flags);
if (error)
return error;
@@ -1007,7 +1011,7 @@ restart:
* The INCOMPLETE flag means that we will find the "old"
* attr, not the "new" one.
*/
- args->flags |= XFS_ATTR_INCOMPLETE;
+ args->op_flags |= XFS_DA_OP_INCOMPLETE;
state = xfs_da_state_alloc();
state->args = args;
state->mp = mp;
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 94badfa1743e..4243b2272642 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -26,7 +26,7 @@ struct xfs_attr_list_context;
*========================================================================*/
-#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */
+#define ATTR_DONTFOLLOW 0x0001 /* -- ignored, from IRIX -- */
#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
#define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */
#define ATTR_SECURE 0x0008 /* use attrs in security namespace */
@@ -37,7 +37,10 @@ struct xfs_attr_list_context;
#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
-#define ATTR_ALLOC 0x8000 /* allocate xattr buffer on demand */
+#define ATTR_ALLOC 0x8000 /* [kernel] allocate xattr buffer on demand */
+
+#define ATTR_KERNEL_FLAGS \
+ (ATTR_KERNOTIME | ATTR_KERNOVAL | ATTR_INCOMPLETE | ATTR_ALLOC)
#define XFS_ATTR_FLAGS \
{ ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
@@ -145,11 +148,13 @@ int xfs_attr_list_int(struct xfs_attr_list_context *);
int xfs_inode_hasattr(struct xfs_inode *ip);
int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args);
int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
- unsigned char **value, int *valuelenp, int flags);
+ size_t namelen, unsigned char **value, int *valuelenp,
+ int flags);
int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
- unsigned char *value, int valuelen, int flags);
+ size_t namelen, unsigned char *value, int valuelen, int flags);
int xfs_attr_set_args(struct xfs_da_args *args);
-int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
+int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name,
+ size_t namelen, int flags);
int xfs_attr_remove_args(struct xfs_da_args *args);
int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
int flags, struct attrlist_cursor_kern *cursor);
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 08d4b10ae2d5..fed537a4353d 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -2403,8 +2403,8 @@ xfs_attr3_leaf_lookup_int(
* If we are looking for INCOMPLETE entries, show only those.
* If we are looking for complete entries, show only those.
*/
- if ((args->flags & XFS_ATTR_INCOMPLETE) !=
- (entry->flags & XFS_ATTR_INCOMPLETE)) {
+ if (!!(args->op_flags & XFS_DA_OP_INCOMPLETE) !=
+ !!(entry->flags & XFS_ATTR_INCOMPLETE)) {
continue;
}
if (entry->flags & XFS_ATTR_LOCAL) {
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index f4a188e28b7b..73615b1dd1a8 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -39,15 +39,6 @@ struct xfs_attr3_icleaf_hdr {
} freemap[XFS_ATTR_LEAF_MAPSIZE];
};
-/*
- * Used to keep a list of "remote value" extents when unlinking an inode.
- */
-typedef struct xfs_attr_inactive_list {
- xfs_dablk_t valueblk; /* block number of value bytes */
- int valuelen; /* number of bytes in value */
-} xfs_attr_inactive_list_t;
-
-
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index a6ef5df42669..8b7f74b3bea2 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -26,6 +26,23 @@
#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
/*
+ * Remote Attribute Values
+ * =======================
+ *
+ * Remote extended attribute values are conceptually simple -- they're written
+ * to data blocks mapped by an inode's attribute fork, and they have an upper
+ * size limit of 64k. Setting a value does not involve the XFS log.
+ *
+ * However, on a v5 filesystem, maximally sized remote attr values require one
+ * block more than 64k worth of space to hold both the remote attribute value
+ * and the per-block value headers (64 bytes). On a 4k block filesystem this
+ * results in a 68k buffer;
+ * on a 64k block filesystem, this would be a 128k buffer. Note that the log
+ * format can only handle a dirty buffer of XFS_MAX_BLOCKSIZE length (64k).
+ * Therefore, we /must/ ensure that remote attribute value buffers never touch
+ * the logging system and therefore never have a log item.
+ */
+
+/*
* Each contiguous block has a header, so it is not just a simple attribute
* length to FSB conversion.
*/
@@ -401,17 +418,15 @@ xfs_attr_rmtval_get(
(map[i].br_startblock != HOLESTARTBLOCK));
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
- error = xfs_trans_read_buf(mp, args->trans,
- mp->m_ddev_targp,
- dblkno, dblkcnt, 0, &bp,
- &xfs_attr3_rmt_buf_ops);
+ error = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt,
+ 0, &bp, &xfs_attr3_rmt_buf_ops);
if (error)
return error;
error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
&offset, &valuelen,
&dst);
- xfs_trans_brelse(args->trans, bp);
+ xfs_buf_relse(bp);
if (error)
return error;
@@ -530,9 +545,9 @@ xfs_attr_rmtval_set(
dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
- bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt);
- if (!bp)
- return -ENOMEM;
+ error = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, &bp);
+ if (error)
+ return error;
bp->b_ops = &xfs_attr3_rmt_buf_ops;
xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
@@ -552,6 +567,33 @@ xfs_attr_rmtval_set(
return 0;
}
+/* Mark stale any incore buffers for the remote value. */
+int
+xfs_attr_rmtval_stale(
+ struct xfs_inode *ip,
+ struct xfs_bmbt_irec *map,
+ xfs_buf_flags_t incore_flags)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_buf *bp;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+ if (XFS_IS_CORRUPT(mp, map->br_startblock == DELAYSTARTBLOCK) ||
+ XFS_IS_CORRUPT(mp, map->br_startblock == HOLESTARTBLOCK))
+ return -EFSCORRUPTED;
+
+ bp = xfs_buf_incore(mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp, map->br_startblock),
+ XFS_FSB_TO_BB(mp, map->br_blockcount), incore_flags);
+ if (bp) {
+ xfs_buf_stale(bp);
+ xfs_buf_relse(bp);
+ }
+
+ return 0;
+}
+
/*
* Remove the value associated with an attribute by deleting the
* out-of-line buffer that it is stored on.
@@ -560,7 +602,6 @@ int
xfs_attr_rmtval_remove(
struct xfs_da_args *args)
{
- struct xfs_mount *mp = args->dp->i_mount;
xfs_dablk_t lblkno;
int blkcnt;
int error;
@@ -575,9 +616,6 @@ xfs_attr_rmtval_remove(
blkcnt = args->rmtblkcnt;
while (blkcnt > 0) {
struct xfs_bmbt_irec map;
- struct xfs_buf *bp;
- xfs_daddr_t dblkno;
- int dblkcnt;
int nmap;
/*
@@ -588,22 +626,11 @@ xfs_attr_rmtval_remove(
blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
if (error)
return error;
- ASSERT(nmap == 1);
- ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
- (map.br_startblock != HOLESTARTBLOCK));
-
- dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
- dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
-
- /*
- * If the "remote" value is in the cache, remove it.
- */
- bp = xfs_buf_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
- if (bp) {
- xfs_buf_stale(bp);
- xfs_buf_relse(bp);
- bp = NULL;
- }
+ if (XFS_IS_CORRUPT(args->dp->i_mount, nmap != 1))
+ return -EFSCORRUPTED;
+ error = xfs_attr_rmtval_stale(args->dp, &map, XBF_TRYLOCK);
+ if (error)
+ return error;
lblkno += map.br_blockcount;
blkcnt -= map.br_blockcount;
diff --git a/fs/xfs/libxfs/xfs_attr_remote.h b/fs/xfs/libxfs/xfs_attr_remote.h
index 9d20b66ad379..6fb4572845ce 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.h
+++ b/fs/xfs/libxfs/xfs_attr_remote.h
@@ -11,5 +11,7 @@ int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
int xfs_attr_rmtval_set(struct xfs_da_args *args);
int xfs_attr_rmtval_remove(struct xfs_da_args *args);
+int xfs_attr_rmtval_stale(struct xfs_inode *ip, struct xfs_bmbt_irec *map,
+ xfs_buf_flags_t incore_flags);
#endif /* __XFS_ATTR_REMOTE_H__ */
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 4c2e046fbfad..9a6d7a84689a 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -730,11 +730,11 @@ xfs_bmap_extents_to_btree(
cur->bc_private.b.allocated++;
ip->i_d.di_nblocks++;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
- abp = xfs_btree_get_bufl(mp, tp, args.fsbno);
- if (XFS_IS_CORRUPT(mp, !abp)) {
- error = -EFSCORRUPTED;
+ error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp, args.fsbno),
+ mp->m_bsize, 0, &abp);
+ if (error)
goto out_unreserve_dquot;
- }
/*
* Fill in the child block.
@@ -878,7 +878,11 @@ xfs_bmap_local_to_extents(
ASSERT(args.fsbno != NULLFSBLOCK);
ASSERT(args.len == 1);
tp->t_firstblock = args.fsbno;
- bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno);
+ error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(args.mp, args.fsbno),
+ args.mp->m_bsize, 0, &bp);
+ if (error)
+ goto done;
/*
* Initialize the block, copy the data and log the remote buffer.
@@ -3307,11 +3311,12 @@ xfs_bmap_longest_free_extent(
pag = xfs_perag_get(mp, ag);
if (!pag->pagf_init) {
error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
- if (error)
- goto out;
-
- if (!pag->pagf_init) {
- *notinit = 1;
+ if (error) {
+ /* Couldn't lock the AGF, so skip this AG. */
+ if (error == -EAGAIN) {
+ *notinit = 1;
+ error = 0;
+ }
goto out;
}
}
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index e2cc98931552..fd300dc93ca4 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -679,42 +679,6 @@ xfs_btree_get_block(
}
/*
- * Get a buffer for the block, return it with no data read.
- * Long-form addressing.
- */
-xfs_buf_t * /* buffer for fsbno */
-xfs_btree_get_bufl(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_fsblock_t fsbno) /* file system block number */
-{
- xfs_daddr_t d; /* real disk block address */
-
- ASSERT(fsbno != NULLFSBLOCK);
- d = XFS_FSB_TO_DADDR(mp, fsbno);
- return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, 0);
-}
-
-/*
- * Get a buffer for the block, return it with no data read.
- * Short-form addressing.
- */
-xfs_buf_t * /* buffer for agno/agbno */
-xfs_btree_get_bufs(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno) /* allocation group block number */
-{
- xfs_daddr_t d; /* real disk block address */
-
- ASSERT(agno != NULLAGNUMBER);
- ASSERT(agbno != NULLAGBLOCK);
- d = XFS_AGB_TO_DADDR(mp, agno, agbno);
- return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, 0);
-}
-
-/*
* Change the cursor to point to the first record at the given level.
* Other levels are unaffected.
*/
@@ -1270,11 +1234,10 @@ xfs_btree_get_buf_block(
error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
if (error)
return error;
- *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
- mp->m_bsize, 0);
-
- if (!*bpp)
- return -ENOMEM;
+ error = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, mp->m_bsize,
+ 0, bpp);
+ if (error)
+ return error;
(*bpp)->b_ops = cur->bc_ops->buf_ops;
*block = XFS_BUF_TO_BLOCK(*bpp);
@@ -2389,8 +2352,6 @@ xfs_btree_lshift(
XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
if (level > 0) {
/* It's a nonleaf. operate on keys and ptrs */
- int i; /* loop index */
-
for (i = 0; i < rrecs; i++) {
error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level);
if (error)
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index fb9b2121c628..3eff7c321d43 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -297,27 +297,6 @@ xfs_btree_dup_cursor(
xfs_btree_cur_t **ncur);/* output cursor */
/*
- * Get a buffer for the block, return it with no data read.
- * Long-form addressing.
- */
-struct xfs_buf * /* buffer for fsbno */
-xfs_btree_get_bufl(
- struct xfs_mount *mp, /* file system mount point */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_fsblock_t fsbno); /* file system block number */
-
-/*
- * Get a buffer for the block, return it with no data read.
- * Short-form addressing.
- */
-struct xfs_buf * /* buffer for agno/agbno */
-xfs_btree_get_bufs(
- struct xfs_mount *mp, /* file system mount point */
- struct xfs_trans *tp, /* transaction pointer */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno); /* allocation group block number */
-
-/*
* Compute first and last byte offsets for the fields given.
* Interprets the offsets table, which contains struct field offsets.
*/
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 8c3eafe280ed..875e04f82541 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2591,13 +2591,9 @@ xfs_da_get_buf(
if (error || nmap == 0)
goto out_free;
- bp = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0);
- error = bp ? bp->b_error : -EIO;
- if (error) {
- if (bp)
- xfs_trans_brelse(tp, bp);
+ error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0, &bp);
+ if (error)
goto out_free;
- }
*bpp = bp;
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index e16610d1c14f..0f4fbb0889ff 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -89,6 +89,7 @@ typedef struct xfs_da_args {
#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */
#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */
#define XFS_DA_OP_ALLOCVAL 0x0020 /* lookup to alloc buffer if found */
+#define XFS_DA_OP_INCOMPLETE 0x0040 /* lookup INCOMPLETE attr keys */
#define XFS_DA_OP_FLAGS \
{ XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \
@@ -96,7 +97,8 @@ typedef struct xfs_da_args {
{ XFS_DA_OP_ADDNAME, "ADDNAME" }, \
{ XFS_DA_OP_OKNOENT, "OKNOENT" }, \
{ XFS_DA_OP_CILOOKUP, "CILOOKUP" }, \
- { XFS_DA_OP_ALLOCVAL, "ALLOCVAL" }
+ { XFS_DA_OP_ALLOCVAL, "ALLOCVAL" }, \
+ { XFS_DA_OP_INCOMPLETE, "INCOMPLETE" }
/*
* Storage for holding state during Btree searches and split/join ops.
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 3dee33043e09..734837a9b51a 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -217,7 +217,7 @@ typedef struct xfs_dir2_sf_entry {
* A 64-bit or 32-bit inode number follows here, at a variable offset
* after the name.
*/
-} xfs_dir2_sf_entry_t;
+} __packed xfs_dir2_sf_entry_t;
static inline int xfs_dir2_sf_hdr_size(int i8count)
{
@@ -683,8 +683,6 @@ struct xfs_attr3_leafblock {
/*
* Flags used in the leaf_entry[i].flags field.
- * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
- * on the system call, they are "or"ed together for various operations.
*/
#define XFS_ATTR_LOCAL_BIT 0 /* attr is stored locally */
#define XFS_ATTR_ROOT_BIT 1 /* limit access to trusted attrs */
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 1b7dcbae051c..77e9fa385980 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -1540,6 +1540,13 @@ typedef struct xfs_bmdr_block {
#define BMBT_BLOCKCOUNT_BITLEN 21
#define BMBT_STARTOFF_MASK ((1ULL << BMBT_STARTOFF_BITLEN) - 1)
+#define BMBT_BLOCKCOUNT_MASK ((1ULL << BMBT_BLOCKCOUNT_BITLEN) - 1)
+
+/*
+ * bmbt records have a file offset (block) field that is 54 bits wide, so this
+ * is the largest xfs_fileoff_t that we ever expect to see.
+ */
+#define XFS_MAX_FILEOFF (BMBT_STARTOFF_MASK + BMBT_BLOCKCOUNT_MASK)
typedef struct xfs_bmbt_rec {
__be64 l0, l1;
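
Spelling out the arithmetic behind XFS_MAX_FILEOFF with the bit widths defined above (a standalone illustration, not kernel code):

        #include <assert.h>

        #define BMBT_STARTOFF_BITLEN    54
        #define BMBT_BLOCKCOUNT_BITLEN  21
        #define BMBT_STARTOFF_MASK      ((1ULL << BMBT_STARTOFF_BITLEN) - 1)
        #define BMBT_BLOCKCOUNT_MASK    ((1ULL << BMBT_BLOCKCOUNT_BITLEN) - 1)
        #define XFS_MAX_FILEOFF         (BMBT_STARTOFF_MASK + BMBT_BLOCKCOUNT_MASK)

        int main(void)
        {
                /* largest startoff (2^54 - 1) plus largest blockcount
                 * (2^21 - 1): the highest block offset a bmbt record
                 * could ever put in play. */
                assert(XFS_MAX_FILEOFF == 0x400000001ffffeULL);
                return 0;
        }
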
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 5b759af4d165..bf161e930f1d 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -276,6 +276,7 @@ xfs_ialloc_inode_init(
int i, j;
xfs_daddr_t d;
xfs_ino_t ino = 0;
+ int error;
/*
* Loop over the new block(s), filling in the inodes. For small block
@@ -327,12 +328,11 @@ xfs_ialloc_inode_init(
*/
d = XFS_AGB_TO_DADDR(mp, agno, agbno +
(j * M_IGEO(mp)->blocks_per_cluster));
- fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
- mp->m_bsize *
- M_IGEO(mp)->blocks_per_cluster,
- XBF_UNMAPPED);
- if (!fbuf)
- return -ENOMEM;
+ error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+ mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
+ XBF_UNMAPPED, &fbuf);
+ if (error)
+ return error;
/* Initialize the inode buffers and log them appropriately. */
fbuf->b_ops = &xfs_inode_buf_ops;
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 8ef31d71a9c7..9bac0d2e56dc 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -462,11 +462,20 @@ static inline uint xfs_log_dinode_size(int version)
#define XFS_BLF_GDQUOT_BUF (1<<4)
/*
- * This is the structure used to lay out a buf log item in the
- * log. The data map describes which 128 byte chunks of the buffer
- * have been logged.
- */
-#define XFS_BLF_DATAMAP_SIZE ((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD)
+ * This is the structure used to lay out a buf log item in the log. The data
+ * map describes which 128 byte chunks of the buffer have been logged.
+ *
+ * The placement of blf_map_size causes blf_data_map to start at an odd
+ * multiple of sizeof(unsigned int) offset within the struct. Because the data
+ * bitmap size will always be an even number, the end of the data_map (and
+ * therefore the structure) will also be at an odd multiple of sizeof(unsigned
+ * int). Some 64-bit compilers will insert padding at the end of the struct to
+ * ensure 64-bit alignment of blf_blkno, but 32-bit ones will not. Therefore,
+ * XFS_BLF_DATAMAP_SIZE must be an odd number to make the padding explicit and
+ * keep the structure size consistent between 32-bit and 64-bit platforms.
+ */
+#define __XFS_BLF_DATAMAP_SIZE ((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD)
+#define XFS_BLF_DATAMAP_SIZE (__XFS_BLF_DATAMAP_SIZE + 1)
typedef struct xfs_buf_log_format {
unsigned short blf_type; /* buf log item type indicator */
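
Concretely, with the usual values XFS_MAX_BLOCKSIZE = 65536, XFS_BLF_CHUNK = 128 and NBWORD = 32, __XFS_BLF_DATAMAP_SIZE is (65536 / 128) / 32 = 16 words, so the padded bitmap becomes 17 words. A standalone sketch of the resulting layout (field names mirror the kernel struct, which also has blf_size and blf_flags fields not visible in the hunk above):

        #include <stddef.h>
        #include <stdint.h>

        struct buf_log_format_demo {
                uint16_t blf_type;              /* buf log item type indicator */
                uint16_t blf_size;              /* size of this item */
                uint16_t blf_flags;             /* misc state */
                uint16_t blf_len;               /* number of blocks in this buf */
                int64_t  blf_blkno;             /* starting blkno of this buf */
                uint32_t blf_map_size;          /* used size of data bitmap in words */
                uint32_t blf_data_map[17];      /* 16 data words + 1 pad word */
        };

        /* the bitmap starts at byte 20, an odd multiple of sizeof(unsigned int) */
        _Static_assert(offsetof(struct buf_log_format_demo, blf_data_map) == 20,
                       "bitmap offset");
        /* 20 + 17 * 4 = 88 bytes on 32-bit and 64-bit ABIs alike; with only
         * 16 words it would be 84, which 64-bit compilers pad to 88. */
        _Static_assert(sizeof(struct buf_log_format_demo) == 88, "struct size");

The series adds a matching XFS_CHECK_STRUCT_SIZE(struct xfs_buf_log_format, 88) to xfs_ondisk.h further down in this diff.
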
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index d7d702ee4d1a..6e1665f2cb67 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1177,8 +1177,6 @@ xfs_refcount_finish_one(
XFS_ALLOC_FLAG_FREEING, &agbp);
if (error)
return error;
- if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
- return -EFSCORRUPTED;
rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
if (!rcur) {
@@ -1718,10 +1716,6 @@ xfs_refcount_recover_cow_leftovers(
error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
if (error)
goto out_trans;
- if (!agbp) {
- error = -ENOMEM;
- goto out_trans;
- }
cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
/* Find all the leftover CoW staging extents. */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 0ac69751fe85..2f60fc3c99a0 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -985,9 +985,9 @@ xfs_update_secondary_sbs(
for (agno = 1; agno < mp->m_sb.sb_agcount; agno++) {
struct xfs_buf *bp;
- bp = xfs_buf_get(mp->m_ddev_targp,
+ error = xfs_buf_get(mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
- XFS_FSS_TO_BB(mp, 1));
+ XFS_FSS_TO_BB(mp, 1), &bp);
/*
* If we get an error reading or writing alternate superblocks,
* continue. xfs_repair chooses the "best" superblock based
@@ -995,12 +995,12 @@ xfs_update_secondary_sbs(
* superblocks un-updated than updated, and xfs_repair may
* pick them over the properly-updated primary.
*/
- if (!bp) {
+ if (error) {
xfs_warn(mp,
"error allocating secondary superblock for ag %d",
agno);
if (!saved_error)
- saved_error = -ENOMEM;
+ saved_error = error;
continue;
}
@@ -1185,13 +1185,14 @@ xfs_sb_get_secondary(
struct xfs_buf **bpp)
{
struct xfs_buf *bp;
+ int error;
ASSERT(agno != 0 && agno != NULLAGNUMBER);
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+ error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
- XFS_FSS_TO_BB(mp, 1), 0);
- if (!bp)
- return -ENOMEM;
+ XFS_FSS_TO_BB(mp, 1), 0, &bp);
+ if (error)
+ return error;
bp->b_ops = &xfs_sb_buf_ops;
xfs_buf_oneshot(bp);
*bpp = bp;
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 7a1a38b636a9..d5e6db9af434 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -659,8 +659,6 @@ xrep_agfl(
error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
if (error)
return error;
- if (!agf_bp)
- return -ENOMEM;
/*
* Make sure we have the AGFL buffer, as scrub might have decided it
@@ -735,8 +733,6 @@ xrep_agi_find_btrees(
error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
if (error)
return error;
- if (!agf_bp)
- return -ENOMEM;
/* Find the btree roots. */
error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index 7251c66a82c9..ec2064ed3c30 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -83,9 +83,6 @@ xchk_fscount_warmup(
error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
if (error)
break;
- error = -ENOMEM;
- if (!agf_bp || !agi_bp)
- break;
/*
* These are supposed to be initialized by the header read
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index b70a88bc975e..e489d7a8446a 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -341,13 +341,17 @@ xrep_init_btblock(
struct xfs_trans *tp = sc->tp;
struct xfs_mount *mp = sc->mp;
struct xfs_buf *bp;
+ int error;
trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), btnum);
ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno);
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, fsb),
- XFS_FSB_TO_BB(mp, 1), 0);
+ error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp, fsb), XFS_FSB_TO_BB(mp, 1), 0,
+ &bp);
+ if (error)
+ return error;
xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.agno);
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
@@ -542,8 +546,6 @@ xrep_reap_block(
error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf_bp);
if (error)
return error;
- if (!agf_bp)
- return -ENOMEM;
} else {
agf_bp = sc->sa.agf_bp;
}
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index 60c61d7052a8..c3422403b169 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -75,7 +75,6 @@ static inline xfs_extlen_t
xrep_calc_ag_resblks(
struct xfs_scrub *sc)
{
- ASSERT(!(sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR));
return 0;
}
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 3362bae28b46..096203119934 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -329,7 +329,7 @@ TRACE_EVENT(xchk_btree_op_error,
__field(int, level)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, bno)
- __field(int, ptr);
+ __field(int, ptr)
__field(int, error)
__field(void *, ret_ip)
),
@@ -414,7 +414,7 @@ TRACE_EVENT(xchk_btree_error,
__field(int, level)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, bno)
- __field(int, ptr);
+ __field(int, ptr)
__field(void *, ret_ip)
),
TP_fast_assign(
@@ -452,7 +452,7 @@ TRACE_EVENT(xchk_ifork_btree_error,
__field(int, level)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, bno)
- __field(int, ptr);
+ __field(int, ptr)
__field(void *, ret_ip)
),
TP_fast_assign(
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 91693fce34a8..cd743fad8478 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -145,7 +145,8 @@ xfs_get_acl(struct inode *inode, int type)
* go out to the disk.
*/
len = XFS_ACL_MAX_SIZE(ip->i_mount);
- error = xfs_attr_get(ip, ea_name, (unsigned char **)&xfs_acl, &len,
+ error = xfs_attr_get(ip, ea_name, strlen(ea_name),
+ (unsigned char **)&xfs_acl, &len,
ATTR_ALLOC | ATTR_ROOT);
if (error) {
/*
@@ -196,15 +197,17 @@ __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
len -= sizeof(struct xfs_acl_entry) *
(XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count);
- error = xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
- len, ATTR_ROOT);
+ error = xfs_attr_set(ip, ea_name, strlen(ea_name),
+ (unsigned char *)xfs_acl, len, ATTR_ROOT);
kmem_free(xfs_acl);
} else {
/*
* A NULL ACL argument means we want to remove the ACL.
*/
- error = xfs_attr_remove(ip, ea_name, ATTR_ROOT);
+ error = xfs_attr_remove(ip, ea_name,
+ strlen(ea_name),
+ ATTR_ROOT);
/*
* If the attribute didn't exist to start with that's fine.
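
As elsewhere in this series, the attr entry points now take an explicit name length, so callers compute strlen() once at the API boundary instead of deeper layers re-deriving it. A caller-side sketch of the new xfs_attr_get() signature (ip and ea_name as in the hunk above; value and valuelen are illustrative locals):

        unsigned char   *value;
        int             valuelen = XFS_ACL_MAX_SIZE(ip->i_mount);
        int             error;

        error = xfs_attr_get(ip, ea_name, strlen(ea_name),
                        &value, &valuelen, ATTR_ALLOC | ATTR_ROOT);
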
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 3a688eb5c5ae..58e937be24ce 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -587,7 +587,7 @@ xfs_dax_writepages(
xfs_iflags_clear(ip, XFS_ITRUNCATED);
return dax_writeback_mapping_range(mapping,
- xfs_inode_buftarg(ip)->bt_bdev, wbc);
+ xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}
STATIC sector_t
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 5ff49523d8ea..bbfa6ba84dcd 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -25,22 +25,18 @@
#include "xfs_error.h"
/*
- * Look at all the extents for this logical region,
- * invalidate any buffers that are incore/in transactions.
+ * Invalidate any incore buffers associated with this remote attribute value
+ * extent. We never log remote attribute value buffers, which means that they
+ * won't be attached to a transaction and are therefore safe to mark stale.
+ * The actual bunmapi will be taken care of later.
*/
STATIC int
-xfs_attr3_leaf_freextent(
- struct xfs_trans **trans,
+xfs_attr3_rmt_stale(
struct xfs_inode *dp,
xfs_dablk_t blkno,
int blkcnt)
{
struct xfs_bmbt_irec map;
- struct xfs_buf *bp;
- xfs_dablk_t tblkno;
- xfs_daddr_t dblkno;
- int tblkcnt;
- int dblkcnt;
int nmap;
int error;
@@ -48,47 +44,29 @@ xfs_attr3_leaf_freextent(
* Roll through the "value", invalidating the attribute value's
* blocks.
*/
- tblkno = blkno;
- tblkcnt = blkcnt;
- while (tblkcnt > 0) {
+ while (blkcnt > 0) {
/*
* Try to remember where we decided to put the value.
*/
nmap = 1;
- error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
+ error = xfs_bmapi_read(dp, (xfs_fileoff_t)blkno, blkcnt,
&map, &nmap, XFS_BMAPI_ATTRFORK);
- if (error) {
+ if (error)
return error;
- }
- ASSERT(nmap == 1);
- ASSERT(map.br_startblock != DELAYSTARTBLOCK);
+ if (XFS_IS_CORRUPT(dp->i_mount, nmap != 1))
+ return -EFSCORRUPTED;
/*
- * If it's a hole, these are already unmapped
- * so there's nothing to invalidate.
+ * Mark any incore buffers for the remote value as stale. We
+ * never log remote attr value buffers, so the buffer should be
+ * easy to kill.
*/
- if (map.br_startblock != HOLESTARTBLOCK) {
-
- dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
- map.br_startblock);
- dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
- map.br_blockcount);
- bp = xfs_trans_get_buf(*trans,
- dp->i_mount->m_ddev_targp,
- dblkno, dblkcnt, 0);
- if (!bp)
- return -ENOMEM;
- xfs_trans_binval(*trans, bp);
- /*
- * Roll to next transaction.
- */
- error = xfs_trans_roll_inode(trans, dp);
- if (error)
- return error;
- }
+ error = xfs_attr_rmtval_stale(dp, &map, 0);
+ if (error)
+ return error;
- tblkno += map.br_blockcount;
- tblkcnt -= map.br_blockcount;
+ blkno += map.br_blockcount;
+ blkcnt -= map.br_blockcount;
}
return 0;
@@ -102,86 +80,45 @@ xfs_attr3_leaf_freextent(
*/
STATIC int
xfs_attr3_leaf_inactive(
- struct xfs_trans **trans,
- struct xfs_inode *dp,
- struct xfs_buf *bp)
+ struct xfs_trans **trans,
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
{
- struct xfs_attr_leafblock *leaf;
- struct xfs_attr3_icleaf_hdr ichdr;
- struct xfs_attr_leaf_entry *entry;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_mount *mp = bp->b_mount;
+ struct xfs_attr_leafblock *leaf = bp->b_addr;
+ struct xfs_attr_leaf_entry *entry;
struct xfs_attr_leaf_name_remote *name_rmt;
- struct xfs_attr_inactive_list *list;
- struct xfs_attr_inactive_list *lp;
- int error;
- int count;
- int size;
- int tmp;
- int i;
- struct xfs_mount *mp = bp->b_mount;
+ int error = 0;
+ int i;
- leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
/*
- * Count the number of "remote" value extents.
+ * Find the remote value extents for this leaf and invalidate their
+ * incore buffers.
*/
- count = 0;
entry = xfs_attr3_leaf_entryp(leaf);
for (i = 0; i < ichdr.count; entry++, i++) {
- if (be16_to_cpu(entry->nameidx) &&
- ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
- name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
- if (name_rmt->valueblk)
- count++;
- }
- }
+ int blkcnt;
- /*
- * If there are no "remote" values, we're done.
- */
- if (count == 0) {
- xfs_trans_brelse(*trans, bp);
- return 0;
- }
-
- /*
- * Allocate storage for a list of all the "remote" value extents.
- */
- size = count * sizeof(xfs_attr_inactive_list_t);
- list = kmem_alloc(size, 0);
-
- /*
- * Identify each of the "remote" value extents.
- */
- lp = list;
- entry = xfs_attr3_leaf_entryp(leaf);
- for (i = 0; i < ichdr.count; entry++, i++) {
- if (be16_to_cpu(entry->nameidx) &&
- ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
- name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
- if (name_rmt->valueblk) {
- lp->valueblk = be32_to_cpu(name_rmt->valueblk);
- lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
- be32_to_cpu(name_rmt->valuelen));
- lp++;
- }
- }
- }
- xfs_trans_brelse(*trans, bp); /* unlock for trans. in freextent() */
+ if (!entry->nameidx || (entry->flags & XFS_ATTR_LOCAL))
+ continue;
- /*
- * Invalidate each of the "remote" value extents.
- */
- error = 0;
- for (lp = list, i = 0; i < count; i++, lp++) {
- tmp = xfs_attr3_leaf_freextent(trans, dp,
- lp->valueblk, lp->valuelen);
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+ if (!name_rmt->valueblk)
+ continue;
- if (error == 0)
- error = tmp; /* save only the 1st errno */
+ blkcnt = xfs_attr3_rmt_blocks(dp->i_mount,
+ be32_to_cpu(name_rmt->valuelen));
+ error = xfs_attr3_rmt_stale(dp,
+ be32_to_cpu(name_rmt->valueblk), blkcnt);
+ if (error)
+ goto err;
}
- kmem_free(list);
+ xfs_trans_brelse(*trans, bp);
+err:
return error;
}
@@ -268,11 +205,12 @@ xfs_attr3_node_inactive(
/*
* Remove the subsidiary block from the cache and from the log.
*/
- child_bp = xfs_trans_get_buf(*trans, mp->m_ddev_targp,
+ error = xfs_trans_get_buf(*trans, mp->m_ddev_targp,
child_blkno,
- XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0);
- if (!child_bp)
- return -EIO;
+ XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0,
+ &child_bp);
+ if (error)
+ return error;
error = bp->b_error;
if (error) {
xfs_trans_brelse(*trans, child_bp);
@@ -361,10 +299,10 @@ xfs_attr3_root_inactive(
/*
* Invalidate the incore copy of the root block.
*/
- bp = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
- XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0);
- if (!bp)
- return -EIO;
+ error = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
+ XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0, &bp);
+ if (error)
+ return error;
error = bp->b_error;
if (error) {
xfs_trans_brelse(*trans, bp);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a0229c368e78..217e4f82a44a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -198,20 +198,22 @@ xfs_buf_free_maps(
}
}
-static struct xfs_buf *
+static int
_xfs_buf_alloc(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
- xfs_buf_flags_t flags)
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
{
struct xfs_buf *bp;
int error;
int i;
+ *bpp = NULL;
bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
if (unlikely(!bp))
- return NULL;
+ return -ENOMEM;
/*
* We don't want certain flags to appear in b_flags unless they are
@@ -239,7 +241,7 @@ _xfs_buf_alloc(
error = xfs_buf_get_maps(bp, nmaps);
if (error) {
kmem_cache_free(xfs_buf_zone, bp);
- return NULL;
+ return error;
}
bp->b_bn = map[0].bm_bn;
@@ -256,7 +258,8 @@ _xfs_buf_alloc(
XFS_STATS_INC(bp->b_mount, xb_create);
trace_xfs_buf_init(bp, _RET_IP_);
- return bp;
+ *bpp = bp;
+ return 0;
}
/*
@@ -682,53 +685,39 @@ xfs_buf_incore(
* cache hits, as metadata intensive workloads will see 3 orders of magnitude
* more hits than misses.
*/
-struct xfs_buf *
+int
xfs_buf_get_map(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
- xfs_buf_flags_t flags)
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
{
struct xfs_buf *bp;
struct xfs_buf *new_bp;
int error = 0;
+ *bpp = NULL;
error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
-
- switch (error) {
- case 0:
- /* cache hit */
+ if (!error)
goto found;
- case -EAGAIN:
- /* cache hit, trylock failure, caller handles failure */
- ASSERT(flags & XBF_TRYLOCK);
- return NULL;
- case -ENOENT:
- /* cache miss, go for insert */
- break;
- case -EFSCORRUPTED:
- default:
- /*
- * None of the higher layers understand failure types
- * yet, so return NULL to signal a fatal lookup error.
- */
- return NULL;
- }
+ if (error != -ENOENT)
+ return error;
- new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
- if (unlikely(!new_bp))
- return NULL;
+ error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
+ if (error)
+ return error;
error = xfs_buf_allocate_memory(new_bp, flags);
if (error) {
xfs_buf_free(new_bp);
- return NULL;
+ return error;
}
error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
if (error) {
xfs_buf_free(new_bp);
- return NULL;
+ return error;
}
if (bp != new_bp)
@@ -741,7 +730,7 @@ found:
xfs_warn(target->bt_mount,
"%s: failed to map pagesn", __func__);
xfs_buf_relse(bp);
- return NULL;
+ return error;
}
}
@@ -754,7 +743,8 @@ found:
XFS_STATS_INC(target->bt_mount, xb_get);
trace_xfs_buf_get(bp, flags, _RET_IP_);
- return bp;
+ *bpp = bp;
+ return 0;
}
STATIC int
@@ -806,46 +796,77 @@ xfs_buf_reverify(
return bp->b_error;
}
-xfs_buf_t *
+int
xfs_buf_read_map(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags,
- const struct xfs_buf_ops *ops)
+ struct xfs_buf **bpp,
+ const struct xfs_buf_ops *ops,
+ xfs_failaddr_t fa)
{
struct xfs_buf *bp;
+ int error;
flags |= XBF_READ;
+ *bpp = NULL;
- bp = xfs_buf_get_map(target, map, nmaps, flags);
- if (!bp)
- return NULL;
+ error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+ if (error)
+ return error;
trace_xfs_buf_read(bp, flags, _RET_IP_);
if (!(bp->b_flags & XBF_DONE)) {
+ /* Initiate the buffer read and wait. */
XFS_STATS_INC(target->bt_mount, xb_get_read);
bp->b_ops = ops;
- _xfs_buf_read(bp, flags);
- return bp;
+ error = _xfs_buf_read(bp, flags);
+
+ /* Readahead iodone already dropped the buffer, so exit. */
+ if (flags & XBF_ASYNC)
+ return 0;
+ } else {
+ /* Buffer already read; all we need to do is check it. */
+ error = xfs_buf_reverify(bp, ops);
+
+ /* Readahead already finished; drop the buffer and exit. */
+ if (flags & XBF_ASYNC) {
+ xfs_buf_relse(bp);
+ return 0;
+ }
+
+ /* We do not want read in the flags */
+ bp->b_flags &= ~XBF_READ;
+ ASSERT(bp->b_ops != NULL || ops == NULL);
}
- xfs_buf_reverify(bp, ops);
+ /*
+ * If we've had a read error, then the contents of the buffer are
+ * invalid and should not be used. To ensure that a followup read tries
+ * to pull the buffer from disk again, we clear the XBF_DONE flag and
+ * mark the buffer stale. This ensures that anyone who has a current
+ * reference to the buffer will interpret its contents correctly and
+ * future cache lookups will also treat it as an empty, uninitialised
+ * buffer.
+ */
+ if (error) {
+ if (!XFS_FORCED_SHUTDOWN(target->bt_mount))
+ xfs_buf_ioerror_alert(bp, fa);
- if (flags & XBF_ASYNC) {
- /*
- * Read ahead call which is already satisfied,
- * drop the buffer
- */
+ bp->b_flags &= ~XBF_DONE;
+ xfs_buf_stale(bp);
xfs_buf_relse(bp);
- return NULL;
+
+ /* bad CRC means corrupted metadata */
+ if (error == -EFSBADCRC)
+ error = -EFSCORRUPTED;
+ return error;
}
- /* We do not want read in the flags */
- bp->b_flags &= ~XBF_READ;
- ASSERT(bp->b_ops != NULL || ops == NULL);
- return bp;
+ *bpp = bp;
+ return 0;
}
/*
@@ -859,11 +880,14 @@ xfs_buf_readahead_map(
int nmaps,
const struct xfs_buf_ops *ops)
{
+ struct xfs_buf *bp;
+
if (bdi_read_congested(target->bt_bdev->bd_bdi))
return;
xfs_buf_read_map(target, map, nmaps,
- XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
+ XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
+ __this_address);
}
/*
@@ -880,12 +904,13 @@ xfs_buf_read_uncached(
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
+ int error;
*bpp = NULL;
- bp = xfs_buf_get_uncached(target, numblks, flags);
- if (!bp)
- return -ENOMEM;
+ error = xfs_buf_get_uncached(target, numblks, flags, &bp);
+ if (error)
+ return error;
/* set up the buffer for a read IO */
ASSERT(bp->b_map_count == 1);
@@ -896,7 +921,7 @@ xfs_buf_read_uncached(
xfs_buf_submit(bp);
if (bp->b_error) {
- int error = bp->b_error;
+ error = bp->b_error;
xfs_buf_relse(bp);
return error;
}
@@ -905,20 +930,23 @@ xfs_buf_read_uncached(
return 0;
}
-xfs_buf_t *
+int
xfs_buf_get_uncached(
struct xfs_buftarg *target,
size_t numblks,
- int flags)
+ int flags,
+ struct xfs_buf **bpp)
{
unsigned long page_count;
int error, i;
struct xfs_buf *bp;
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
+ *bpp = NULL;
+
/* flags might contain irrelevant bits, pass only what we care about */
- bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
- if (unlikely(bp == NULL))
+ error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
+ if (error)
goto fail;
page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
@@ -928,8 +956,10 @@ xfs_buf_get_uncached(
for (i = 0; i < page_count; i++) {
bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
- if (!bp->b_pages[i])
+ if (!bp->b_pages[i]) {
+ error = -ENOMEM;
goto fail_free_mem;
+ }
}
bp->b_flags |= _XBF_PAGES;
@@ -941,7 +971,8 @@ xfs_buf_get_uncached(
}
trace_xfs_buf_get_uncached(bp, _RET_IP_);
- return bp;
+ *bpp = bp;
+ return 0;
fail_free_mem:
while (--i >= 0)
@@ -951,7 +982,7 @@ xfs_buf_get_uncached(
xfs_buf_free_maps(bp);
kmem_cache_free(xfs_buf_zone, bp);
fail:
- return NULL;
+ return error;
}
/*
@@ -1205,10 +1236,10 @@ __xfs_buf_ioerror(
void
xfs_buf_ioerror_alert(
struct xfs_buf *bp,
- const char *func)
+ xfs_failaddr_t func)
{
xfs_alert(bp->b_mount,
-"metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
+"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
-bp->b_error);
}
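
With the ioerror alert, the stale-on-error handling and the EFSBADCRC-to-EFSCORRUPTED translation now centralized in xfs_buf_read_map(), a read-path caller collapses to the following sketch (this mirrors the xfs_symlink.c conversion later in this diff; d and byte_cnt stand in for a real disk address and length):

        struct xfs_buf  *bp;
        int             error;

        error = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0,
                        &bp, &xfs_symlink_buf_ops);
        if (error)
                return error;   /* alert, stale and release already done */
        /* ... copy data out of bp ... */
        xfs_buf_relse(bp);
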
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 56e081dd1d96..d79a1fe5d738 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -192,37 +192,40 @@ struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
xfs_daddr_t blkno, size_t numblks,
xfs_buf_flags_t flags);
-struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
- struct xfs_buf_map *map, int nmaps,
- xfs_buf_flags_t flags);
-struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
- struct xfs_buf_map *map, int nmaps,
- xfs_buf_flags_t flags,
- const struct xfs_buf_ops *ops);
+int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
+ int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
+int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
+ int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
+ const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
struct xfs_buf_map *map, int nmaps,
const struct xfs_buf_ops *ops);
-static inline struct xfs_buf *
+static inline int
xfs_buf_get(
struct xfs_buftarg *target,
xfs_daddr_t blkno,
- size_t numblks)
+ size_t numblks,
+ struct xfs_buf **bpp)
{
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
- return xfs_buf_get_map(target, &map, 1, 0);
+
+ return xfs_buf_get_map(target, &map, 1, 0, bpp);
}
-static inline struct xfs_buf *
+static inline int
xfs_buf_read(
struct xfs_buftarg *target,
xfs_daddr_t blkno,
size_t numblks,
xfs_buf_flags_t flags,
+ struct xfs_buf **bpp,
const struct xfs_buf_ops *ops)
{
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
- return xfs_buf_read_map(target, &map, 1, flags, ops);
+
+ return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
+ __builtin_return_address(0));
}
static inline void
@@ -236,8 +239,8 @@ xfs_buf_readahead(
return xfs_buf_readahead_map(target, &map, 1, ops);
}
-struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
- int flags);
+int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
+ struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
size_t numblks, int flags, struct xfs_buf **bpp,
const struct xfs_buf_ops *ops);
@@ -259,7 +262,7 @@ extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
-extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
+extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
extern int __xfs_buf_submit(struct xfs_buf *bp, bool);
static inline int xfs_buf_submit(struct xfs_buf *bp)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 3984779e5911..663810e6cd59 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -27,6 +27,23 @@ static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
+/* Is this log iovec plausibly large enough to contain the buffer log format? */
+bool
+xfs_buf_log_check_iovec(
+ struct xfs_log_iovec *iovec)
+{
+ struct xfs_buf_log_format *blfp = iovec->i_addr;
+ char *bmp_end;
+ char *item_end;
+
+ if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
+ return false;
+
+ item_end = (char *)iovec->i_addr + iovec->i_len;
+ bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
+ return bmp_end <= item_end;
+}
+
static inline int
xfs_buf_log_format_size(
struct xfs_buf_log_format *blfp)
@@ -688,7 +705,7 @@ static const struct xfs_item_ops xfs_buf_item_ops = {
.iop_push = xfs_buf_item_push,
};
-STATIC int
+STATIC void
xfs_buf_item_get_format(
struct xfs_buf_log_item *bip,
int count)
@@ -698,14 +715,11 @@ xfs_buf_item_get_format(
if (count == 1) {
bip->bli_formats = &bip->__bli_format;
- return 0;
+ return;
}
bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
0);
- if (!bip->bli_formats)
- return -ENOMEM;
- return 0;
}
STATIC void
@@ -731,7 +745,6 @@ xfs_buf_item_init(
struct xfs_buf_log_item *bip = bp->b_log_item;
int chunks;
int map_size;
- int error;
int i;
/*
@@ -760,19 +773,22 @@ xfs_buf_item_init(
* Discontiguous buffer support follows the layout of the underlying
* buffer. This makes the implementation as simple as possible.
*/
- error = xfs_buf_item_get_format(bip, bp->b_map_count);
- ASSERT(error == 0);
- if (error) { /* to stop gcc throwing set-but-unused warnings */
- kmem_cache_free(xfs_buf_item_zone, bip);
- return error;
- }
-
+ xfs_buf_item_get_format(bip, bp->b_map_count);
for (i = 0; i < bip->bli_format_count; i++) {
chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
XFS_BLF_CHUNK);
map_size = DIV_ROUND_UP(chunks, NBWORD);
+ if (map_size > XFS_BLF_DATAMAP_SIZE) {
+ kmem_cache_free(xfs_buf_item_zone, bip);
+ xfs_err(mp,
+ "buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
+ map_size,
+ BBTOB(bp->b_maps[i].bm_len));
+ return -EFSCORRUPTED;
+ }
+
bip->bli_formats[i].blf_type = XFS_LI_BUF;
bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
@@ -805,6 +821,9 @@ xfs_buf_item_log_segment(
uint end_bit;
uint mask;
+ ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
+ ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
+
/*
* Convert byte offsets to bit numbers.
*/
@@ -1094,7 +1113,7 @@ xfs_buf_iodone_callback_error(
if (bp->b_target != lasttarg ||
time_after(jiffies, (lasttime + 5*HZ))) {
lasttime = jiffies;
- xfs_buf_ioerror_alert(bp, __func__);
+ xfs_buf_ioerror_alert(bp, __this_address);
}
lasttarg = bp->b_target;
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 4a054b11011a..30114b510332 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -61,6 +61,7 @@ void xfs_buf_iodone_callbacks(struct xfs_buf *);
void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
bool xfs_buf_resubmit_failed_buffers(struct xfs_buf *,
struct list_head *);
+bool xfs_buf_log_check_iovec(struct xfs_log_iovec *iovec);
extern kmem_zone_t *xfs_buf_item_zone;
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index cae613620175..0b8350e84d28 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -45,7 +45,7 @@ xfs_trim_extents(
xfs_log_force(mp, XFS_LOG_SYNC);
error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
- if (error || !agbp)
+ if (error)
goto out_put_perag;
cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 2bff21ca9d78..d223e1ae90a6 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -137,7 +137,7 @@ xfs_qm_adjust_dqtimers(
(d->d_blk_hardlimit &&
(be64_to_cpu(d->d_bcount) >
be64_to_cpu(d->d_blk_hardlimit)))) {
- d->d_btimer = cpu_to_be32(get_seconds() +
+ d->d_btimer = cpu_to_be32(ktime_get_real_seconds() +
mp->m_quotainfo->qi_btimelimit);
} else {
d->d_bwarns = 0;
@@ -160,7 +160,7 @@ xfs_qm_adjust_dqtimers(
(d->d_ino_hardlimit &&
(be64_to_cpu(d->d_icount) >
be64_to_cpu(d->d_ino_hardlimit)))) {
- d->d_itimer = cpu_to_be32(get_seconds() +
+ d->d_itimer = cpu_to_be32(ktime_get_real_seconds() +
mp->m_quotainfo->qi_itimelimit);
} else {
d->d_iwarns = 0;
@@ -183,7 +183,7 @@ xfs_qm_adjust_dqtimers(
(d->d_rtb_hardlimit &&
(be64_to_cpu(d->d_rtbcount) >
be64_to_cpu(d->d_rtb_hardlimit)))) {
- d->d_rtbtimer = cpu_to_be32(get_seconds() +
+ d->d_rtbtimer = cpu_to_be32(ktime_get_real_seconds() +
mp->m_quotainfo->qi_rtbtimelimit);
} else {
d->d_rtbwarns = 0;
@@ -320,10 +320,10 @@ xfs_dquot_disk_alloc(
dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
/* now we can just get the buffer (there's nothing to read yet) */
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
- mp->m_quotainfo->qi_dqchunklen, 0);
- if (!bp)
- return -ENOMEM;
+ error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
+ mp->m_quotainfo->qi_dqchunklen, 0, &bp);
+ if (error)
+ return error;
bp->b_ops = &xfs_dquot_buf_ops;
/*
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index c93250108952..b8a4a3f29b36 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -187,7 +187,12 @@ xfs_file_dio_aio_read(
file_accessed(iocb->ki_filp);
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
+ return -EAGAIN;
+ } else {
+ xfs_ilock(ip, XFS_IOLOCK_SHARED);
+ }
ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL,
is_sync_kiocb(iocb));
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 5f12b5d8527a..1a88025e68a3 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -159,16 +159,15 @@ xfs_filestream_pick_ag(
if (!pag->pagf_init) {
err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
- if (err && !trylock) {
+ if (err) {
xfs_perag_put(pag);
- return err;
+ if (err != -EAGAIN)
+ return err;
+ /* Couldn't lock the AGF, skip this AG. */
+ continue;
}
}
- /* Might fail sometimes during the 1st pass with trylock set. */
- if (!pag->pagf_init)
- goto next_ag;
-
/* Keep track of the AG with the most free blocks. */
if (pag->pagf_freeblks > maxfree) {
maxfree = pag->pagf_freeblks;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 401da197f012..c5077e6326c7 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1518,10 +1518,8 @@ xfs_itruncate_extents_flags(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp = *tpp;
xfs_fileoff_t first_unmap_block;
- xfs_fileoff_t last_block;
xfs_filblks_t unmap_len;
int error = 0;
- int done = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
@@ -1541,21 +1539,22 @@ xfs_itruncate_extents_flags(
* the end of the file (in a crash where the space is allocated
* but the inode size is not yet updated), simply remove any
* blocks which show up between the new EOF and the maximum
- * possible file size. If the first block to be removed is
- * beyond the maximum file size (ie it is the same as last_block),
- * then there is nothing to do.
+ * possible file size.
+ *
+ * We have to free all the blocks to the bmbt maximum offset, even if
+ * the page cache can't scale that far.
*/
first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
- last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
- if (first_unmap_block == last_block)
+ if (first_unmap_block >= XFS_MAX_FILEOFF) {
+ WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
return 0;
+ }
- ASSERT(first_unmap_block < last_block);
- unmap_len = last_block - first_unmap_block + 1;
- while (!done) {
+ unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
+ while (unmap_len > 0) {
ASSERT(tp->t_firstblock == NULLFSBLOCK);
- error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
- XFS_ITRUNC_MAX_EXTENTS, &done);
+ error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
+ flags, XFS_ITRUNC_MAX_EXTENTS);
if (error)
goto out;
@@ -1575,7 +1574,7 @@ xfs_itruncate_extents_flags(
if (whichfork == XFS_DATA_FORK) {
/* Remove all pending CoW reservations. */
error = xfs_reflink_cancel_cow_blocks(ip, &tp,
- first_unmap_block, last_block, true);
+ first_unmap_block, XFS_MAX_FILEOFF, true);
if (error)
goto out;
@@ -2547,6 +2546,7 @@ xfs_ifree_cluster(
struct xfs_perag *pag;
struct xfs_ino_geometry *igeo = M_IGEO(mp);
xfs_ino_t inum;
+ int error;
inum = xic->first_ino;
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
@@ -2575,12 +2575,11 @@ xfs_ifree_cluster(
* complete before we get a lock on it, and hence we may fail
* to mark all the active inodes on the buffer stale.
*/
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
- mp->m_bsize * igeo->blocks_per_cluster,
- XBF_UNMAPPED);
-
- if (!bp)
- return -ENOMEM;
+ error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
+ mp->m_bsize * igeo->blocks_per_cluster,
+ XBF_UNMAPPED, &bp);
+ if (error)
+ return error;
/*
* This buffer may not have been correctly initialised as we
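
The reworked truncate loop above drops the old done flag and lets __xfs_bunmapi() update the remaining length itself, unmapping everything from the new EOF block out to the largest offset the bmbt can address. In sketch form (transaction rolling between passes elided):

        xfs_fileoff_t   first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
        xfs_filblks_t   unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;

        while (unmap_len > 0) {
                /* __xfs_bunmapi decrements unmap_len as extents go away */
                error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
                                flags, XFS_ITRUNC_MAX_EXTENTS);
                if (error)
                        break;
        }
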
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 7b35d62ede9f..d42de92cb283 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -357,6 +357,7 @@ xfs_attrmulti_attr_get(
{
unsigned char *kbuf;
int error = -EFAULT;
+ size_t namelen;
if (*len > XFS_XATTR_SIZE_MAX)
return -EINVAL;
@@ -364,7 +365,9 @@ xfs_attrmulti_attr_get(
if (!kbuf)
return -ENOMEM;
- error = xfs_attr_get(XFS_I(inode), name, &kbuf, (int *)len, flags);
+ namelen = strlen(name);
+ error = xfs_attr_get(XFS_I(inode), name, namelen, &kbuf, (int *)len,
+ flags);
if (error)
goto out_kfree;
@@ -386,6 +389,7 @@ xfs_attrmulti_attr_set(
{
unsigned char *kbuf;
int error;
+ size_t namelen;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
@@ -396,7 +400,8 @@ xfs_attrmulti_attr_set(
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
- error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
+ namelen = strlen(name);
+ error = xfs_attr_set(XFS_I(inode), name, namelen, kbuf, len, flags);
if (!error)
xfs_forget_acl(inode, name, flags);
kfree(kbuf);
@@ -410,10 +415,12 @@ xfs_attrmulti_attr_remove(
uint32_t flags)
{
int error;
+ size_t namelen;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
- error = xfs_attr_remove(XFS_I(inode), name, flags);
+ namelen = strlen(name);
+ error = xfs_attr_remove(XFS_I(inode), name, namelen, flags);
if (!error)
xfs_forget_acl(inode, name, flags);
return error;
@@ -462,6 +469,13 @@ xfs_attrmulti_by_handle(
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
+ if ((ops[i].am_flags & ATTR_ROOT) &&
+ (ops[i].am_flags & ATTR_SECURE)) {
+ ops[i].am_error = -EINVAL;
+ continue;
+ }
+ ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
+
ops[i].am_error = strncpy_from_user((char *)attr_name,
ops[i].am_attrname, MAXNAMELEN);
if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index c4c4f09113d3..769581a79c58 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -107,7 +107,7 @@ xfs_ioctl32_bstime_copyin(
xfs_bstime_t *bstime,
compat_xfs_bstime_t __user *bstime32)
{
- compat_time_t sec32; /* tv_sec differs on 64 vs. 32 */
+ old_time32_t sec32; /* tv_sec differs on 64 vs. 32 */
if (get_user(sec32, &bstime32->tv_sec) ||
get_user(bstime->tv_nsec, &bstime32->tv_nsec))
@@ -450,6 +450,13 @@ xfs_compat_attrmulti_by_handle(
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
+ if ((ops[i].am_flags & ATTR_ROOT) &&
+ (ops[i].am_flags & ATTR_SECURE)) {
+ ops[i].am_error = -EINVAL;
+ continue;
+ }
+ ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
+
ops[i].am_error = strncpy_from_user((char *)attr_name,
compat_ptr(ops[i].am_attrname),
MAXNAMELEN);
diff --git a/fs/xfs/xfs_ioctl32.h b/fs/xfs/xfs_ioctl32.h
index 8c7743cd490e..053de7d894cd 100644
--- a/fs/xfs/xfs_ioctl32.h
+++ b/fs/xfs/xfs_ioctl32.h
@@ -32,7 +32,7 @@
#endif
typedef struct compat_xfs_bstime {
- compat_time_t tv_sec; /* seconds */
+ old_time32_t tv_sec; /* seconds */
__s32 tv_nsec; /* and nanoseconds */
} compat_xfs_bstime_t;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 28e2d1f37267..bb590a267a7f 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -923,7 +923,7 @@ xfs_buffered_write_iomap_begin(
xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
/* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_inode_need_cow(ip, &imap, &shared);
+ error = xfs_bmap_trim_cow(ip, &imap, &shared);
if (error)
goto out_unlock;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 8afe69ca188b..81f2f93caec0 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -50,8 +50,10 @@ xfs_initxattrs(
int error = 0;
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
- error = xfs_attr_set(ip, xattr->name, xattr->value,
- xattr->value_len, ATTR_SECURE);
+ error = xfs_attr_set(ip, xattr->name,
+ strlen(xattr->name),
+ xattr->value, xattr->value_len,
+ ATTR_SECURE);
if (error < 0)
break;
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 99ec3fba4548..25cfc85dbaa7 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -294,7 +294,7 @@ xlog_recover_iodone(
* this during recovery. One strike!
*/
if (!XFS_FORCED_SHUTDOWN(bp->b_mount)) {
- xfs_buf_ioerror_alert(bp, __func__);
+ xfs_buf_ioerror_alert(bp, __this_address);
xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
}
}
@@ -1934,6 +1934,12 @@ xlog_recover_buffer_pass1(
struct list_head *bucket;
struct xfs_buf_cancel *bcp;
+ if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) {
+ xfs_err(log->l_mp, "bad buffer log item size (%d)",
+ item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
/*
* If this isn't a cancel buffer item, then just return.
*/
@@ -2739,15 +2745,10 @@ xlog_recover_buffer_pass2(
if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
buf_flags |= XBF_UNMAPPED;
- bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
- buf_flags, NULL);
- if (!bp)
- return -ENOMEM;
- error = bp->b_error;
- if (error) {
- xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
- goto out_release;
- }
+ error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
+ buf_flags, &bp, NULL);
+ if (error)
+ return error;
/*
* Recover the buffer only if we get an LSN from it and it's less than
@@ -2944,17 +2945,10 @@ xlog_recover_inode_pass2(
}
trace_xfs_log_recover_inode_recover(log, in_f);
- bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
- &xfs_inode_buf_ops);
- if (!bp) {
- error = -ENOMEM;
+ error = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
+ 0, &bp, &xfs_inode_buf_ops);
+ if (error)
goto error;
- }
- error = bp->b_error;
- if (error) {
- xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
- goto out_release;
- }
ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
dip = xfs_buf_offset(bp, in_f->ilf_boffset);
@@ -5633,7 +5627,7 @@ xlog_do_recover(
error = xfs_buf_submit(bp);
if (error) {
if (!XFS_FORCED_SHUTDOWN(mp)) {
- xfs_buf_ioerror_alert(bp, __func__);
+ xfs_buf_ioerror_alert(bp, __this_address);
ASSERT(0);
}
xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index b6701b4f59a9..5f04d8a5ab2a 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -111,6 +111,7 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(xfs_dir2_sf_hdr_t, 10);
/* log structures */
+ XFS_CHECK_STRUCT_SIZE(struct xfs_buf_log_format, 88);
XFS_CHECK_STRUCT_SIZE(struct xfs_dq_logformat, 24);
XFS_CHECK_STRUCT_SIZE(struct xfs_efd_log_format_32, 28);
XFS_CHECK_STRUCT_SIZE(struct xfs_efd_log_format_64, 32);
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 7823af39008b..4e57edca8bce 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -64,9 +64,9 @@ struct xfs_quotainfo {
struct xfs_inode *qi_pquotaip; /* project quota inode */
struct list_lru qi_lru;
int qi_dquots;
- time_t qi_btimelimit; /* limit for blks timer */
- time_t qi_itimelimit; /* limit for inodes timer */
- time_t qi_rtbtimelimit;/* limit for rt blks timer */
+ time64_t qi_btimelimit; /* limit for blks timer */
+ time64_t qi_itimelimit; /* limit for inodes timer */
+ time64_t qi_rtbtimelimit;/* limit for rt blks timer */
xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */
xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */
xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */
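
The time_t to time64_t switch here is part of the kernel-wide y2038 cleanup: time_t and the return value of get_seconds() are 32 bits wide on 32-bit architectures, while ktime_get_real_seconds() returns a time64_t. A sketch of the grace-period computation after the conversion (d and q name a dquot disk record and the quotainfo, as in the hunks in this series; note the on-disk timer field itself is still a 32-bit big-endian value):

        time64_t expiry = ktime_get_real_seconds() + q->qi_btimelimit;

        d->d_btimer = cpu_to_be32(expiry);      /* on-disk field remains __be32 */
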
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index c7de17deeae6..38669e827206 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -37,9 +37,9 @@ xfs_qm_fill_state(
tstate->flags |= QCI_SYSFILE;
tstate->blocks = ip->i_d.di_nblocks;
tstate->nextents = ip->i_d.di_nextents;
- tstate->spc_timelimit = q->qi_btimelimit;
- tstate->ino_timelimit = q->qi_itimelimit;
- tstate->rt_spc_timelimit = q->qi_rtbtimelimit;
+ tstate->spc_timelimit = (u32)q->qi_btimelimit;
+ tstate->ino_timelimit = (u32)q->qi_itimelimit;
+ tstate->rt_spc_timelimit = (u32)q->qi_rtbtimelimit;
tstate->spc_warnlimit = q->qi_bwarnlimit;
tstate->ino_warnlimit = q->qi_iwarnlimit;
tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit;
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index de451235c4ee..b0ce04ffd3cd 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -143,8 +143,6 @@ xfs_reflink_find_shared(
error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
if (error)
return error;
- if (!agbp)
- return -ENOMEM;
cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
@@ -223,8 +221,8 @@ xfs_reflink_trim_around_shared(
}
}
-bool
-xfs_inode_need_cow(
+int
+xfs_bmap_trim_cow(
struct xfs_inode *ip,
struct xfs_bmbt_irec *imap,
bool *shared)
@@ -327,7 +325,7 @@ xfs_find_trim_cow_extent(
if (cmap->br_startoff > offset_fsb) {
xfs_trim_extent(imap, imap->br_startoff,
cmap->br_startoff - imap->br_startoff);
- return xfs_inode_need_cow(ip, imap, shared);
+ return xfs_bmap_trim_cow(ip, imap, shared);
}
*shared = true;
@@ -1457,7 +1455,8 @@ xfs_reflink_clear_inode_flag(
* We didn't find any shared blocks so turn off the reflink flag.
* First, get rid of any leftover CoW mappings.
*/
- error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
+ error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, XFS_MAX_FILEOFF,
+ true);
if (error)
return error;
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index d18ad7f4fb64..3e4fd46373ab 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -22,7 +22,7 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agblock_t *fbno, xfs_extlen_t *flen, bool find_maximal);
extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
struct xfs_bmbt_irec *irec, bool *shared);
-bool xfs_inode_need_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
+int xfs_bmap_trim_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
bool *shared);
int xfs_reflink_allocate_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index d42b5a2047e0..6209e7b6b895 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -826,12 +826,10 @@ xfs_growfs_rt_alloc(
* Get a buffer for the block.
*/
d = XFS_FSB_TO_DADDR(mp, fsbno);
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
- mp->m_bsize, 0);
- if (bp == NULL) {
- error = -EIO;
+ error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+ mp->m_bsize, 0, &bp);
+ if (error)
goto out_trans_cancel;
- }
memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
/*
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index d9ae27ddf253..2094386af8ac 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -62,7 +62,7 @@ enum {
Opt_discard, Opt_nodiscard, Opt_dax,
};
-static const struct fs_parameter_spec xfs_param_specs[] = {
+static const struct fs_parameter_spec xfs_fs_parameters[] = {
fsparam_u32("logbufs", Opt_logbufs),
fsparam_string("logbsize", Opt_logbsize),
fsparam_string("logdev", Opt_logdev),
@@ -106,11 +106,6 @@ static const struct fs_parameter_spec xfs_param_specs[] = {
{}
};
-static const struct fs_parameter_description xfs_fs_parameters = {
- .name = "xfs",
- .specs = xfs_param_specs,
-};
-
struct proc_xfs_info {
uint64_t flag;
char *str;
@@ -193,32 +188,6 @@ xfs_fs_show_options(
return 0;
}
-static uint64_t
-xfs_max_file_offset(
- unsigned int blockshift)
-{
- unsigned int pagefactor = 1;
- unsigned int bitshift = BITS_PER_LONG - 1;
-
- /* Figure out maximum filesize, on Linux this can depend on
- * the filesystem blocksize (on 32 bit platforms).
- * __block_write_begin does this in an [unsigned] long long...
- * page->index << (PAGE_SHIFT - bbits)
- * So, for page sized blocks (4K on 32 bit platforms),
- * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
- * (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
- * but for smaller blocksizes it is less (bbits = log2 bsize).
- */
-
-#if BITS_PER_LONG == 32
- ASSERT(sizeof(sector_t) == 8);
- pagefactor = PAGE_SIZE;
- bitshift = BITS_PER_LONG;
-#endif
-
- return (((uint64_t)pagefactor) << bitshift) - 1;
-}
-
/*
* Set parameters for inode allocation heuristics, taking into account
* filesystem size and inode32/inode64 mount options; i.e. specifically
@@ -1146,7 +1115,7 @@ xfs_fc_parse_param(
int size = 0;
int opt;
- opt = fs_parse(fc, &xfs_fs_parameters, param, &result);
+ opt = fs_parse(fc, xfs_fs_parameters, param, &result);
if (opt < 0)
return opt;
@@ -1424,6 +1393,26 @@ xfs_fc_fill_super(
if (error)
goto out_free_sb;
+ /*
+ * XFS block mappings use 54 bits to store the logical block offset.
+ * This should suffice to handle the maximum file size that the VFS
+ * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
+ * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
+ * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
+ * to check this assertion.
+ *
+ * Avoid integer overflow by comparing the maximum bmbt offset to the
+ * maximum pagecache offset in units of fs blocks.
+ */
+ if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
+ xfs_warn(mp,
+"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
+ XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
+ XFS_MAX_FILEOFF);
+ error = -EINVAL;
+ goto out_free_sb;
+ }
+
error = xfs_filestream_mount(mp);
if (error)
goto out_free_sb;
@@ -1435,7 +1424,7 @@ xfs_fc_fill_super(
sb->s_magic = XFS_SUPER_MAGIC;
sb->s_blocksize = mp->m_sb.sb_blocksize;
sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
- sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_max_links = XFS_MAXLINK;
sb->s_time_gran = 1;
sb->s_time_min = S32_MIN;
@@ -1788,7 +1777,7 @@ static struct file_system_type xfs_fs_type = {
.owner = THIS_MODULE,
.name = "xfs",
.init_fs_context = xfs_init_fs_context,
- .parameters = &xfs_fs_parameters,
+ .parameters = xfs_fs_parameters,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
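
Sanity-checking the new mount-time assertion added above: on a 64-bit kernel MAX_LFS_FILESIZE is 2^63 - 1 bytes, and even at the smallest supported block size of 512 bytes (blocklog 9) that converts to at most 2^54 - 1 fs blocks, which is below XFS_MAX_FILEOFF. So the warning can only fire if a future VFS or XFS change breaks the relationship. As a standalone check:

        #include <assert.h>

        int main(void)
        {
                /* 64-bit kernel, 512-byte blocks */
                unsigned long long max_lfs_blocks = ((1ULL << 63) - 1) >> 9;
                unsigned long long max_fileoff =
                        ((1ULL << 54) - 1) + ((1ULL << 21) - 1);

                assert(max_lfs_blocks <= max_fileoff);  /* 2^54-1 <= 2^54+2^21-2 */
                return 0;
        }
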
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index a25502bc2071..d762d42ed0ff 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -53,20 +53,10 @@ xfs_readlink_bmap_ilocked(
d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
- bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0,
- &xfs_symlink_buf_ops);
- if (!bp)
- return -ENOMEM;
- error = bp->b_error;
- if (error) {
- xfs_buf_ioerror_alert(bp, __func__);
- xfs_buf_relse(bp);
-
- /* bad CRC means corrupted metadata */
- if (error == -EFSBADCRC)
- error = -EFSCORRUPTED;
- goto out;
- }
+ error = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0,
+ &bp, &xfs_symlink_buf_ops);
+ if (error)
+ return error;
byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
if (pathlen < byte_cnt)
byte_cnt = pathlen;
@@ -290,12 +280,10 @@ xfs_symlink(
d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
- BTOBB(byte_cnt), 0);
- if (!bp) {
- error = -ENOMEM;
+ error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+ BTOBB(byte_cnt), 0, &bp);
+ if (error)
goto out_trans_cancel;
- }
bp->b_ops = &xfs_symlink_buf_ops;
byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
@@ -433,13 +421,12 @@ xfs_inactive_symlink_rmt(
* Invalidate the block(s). No validation is done.
*/
for (i = 0; i < nmaps; i++) {
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
- XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
- XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
- if (!bp) {
- error = -ENOMEM;
+ error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
+ XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0,
+ &bp);
+ if (error)
goto error_trans_cancel;
- }
xfs_trans_binval(tp, bp);
}
/*
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index a86be7f807ee..e242988f57fb 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -218,8 +218,8 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
- __field(void *, leaf);
- __field(int, pos);
+ __field(void *, leaf)
+ __field(int, pos)
__field(xfs_fileoff_t, startoff)
__field(xfs_fsblock_t, startblock)
__field(xfs_filblks_t, blockcount)
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 64d7f171ebd3..752c7fef9de7 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -169,21 +169,21 @@ int xfs_trans_alloc_empty(struct xfs_mount *mp,
struct xfs_trans **tpp);
void xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
-struct xfs_buf *xfs_trans_get_buf_map(struct xfs_trans *tp,
- struct xfs_buftarg *target,
- struct xfs_buf_map *map, int nmaps,
- uint flags);
+int xfs_trans_get_buf_map(struct xfs_trans *tp, struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags,
+ struct xfs_buf **bpp);
-static inline struct xfs_buf *
+static inline int
xfs_trans_get_buf(
struct xfs_trans *tp,
struct xfs_buftarg *target,
xfs_daddr_t blkno,
int numblks,
- uint flags)
+ uint flags,
+ struct xfs_buf **bpp)
{
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
- return xfs_trans_get_buf_map(tp, target, &map, 1, flags);
+ return xfs_trans_get_buf_map(tp, target, &map, 1, flags, bpp);
}
int xfs_trans_read_buf_map(struct xfs_mount *mp,
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index b5b3a78ef31c..08174ffa2118 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -112,19 +112,22 @@ xfs_trans_bjoin(
* If the transaction pointer is NULL, make this just a normal
* get_buf() call.
*/
-struct xfs_buf *
+int
xfs_trans_get_buf_map(
struct xfs_trans *tp,
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
- xfs_buf_flags_t flags)
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
{
xfs_buf_t *bp;
struct xfs_buf_log_item *bip;
+ int error;
+ *bpp = NULL;
if (!tp)
- return xfs_buf_get_map(target, map, nmaps, flags);
+ return xfs_buf_get_map(target, map, nmaps, flags, bpp);
/*
* If we find the buffer in the cache with this transaction
@@ -146,19 +149,20 @@ xfs_trans_get_buf_map(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
trace_xfs_trans_get_buf_recur(bip);
- return bp;
+ *bpp = bp;
+ return 0;
}
- bp = xfs_buf_get_map(target, map, nmaps, flags);
- if (bp == NULL) {
- return NULL;
- }
+ error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+ if (error)
+ return error;
ASSERT(!bp->b_error);
_xfs_trans_bjoin(tp, bp, 1);
trace_xfs_trans_get_buf(bp->b_log_item);
- return bp;
+ *bpp = bp;
+ return 0;
}
/*
@@ -276,7 +280,7 @@ xfs_trans_read_buf_map(
ASSERT(bp->b_ops != NULL);
error = xfs_buf_reverify(bp, ops);
if (error) {
- xfs_buf_ioerror_alert(bp, __func__);
+ xfs_buf_ioerror_alert(bp, __return_address);
if (tp->t_flags & XFS_TRANS_DIRTY)
xfs_force_shutdown(tp->t_mountp,
@@ -298,36 +302,17 @@ xfs_trans_read_buf_map(
return 0;
}
- bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
- if (!bp) {
- if (!(flags & XBF_TRYLOCK))
- return -ENOMEM;
- return tp ? 0 : -EAGAIN;
- }
-
- /*
- * If we've had a read error, then the contents of the buffer are
- * invalid and should not be used. To ensure that a followup read tries
- * to pull the buffer from disk again, we clear the XBF_DONE flag and
- * mark the buffer stale. This ensures that anyone who has a current
- * reference to the buffer will interpret it's contents correctly and
- * future cache lookups will also treat it as an empty, uninitialised
- * buffer.
- */
- if (bp->b_error) {
- error = bp->b_error;
- if (!XFS_FORCED_SHUTDOWN(mp))
- xfs_buf_ioerror_alert(bp, __func__);
- bp->b_flags &= ~XBF_DONE;
- xfs_buf_stale(bp);
-
+ error = xfs_buf_read_map(target, map, nmaps, flags, &bp, ops,
+ __return_address);
+ switch (error) {
+ case 0:
+ break;
+ default:
if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
- xfs_buf_relse(bp);
-
- /* bad CRC means corrupted metadata */
- if (error == -EFSBADCRC)
- error = -EFSCORRUPTED;
+ /* fall through */
+ case -ENOMEM:
+ case -EAGAIN:
return error;
}
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index a6fe2d8dc40f..d1b9869bc5fa 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -580,7 +580,7 @@ xfs_trans_dqresv(
{
xfs_qcnt_t hardlimit;
xfs_qcnt_t softlimit;
- time_t timer;
+ time64_t timer;
xfs_qwarncnt_t warns;
xfs_qwarncnt_t warnlimit;
xfs_qcnt_t total_count;
@@ -635,7 +635,8 @@ xfs_trans_dqresv(
goto error_return;
}
if (softlimit && total_count > softlimit) {
- if ((timer != 0 && get_seconds() > timer) ||
+ if ((timer != 0 &&
+ ktime_get_real_seconds() > timer) ||
(warns != 0 && warns >= warnlimit)) {
xfs_quota_warn(mp, dqp,
QUOTA_NL_BSOFTLONGWARN);
@@ -662,7 +663,8 @@ xfs_trans_dqresv(
goto error_return;
}
if (softlimit && total_count > softlimit) {
- if ((timer != 0 && get_seconds() > timer) ||
+ if ((timer != 0 &&
+ ktime_get_real_seconds() > timer) ||
(warns != 0 && warns >= warnlimit)) {
xfs_quota_warn(mp, dqp,
QUOTA_NL_ISOFTLONGWARN);
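The same file also moves the quota grace-period timer from time_t/get_seconds() to time64_t/ktime_get_real_seconds(), which does not overflow in January 2038. A minimal sketch of the comparison pattern (the helper name is hypothetical):

#include <linux/types.h>
#include <linux/timekeeping.h>

/* Returns true once a non-zero grace-period deadline (in seconds
 * since the Unix epoch) has passed. time64_t remains valid past
 * 2038, unlike the old 32-bit time_t.
 */
static bool grace_period_expired(time64_t deadline)
{
	return deadline != 0 && ktime_get_real_seconds() > deadline;
}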
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 383f0203d103..b0fedb543f97 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -24,6 +24,7 @@ xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
int xflags = handler->flags;
struct xfs_inode *ip = XFS_I(inode);
int error, asize = size;
+ size_t namelen = strlen(name);
/* Convert Linux syscall to XFS internal ATTR flags */
if (!size) {
@@ -31,7 +32,8 @@ xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
value = NULL;
}
- error = xfs_attr_get(ip, name, (unsigned char **)&value, &asize, xflags);
+ error = xfs_attr_get(ip, name, namelen, (unsigned char **)&value,
+ &asize, xflags);
if (error)
return error;
return asize;
@@ -67,6 +69,7 @@ xfs_xattr_set(const struct xattr_handler *handler, struct dentry *unused,
int xflags = handler->flags;
struct xfs_inode *ip = XFS_I(inode);
int error;
+ size_t namelen = strlen(name);
/* Convert Linux syscall to XFS internal ATTR flags */
if (flags & XATTR_CREATE)
@@ -74,10 +77,11 @@ xfs_xattr_set(const struct xattr_handler *handler, struct dentry *unused,
if (flags & XATTR_REPLACE)
xflags |= ATTR_REPLACE;
- if (!value)
- return xfs_attr_remove(ip, (unsigned char *)name, xflags);
- error = xfs_attr_set(ip, (unsigned char *)name,
- (void *)value, size, xflags);
+ if (value)
+ error = xfs_attr_set(ip, name, namelen, (void *)value, size,
+ xflags);
+ else
+ error = xfs_attr_remove(ip, name, namelen, xflags);
if (!error)
xfs_forget_acl(inode, name, xflags);
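A hedged sketch of a caller of the reworked attr API (the helper name is hypothetical; it mirrors the xfs_xattr_set() logic above, where the name length is now computed once and passed down explicitly):

#include <linux/string.h>	/* sketch also assumes the XFS internal headers */

static int example_attr_update(struct xfs_inode *ip, const char *name,
			       void *value, size_t size, int xflags)
{
	size_t namelen = strlen(name);

	if (value)
		return xfs_attr_set(ip, name, namelen, value, size, xflags);
	return xfs_attr_remove(ip, name, namelen, xflags);
}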
diff --git a/fs/zonefs/Kconfig b/fs/zonefs/Kconfig
new file mode 100644
index 000000000000..fb87ad372e29
--- /dev/null
+++ b/fs/zonefs/Kconfig
@@ -0,0 +1,9 @@
+config ZONEFS_FS
+ tristate "zonefs filesystem support"
+ depends on BLOCK
+ depends on BLK_DEV_ZONED
+ help
+ zonefs is a simple file system which exposes zones of a zoned block
+ device (e.g. host-managed or host-aware SMR disk drives) as files.
+
+ If unsure, say N.
diff --git a/fs/zonefs/Makefile b/fs/zonefs/Makefile
new file mode 100644
index 000000000000..75a380aa1ae1
--- /dev/null
+++ b/fs/zonefs/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ZONEFS_FS) += zonefs.o
+
+zonefs-y := super.o
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
new file mode 100644
index 000000000000..8bc6ef82d693
--- /dev/null
+++ b/fs/zonefs/super.c
@@ -0,0 +1,1439 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple file system for zoned block devices exposing zones as files.
+ *
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/magic.h>
+#include <linux/iomap.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/statfs.h>
+#include <linux/writeback.h>
+#include <linux/quotaops.h>
+#include <linux/seq_file.h>
+#include <linux/parser.h>
+#include <linux/uio.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/crc32.h>
+
+#include "zonefs.h"
+
+static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned int flags, struct iomap *iomap,
+ struct iomap *srcmap)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ loff_t isize;
+
+ /* All I/Os should always be within the file maximum size */
+ if (WARN_ON_ONCE(offset + length > zi->i_max_size))
+ return -EIO;
+
+ /*
+ * Sequential zones can only accept direct writes. This is already
+ * checked when writes are issued, so warn if we see a page writeback
+ * operation.
+ */
+ if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+ (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
+ return -EIO;
+
+ /*
+ * For conventional zones, all blocks are always mapped. For sequential
+ * zones, blocks are always mapped below the inode size (zone write
+ * pointer) and unwritten beyond it.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
+ isize = i_size_read(inode);
+ if (offset >= isize)
+ iomap->type = IOMAP_UNWRITTEN;
+ else
+ iomap->type = IOMAP_MAPPED;
+ if (flags & IOMAP_WRITE)
+ length = zi->i_max_size - offset;
+ else
+ length = min(length, isize - offset);
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+ iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
+ iomap->bdev = inode->i_sb->s_bdev;
+ iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+
+ return 0;
+}
+
+static const struct iomap_ops zonefs_iomap_ops = {
+ .iomap_begin = zonefs_iomap_begin,
+};
+
+static int zonefs_readpage(struct file *unused, struct page *page)
+{
+ return iomap_readpage(page, &zonefs_iomap_ops);
+}
+
+static int zonefs_readpages(struct file *unused, struct address_space *mapping,
+ struct list_head *pages, unsigned int nr_pages)
+{
+ return iomap_readpages(mapping, pages, nr_pages, &zonefs_iomap_ops);
+}
+
+/*
+ * Map blocks for page writeback. This is used only on conventional zone files,
+ * which implies that the page range can only be within the fixed inode size.
+ */
+static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
+ struct inode *inode, loff_t offset)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+ if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
+ return -EIO;
+ if (WARN_ON_ONCE(offset >= i_size_read(inode)))
+ return -EIO;
+
+ /* If the mapping is already OK, nothing needs to be done */
+ if (offset >= wpc->iomap.offset &&
+ offset < wpc->iomap.offset + wpc->iomap.length)
+ return 0;
+
+ return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
+ IOMAP_WRITE, &wpc->iomap, NULL);
+}
+
+static const struct iomap_writeback_ops zonefs_writeback_ops = {
+ .map_blocks = zonefs_map_blocks,
+};
+
+static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
+{
+ struct iomap_writepage_ctx wpc = { };
+
+ return iomap_writepage(page, wbc, &wpc, &zonefs_writeback_ops);
+}
+
+static int zonefs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct iomap_writepage_ctx wpc = { };
+
+ return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
+}
+
+static const struct address_space_operations zonefs_file_aops = {
+ .readpage = zonefs_readpage,
+ .readpages = zonefs_readpages,
+ .writepage = zonefs_writepage,
+ .writepages = zonefs_writepages,
+ .set_page_dirty = iomap_set_page_dirty,
+ .releasepage = iomap_releasepage,
+ .invalidatepage = iomap_invalidatepage,
+ .migratepage = iomap_migrate_page,
+ .is_partially_uptodate = iomap_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
+ .direct_IO = noop_direct_IO,
+};
+
+static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
+{
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ loff_t old_isize = i_size_read(inode);
+ loff_t nr_blocks;
+
+ if (new_isize == old_isize)
+ return;
+
+ spin_lock(&sbi->s_lock);
+
+ /*
+ * This may be called for a size update after an IO error, so the new
+ * size can be either smaller or larger than the old one: clamp the
+ * used block count in both directions.
+ */
+ if (new_isize < old_isize) {
+ nr_blocks = (old_isize - new_isize) >> sb->s_blocksize_bits;
+ if (sbi->s_used_blocks > nr_blocks)
+ sbi->s_used_blocks -= nr_blocks;
+ else
+ sbi->s_used_blocks = 0;
+ } else {
+ sbi->s_used_blocks +=
+ (new_isize - old_isize) >> sb->s_blocksize_bits;
+ if (sbi->s_used_blocks > sbi->s_blocks)
+ sbi->s_used_blocks = sbi->s_blocks;
+ }
+
+ spin_unlock(&sbi->s_lock);
+}
+
+/*
+ * Check a zone condition and adjust its file inode access permissions for
+ * offline and readonly zones. Return the inode size corresponding to the
+ * amount of readable data in the zone.
+ */
+static loff_t zonefs_check_zone_condition(struct inode *inode,
+ struct blk_zone *zone, bool warn)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_OFFLINE:
+ /*
+ * Dead zone: make the inode immutable, disable all accesses
+ * and set the file size to 0 (zone wp set to zone start).
+ */
+ if (warn)
+ zonefs_warn(inode->i_sb, "inode %lu: offline zone\n",
+ inode->i_ino);
+ inode->i_flags |= S_IMMUTABLE;
+ inode->i_mode &= ~0777;
+ zone->wp = zone->start;
+ return 0;
+ case BLK_ZONE_COND_READONLY:
+ /* Do not allow writes in read-only zones */
+ if (warn)
+ zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
+ inode->i_ino);
+ inode->i_flags |= S_IMMUTABLE;
+ inode->i_mode &= ~0222;
+ /* fallthrough */
+ default:
+ if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
+ return zi->i_max_size;
+ return (zone->wp - zone->start) << SECTOR_SHIFT;
+ }
+}
+
+struct zonefs_ioerr_data {
+ struct inode *inode;
+ bool write;
+};
+
+static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct zonefs_ioerr_data *err = data;
+ struct inode *inode = err->inode;
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ loff_t isize, data_size;
+
+ /*
+ * Check the zone condition: if the zone is not "bad" (offline or
+ * read-only), read errors are simply signaled to the IO issuer as long
+ * as there is no inconsistency between the inode size and the amount of
+ * data written in the zone (data_size).
+ */
+ data_size = zonefs_check_zone_condition(inode, zone, true);
+ isize = i_size_read(inode);
+ if (zone->cond != BLK_ZONE_COND_OFFLINE &&
+ zone->cond != BLK_ZONE_COND_READONLY &&
+ !err->write && isize == data_size)
+ return 0;
+
+ /*
+ * At this point, we detected either a bad zone or an inconsistency
+ * between the inode size and the amount of data written in the zone.
+ * For the latter case, the cause may be a write IO error or an external
+ * action on the device. Two error patterns exist:
+ * 1) The inode size is lower than the amount of data in the zone:
+ * a write operation partially failed and data was written at the end
+ * of the file. This can happen in the case of a large direct IO
+ * needing several BIOs and/or write requests to be processed.
+ * 2) The inode size is larger than the amount of data in the zone:
+ * this can happen with a deferred write error in the device-side
+ * write cache after successful write IO completions were reported.
+ * Other possibilities are (a) an external corruption,
+ * e.g. an application reset the zone directly, or (b) the device
+ * has a serious problem (e.g. firmware bug).
+ *
+ * In all cases, warn about inode size inconsistency and handle the
+ * IO error according to the zone condition and to the mount options.
+ */
+ if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size)
+ zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
+ inode->i_ino, isize, data_size);
+
+ /*
+ * First handle bad zones signaled by hardware. The mount options
+ * errors=zone-ro and errors=zone-offline result in changing the
+ * zone condition to read-only and offline respectively, as if the
+ * condition was signaled by the hardware.
+ */
+ if (zone->cond == BLK_ZONE_COND_OFFLINE ||
+ sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) {
+ zonefs_warn(sb, "inode %lu: read/write access disabled\n",
+ inode->i_ino);
+ if (zone->cond != BLK_ZONE_COND_OFFLINE) {
+ zone->cond = BLK_ZONE_COND_OFFLINE;
+ data_size = zonefs_check_zone_condition(inode, zone,
+ false);
+ }
+ } else if (zone->cond == BLK_ZONE_COND_READONLY ||
+ sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
+ zonefs_warn(sb, "inode %lu: write access disabled\n",
+ inode->i_ino);
+ if (zone->cond != BLK_ZONE_COND_READONLY) {
+ zone->cond = BLK_ZONE_COND_READONLY;
+ data_size = zonefs_check_zone_condition(inode, zone,
+ false);
+ }
+ }
+
+ /*
+ * If errors=remount-ro was specified, any error results in remounting
+ * the volume as read-only.
+ */
+ if ((sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO) && !sb_rdonly(sb)) {
+ zonefs_warn(sb, "remounting filesystem read-only\n");
+ sb->s_flags |= SB_RDONLY;
+ }
+
+ /*
+ * Update block usage stats and the inode size to prevent access to
+ * invalid data.
+ */
+ zonefs_update_stats(inode, data_size);
+ i_size_write(inode, data_size);
+ zi->i_wpoffset = data_size;
+
+ return 0;
+}
+
+/*
+ * When a file IO error occurs, check the file zone to see if there is a change
+ * in the zone condition (e.g. offline or read-only). For a failed write to a
+ * sequential zone, the zone write pointer position must also be checked to
+ * correct, if needed, the file size and zonefs inode write pointer offset
+ * (which can be out of sync with the drive due to partial write failures).
+ */
+static void zonefs_io_error(struct inode *inode, bool write)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ unsigned int noio_flag;
+ unsigned int nr_zones =
+ zi->i_max_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+ struct zonefs_ioerr_data err = {
+ .inode = inode,
+ .write = write,
+ };
+ int ret;
+
+ mutex_lock(&zi->i_truncate_mutex);
+
+ /*
+ * Memory allocations in blkdev_report_zones() can trigger a memory
+ * reclaim which may in turn cause a recursion into zonefs as well as
+ * struct request allocations for the same device. The former case may
+ * end up in a deadlock on the inode truncate mutex, while the latter
+ * may prevent IO forward progress. Executing the report zones under
+ * the GFP_NOIO context avoids both problems.
+ */
+ noio_flag = memalloc_noio_save();
+ ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones,
+ zonefs_io_error_cb, &err);
+ if (ret != nr_zones)
+ zonefs_err(sb, "Get inode %lu zone information failed %d\n",
+ inode->i_ino, ret);
+ memalloc_noio_restore(noio_flag);
+
+ mutex_unlock(&zi->i_truncate_mutex);
+}
+
+static int zonefs_file_truncate(struct inode *inode, loff_t isize)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ loff_t old_isize;
+ enum req_opf op;
+ int ret = 0;
+
+ /*
+ * Only sequential zone files can be truncated and truncation is allowed
+ * only down to a 0 size, which is equivalent to a zone reset, and to
+ * the maximum file size, which is equivalent to a zone finish.
+ */
+ if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+ return -EPERM;
+
+ if (!isize)
+ op = REQ_OP_ZONE_RESET;
+ else if (isize == zi->i_max_size)
+ op = REQ_OP_ZONE_FINISH;
+ else
+ return -EPERM;
+
+ inode_dio_wait(inode);
+
+ /* Serialize against page faults */
+ down_write(&zi->i_mmap_sem);
+
+ /* Serialize against zonefs_iomap_begin() */
+ mutex_lock(&zi->i_truncate_mutex);
+
+ old_isize = i_size_read(inode);
+ if (isize == old_isize)
+ goto unlock;
+
+ ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
+ zi->i_max_size >> SECTOR_SHIFT, GFP_NOFS);
+ if (ret) {
+ zonefs_err(inode->i_sb,
+ "Zone management operation at %llu failed %d",
+ zi->i_zsector, ret);
+ goto unlock;
+ }
+
+ zonefs_update_stats(inode, isize);
+ truncate_setsize(inode, isize);
+ zi->i_wpoffset = isize;
+
+unlock:
+ mutex_unlock(&zi->i_truncate_mutex);
+ up_write(&zi->i_mmap_sem);
+
+ return ret;
+}
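From user space, these rules mean a sequential zone file accepts exactly two truncation targets. A minimal sketch, assuming a zonefs mount at /mnt with 256 MiB zones (both values are assumptions):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/seq/0", O_RDWR);	/* path is an assumption */

	if (fd < 0)
		return 1;
	ftruncate(fd, 0);			/* zone reset: drops all zone data */
	ftruncate(fd, (off_t)256 << 20);	/* zone finish: size = max file size */
	/* Any other target size fails with EPERM. */
	close(fd);
	return 0;
}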
+
+static int zonefs_inode_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ ret = setattr_prepare(dentry, iattr);
+ if (ret)
+ return ret;
+
+ /*
+ * Since files and directories cannot be created or deleted, do not
+ * allow setting any write attributes on the sub-directories grouping
+ * files by zone type.
+ */
+ if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
+ (iattr->ia_mode & 0222))
+ return -EPERM;
+
+ if (((iattr->ia_valid & ATTR_UID) &&
+ !uid_eq(iattr->ia_uid, inode->i_uid)) ||
+ ((iattr->ia_valid & ATTR_GID) &&
+ !gid_eq(iattr->ia_gid, inode->i_gid))) {
+ ret = dquot_transfer(inode, iattr);
+ if (ret)
+ return ret;
+ }
+
+ if (iattr->ia_valid & ATTR_SIZE) {
+ ret = zonefs_file_truncate(inode, iattr->ia_size);
+ if (ret)
+ return ret;
+ }
+
+ setattr_copy(inode, iattr);
+
+ return 0;
+}
+
+static const struct inode_operations zonefs_file_inode_operations = {
+ .setattr = zonefs_inode_setattr,
+};
+
+static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
+{
+ struct inode *inode = file_inode(file);
+ int ret = 0;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ /*
+ * Since only direct writes are allowed in sequential files, page cache
+ * flush is needed only for conventional zone files.
+ */
+ if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
+ ret = file_write_and_wait_range(file, start, end);
+ if (!ret)
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+
+ if (ret)
+ zonefs_io_error(inode, true);
+
+ return ret;
+}
+
+static vm_fault_t zonefs_filemap_fault(struct vm_fault *vmf)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(file_inode(vmf->vma->vm_file));
+ vm_fault_t ret;
+
+ down_read(&zi->i_mmap_sem);
+ ret = filemap_fault(vmf);
+ up_read(&zi->i_mmap_sem);
+
+ return ret;
+}
+
+static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ vm_fault_t ret;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return VM_FAULT_SIGBUS;
+
+ /*
+ * Sanity check: only conventional zone files can have shared
+ * writable mappings.
+ */
+ if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
+ return VM_FAULT_NOPAGE;
+
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vmf->vma->vm_file);
+
+ /* Serialize against truncates */
+ down_read(&zi->i_mmap_sem);
+ ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
+ up_read(&zi->i_mmap_sem);
+
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+}
+
+static const struct vm_operations_struct zonefs_file_vm_ops = {
+ .fault = zonefs_filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = zonefs_filemap_page_mkwrite,
+};
+
+static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /*
+ * Conventional zones accept random writes, so their files can support
+ * shared writable mappings. For sequential zone files, only read-only
+ * mappings are possible since there are no guarantees for write
+ * ordering between msync() and page cache writeback.
+ */
+ if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ &&
+ (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+ return -EINVAL;
+
+ file_accessed(file);
+ vma->vm_ops = &zonefs_file_vm_ops;
+
+ return 0;
+}
+
+static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
+{
+ loff_t isize = i_size_read(file_inode(file));
+
+ /*
+ * Seeks are limited to below the zone size for conventional zones
+ * and below the zone write pointer for sequential zones. In both
+ * cases, this limit is the inode size.
+ */
+ return generic_file_llseek_size(file, offset, whence, isize, isize);
+}
+
+static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
+ int error, unsigned int flags)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+ if (error) {
+ zonefs_io_error(inode, true);
+ return error;
+ }
+
+ if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+ /*
+ * Note that we may be seeing completions out of order,
+ * but that is not a problem since a write completed
+ * successfully necessarily means that all preceding writes
+ * were also successful. So we can safely increase the inode
+ * size to the write end location.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
+ if (i_size_read(inode) < iocb->ki_pos + size) {
+ zonefs_update_stats(inode, iocb->ki_pos + size);
+ i_size_write(inode, iocb->ki_pos + size);
+ }
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+ return 0;
+}
+
+static const struct iomap_dio_ops zonefs_write_dio_ops = {
+ .end_io = zonefs_file_write_dio_end_io,
+};
+
+/*
+ * Handle direct writes. For sequential zone files, this is the only possible
+ * write path. For these files, check that the user is issuing writes
+ * sequentially from the end of the file. This code assumes that the block layer
+ * delivers write requests to the device in sequential order. This is always the
+ * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
+ * elevator feature is being used (e.g. mq-deadline). The block layer always
+ * automatically select such an elevator for zoned block devices during the
+ * device initialization.
+ */
+static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ size_t count;
+ ssize_t ret;
+
+ /*
+ * For async direct IOs to sequential zone files, ignore IOCB_NOWAIT
+ * as this can cause write reordering (e.g. the first aio gets EAGAIN
+ * on the inode lock but the second goes through and is then no longer
+ * sequential with the zone write pointer).
+ */
+ if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb)
+ && (iocb->ki_flags & IOCB_NOWAIT))
+ iocb->ki_flags &= ~IOCB_NOWAIT;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock(inode);
+ }
+
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto inode_unlock;
+
+ iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
+ count = iov_iter_count(from);
+
+ if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+ ret = -EINVAL;
+ goto inode_unlock;
+ }
+
+ /* Enforce sequential writes (append only) in sequential zones */
+ mutex_lock(&zi->i_truncate_mutex);
+ if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && iocb->ki_pos != zi->i_wpoffset) {
+ mutex_unlock(&zi->i_truncate_mutex);
+ ret = -EINVAL;
+ goto inode_unlock;
+ }
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
+ &zonefs_write_dio_ops, is_sync_kiocb(iocb));
+ if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+ (ret > 0 || ret == -EIOCBQUEUED)) {
+ if (ret > 0)
+ count = ret;
+ mutex_lock(&zi->i_truncate_mutex);
+ zi->i_wpoffset += count;
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+inode_unlock:
+ inode_unlock(inode);
+
+ return ret;
+}
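In user space this means a sequential zone file must be written with O_DIRECT, block-aligned buffers, and a file offset equal to the current file size (the zone write pointer). A hedged sketch, assuming a mount at /mnt and a 4096-byte block size:

#define _GNU_SOURCE	/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	void *buf;
	int fd = open("/mnt/seq/0", O_WRONLY | O_DIRECT);	/* path assumed */

	if (fd < 0)
		return 1;
	fstat(fd, &st);			/* st.st_size == zone write pointer */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0, 4096);
	/* Writes must start exactly at the write pointer and cover whole
	 * blocks, otherwise zonefs returns EINVAL.
	 */
	if (pwrite(fd, buf, 4096, st.st_size) != 4096)
		return 1;
	close(fd);
	return 0;
}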
+
+static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ ssize_t ret;
+
+ /*
+ * Direct IO writes are mandatory for sequential zone files so that the
+ * write IO issuing order is preserved.
+ */
+ if (zi->i_ztype != ZONEFS_ZTYPE_CNV)
+ return -EIO;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock(inode);
+ }
+
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto inode_unlock;
+
+ iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
+
+ ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
+ if (ret > 0)
+ iocb->ki_pos += ret;
+ else if (ret == -EIO)
+ zonefs_io_error(inode, true);
+
+inode_unlock:
+ inode_unlock(inode);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+
+ return ret;
+}
+
+static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ if (sb_rdonly(inode->i_sb))
+ return -EROFS;
+
+ /* Write operations beyond the zone size are not allowed */
+ if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
+ return -EFBIG;
+
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return zonefs_file_dio_write(iocb, from);
+
+ return zonefs_file_buffered_write(iocb, from);
+}
+
+static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
+ int error, unsigned int flags)
+{
+ if (error) {
+ zonefs_io_error(file_inode(iocb->ki_filp), false);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct iomap_dio_ops zonefs_read_dio_ops = {
+ .end_io = zonefs_file_read_dio_end_io,
+};
+
+static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ loff_t isize;
+ ssize_t ret;
+
+ /* Offline zones cannot be read */
+ if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
+ return -EPERM;
+
+ if (iocb->ki_pos >= zi->i_max_size)
+ return 0;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock_shared(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock_shared(inode);
+ }
+
+ /* Limit read operations to written data */
+ mutex_lock(&zi->i_truncate_mutex);
+ isize = i_size_read(inode);
+ if (iocb->ki_pos >= isize) {
+ mutex_unlock(&zi->i_truncate_mutex);
+ ret = 0;
+ goto inode_unlock;
+ }
+ iov_iter_truncate(to, isize - iocb->ki_pos);
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ size_t count = iov_iter_count(to);
+
+ if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+ ret = -EINVAL;
+ goto inode_unlock;
+ }
+ file_accessed(iocb->ki_filp);
+ ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
+ &zonefs_read_dio_ops, is_sync_kiocb(iocb));
+ } else {
+ ret = generic_file_read_iter(iocb, to);
+ if (ret == -EIO)
+ zonefs_io_error(inode, false);
+ }
+
+inode_unlock:
+ inode_unlock_shared(inode);
+
+ return ret;
+}
+
+static const struct file_operations zonefs_file_operations = {
+ .open = generic_file_open,
+ .fsync = zonefs_file_fsync,
+ .mmap = zonefs_file_mmap,
+ .llseek = zonefs_file_llseek,
+ .read_iter = zonefs_file_read_iter,
+ .write_iter = zonefs_file_write_iter,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .iopoll = iomap_dio_iopoll,
+};
+
+static struct kmem_cache *zonefs_inode_cachep;
+
+static struct inode *zonefs_alloc_inode(struct super_block *sb)
+{
+ struct zonefs_inode_info *zi;
+
+ zi = kmem_cache_alloc(zonefs_inode_cachep, GFP_KERNEL);
+ if (!zi)
+ return NULL;
+
+ inode_init_once(&zi->i_vnode);
+ mutex_init(&zi->i_truncate_mutex);
+ init_rwsem(&zi->i_mmap_sem);
+
+ return &zi->i_vnode;
+}
+
+static void zonefs_free_inode(struct inode *inode)
+{
+ kmem_cache_free(zonefs_inode_cachep, ZONEFS_I(inode));
+}
+
+/*
+ * File system stat.
+ */
+static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ enum zonefs_ztype t;
+ u64 fsid;
+
+ buf->f_type = ZONEFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = ZONEFS_NAME_MAX;
+
+ spin_lock(&sbi->s_lock);
+
+ buf->f_blocks = sbi->s_blocks;
+ if (WARN_ON(sbi->s_used_blocks > sbi->s_blocks))
+ buf->f_bfree = 0;
+ else
+ buf->f_bfree = buf->f_blocks - sbi->s_used_blocks;
+ buf->f_bavail = buf->f_bfree;
+
+ for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
+ if (sbi->s_nr_files[t])
+ buf->f_files += sbi->s_nr_files[t] + 1;
+ }
+ buf->f_ffree = 0;
+
+ spin_unlock(&sbi->s_lock);
+
+ fsid = le64_to_cpup((void *)sbi->s_uuid.b) ^
+ le64_to_cpup((void *)sbi->s_uuid.b + sizeof(u64));
+ buf->f_fsid.val[0] = (u32)fsid;
+ buf->f_fsid.val[1] = (u32)(fsid >> 32);
+
+ return 0;
+}
+
+enum {
+ Opt_errors_ro, Opt_errors_zro, Opt_errors_zol, Opt_errors_repair,
+ Opt_err,
+};
+
+static const match_table_t tokens = {
+ { Opt_errors_ro, "errors=remount-ro"},
+ { Opt_errors_zro, "errors=zone-ro"},
+ { Opt_errors_zol, "errors=zone-offline"},
+ { Opt_errors_repair, "errors=repair"},
+ { Opt_err, NULL}
+};
+
+static int zonefs_parse_options(struct super_block *sb, char *options)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ substring_t args[MAX_OPT_ARGS];
+ char *p;
+
+ if (!options)
+ return 0;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_errors_ro:
+ sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+ sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_RO;
+ break;
+ case Opt_errors_zro:
+ sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+ sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZRO;
+ break;
+ case Opt_errors_zol:
+ sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+ sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZOL;
+ break;
+ case Opt_errors_repair:
+ sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+ sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_REPAIR;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
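These strings can be passed through the data argument of mount(2), or with -o using mount(8). A minimal sketch; the device and mount point are assumptions:

#include <sys/mount.h>

int main(void)
{
	/* Accepted values: errors=remount-ro (default), errors=zone-ro,
	 * errors=zone-offline, errors=repair.
	 */
	return mount("/dev/sdb", "/mnt", "zonefs", 0, "errors=zone-ro");
}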
+
+static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
+
+ if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
+ seq_puts(seq, ",errors=remount-ro");
+ if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
+ seq_puts(seq, ",errors=zone-ro");
+ if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
+ seq_puts(seq, ",errors=zone-offline");
+ if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
+ seq_puts(seq, ",errors=repair");
+
+ return 0;
+}
+
+static int zonefs_remount(struct super_block *sb, int *flags, char *data)
+{
+ sync_filesystem(sb);
+
+ return zonefs_parse_options(sb, data);
+}
+
+static const struct super_operations zonefs_sops = {
+ .alloc_inode = zonefs_alloc_inode,
+ .free_inode = zonefs_free_inode,
+ .statfs = zonefs_statfs,
+ .remount_fs = zonefs_remount,
+ .show_options = zonefs_show_options,
+};
+
+static const struct inode_operations zonefs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .setattr = zonefs_inode_setattr,
+};
+
+static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
+ enum zonefs_ztype type)
+{
+ struct super_block *sb = parent->i_sb;
+
+ inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk) + type + 1;
+ inode_init_owner(inode, parent, S_IFDIR | 0555);
+ inode->i_op = &zonefs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ set_nlink(inode, 2);
+ inc_nlink(parent);
+}
+
+static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
+ enum zonefs_ztype type)
+{
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+ inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
+ inode->i_mode = S_IFREG | sbi->s_perm;
+
+ zi->i_ztype = type;
+ zi->i_zsector = zone->start;
+ zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
+ zone->len << SECTOR_SHIFT);
+ zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true);
+
+ inode->i_uid = sbi->s_uid;
+ inode->i_gid = sbi->s_gid;
+ inode->i_size = zi->i_wpoffset;
+ inode->i_blocks = zone->len;
+
+ inode->i_op = &zonefs_file_inode_operations;
+ inode->i_fop = &zonefs_file_operations;
+ inode->i_mapping->a_ops = &zonefs_file_aops;
+
+ sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
+ sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
+ sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
+}
+
+static struct dentry *zonefs_create_inode(struct dentry *parent,
+ const char *name, struct blk_zone *zone,
+ enum zonefs_ztype type)
+{
+ struct inode *dir = d_inode(parent);
+ struct dentry *dentry;
+ struct inode *inode;
+
+ dentry = d_alloc_name(parent, name);
+ if (!dentry)
+ return NULL;
+
+ inode = new_inode(parent->d_sb);
+ if (!inode)
+ goto dput;
+
+ inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
+ if (zone)
+ zonefs_init_file_inode(inode, zone, type);
+ else
+ zonefs_init_dir_inode(dir, inode, type);
+ d_add(dentry, inode);
+ dir->i_size++;
+
+ return dentry;
+
+dput:
+ dput(dentry);
+
+ return NULL;
+}
+
+struct zonefs_zone_data {
+ struct super_block *sb;
+ unsigned int nr_zones[ZONEFS_ZTYPE_MAX];
+ struct blk_zone *zones;
+};
+
+/*
+ * Create a zone group and populate it with zone files.
+ */
+static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
+ enum zonefs_ztype type)
+{
+ struct super_block *sb = zd->sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct blk_zone *zone, *next, *end;
+ const char *zgroup_name;
+ char *file_name;
+ struct dentry *dir;
+ unsigned int n = 0;
+ int ret = -ENOMEM;
+
+ /* If the group is empty, there is nothing to do */
+ if (!zd->nr_zones[type])
+ return 0;
+
+ file_name = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
+ if (!file_name)
+ return -ENOMEM;
+
+ if (type == ZONEFS_ZTYPE_CNV)
+ zgroup_name = "cnv";
+ else
+ zgroup_name = "seq";
+
+ dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
+ if (!dir)
+ goto free;
+
+ /*
+ * The first zone contains the super block: skip it.
+ */
+ end = zd->zones + blkdev_nr_zones(sb->s_bdev->bd_disk);
+ for (zone = &zd->zones[1]; zone < end; zone = next) {
+
+ next = zone + 1;
+ if (zonefs_zone_type(zone) != type)
+ continue;
+
+ /*
+ * For conventional zones, contiguous zones can be aggregated
+ * together to form larger files. Note that this overwrites the
+ * length of the first zone of the set of contiguous zones
+ * aggregated together. If one offline or read-only zone is
+ * found, assume that all zones aggregated have the same
+ * condition.
+ */
+ if (type == ZONEFS_ZTYPE_CNV &&
+ (sbi->s_features & ZONEFS_F_AGGRCNV)) {
+ for (; next < end; next++) {
+ if (zonefs_zone_type(next) != type)
+ break;
+ zone->len += next->len;
+ if (next->cond == BLK_ZONE_COND_READONLY &&
+ zone->cond != BLK_ZONE_COND_OFFLINE)
+ zone->cond = BLK_ZONE_COND_READONLY;
+ else if (next->cond == BLK_ZONE_COND_OFFLINE)
+ zone->cond = BLK_ZONE_COND_OFFLINE;
+ }
+ }
+
+ /*
+ * Use the file number within its group as file name.
+ */
+ snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
+ if (!zonefs_create_inode(dir, file_name, zone, type))
+ goto free;
+
+ n++;
+ }
+
+ zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
+ zgroup_name, n, n > 1 ? "s" : "");
+
+ sbi->s_nr_files[type] = n;
+ ret = 0;
+
+free:
+ kfree(file_name);
+
+ return ret;
+}
+
+static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct zonefs_zone_data *zd = data;
+
+ /*
+ * Count the number of usable zones: the first zone at index 0 contains
+ * the super block and is ignored.
+ */
+ switch (zone->type) {
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ zone->wp = zone->start + zone->len;
+ if (idx)
+ zd->nr_zones[ZONEFS_ZTYPE_CNV]++;
+ break;
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
+ if (idx)
+ zd->nr_zones[ZONEFS_ZTYPE_SEQ]++;
+ break;
+ default:
+ zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
+ zone->type);
+ return -EIO;
+ }
+
+ memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));
+
+ return 0;
+}
+
+static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
+{
+ struct block_device *bdev = zd->sb->s_bdev;
+ int ret;
+
+ zd->zones = kvcalloc(blkdev_nr_zones(bdev->bd_disk),
+ sizeof(struct blk_zone), GFP_KERNEL);
+ if (!zd->zones)
+ return -ENOMEM;
+
+ /* Get zones information from the device */
+ ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
+ zonefs_get_zone_info_cb, zd);
+ if (ret < 0) {
+ zonefs_err(zd->sb, "Zone report failed %d\n", ret);
+ return ret;
+ }
+
+ if (ret != blkdev_nr_zones(bdev->bd_disk)) {
+ zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
+ ret, blkdev_nr_zones(bdev->bd_disk));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline void zonefs_cleanup_zone_info(struct zonefs_zone_data *zd)
+{
+ kvfree(zd->zones);
+}
+
+/*
+ * Read super block information from the device.
+ */
+static int zonefs_read_super(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct zonefs_super *super;
+ u32 crc, stored_crc;
+ struct page *page;
+ struct bio_vec bio_vec;
+ struct bio bio;
+ int ret;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ bio_init(&bio, &bio_vec, 1);
+ bio.bi_iter.bi_sector = 0;
+ bio.bi_opf = REQ_OP_READ;
+ bio_set_dev(&bio, sb->s_bdev);
+ bio_add_page(&bio, page, PAGE_SIZE, 0);
+
+ ret = submit_bio_wait(&bio);
+ if (ret)
+ goto free_page;
+
+ super = kmap(page);
+
+ ret = -EINVAL;
+ if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC)
+ goto unmap;
+
+ stored_crc = le32_to_cpu(super->s_crc);
+ super->s_crc = 0;
+ crc = crc32(~0U, (unsigned char *)super, sizeof(struct zonefs_super));
+ if (crc != stored_crc) {
+ zonefs_err(sb, "Invalid checksum (Expected 0x%08x, got 0x%08x)",
+ crc, stored_crc);
+ goto unmap;
+ }
+
+ sbi->s_features = le64_to_cpu(super->s_features);
+ if (sbi->s_features & ~ZONEFS_F_DEFINED_FEATURES) {
+ zonefs_err(sb, "Unknown features set 0x%llx\n",
+ sbi->s_features);
+ goto unmap;
+ }
+
+ if (sbi->s_features & ZONEFS_F_UID) {
+ sbi->s_uid = make_kuid(current_user_ns(),
+ le32_to_cpu(super->s_uid));
+ if (!uid_valid(sbi->s_uid)) {
+ zonefs_err(sb, "Invalid UID feature\n");
+ goto unmap;
+ }
+ }
+
+ if (sbi->s_features & ZONEFS_F_GID) {
+ sbi->s_gid = make_kgid(current_user_ns(),
+ le32_to_cpu(super->s_gid));
+ if (!gid_valid(sbi->s_gid)) {
+ zonefs_err(sb, "Invalid GID feature\n");
+ goto unmap;
+ }
+ }
+
+ if (sbi->s_features & ZONEFS_F_PERM)
+ sbi->s_perm = le32_to_cpu(super->s_perm);
+
+ if (memchr_inv(super->s_reserved, 0, sizeof(super->s_reserved))) {
+ zonefs_err(sb, "Reserved area is being used\n");
+ goto unmap;
+ }
+
+ uuid_copy(&sbi->s_uuid, (uuid_t *)super->s_uuid);
+ ret = 0;
+
+unmap:
+ kunmap(page);
+free_page:
+ __free_page(page);
+
+ return ret;
+}
+
+/*
+ * Check that the device is zoned. If it is, get the list of zones and create
+ * sub-directories and files according to the device zone configuration and
+ * format options.
+ */
+static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct zonefs_zone_data zd;
+ struct zonefs_sb_info *sbi;
+ struct inode *inode;
+ enum zonefs_ztype t;
+ int ret;
+
+ if (!bdev_is_zoned(sb->s_bdev)) {
+ zonefs_err(sb, "Not a zoned block device\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initialize super block information: the maximum file size is updated
+ * when the zone files are created so that the ZONEFS_F_AGGRCNV format
+ * option, which can increase a file's maximum size beyond the zone
+ * size, is taken into account.
+ */
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+
+ spin_lock_init(&sbi->s_lock);
+ sb->s_fs_info = sbi;
+ sb->s_magic = ZONEFS_MAGIC;
+ sb->s_maxbytes = 0;
+ sb->s_op = &zonefs_sops;
+ sb->s_time_gran = 1;
+
+ /*
+ * The block size is set to the device physical sector size to ensure
+ * that write operations on 512e devices (512B logical block and 4KB
+ * physical block) are always aligned to the device physical blocks,
+ * as mandated by the ZBC/ZAC specifications.
+ */
+ sb_set_blocksize(sb, bdev_physical_block_size(sb->s_bdev));
+ sbi->s_zone_sectors_shift = ilog2(bdev_zone_sectors(sb->s_bdev));
+ sbi->s_uid = GLOBAL_ROOT_UID;
+ sbi->s_gid = GLOBAL_ROOT_GID;
+ sbi->s_perm = 0640;
+ sbi->s_mount_opts = ZONEFS_MNTOPT_ERRORS_RO;
+
+ ret = zonefs_read_super(sb);
+ if (ret)
+ return ret;
+
+ ret = zonefs_parse_options(sb, data);
+ if (ret)
+ return ret;
+
+ memset(&zd, 0, sizeof(struct zonefs_zone_data));
+ zd.sb = sb;
+ ret = zonefs_get_zone_info(&zd);
+ if (ret)
+ goto cleanup;
+
+ zonefs_info(sb, "Mounting %u zones",
+ blkdev_nr_zones(sb->s_bdev->bd_disk));
+
+ /* Create root directory inode */
+ ret = -ENOMEM;
+ inode = new_inode(sb);
+ if (!inode)
+ goto cleanup;
+
+ inode->i_ino = blkdev_nr_zones(sb->s_bdev->bd_disk);
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_ctime = inode->i_mtime = inode->i_atime = current_time(inode);
+ inode->i_op = &zonefs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ set_nlink(inode, 2);
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ goto cleanup;
+
+ /* Create and populate files in zone groups directories */
+ for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
+ ret = zonefs_create_zgroup(&zd, t);
+ if (ret)
+ break;
+ }
+
+cleanup:
+ zonefs_cleanup_zone_info(&zd);
+
+ return ret;
+}
+
+static struct dentry *zonefs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, zonefs_fill_super);
+}
+
+static void zonefs_kill_super(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+
+ if (sb->s_root)
+ d_genocide(sb->s_root);
+ kill_block_super(sb);
+ kfree(sbi);
+}
+
+/*
+ * File system definition and registration.
+ */
+static struct file_system_type zonefs_type = {
+ .owner = THIS_MODULE,
+ .name = "zonefs",
+ .mount = zonefs_mount,
+ .kill_sb = zonefs_kill_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+
+static int __init zonefs_init_inodecache(void)
+{
+ zonefs_inode_cachep = kmem_cache_create("zonefs_inode_cache",
+ sizeof(struct zonefs_inode_info), 0,
+ (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
+ NULL);
+ if (zonefs_inode_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void zonefs_destroy_inodecache(void)
+{
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy the inode cache.
+ */
+ rcu_barrier();
+ kmem_cache_destroy(zonefs_inode_cachep);
+}
+
+static int __init zonefs_init(void)
+{
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct zonefs_super) != ZONEFS_SUPER_SIZE);
+
+ ret = zonefs_init_inodecache();
+ if (ret)
+ return ret;
+
+ ret = register_filesystem(&zonefs_type);
+ if (ret) {
+ zonefs_destroy_inodecache();
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit zonefs_exit(void)
+{
+ zonefs_destroy_inodecache();
+ unregister_filesystem(&zonefs_type);
+}
+
+MODULE_AUTHOR("Damien Le Moal");
+MODULE_DESCRIPTION("Zone file system for zoned block devices");
+MODULE_LICENSE("GPL");
+module_init(zonefs_init);
+module_exit(zonefs_exit);
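Since the super block occupies the first block of the device, its magic can be checked from user space with a single read. A hedged sketch; the device path is an assumption and 0x5a4f4653 ("ZOFS") is the ZONEFS_MAGIC value from linux/magic.h:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t magic = 0;
	int fd = open("/dev/sdb", O_RDONLY);	/* device is an assumption */

	if (fd < 0)
		return 1;
	/* s_magic is the little-endian 32-bit word at offset 0 of block 0;
	 * this check assumes a little-endian host.
	 */
	pread(fd, &magic, sizeof(magic), 0);
	printf("zonefs: %s\n", magic == 0x5a4f4653 ? "yes" : "no");
	close(fd);
	return 0;
}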
diff --git a/fs/zonefs/zonefs.h b/fs/zonefs/zonefs.h
new file mode 100644
index 000000000000..ad17fef7ce91
--- /dev/null
+++ b/fs/zonefs/zonefs.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Simple zone file system for zoned block devices.
+ *
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ */
+#ifndef __ZONEFS_H__
+#define __ZONEFS_H__
+
+#include <linux/fs.h>
+#include <linux/magic.h>
+#include <linux/uuid.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+
+/*
+ * Maximum length of file names: this only needs to be large enough to fit
+ * the zone group directory names and a decimal zone number for file names.
+ * 16 characters is plenty.
+ */
+#define ZONEFS_NAME_MAX 16
+
+/*
+ * Zone types: ZONEFS_ZTYPE_SEQ is used for all sequential zone types
+ * defined in linux/blkzoned.h, that is, BLK_ZONE_TYPE_SEQWRITE_REQ and
+ * BLK_ZONE_TYPE_SEQWRITE_PREF.
+ */
+enum zonefs_ztype {
+ ZONEFS_ZTYPE_CNV,
+ ZONEFS_ZTYPE_SEQ,
+ ZONEFS_ZTYPE_MAX,
+};
+
+static inline enum zonefs_ztype zonefs_zone_type(struct blk_zone *zone)
+{
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return ZONEFS_ZTYPE_CNV;
+ return ZONEFS_ZTYPE_SEQ;
+}
+
+/*
+ * In-memory inode data.
+ */
+struct zonefs_inode_info {
+ struct inode i_vnode;
+
+ /* File zone type */
+ enum zonefs_ztype i_ztype;
+
+ /* File zone start sector (512B unit) */
+ sector_t i_zsector;
+
+ /* File zone write pointer position (sequential zones only) */
+ loff_t i_wpoffset;
+
+ /* File maximum size */
+ loff_t i_max_size;
+
+ /*
+ * To serialize fully against both syscall and mmap based IO and
+ * sequential file truncation, two locks are used. For serializing
+ * zonefs_file_truncate() against zonefs_iomap_begin(), that is,
+ * file truncate operations against block mapping, i_truncate_mutex is
+ * used. i_truncate_mutex also protects against concurrent accesses
+ * and changes to the inode private data, and in particular changes to
+ * a sequential file size on completion of direct IO writes.
+ * Serialization of mmap read IOs with truncate and syscall IO
+ * operations is done with i_mmap_sem in addition to i_truncate_mutex.
+ * Only zonefs_file_truncate() takes both locks (i_mmap_sem first,
+ * i_truncate_mutex second).
+ */
+ struct mutex i_truncate_mutex;
+ struct rw_semaphore i_mmap_sem;
+};
+
+static inline struct zonefs_inode_info *ZONEFS_I(struct inode *inode)
+{
+ return container_of(inode, struct zonefs_inode_info, i_vnode);
+}
+
+/*
+ * On-disk super block (block 0).
+ */
+#define ZONEFS_LABEL_LEN 64
+#define ZONEFS_UUID_SIZE 16
+#define ZONEFS_SUPER_SIZE 4096
+
+struct zonefs_super {
+
+ /* Magic number */
+ __le32 s_magic;
+
+ /* Checksum */
+ __le32 s_crc;
+
+ /* Volume label */
+ char s_label[ZONEFS_LABEL_LEN];
+
+ /* 128-bit uuid */
+ __u8 s_uuid[ZONEFS_UUID_SIZE];
+
+ /* Features */
+ __le64 s_features;
+
+ /* UID/GID to use for files */
+ __le32 s_uid;
+ __le32 s_gid;
+
+ /* File permissions */
+ __le32 s_perm;
+
+ /* Padding to ZONEFS_SUPER_SIZE bytes */
+ __u8 s_reserved[3988];
+
+} __packed;
+
+/*
+ * Feature flags: specified in the s_features field of the on-disk super
+ * block struct zonefs_super and in-memory in the s_features field of
+ * struct zonefs_sb_info.
+ */
+enum zonefs_features {
+ /*
+ * Aggregate contiguous conventional zones into a single file.
+ */
+ ZONEFS_F_AGGRCNV = 1ULL << 0,
+ /*
+ * Use super block specified UID for files instead of default 0.
+ */
+ ZONEFS_F_UID = 1ULL << 1,
+ /*
+ * Use super block specified GID for files instead of default 0.
+ */
+ ZONEFS_F_GID = 1ULL << 2,
+ /*
+ * Use super block specified file permissions instead of default 640.
+ */
+ ZONEFS_F_PERM = 1ULL << 3,
+};
+
+#define ZONEFS_F_DEFINED_FEATURES \
+ (ZONEFS_F_AGGRCNV | ZONEFS_F_UID | ZONEFS_F_GID | ZONEFS_F_PERM)
+
+/*
+ * Mount options for zone write pointer error handling.
+ */
+#define ZONEFS_MNTOPT_ERRORS_RO (1 << 0) /* Remount read-only on error */
+#define ZONEFS_MNTOPT_ERRORS_ZRO (1 << 1) /* Make zone file read-only */
+#define ZONEFS_MNTOPT_ERRORS_ZOL (1 << 2) /* Make zone file offline */
+#define ZONEFS_MNTOPT_ERRORS_REPAIR (1 << 3) /* Only repair the file size */
+#define ZONEFS_MNTOPT_ERRORS_MASK \
+ (ZONEFS_MNTOPT_ERRORS_RO | ZONEFS_MNTOPT_ERRORS_ZRO | \
+ ZONEFS_MNTOPT_ERRORS_ZOL | ZONEFS_MNTOPT_ERRORS_REPAIR)
+
+/*
+ * In-memory Super block information.
+ */
+struct zonefs_sb_info {
+
+ unsigned long s_mount_opts;
+
+ spinlock_t s_lock;
+
+ unsigned long long s_features;
+ kuid_t s_uid;
+ kgid_t s_gid;
+ umode_t s_perm;
+ uuid_t s_uuid;
+ unsigned int s_zone_sectors_shift;
+
+ unsigned int s_nr_files[ZONEFS_ZTYPE_MAX];
+
+ loff_t s_blocks;
+ loff_t s_used_blocks;
+};
+
+static inline struct zonefs_sb_info *ZONEFS_SB(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+#define zonefs_info(sb, format, args...) \
+ pr_info("zonefs (%s): " format, sb->s_id, ## args)
+#define zonefs_err(sb, format, args...) \
+ pr_err("zonefs (%s) ERROR: " format, sb->s_id, ## args)
+#define zonefs_warn(sb, format, args...) \
+ pr_warn("zonefs (%s) WARNING: " format, sb->s_id, ## args)
+
+#endif
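A mounted volume reports these counters through statfs(2), with f_type set to ZONEFS_MAGIC by zonefs_statfs() in super.c above. A minimal sketch; the mount point is an assumption:

#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs st;

	if (statfs("/mnt", &st))
		return 1;
	printf("type=0x%lx blocks=%lu free=%lu\n",
	       (unsigned long)st.f_type, (unsigned long)st.f_blocks,
	       (unsigned long)st.f_bfree);
	return 0;
}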