Diffstat (limited to 'fs')
-rw-r--r-- fs/9p/v9fs.c | 2
-rw-r--r-- fs/9p/vfs_file.c | 2
-rw-r--r-- fs/9p/vfs_inode.c | 7
-rw-r--r-- fs/9p/vfs_inode_dotl.c | 7
-rw-r--r-- fs/9p/xattr.c | 6
-rw-r--r-- fs/Kconfig.binfmt | 5
-rw-r--r-- fs/adfs/inode.c | 13
-rw-r--r-- fs/adfs/super.c | 1
-rw-r--r-- fs/afs/dir.c | 45
-rw-r--r-- fs/afs/dynroot.c | 25
-rw-r--r-- fs/afs/internal.h | 3
-rw-r--r-- fs/afs/rxrpc.c | 30
-rw-r--r-- fs/afs/write.c | 2
-rw-r--r-- fs/aio.c | 232
-rw-r--r-- fs/anon_inodes.c | 30
-rw-r--r-- fs/attr.c | 5
-rw-r--r-- fs/autofs/autofs_i.h | 13
-rw-r--r-- fs/autofs/expire.c | 143
-rw-r--r-- fs/autofs/inode.c | 1
-rw-r--r-- fs/autofs/root.c | 33
-rw-r--r-- fs/bad_inode.c | 2
-rw-r--r-- fs/binfmt_elf.c | 2
-rw-r--r-- fs/binfmt_misc.c | 2
-rw-r--r-- fs/block_dev.c | 6
-rw-r--r-- fs/btrfs/acl.c | 13
-rw-r--r-- fs/btrfs/backref.c | 6
-rw-r--r-- fs/btrfs/btrfs_inode.h | 2
-rw-r--r-- fs/btrfs/check-integrity.c | 9
-rw-r--r-- fs/btrfs/compression.c | 18
-rw-r--r-- fs/btrfs/ctree.c | 53
-rw-r--r-- fs/btrfs/ctree.h | 94
-rw-r--r-- fs/btrfs/delayed-inode.c | 14
-rw-r--r-- fs/btrfs/delayed-inode.h | 2
-rw-r--r-- fs/btrfs/delayed-ref.c | 43
-rw-r--r-- fs/btrfs/delayed-ref.h | 6
-rw-r--r-- fs/btrfs/dev-replace.c | 29
-rw-r--r-- fs/btrfs/dir-item.c | 4
-rw-r--r-- fs/btrfs/disk-io.c | 113
-rw-r--r-- fs/btrfs/disk-io.h | 5
-rw-r--r-- fs/btrfs/extent-tree.c | 883
-rw-r--r-- fs/btrfs/extent_io.c | 158
-rw-r--r-- fs/btrfs/extent_io.h | 16
-rw-r--r-- fs/btrfs/file-item.c | 4
-rw-r--r-- fs/btrfs/file.c | 128
-rw-r--r-- fs/btrfs/free-space-cache.c | 19
-rw-r--r-- fs/btrfs/free-space-tree.c | 2
-rw-r--r-- fs/btrfs/inode-map.c | 12
-rw-r--r-- fs/btrfs/inode.c | 267
-rw-r--r-- fs/btrfs/ioctl.c | 80
-rw-r--r-- fs/btrfs/ordered-data.c | 138
-rw-r--r-- fs/btrfs/ordered-data.h | 23
-rw-r--r-- fs/btrfs/print-tree.c | 39
-rw-r--r-- fs/btrfs/qgroup.c | 270
-rw-r--r-- fs/btrfs/qgroup.h | 46
-rw-r--r-- fs/btrfs/raid56.c | 109
-rw-r--r-- fs/btrfs/reada.c | 3
-rw-r--r-- fs/btrfs/relocation.c | 216
-rw-r--r-- fs/btrfs/root-tree.c | 22
-rw-r--r-- fs/btrfs/scrub.c | 679
-rw-r--r-- fs/btrfs/send.c | 172
-rw-r--r-- fs/btrfs/struct-funcs.c | 1
-rw-r--r-- fs/btrfs/super.c | 115
-rw-r--r-- fs/btrfs/sysfs.c | 2
-rw-r--r-- fs/btrfs/tests/qgroup-tests.c | 24
-rw-r--r-- fs/btrfs/transaction.c | 11
-rw-r--r-- fs/btrfs/transaction.h | 2
-rw-r--r-- fs/btrfs/tree-checker.c | 115
-rw-r--r-- fs/btrfs/tree-log.c | 270
-rw-r--r-- fs/btrfs/volumes.c | 611
-rw-r--r-- fs/btrfs/volumes.h | 31
-rw-r--r-- fs/buffer.c | 88
-rw-r--r-- fs/ceph/acl.c | 30
-rw-r--r-- fs/ceph/addr.c | 74
-rw-r--r-- fs/ceph/cache.c | 11
-rw-r--r-- fs/ceph/caps.c | 138
-rw-r--r-- fs/ceph/dir.c | 20
-rw-r--r-- fs/ceph/file.c | 41
-rw-r--r-- fs/ceph/inode.c | 83
-rw-r--r-- fs/ceph/mds_client.c | 98
-rw-r--r-- fs/ceph/mds_client.h | 14
-rw-r--r-- fs/ceph/quota.c | 2
-rw-r--r-- fs/ceph/snap.c | 6
-rw-r--r-- fs/ceph/super.c | 6
-rw-r--r-- fs/ceph/super.h | 15
-rw-r--r-- fs/ceph/xattr.c | 4
-rw-r--r-- fs/cifs/Kconfig | 59
-rw-r--r-- fs/cifs/cache.c | 6
-rw-r--r-- fs/cifs/cifs_debug.c | 66
-rw-r--r-- fs/cifs/cifsencrypt.c | 12
-rw-r--r-- fs/cifs/cifsfs.c | 33
-rw-r--r-- fs/cifs/cifsfs.h | 5
-rw-r--r-- fs/cifs/cifsglob.h | 54
-rw-r--r-- fs/cifs/cifsproto.h | 10
-rw-r--r-- fs/cifs/cifssmb.c | 12
-rw-r--r-- fs/cifs/connect.c | 111
-rw-r--r-- fs/cifs/dir.c | 7
-rw-r--r-- fs/cifs/fscache.c | 12
-rw-r--r-- fs/cifs/fscache.h | 8
-rw-r--r-- fs/cifs/inode.c | 38
-rw-r--r-- fs/cifs/link.c | 4
-rw-r--r-- fs/cifs/misc.c | 2
-rw-r--r-- fs/cifs/netmisc.c | 19
-rw-r--r-- fs/cifs/sess.c | 6
-rw-r--r-- fs/cifs/smb1ops.c | 4
-rw-r--r-- fs/cifs/smb2inode.c | 6
-rw-r--r-- fs/cifs/smb2misc.c | 20
-rw-r--r-- fs/cifs/smb2ops.c | 540
-rw-r--r-- fs/cifs/smb2pdu.c | 557
-rw-r--r-- fs/cifs/smb2pdu.h | 36
-rw-r--r-- fs/cifs/smb2proto.h | 24
-rw-r--r-- fs/cifs/smb2transport.c | 9
-rw-r--r-- fs/cifs/smbdirect.c | 32
-rw-r--r-- fs/cifs/trace.h | 64
-rw-r--r-- fs/cifs/transport.c | 211
-rw-r--r-- fs/cifs/xattr.c | 28
-rw-r--r-- fs/compat_ioctl.c | 40
-rw-r--r-- fs/configfs/dir.c | 11
-rw-r--r-- fs/configfs/item.c | 24
-rw-r--r-- fs/configfs/symlink.c | 2
-rw-r--r-- fs/dax.c | 148
-rw-r--r-- fs/dcache.c | 71
-rw-r--r-- fs/efivarfs/inode.c | 4
-rw-r--r-- fs/eventpoll.c | 118
-rw-r--r-- fs/exec.c | 1
-rw-r--r-- fs/exofs/ore.c | 4
-rw-r--r-- fs/ext2/file.c | 1
-rw-r--r-- fs/ext2/ialloc.c | 3
-rw-r--r-- fs/ext2/inode.c | 2
-rw-r--r-- fs/ext2/namei.c | 9
-rw-r--r-- fs/ext2/super.c | 7
-rw-r--r-- fs/ext4/balloc.c | 6
-rw-r--r-- fs/ext4/ext4.h | 34
-rw-r--r-- fs/ext4/extents.c | 17
-rw-r--r-- fs/ext4/file.c | 2
-rw-r--r-- fs/ext4/ialloc.c | 8
-rw-r--r-- fs/ext4/inode.c | 70
-rw-r--r-- fs/ext4/mballoc.c | 7
-rw-r--r-- fs/ext4/mmp.c | 6
-rw-r--r-- fs/ext4/move_extent.c | 4
-rw-r--r-- fs/ext4/namei.c | 1
-rw-r--r-- fs/ext4/readpage.c | 5
-rw-r--r-- fs/ext4/super.c | 75
-rw-r--r-- fs/ext4/sysfs.c | 38
-rw-r--r-- fs/ext4/truncate.h | 4
-rw-r--r-- fs/ext4/xattr.c | 2
-rw-r--r-- fs/f2fs/checkpoint.c | 159
-rw-r--r-- fs/f2fs/data.c | 185
-rw-r--r-- fs/f2fs/debug.c | 3
-rw-r--r-- fs/f2fs/dir.c | 3
-rw-r--r-- fs/f2fs/f2fs.h | 193
-rw-r--r-- fs/f2fs/file.c | 216
-rw-r--r-- fs/f2fs/gc.c | 163
-rw-r--r-- fs/f2fs/inline.c | 36
-rw-r--r-- fs/f2fs/inode.c | 154
-rw-r--r-- fs/f2fs/namei.c | 6
-rw-r--r-- fs/f2fs/node.c | 380
-rw-r--r-- fs/f2fs/node.h | 9
-rw-r--r-- fs/f2fs/recovery.c | 31
-rw-r--r-- fs/f2fs/segment.c | 399
-rw-r--r-- fs/f2fs/segment.h | 16
-rw-r--r-- fs/f2fs/super.c | 165
-rw-r--r-- fs/f2fs/sysfs.c | 44
-rw-r--r-- fs/f2fs/xattr.c | 18
-rw-r--r-- fs/fat/cache.c | 19
-rw-r--r-- fs/fat/dir.c | 2
-rw-r--r-- fs/fat/fat.h | 12
-rw-r--r-- fs/fat/fatent.c | 108
-rw-r--r-- fs/fat/file.c | 33
-rw-r--r-- fs/fat/inode.c | 20
-rw-r--r-- fs/fat/misc.c | 13
-rw-r--r-- fs/fat/namei_msdos.c | 17
-rw-r--r-- fs/fat/namei_vfat.c | 20
-rw-r--r-- fs/fcntl.c | 74
-rw-r--r-- fs/file_table.c | 146
-rw-r--r-- fs/fuse/dev.c | 69
-rw-r--r-- fs/fuse/dir.c | 35
-rw-r--r-- fs/fuse/file.c | 4
-rw-r--r-- fs/fuse/fuse_i.h | 5
-rw-r--r-- fs/fuse/inode.c | 45
-rw-r--r-- fs/gfs2/acl.c | 6
-rw-r--r-- fs/gfs2/aops.c | 337
-rw-r--r-- fs/gfs2/aops.h | 19
-rw-r--r-- fs/gfs2/bmap.c | 414
-rw-r--r-- fs/gfs2/dir.c | 4
-rw-r--r-- fs/gfs2/file.c | 161
-rw-r--r-- fs/gfs2/incore.h | 23
-rw-r--r-- fs/gfs2/inode.c | 32
-rw-r--r-- fs/gfs2/lock_dlm.c | 20
-rw-r--r-- fs/gfs2/log.c | 28
-rw-r--r-- fs/gfs2/lops.c | 2
-rw-r--r-- fs/gfs2/meta_io.c | 4
-rw-r--r-- fs/gfs2/recovery.c | 7
-rw-r--r-- fs/gfs2/rgrp.c | 169
-rw-r--r-- fs/gfs2/super.c | 1
-rw-r--r-- fs/gfs2/sys.c | 11
-rw-r--r-- fs/gfs2/trace_gfs2.h | 3
-rw-r--r-- fs/gfs2/trans.h | 6
-rw-r--r-- fs/gfs2/util.c | 38
-rw-r--r-- fs/gfs2/util.h | 10
-rw-r--r-- fs/gfs2/xattr.c | 59
-rw-r--r-- fs/hfs/brec.c | 7
-rw-r--r-- fs/hfs/inode.c | 2
-rw-r--r-- fs/hfsplus/Kconfig | 15
-rw-r--r-- fs/hfsplus/Makefile | 2
-rw-r--r-- fs/hfsplus/acl.h | 28
-rw-r--r-- fs/hfsplus/brec.c | 7
-rw-r--r-- fs/hfsplus/dir.c | 13
-rw-r--r-- fs/hfsplus/extents.c | 18
-rw-r--r-- fs/hfsplus/hfsplus_fs.h | 1
-rw-r--r-- fs/hfsplus/inode.c | 11
-rw-r--r-- fs/hfsplus/posix_acl.c | 144
-rw-r--r-- fs/hfsplus/super.c | 8
-rw-r--r-- fs/hfsplus/unicode.c | 62
-rw-r--r-- fs/hfsplus/xattr.c | 6
-rw-r--r-- fs/hfsplus/xattr.h | 3
-rw-r--r-- fs/hfsplus/xattr_security.c | 13
-rw-r--r-- fs/hostfs/hostfs.h | 2
-rw-r--r-- fs/hostfs/hostfs_kern.c | 28
-rw-r--r-- fs/hpfs/dir.c | 23
-rw-r--r-- fs/hpfs/hpfs_fn.h | 13
-rw-r--r-- fs/hpfs/namei.c | 14
-rw-r--r-- fs/hugetlbfs/inode.c | 56
-rw-r--r-- fs/inode.c | 99
-rw-r--r-- fs/internal.h | 18
-rw-r--r-- fs/ioctl.c | 1
-rw-r--r-- fs/iomap.c | 780
-rw-r--r-- fs/jbd2/commit.c | 3
-rw-r--r-- fs/jffs2/dir.c | 32
-rw-r--r-- fs/jffs2/file.c | 6
-rw-r--r-- fs/jffs2/fs.c | 12
-rw-r--r-- fs/jffs2/os-linux.h | 13
-rw-r--r-- fs/jfs/jfs_imap.c | 8
-rw-r--r-- fs/jfs/jfs_incore.h | 2
-rw-r--r-- fs/jfs/jfs_inode.c | 10
-rw-r--r-- fs/jfs/namei.c | 12
-rw-r--r-- fs/jfs/super.c | 2
-rw-r--r-- fs/kernfs/dir.c | 29
-rw-r--r-- fs/kernfs/file.c | 8
-rw-r--r-- fs/kernfs/inode.c | 2
-rw-r--r-- fs/kernfs/kernfs-internal.h | 2
-rw-r--r-- fs/kernfs/symlink.c | 13
-rw-r--r-- fs/lockd/clntlock.c | 2
-rw-r--r-- fs/lockd/clntproc.c | 2
-rw-r--r-- fs/lockd/svclock.c | 16
-rw-r--r-- fs/lockd/svcsubs.c | 4
-rw-r--r-- fs/locks.c | 42
-rw-r--r-- fs/mpage.c | 122
-rw-r--r-- fs/namei.c | 316
-rw-r--r-- fs/namespace.c | 69
-rw-r--r-- fs/nfs/blocklayout/blocklayout.c | 1
-rw-r--r-- fs/nfs/blocklayout/dev.c | 2
-rw-r--r-- fs/nfs/callback.h | 12
-rw-r--r-- fs/nfs/callback_proc.c | 97
-rw-r--r-- fs/nfs/callback_xdr.c | 91
-rw-r--r-- fs/nfs/client.c | 1
-rw-r--r-- fs/nfs/dir.c | 55
-rw-r--r-- fs/nfs/direct.c | 2
-rw-r--r-- fs/nfs/file.c | 4
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayout.c | 31
-rw-r--r-- fs/nfs/nfs3acl.c | 2
-rw-r--r-- fs/nfs/nfs42proc.c | 209
-rw-r--r-- fs/nfs/nfs42xdr.c | 98
-rw-r--r-- fs/nfs/nfs4_fs.h | 10
-rw-r--r-- fs/nfs/nfs4client.c | 21
-rw-r--r-- fs/nfs/nfs4file.c | 10
-rw-r--r-- fs/nfs/nfs4idmap.c | 4
-rw-r--r-- fs/nfs/nfs4proc.c | 158
-rw-r--r-- fs/nfs/nfs4state.c | 40
-rw-r--r-- fs/nfs/nfs4xdr.c | 1
-rw-r--r-- fs/nfs/pagelist.c | 1
-rw-r--r-- fs/nfs/pnfs.c | 123
-rw-r--r-- fs/nfs/pnfs.h | 7
-rw-r--r-- fs/nfs/pnfs_nfs.c | 16
-rw-r--r-- fs/nfs/super.c | 4
-rw-r--r-- fs/nfs/write.c | 2
-rw-r--r-- fs/nfsd/netns.h | 1
-rw-r--r-- fs/nfsd/nfs3proc.c | 5
-rw-r--r-- fs/nfsd/nfs4callback.c | 30
-rw-r--r-- fs/nfsd/nfs4layouts.c | 11
-rw-r--r-- fs/nfsd/nfs4proc.c | 41
-rw-r--r-- fs/nfsd/nfs4state.c | 70
-rw-r--r-- fs/nfsd/nfs4xdr.c | 43
-rw-r--r-- fs/nfsd/nfsctl.c | 7
-rw-r--r-- fs/nfsd/nfsd.h | 1
-rw-r--r-- fs/nfsd/nfsfh.c | 6
-rw-r--r-- fs/nfsd/nfsproc.c | 5
-rw-r--r-- fs/nfsd/state.h | 2
-rw-r--r-- fs/nfsd/vfs.c | 2
-rw-r--r-- fs/nilfs2/file.c | 2
-rw-r--r-- fs/nilfs2/super.c | 2
-rw-r--r-- fs/notify/dnotify/dnotify.c | 8
-rw-r--r-- fs/notify/fanotify/fanotify.c | 15
-rw-r--r-- fs/notify/fanotify/fanotify_user.c | 104
-rw-r--r-- fs/notify/fdinfo.c | 8
-rw-r--r-- fs/notify/fsnotify.h | 16
-rw-r--r-- fs/notify/group.c | 3
-rw-r--r-- fs/notify/inotify/inotify_fsnotify.c | 7
-rw-r--r-- fs/notify/inotify/inotify_user.c | 14
-rw-r--r-- fs/notify/mark.c | 103
-rw-r--r-- fs/ntfs/aops.c | 9
-rw-r--r-- fs/ntfs/compress.c | 28
-rw-r--r-- fs/ntfs/inode.c | 12
-rw-r--r-- fs/ntfs/mft.c | 12
-rw-r--r-- fs/ntfs/time.h | 27
-rw-r--r-- fs/ocfs2/alloc.c | 60
-rw-r--r-- fs/ocfs2/cluster/heartbeat.c | 12
-rw-r--r-- fs/ocfs2/cluster/nodemanager.c | 6
-rw-r--r-- fs/ocfs2/cluster/tcp.c | 2
-rw-r--r-- fs/ocfs2/dlmglue.c | 2
-rw-r--r-- fs/ocfs2/file.c | 17
-rw-r--r-- fs/ocfs2/inode.c | 5
-rw-r--r-- fs/ocfs2/localalloc.c | 9
-rw-r--r-- fs/ocfs2/quota_local.c | 15
-rw-r--r-- fs/open.c | 130
-rw-r--r-- fs/orangefs/file.c | 19
-rw-r--r-- fs/orangefs/inode.c | 3
-rw-r--r-- fs/overlayfs/Kconfig | 19
-rw-r--r-- fs/overlayfs/Makefile | 4
-rw-r--r-- fs/overlayfs/copy_up.c | 190
-rw-r--r-- fs/overlayfs/dir.c | 109
-rw-r--r-- fs/overlayfs/export.c | 3
-rw-r--r-- fs/overlayfs/file.c | 511
-rw-r--r-- fs/overlayfs/inode.c | 175
-rw-r--r-- fs/overlayfs/namei.c | 195
-rw-r--r-- fs/overlayfs/overlayfs.h | 47
-rw-r--r-- fs/overlayfs/ovl_entry.h | 6
-rw-r--r-- fs/overlayfs/readdir.c | 19
-rw-r--r-- fs/overlayfs/super.c | 103
-rw-r--r-- fs/overlayfs/util.c | 252
-rw-r--r-- fs/pipe.c | 43
-rw-r--r-- fs/proc/Kconfig | 1
-rw-r--r-- fs/proc/base.c | 50
-rw-r--r-- fs/proc/generic.c | 4
-rw-r--r-- fs/proc/inode.c | 6
-rw-r--r-- fs/proc/internal.h | 25
-rw-r--r-- fs/proc/kcore.c | 535
-rw-r--r-- fs/proc/meminfo.c | 2
-rw-r--r-- fs/proc/stat.c | 2
-rw-r--r-- fs/proc/task_mmu.c | 319
-rw-r--r-- fs/proc/task_nommu.c | 39
-rw-r--r-- fs/proc/uptime.c | 4
-rw-r--r-- fs/proc/vmcore.c | 4
-rw-r--r-- fs/pstore/Kconfig | 17
-rw-r--r-- fs/pstore/platform.c | 16
-rw-r--r-- fs/read_write.c | 94
-rw-r--r-- fs/reiserfs/item_ops.c | 16
-rw-r--r-- fs/reiserfs/journal.c | 22
-rw-r--r-- fs/reiserfs/procfs.c | 11
-rw-r--r-- fs/reiserfs/reiserfs.h | 4
-rw-r--r-- fs/reiserfs/xattr.c | 4
-rw-r--r-- fs/seq_file.c | 54
-rw-r--r-- fs/statfs.c | 14
-rw-r--r-- fs/super.c | 11
-rw-r--r-- fs/sysfs/dir.c | 7
-rw-r--r-- fs/sysfs/file.c | 77
-rw-r--r-- fs/sysfs/group.c | 47
-rw-r--r-- fs/sysfs/sysfs.h | 5
-rw-r--r-- fs/sysv/inode.c | 6
-rw-r--r-- fs/timerfd.c | 14
-rw-r--r-- fs/tracefs/inode.c | 5
-rw-r--r-- fs/ubifs/Kconfig | 15
-rw-r--r-- fs/ubifs/Makefile | 3
-rw-r--r-- fs/ubifs/budget.c | 68
-rw-r--r-- fs/ubifs/commit.c | 8
-rw-r--r-- fs/ubifs/crypto.c | 4
-rw-r--r-- fs/ubifs/debug.c | 42
-rw-r--r-- fs/ubifs/debug.h | 14
-rw-r--r-- fs/ubifs/dir.c | 42
-rw-r--r-- fs/ubifs/file.c | 62
-rw-r--r-- fs/ubifs/find.c | 59
-rw-r--r-- fs/ubifs/gc.c | 66
-rw-r--r-- fs/ubifs/io.c | 97
-rw-r--r-- fs/ubifs/journal.c | 72
-rw-r--r-- fs/ubifs/key.h | 14
-rw-r--r-- fs/ubifs/log.c | 12
-rw-r--r-- fs/ubifs/lprops.c | 94
-rw-r--r-- fs/ubifs/lpt.c | 112
-rw-r--r-- fs/ubifs/lpt_commit.c | 50
-rw-r--r-- fs/ubifs/master.c | 2
-rw-r--r-- fs/ubifs/misc.c | 11
-rw-r--r-- fs/ubifs/misc.h | 16
-rw-r--r-- fs/ubifs/orphan.c | 26
-rw-r--r-- fs/ubifs/recovery.c | 14
-rw-r--r-- fs/ubifs/replay.c | 13
-rw-r--r-- fs/ubifs/sb.c | 18
-rw-r--r-- fs/ubifs/scan.c | 2
-rw-r--r-- fs/ubifs/shrinker.c | 12
-rw-r--r-- fs/ubifs/super.c | 76
-rw-r--r-- fs/ubifs/tnc.c | 111
-rw-r--r-- fs/ubifs/tnc_commit.c | 28
-rw-r--r-- fs/ubifs/tnc_misc.c | 40
-rw-r--r-- fs/ubifs/ubifs.h | 39
-rw-r--r-- fs/ubifs/xattr.c | 40
-rw-r--r-- fs/udf/ialloc.c | 2
-rw-r--r-- fs/udf/inode.c | 45
-rw-r--r-- fs/udf/namei.c | 12
-rw-r--r-- fs/udf/super.c | 8
-rw-r--r-- fs/udf/udf_i.h | 2
-rw-r--r-- fs/udf/udf_sb.h | 2
-rw-r--r-- fs/udf/udfdecl.h | 4
-rw-r--r-- fs/udf/udftime.c | 6
-rw-r--r-- fs/ufs/balloc.c | 4
-rw-r--r-- fs/ufs/ialloc.c | 5
-rw-r--r-- fs/ufs/namei.c | 9
-rw-r--r-- fs/ufs/super.c | 4
-rw-r--r-- fs/ufs/util.h | 14
-rw-r--r-- fs/userfaultfd.c | 15
-rw-r--r-- fs/xattr.c | 11
-rw-r--r-- fs/xfs/Makefile | 1
-rw-r--r-- fs/xfs/libxfs/xfs_ag_resv.c | 13
-rw-r--r-- fs/xfs/libxfs/xfs_ag_resv.h | 4
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.c | 41
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.h | 1
-rw-r--r-- fs/xfs/libxfs/xfs_attr.c | 120
-rw-r--r-- fs/xfs/libxfs/xfs_attr_leaf.c | 70
-rw-r--r-- fs/xfs/libxfs/xfs_attr_remote.c | 26
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.c | 444
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.h | 48
-rw-r--r-- fs/xfs/libxfs/xfs_bmap_btree.c | 22
-rw-r--r-- fs/xfs/libxfs/xfs_btree.h | 4
-rw-r--r-- fs/xfs/libxfs/xfs_da_btree.c | 37
-rw-r--r-- fs/xfs/libxfs/xfs_da_btree.h | 3
-rw-r--r-- fs/xfs/libxfs/xfs_defer.c | 323
-rw-r--r-- fs/xfs/libxfs/xfs_defer.h | 41
-rw-r--r-- fs/xfs/libxfs/xfs_dir2.c | 74
-rw-r--r-- fs/xfs/libxfs/xfs_dir2.h | 10
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_node.c | 17
-rw-r--r-- fs/xfs/libxfs/xfs_errortag.h | 4
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc.c | 33
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc.h | 1
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc_btree.c | 12
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc_btree.h | 4
-rw-r--r-- fs/xfs/libxfs/xfs_iext_tree.c | 20
-rw-r--r-- fs/xfs/libxfs/xfs_inode_fork.c | 74
-rw-r--r-- fs/xfs/libxfs/xfs_inode_fork.h | 6
-rw-r--r-- fs/xfs/libxfs/xfs_log_format.h | 13
-rw-r--r-- fs/xfs/libxfs/xfs_refcount.c | 107
-rw-r--r-- fs/xfs/libxfs/xfs_refcount.h | 25
-rw-r--r-- fs/xfs/libxfs/xfs_refcount_btree.c | 13
-rw-r--r-- fs/xfs/libxfs/xfs_refcount_btree.h | 7
-rw-r--r-- fs/xfs/libxfs/xfs_rmap.c | 99
-rw-r--r-- fs/xfs/libxfs/xfs_rmap.h | 22
-rw-r--r-- fs/xfs/libxfs/xfs_rmap_btree.c | 5
-rw-r--r-- fs/xfs/libxfs/xfs_rmap_btree.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_sb.c | 278
-rw-r--r-- fs/xfs/libxfs/xfs_shared.h | 12
-rw-r--r-- fs/xfs/libxfs/xfs_types.c | 34
-rw-r--r-- fs/xfs/libxfs/xfs_types.h | 1
-rw-r--r-- fs/xfs/scrub/agheader.c | 518
-rw-r--r-- fs/xfs/scrub/agheader_repair.c | 889
-rw-r--r-- fs/xfs/scrub/alloc.c | 116
-rw-r--r-- fs/xfs/scrub/attr.c | 130
-rw-r--r-- fs/xfs/scrub/bitmap.c | 303
-rw-r--r-- fs/xfs/scrub/bitmap.h | 36
-rw-r--r-- fs/xfs/scrub/bmap.c | 331
-rw-r--r-- fs/xfs/scrub/btree.c | 334
-rw-r--r-- fs/xfs/scrub/btree.h | 45
-rw-r--r-- fs/xfs/scrub/common.c | 394
-rw-r--r-- fs/xfs/scrub/common.h | 115
-rw-r--r-- fs/xfs/scrub/dabtree.c | 170
-rw-r--r-- fs/xfs/scrub/dabtree.h | 33
-rw-r--r-- fs/xfs/scrub/dir.c | 264
-rw-r--r-- fs/xfs/scrub/ialloc.c | 214
-rw-r--r-- fs/xfs/scrub/inode.c | 282
-rw-r--r-- fs/xfs/scrub/parent.c | 132
-rw-r--r-- fs/xfs/scrub/quota.c | 140
-rw-r--r-- fs/xfs/scrub/refcount.c | 194
-rw-r--r-- fs/xfs/scrub/repair.c | 565
-rw-r--r-- fs/xfs/scrub/repair.h | 95
-rw-r--r-- fs/xfs/scrub/rmap.c | 172
-rw-r--r-- fs/xfs/scrub/rtbitmap.c | 78
-rw-r--r-- fs/xfs/scrub/scrub.c | 210
-rw-r--r-- fs/xfs/scrub/scrub.h | 134
-rw-r--r-- fs/xfs/scrub/symlink.c | 28
-rw-r--r-- fs/xfs/scrub/trace.c | 6
-rw-r--r-- fs/xfs/scrub/trace.h | 141
-rw-r--r-- fs/xfs/xfs.h | 1
-rw-r--r-- fs/xfs/xfs_aops.c | 1007
-rw-r--r-- fs/xfs/xfs_aops.h | 4
-rw-r--r-- fs/xfs/xfs_attr_inactive.c | 1
-rw-r--r-- fs/xfs/xfs_attr_list.c | 2
-rw-r--r-- fs/xfs/xfs_bmap_item.c | 25
-rw-r--r-- fs/xfs/xfs_bmap_item.h | 3
-rw-r--r-- fs/xfs/xfs_bmap_util.c | 118
-rw-r--r-- fs/xfs/xfs_buf.c | 212
-rw-r--r-- fs/xfs/xfs_buf.h | 15
-rw-r--r-- fs/xfs/xfs_discard.c | 2
-rw-r--r-- fs/xfs/xfs_dquot.c | 49
-rw-r--r-- fs/xfs/xfs_error.c | 3
-rw-r--r-- fs/xfs/xfs_export.c | 2
-rw-r--r-- fs/xfs/xfs_file.c | 43
-rw-r--r-- fs/xfs/xfs_filestream.c | 8
-rw-r--r-- fs/xfs/xfs_fsmap.c | 4
-rw-r--r-- fs/xfs/xfs_fsops.c | 2
-rw-r--r-- fs/xfs/xfs_icache.c | 16
-rw-r--r-- fs/xfs/xfs_inode.c | 177
-rw-r--r-- fs/xfs/xfs_inode.h | 32
-rw-r--r-- fs/xfs/xfs_inode_item.c | 4
-rw-r--r-- fs/xfs/xfs_iomap.c | 56
-rw-r--r-- fs/xfs/xfs_iomap.h | 2
-rw-r--r-- fs/xfs/xfs_iops.c | 7
-rw-r--r-- fs/xfs/xfs_itable.c | 8
-rw-r--r-- fs/xfs/xfs_log.c | 156
-rw-r--r-- fs/xfs/xfs_log.h | 1
-rw-r--r-- fs/xfs/xfs_log_recover.c | 68
-rw-r--r-- fs/xfs/xfs_mount.c | 101
-rw-r--r-- fs/xfs/xfs_mount.h | 3
-rw-r--r-- fs/xfs/xfs_qm.c | 22
-rw-r--r-- fs/xfs/xfs_qm_syscalls.c | 9
-rw-r--r-- fs/xfs/xfs_quotaops.c | 2
-rw-r--r-- fs/xfs/xfs_refcount_item.c | 28
-rw-r--r-- fs/xfs/xfs_refcount_item.h | 3
-rw-r--r-- fs/xfs/xfs_reflink.c | 173
-rw-r--r-- fs/xfs/xfs_reflink.h | 4
-rw-r--r-- fs/xfs/xfs_rtalloc.c | 20
-rw-r--r-- fs/xfs/xfs_super.c | 38
-rw-r--r-- fs/xfs/xfs_symlink.c | 51
-rw-r--r-- fs/xfs/xfs_trace.h | 78
-rw-r--r-- fs/xfs/xfs_trans.c | 26
-rw-r--r-- fs/xfs/xfs_trans.h | 21
-rw-r--r-- fs/xfs/xfs_trans_bmap.c | 6
-rw-r--r-- fs/xfs/xfs_trans_extfree.c | 2
-rw-r--r-- fs/xfs/xfs_trans_refcount.c | 6
-rw-r--r-- fs/xfs/xfs_trans_rmap.c | 1
524 files changed, 18024 insertions, 14232 deletions
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 0429c8ee58f1..89bac3d2f05b 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -343,7 +343,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
v9ses->uid = make_kuid(current_user_ns(), uid);
if (!uid_valid(v9ses->uid)) {
ret = -EINVAL;
- pr_info("Uknown uid %s\n", s);
+ pr_info("Unknown uid %s\n", s);
}
}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 03c9e325bfbc..5f2e48d41d72 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -533,7 +533,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
return retval;
}
-static int
+static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
struct v9fs_inode *v9inode;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 42e102e2e74a..85ff859d3af5 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -859,8 +859,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
static int
v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t mode,
- int *opened)
+ struct file *file, unsigned flags, umode_t mode)
{
int err;
u32 perm;
@@ -917,7 +916,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
v9inode->writeback_fid = (void *) inode_fid;
}
mutex_unlock(&v9inode->v_mutex);
- err = finish_open(file, dentry, generic_file_open, opened);
+ err = finish_open(file, dentry, generic_file_open);
if (err)
goto error;
@@ -925,7 +924,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(d_inode(dentry), file);
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
out:
dput(res);
return err;
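
The two 9p hunks above track a VFS-wide API change: ->atomic_open() no longer receives an int *opened out-parameter, finish_open() drops its fourth argument, and creation is reported by setting FMODE_CREATED in file->f_mode. A minimal sketch of the new contract (myfs_atomic_open and the created flag are illustrative, not part of this diff):

#include <linux/fs.h>

static int myfs_atomic_open(struct inode *dir, struct dentry *dentry,
			    struct file *file, unsigned int flags,
			    umode_t mode)
{
	bool created = false;
	int err;

	/* ... look up the name, creating it if O_CREAT, set created ... */

	err = finish_open(file, dentry, generic_file_open);
	if (err)
		return err;
	if (created)
		file->f_mode |= FMODE_CREATED;	/* was: *opened |= FILE_CREATED */
	return 0;
}
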
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 7f6ae21a27b3..4823e1c46999 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -241,8 +241,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
static int
v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t omode,
- int *opened)
+ struct file *file, unsigned flags, umode_t omode)
{
int err = 0;
kgid_t gid;
@@ -352,13 +351,13 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
}
mutex_unlock(&v9inode->v_mutex);
/* Since we are opening a file, assign the open fid to the file */
- err = finish_open(file, dentry, generic_file_open, opened);
+ err = finish_open(file, dentry, generic_file_open);
if (err)
goto err_clunk_old_fid;
file->private_data = ofid;
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(inode, file);
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
out:
v9fs_put_acl(dacl, pacl);
dput(res);
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index f329eee6dc93..352abc39e891 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -105,7 +105,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
{
struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
struct iov_iter from;
- int retval;
+ int retval, err;
iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
@@ -126,7 +126,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
retval);
else
p9_client_write(fid, 0, &from, &retval);
- p9_client_clunk(fid);
+ err = p9_client_clunk(fid);
+ if (!retval && err)
+ retval = err;
return retval;
}
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 56df483de619..b795f8da81f3 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -1,3 +1,6 @@
+
+menu "Executable file formats"
+
config BINFMT_ELF
bool "Kernel support for ELF binaries"
depends on MMU
@@ -187,3 +190,5 @@ config COREDUMP
This option enables support for performing core dumps. You almost
certainly want to say Y here. Not necessary on systems that never
need debugging or only ever run flawless code.
+
+endmenu
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index c836c425ca94..66621e96f9af 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -167,7 +167,7 @@ adfs_mode2atts(struct super_block *sb, struct inode *inode)
* of time to convert from RISC OS epoch to Unix epoch.
*/
static void
-adfs_adfs2unix_time(struct timespec *tv, struct inode *inode)
+adfs_adfs2unix_time(struct timespec64 *tv, struct inode *inode)
{
unsigned int high, low;
/* 01 Jan 1970 00:00:00 (Unix epoch) as nanoseconds since
@@ -195,11 +195,11 @@ adfs_adfs2unix_time(struct timespec *tv, struct inode *inode)
/* convert from RISC OS to Unix epoch */
nsec -= nsec_unix_epoch_diff_risc_os_epoch;
- *tv = ns_to_timespec(nsec);
+ *tv = ns_to_timespec64(nsec);
return;
cur_time:
- *tv = timespec64_to_timespec(current_time(inode));
+ *tv = current_time(inode);
return;
too_early:
@@ -242,7 +242,6 @@ adfs_unix2adfs_time(struct inode *inode, unsigned int secs)
struct inode *
adfs_iget(struct super_block *sb, struct object_info *obj)
{
- struct timespec ts;
struct inode *inode;
inode = new_inode(sb);
@@ -271,9 +270,7 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
ADFS_I(inode)->stamped = ((obj->loadaddr & 0xfff00000) == 0xfff00000);
inode->i_mode = adfs_atts2mode(sb, inode);
- ts = timespec64_to_timespec(inode->i_mtime);
- adfs_adfs2unix_time(&ts, inode);
- inode->i_mtime = timespec_to_timespec64(ts);
+ adfs_adfs2unix_time(&inode->i_mtime, inode);
inode->i_atime = inode->i_mtime;
inode->i_ctime = inode->i_mtime;
@@ -287,7 +284,7 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
ADFS_I(inode)->mmu_private = inode->i_size;
}
- insert_inode_hash(inode);
+ inode_fake_hash(inode);
out:
return inode;
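
The adfs conversion above is part of the timespec to timespec64 migration: timestamps are handled as struct timespec64 end to end instead of round-tripping through the 32-bit struct timespec. A small sketch of the idiom, using the same helpers as the hunks (example_set_mtime is illustrative):

#include <linux/fs.h>
#include <linux/time64.h>

static void example_set_mtime(struct inode *inode, s64 nsec, bool valid)
{
	if (valid)
		inode->i_mtime = ns_to_timespec64(nsec);	/* y2038-safe */
	else
		inode->i_mtime = current_time(inode);	/* already a timespec64 */
}
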
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 71fa525d63a0..7e099a7a4eb1 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -291,6 +291,7 @@ static void destroy_inodecache(void)
static const struct super_operations adfs_sops = {
.alloc_inode = adfs_alloc_inode,
.destroy_inode = adfs_destroy_inode,
+ .drop_inode = generic_delete_inode,
.write_inode = adfs_write_inode,
.put_super = adfs_put_super,
.statfs = adfs_statfs,
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 7d623008157f..855bf2b79fed 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -822,6 +822,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
{
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct inode *inode;
+ struct dentry *d;
struct key *key;
int ret;
@@ -862,43 +863,17 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
afs_stat_v(dvnode, n_lookup);
inode = afs_do_lookup(dir, dentry, key);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- if (ret == -ENOENT) {
- inode = afs_try_auto_mntpt(dentry, dir);
- if (!IS_ERR(inode)) {
- key_put(key);
- goto success;
- }
-
- ret = PTR_ERR(inode);
- }
-
- key_put(key);
- if (ret == -ENOENT) {
- d_add(dentry, NULL);
- _leave(" = NULL [negative]");
- return NULL;
- }
- _leave(" = %d [do]", ret);
- return ERR_PTR(ret);
- }
- dentry->d_fsdata = (void *)(unsigned long)dvnode->status.data_version;
-
- /* instantiate the dentry */
key_put(key);
- if (IS_ERR(inode)) {
- _leave(" = %ld", PTR_ERR(inode));
- return ERR_CAST(inode);
+ if (inode == ERR_PTR(-ENOENT)) {
+ inode = afs_try_auto_mntpt(dentry, dir);
+ } else {
+ dentry->d_fsdata =
+ (void *)(unsigned long)dvnode->status.data_version;
}
-
-success:
- d_add(dentry, inode);
- _leave(" = 0 { ino=%lu v=%u }",
- d_inode(dentry)->i_ino,
- d_inode(dentry)->i_generation);
-
- return NULL;
+ d = d_splice_alias(inode, dentry);
+ if (!IS_ERR_OR_NULL(d))
+ d->d_fsdata = dentry->d_fsdata;
+ return d;
}
/*
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 174e843f0633..1cde710a8013 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -83,7 +83,7 @@ struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir)
out:
_leave("= %d", ret);
- return ERR_PTR(ret);
+ return ret == -ENOENT ? NULL : ERR_PTR(ret);
}
/*
@@ -141,12 +141,6 @@ out_p:
static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
- struct afs_vnode *vnode;
- struct inode *inode;
- int ret;
-
- vnode = AFS_FS_I(dir);
-
_enter("%pd", dentry);
ASSERTCMP(d_inode(dentry), ==, NULL);
@@ -160,22 +154,7 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
memcmp(dentry->d_name.name, "@cell", 5) == 0)
return afs_lookup_atcell(dentry);
- inode = afs_try_auto_mntpt(dentry, dir);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- if (ret == -ENOENT) {
- d_add(dentry, NULL);
- _leave(" = NULL [negative]");
- return NULL;
- }
- _leave(" = %d [do]", ret);
- return ERR_PTR(ret);
- }
-
- d_add(dentry, inode);
- _leave(" = 0 { ino=%lu v=%u }",
- d_inode(dentry)->i_ino, d_inode(dentry)->i_generation);
- return NULL;
+ return d_splice_alias(afs_try_auto_mntpt(dentry, dir), dentry);
}
const struct inode_operations afs_dynroot_inode_operations = {
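
Both afs lookup paths above are converted to the d_splice_alias() idiom: hand it the looked-up inode, which may be NULL (negative entry) or an ERR_PTR(), and return its result directly. A sketch of the shape a ->lookup() ends up with (myfs_find_inode stands in for the filesystem's own lookup and is illustrative):

#include <linux/dcache.h>
#include <linux/fs.h>

static struct inode *myfs_find_inode(struct inode *dir,
				     const struct qstr *name);

static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct inode *inode = myfs_find_inode(dir, &dentry->d_name);

	/* d_splice_alias() copes with NULL and ERR_PTR() inodes itself,
	 * so no separate negative/error branches are needed. */
	return d_splice_alias(inode, dentry);
}
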
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 9778df135717..871a228d7f37 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -21,6 +21,7 @@
#include <linux/fscache.h>
#include <linux/backing-dev.h>
#include <linux/uuid.h>
+#include <linux/mm_types.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
@@ -1076,7 +1077,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
-extern int afs_page_mkwrite(struct vm_fault *);
+extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
extern void afs_prune_wb_keys(struct afs_vnode *);
extern int afs_launder_page(struct page *);
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index a1b18082991b..35f2ae30f31f 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -346,7 +346,6 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
struct rxrpc_call *rxcall;
struct msghdr msg;
struct kvec iov[1];
- size_t offset;
s64 tx_total_len;
int ret;
@@ -433,10 +432,10 @@ error_do_abort:
rxrpc_kernel_abort_call(call->net->socket, rxcall,
RX_USER_ABORT, ret, "KSD");
} else {
- offset = 0;
- rxrpc_kernel_recv_data(call->net->socket, rxcall, NULL,
- 0, &offset, false, &call->abort_code,
- &call->service_id);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, NULL, 0, 0);
+ rxrpc_kernel_recv_data(call->net->socket, rxcall,
+ &msg.msg_iter, false,
+ &call->abort_code, &call->service_id);
ac->abort_code = call->abort_code;
ac->responded = true;
}
@@ -467,13 +466,14 @@ static void afs_deliver_to_call(struct afs_call *call)
state == AFS_CALL_SV_AWAIT_ACK
) {
if (state == AFS_CALL_SV_AWAIT_ACK) {
- size_t offset = 0;
+ struct iov_iter iter;
+
+ iov_iter_kvec(&iter, READ | ITER_KVEC, NULL, 0, 0);
ret = rxrpc_kernel_recv_data(call->net->socket,
- call->rxcall,
- NULL, 0, &offset, false,
+ call->rxcall, &iter, false,
&remote_abort,
&call->service_id);
- trace_afs_recv_data(call, 0, offset, false, ret);
+ trace_afs_recv_data(call, 0, 0, false, ret);
if (ret == -EINPROGRESS || ret == -EAGAIN)
return;
@@ -648,7 +648,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
trace_afs_notify_call(rxcall, call);
call->need_attention = true;
- u = __atomic_add_unless(&call->usage, 1, 0);
+ u = atomic_fetch_add_unless(&call->usage, 1, 0);
if (u != 0) {
trace_afs_call(call, afs_call_trace_wake, u,
atomic_read(&call->net->nr_outstanding_calls),
@@ -894,6 +894,8 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
bool want_more)
{
struct afs_net *net = call->net;
+ struct iov_iter iter;
+ struct kvec iov;
enum afs_call_state state;
u32 remote_abort = 0;
int ret;
@@ -903,10 +905,14 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
ASSERTCMP(call->offset, <=, count);
- ret = rxrpc_kernel_recv_data(net->socket, call->rxcall,
- buf, count, &call->offset,
+ iov.iov_base = buf + call->offset;
+ iov.iov_len = count - call->offset;
+ iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, count - call->offset);
+
+ ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, &iter,
want_more, &remote_abort,
&call->service_id);
+ call->offset += (count - call->offset) - iov_iter_count(&iter);
trace_afs_recv_data(call, count, call->offset, want_more, ret);
if (ret == 0 || ret == -EAGAIN)
return ret;
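
The rxrpc hunks above switch rxrpc_kernel_recv_data() from a buffer/offset pair to a caller-supplied iov_iter, so progress is measured by how far the iterator advanced. A hedged sketch of the receive idiom the last hunk converges on (example_recv is illustrative; the rxrpc and iov_iter calls follow the diff):

#include <linux/uio.h>
#include <net/af_rxrpc.h>

static int example_recv(struct socket *sock, struct rxrpc_call *rxcall,
			void *buf, size_t count, size_t *offset,
			bool want_more, u32 *abort_code, u16 *service_id)
{
	struct kvec iov = {
		.iov_base = buf + *offset,
		.iov_len  = count - *offset,
	};
	struct iov_iter iter;
	int ret;

	iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, count - *offset);
	ret = rxrpc_kernel_recv_data(sock, rxcall, &iter, want_more,
				     abort_code, service_id);
	/* whatever the iterator no longer holds was received */
	*offset += (count - *offset) - iov_iter_count(&iter);
	return ret;
}
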
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 8b39e6ebb40b..19c04caf3c01 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -753,7 +753,7 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
* notification that a previously read-only page is about to become writable
* - if it returns an error, the caller will deliver a bus error signal
*/
-int afs_page_mkwrite(struct vm_fault *vmf)
+vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
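
The 9p and afs hunks both convert page-fault handlers from int to the dedicated vm_fault_t type, which carries VM_FAULT_* codes rather than errnos. A sketch of a post-conversion ->page_mkwrite() skeleton (the myfs name and the truncation check are illustrative):

#include <linux/mm.h>

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	if (page->mapping != file_inode(vmf->vma->vm_file)->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;	/* page was truncated under us */
	}
	/* ... dirty the page / set up for writeback ... */
	return VM_FAULT_LOCKED;		/* page is returned locked */
}
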
diff --git a/fs/aio.c b/fs/aio.c
index 27454594e37a..b9350f3360c6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -5,6 +5,7 @@
* Implements an efficient asynchronous io interface.
*
* Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright 2018 Christoph Hellwig.
*
* See ../COPYING for licensing terms.
*/
@@ -18,6 +19,7 @@
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
+#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/sched/signal.h>
@@ -164,10 +166,21 @@ struct fsync_iocb {
bool datasync;
};
+struct poll_iocb {
+ struct file *file;
+ struct wait_queue_head *head;
+ __poll_t events;
+ bool woken;
+ bool cancelled;
+ struct wait_queue_entry wait;
+ struct work_struct work;
+};
+
struct aio_kiocb {
union {
struct kiocb rw;
struct fsync_iocb fsync;
+ struct poll_iocb poll;
};
struct kioctx *ki_ctx;
@@ -178,6 +191,7 @@ struct aio_kiocb {
struct list_head ki_list; /* the aio core uses this
* for cancellation */
+ refcount_t ki_refcnt;
/*
* If the aio_resfd field of the userspace iocb is not zero,
@@ -202,9 +216,7 @@ static const struct address_space_operations aio_ctx_aops;
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
- struct qstr this = QSTR_INIT("[aio]", 5);
struct file *file;
- struct path path;
struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
if (IS_ERR(inode))
return ERR_CAST(inode);
@@ -213,31 +225,17 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
inode->i_mapping->private_data = ctx;
inode->i_size = PAGE_SIZE * nr_pages;
- path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
- if (!path.dentry) {
+ file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
+ O_RDWR, &aio_ring_fops);
+ if (IS_ERR(file))
iput(inode);
- return ERR_PTR(-ENOMEM);
- }
- path.mnt = mntget(aio_mnt);
-
- d_instantiate(path.dentry, inode);
- file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
- if (IS_ERR(file)) {
- path_put(&path);
- return file;
- }
-
- file->f_flags = O_RDWR;
return file;
}
static struct dentry *aio_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- static const struct dentry_operations ops = {
- .d_dname = simple_dname,
- };
- struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
+ struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, NULL,
AIO_RING_MAGIC);
if (!IS_ERR(root))
@@ -1015,6 +1013,7 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
percpu_ref_get(&ctx->reqs);
INIT_LIST_HEAD(&req->ki_list);
+ refcount_set(&req->ki_refcnt, 0);
req->ki_ctx = ctx;
return req;
out_put:
@@ -1049,6 +1048,15 @@ out:
return ret;
}
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+ if (refcount_read(&iocb->ki_refcnt) == 0 ||
+ refcount_dec_and_test(&iocb->ki_refcnt)) {
+ percpu_ref_put(&iocb->ki_ctx->reqs);
+ kmem_cache_free(kiocb_cachep, iocb);
+ }
+}
+
/* aio_complete
* Called when the io request on the given iocb is complete.
*/
@@ -1118,8 +1126,6 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
eventfd_ctx_put(iocb->ki_eventfd);
}
- kmem_cache_free(kiocb_cachep, iocb);
-
/*
* We have to order our ring_info tail store above and test
* of the wait list below outside the wait lock. This is
@@ -1130,8 +1136,7 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
-
- percpu_ref_put(&ctx->reqs);
+ iocb_put(iocb);
}
/* aio_read_events_ring
@@ -1592,6 +1597,182 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
return 0;
}
+static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
+{
+ struct file *file = iocb->poll.file;
+
+ aio_complete(iocb, mangle_poll(mask), 0);
+ fput(file);
+}
+
+static void aio_poll_complete_work(struct work_struct *work)
+{
+ struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+ struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+ struct poll_table_struct pt = { ._key = req->events };
+ struct kioctx *ctx = iocb->ki_ctx;
+ __poll_t mask = 0;
+
+ if (!READ_ONCE(req->cancelled))
+ mask = vfs_poll(req->file, &pt) & req->events;
+
+ /*
+ * Note that ->ki_cancel callers also delete iocb from active_reqs after
+ * calling ->ki_cancel. We need the ctx_lock roundtrip here to
+ * synchronize with them. In the cancellation case the list_del_init
+ * itself is not actually needed, but harmless so we keep it in to
+ * avoid further branches in the fast path.
+ */
+ spin_lock_irq(&ctx->ctx_lock);
+ if (!mask && !READ_ONCE(req->cancelled)) {
+ add_wait_queue(req->head, &req->wait);
+ spin_unlock_irq(&ctx->ctx_lock);
+ return;
+ }
+ list_del_init(&iocb->ki_list);
+ spin_unlock_irq(&ctx->ctx_lock);
+
+ aio_poll_complete(iocb, mask);
+}
+
+/* assumes we are called with irqs disabled */
+static int aio_poll_cancel(struct kiocb *iocb)
+{
+ struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
+ struct poll_iocb *req = &aiocb->poll;
+
+ spin_lock(&req->head->lock);
+ WRITE_ONCE(req->cancelled, true);
+ if (!list_empty(&req->wait.entry)) {
+ list_del_init(&req->wait.entry);
+ schedule_work(&aiocb->poll.work);
+ }
+ spin_unlock(&req->head->lock);
+
+ return 0;
+}
+
+static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ void *key)
+{
+ struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
+ struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+ __poll_t mask = key_to_poll(key);
+
+ req->woken = true;
+
+ /* for instances that support it check for an event match first: */
+ if (mask) {
+ if (!(mask & req->events))
+ return 0;
+
+ /* try to complete the iocb inline if we can: */
+ if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
+ list_del(&iocb->ki_list);
+ spin_unlock(&iocb->ki_ctx->ctx_lock);
+
+ list_del_init(&req->wait.entry);
+ aio_poll_complete(iocb, mask);
+ return 1;
+ }
+ }
+
+ list_del_init(&req->wait.entry);
+ schedule_work(&req->work);
+ return 1;
+}
+
+struct aio_poll_table {
+ struct poll_table_struct pt;
+ struct aio_kiocb *iocb;
+ int error;
+};
+
+static void
+aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
+ struct poll_table_struct *p)
+{
+ struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
+
+ /* multiple wait queues per file are not supported */
+ if (unlikely(pt->iocb->poll.head)) {
+ pt->error = -EINVAL;
+ return;
+ }
+
+ pt->error = 0;
+ pt->iocb->poll.head = head;
+ add_wait_queue(head, &pt->iocb->poll.wait);
+}
+
+static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
+{
+ struct kioctx *ctx = aiocb->ki_ctx;
+ struct poll_iocb *req = &aiocb->poll;
+ struct aio_poll_table apt;
+ __poll_t mask;
+
+ /* reject any unknown events outside the normal event mask. */
+ if ((u16)iocb->aio_buf != iocb->aio_buf)
+ return -EINVAL;
+ /* reject fields that are not defined for poll */
+ if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
+ return -EINVAL;
+
+ INIT_WORK(&req->work, aio_poll_complete_work);
+ req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
+ req->file = fget(iocb->aio_fildes);
+ if (unlikely(!req->file))
+ return -EBADF;
+
+ apt.pt._qproc = aio_poll_queue_proc;
+ apt.pt._key = req->events;
+ apt.iocb = aiocb;
+ apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
+
+ /* initialized the list so that we can do list_empty checks */
+ INIT_LIST_HEAD(&req->wait.entry);
+ init_waitqueue_func_entry(&req->wait, aio_poll_wake);
+
+ /* one for removal from waitqueue, one for this function */
+ refcount_set(&aiocb->ki_refcnt, 2);
+
+ mask = vfs_poll(req->file, &apt.pt) & req->events;
+ if (unlikely(!req->head)) {
+ /* we did not manage to set up a waitqueue, done */
+ goto out;
+ }
+
+ spin_lock_irq(&ctx->ctx_lock);
+ spin_lock(&req->head->lock);
+ if (req->woken) {
+ /* wake_up context handles the rest */
+ mask = 0;
+ apt.error = 0;
+ } else if (mask || apt.error) {
+ /* if we get an error or a mask we are done */
+ WARN_ON_ONCE(list_empty(&req->wait.entry));
+ list_del_init(&req->wait.entry);
+ } else {
+ /* actually waiting for an event */
+ list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+ aiocb->ki_cancel = aio_poll_cancel;
+ }
+ spin_unlock(&req->head->lock);
+ spin_unlock_irq(&ctx->ctx_lock);
+
+out:
+ if (unlikely(apt.error)) {
+ fput(req->file);
+ return apt.error;
+ }
+
+ if (mask)
+ aio_poll_complete(aiocb, mask);
+ iocb_put(aiocb);
+ return 0;
+}
+
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
bool compat)
{
@@ -1665,6 +1846,9 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
case IOCB_CMD_FDSYNC:
ret = aio_fsync(&req->fsync, &iocb, true);
break;
+ case IOCB_CMD_POLL:
+ ret = aio_poll(req, &iocb);
+ break;
default:
pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
ret = -EINVAL;
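
The aio hunks above add IOCB_CMD_POLL, a one-shot poll submitted through the normal io_submit() path: aio_buf carries the poll event mask (offset, nbytes, and rw_flags must be zero), and the completion event's res field carries the signalled mask. A hedged userspace sketch of a submission, assuming an already-created aio_context_t (error handling elided):

#include <linux/aio_abi.h>
#include <poll.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Submit a one-shot poll for readability of fd. */
static int submit_aio_poll(aio_context_t ctx, int fd)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_POLL;
	cb.aio_buf = POLLIN;	/* event mask; other fields stay zero */

	return syscall(SYS_io_submit, ctx, 1, cbs);
}
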
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 3168ee4e77f4..91262c34b797 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -71,8 +71,6 @@ struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags)
{
- struct qstr this;
- struct path path;
struct file *file;
if (IS_ERR(anon_inode_inode))
@@ -82,39 +80,23 @@ struct file *anon_inode_getfile(const char *name,
return ERR_PTR(-ENOENT);
/*
- * Link the inode to a directory entry by creating a unique name
- * using the inode sequence number.
- */
- file = ERR_PTR(-ENOMEM);
- this.name = name;
- this.len = strlen(name);
- this.hash = 0;
- path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this);
- if (!path.dentry)
- goto err_module;
-
- path.mnt = mntget(anon_inode_mnt);
- /*
* We know the anon_inode inode count is always greater than zero,
* so ihold() is safe.
*/
ihold(anon_inode_inode);
-
- d_instantiate(path.dentry, anon_inode_inode);
-
- file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ file = alloc_file_pseudo(anon_inode_inode, anon_inode_mnt, name,
+ flags & (O_ACCMODE | O_NONBLOCK), fops);
if (IS_ERR(file))
- goto err_dput;
+ goto err;
+
file->f_mapping = anon_inode_inode->i_mapping;
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
return file;
-err_dput:
- path_put(&path);
-err_module:
+err:
+ iput(anon_inode_inode);
module_put(fops->owner);
return file;
}
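
The aio and anon_inodes hunks replace the open-coded d_alloc_pseudo()/alloc_file() sequence with the new alloc_file_pseudo() helper, which takes the inode, mount, name, open flags, and fops directly. Note the error convention both call sites follow: on failure the caller still owns its inode reference and must iput() it. A sketch of the resulting pattern (example_create_file is illustrative):

#include <linux/file.h>
#include <linux/fs.h>

static struct file *example_create_file(struct inode *inode,
					struct vfsmount *mnt,
					const struct file_operations *fops)
{
	struct file *file;

	file = alloc_file_pseudo(inode, mnt, "[example]", O_RDWR, fops);
	if (IS_ERR(file))
		iput(inode);	/* helper did not consume our reference */
	return file;
}
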
diff --git a/fs/attr.c b/fs/attr.c
index e3d53bf12240..d22e8187477f 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(setattr_prepare);
* inode_newsize_ok - may this inode be truncated to a given size
* @inode: the inode to be truncated
* @offset: the new size to assign to the inode
- * @Returns: 0 on success, -ve errno on failure
*
* inode_newsize_ok must be called with i_mutex held.
*
@@ -130,6 +129,8 @@ EXPORT_SYMBOL(setattr_prepare);
* returned. @inode must be a file (not directory), with appropriate
* permissions to allow truncate (inode_newsize_ok does NOT check these
* conditions).
+ *
+ * Return: 0 on success, -ve errno on failure
*/
int inode_newsize_ok(const struct inode *inode, loff_t offset)
{
@@ -205,7 +206,7 @@ EXPORT_SYMBOL(setattr_copy);
/**
* notify_change - modify attributes of a filesytem object
* @dentry: object affected
- * @iattr: new attributes
+ * @attr: new attributes
* @delegated_inode: returns inode, if the inode is delegated
*
* The caller must hold the i_mutex on the affected object.
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 9400a9f6318a..9f9cadbfbd7a 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/uaccess.h>
@@ -26,6 +27,7 @@
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/file.h>
+#include <linux/magic.h>
/* This is the range of ioctl() numbers we claim as ours */
#define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
@@ -124,7 +126,8 @@ struct autofs_sb_info {
static inline struct autofs_sb_info *autofs_sbi(struct super_block *sb)
{
- return (struct autofs_sb_info *)(sb->s_fs_info);
+ return sb->s_magic != AUTOFS_SUPER_MAGIC ?
+ NULL : (struct autofs_sb_info *)(sb->s_fs_info);
}
static inline struct autofs_info *autofs_dentry_ino(struct dentry *dentry)
@@ -151,15 +154,9 @@ int autofs_expire_run(struct super_block *, struct vfsmount *,
struct autofs_sb_info *,
struct autofs_packet_expire __user *);
int autofs_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
- struct autofs_sb_info *sbi, int when);
+ struct autofs_sb_info *sbi, unsigned int how);
int autofs_expire_multi(struct super_block *, struct vfsmount *,
struct autofs_sb_info *, int __user *);
-struct dentry *autofs_expire_direct(struct super_block *sb,
- struct vfsmount *mnt,
- struct autofs_sb_info *sbi, int how);
-struct dentry *autofs_expire_indirect(struct super_block *sb,
- struct vfsmount *mnt,
- struct autofs_sb_info *sbi, int how);
/* Device node initialization */
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index b332d3f6e730..d441244b79df 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -10,11 +10,9 @@
#include "autofs_i.h"
-static unsigned long now;
-
/* Check if a dentry can be expired */
static inline int autofs_can_expire(struct dentry *dentry,
- unsigned long timeout, int do_now)
+ unsigned long timeout, unsigned int how)
{
struct autofs_info *ino = autofs_dentry_ino(dentry);
@@ -22,16 +20,17 @@ static inline int autofs_can_expire(struct dentry *dentry,
if (ino == NULL)
return 0;
- if (!do_now) {
+ if (!(how & AUTOFS_EXP_IMMEDIATE)) {
/* Too young to die */
- if (!timeout || time_after(ino->last_used + timeout, now))
+ if (!timeout || time_after(ino->last_used + timeout, jiffies))
return 0;
}
return 1;
}
/* Check a mount point for busyness */
-static int autofs_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
+static int autofs_mount_busy(struct vfsmount *mnt,
+ struct dentry *dentry, unsigned int how)
{
struct dentry *top = dentry;
struct path path = {.mnt = mnt, .dentry = dentry};
@@ -52,6 +51,12 @@ static int autofs_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
goto done;
}
+ /* Not a submount, has a forced expire been requested */
+ if (how & AUTOFS_EXP_FORCED) {
+ status = 0;
+ goto done;
+ }
+
/* Update the expiry counter if fs is busy */
if (!may_umount_tree(path.mnt)) {
struct autofs_info *ino;
@@ -187,10 +192,14 @@ again:
static int autofs_direct_busy(struct vfsmount *mnt,
struct dentry *top,
unsigned long timeout,
- int do_now)
+ unsigned int how)
{
pr_debug("top %p %pd\n", top, top);
+ /* Forced expire, user space handles busy mounts */
+ if (how & AUTOFS_EXP_FORCED)
+ return 0;
+
/* If it's busy update the expiry counters */
if (!may_umount_tree(mnt)) {
struct autofs_info *ino;
@@ -202,7 +211,7 @@ static int autofs_direct_busy(struct vfsmount *mnt,
}
/* Timeout of a direct mount is determined by its top dentry */
- if (!autofs_can_expire(top, timeout, do_now))
+ if (!autofs_can_expire(top, timeout, how))
return 1;
return 0;
@@ -215,7 +224,7 @@ static int autofs_direct_busy(struct vfsmount *mnt,
static int autofs_tree_busy(struct vfsmount *mnt,
struct dentry *top,
unsigned long timeout,
- int do_now)
+ unsigned int how)
{
struct autofs_info *top_ino = autofs_dentry_ino(top);
struct dentry *p;
@@ -237,7 +246,7 @@ static int autofs_tree_busy(struct vfsmount *mnt,
* If the fs is busy update the expiry counter.
*/
if (d_mountpoint(p)) {
- if (autofs_mount_busy(mnt, p)) {
+ if (autofs_mount_busy(mnt, p, how)) {
top_ino->last_used = jiffies;
dput(p);
return 1;
@@ -260,8 +269,12 @@ static int autofs_tree_busy(struct vfsmount *mnt,
}
}
+ /* Forced expire, user space handles busy mounts */
+ if (how & AUTOFS_EXP_FORCED)
+ return 0;
+
/* Timeout of a tree mount is ultimately determined by its top dentry */
- if (!autofs_can_expire(top, timeout, do_now))
+ if (!autofs_can_expire(top, timeout, how))
return 1;
return 0;
@@ -270,7 +283,7 @@ static int autofs_tree_busy(struct vfsmount *mnt,
static struct dentry *autofs_check_leaves(struct vfsmount *mnt,
struct dentry *parent,
unsigned long timeout,
- int do_now)
+ unsigned int how)
{
struct dentry *p;
@@ -282,11 +295,17 @@ static struct dentry *autofs_check_leaves(struct vfsmount *mnt,
if (d_mountpoint(p)) {
/* Can we umount this guy */
- if (autofs_mount_busy(mnt, p))
+ if (autofs_mount_busy(mnt, p, how))
continue;
+ /* This isn't a submount so if a forced expire
+ * has been requested, user space handles busy
+ * mounts */
+ if (how & AUTOFS_EXP_FORCED)
+ return p;
+
/* Can we expire this guy */
- if (autofs_can_expire(p, timeout, do_now))
+ if (autofs_can_expire(p, timeout, how))
return p;
}
}
@@ -294,23 +313,21 @@ static struct dentry *autofs_check_leaves(struct vfsmount *mnt,
}
/* Check if we can expire a direct mount (possibly a tree) */
-struct dentry *autofs_expire_direct(struct super_block *sb,
- struct vfsmount *mnt,
- struct autofs_sb_info *sbi,
- int how)
+static struct dentry *autofs_expire_direct(struct super_block *sb,
+ struct vfsmount *mnt,
+ struct autofs_sb_info *sbi,
+ unsigned int how)
{
- unsigned long timeout;
struct dentry *root = dget(sb->s_root);
- int do_now = how & AUTOFS_EXP_IMMEDIATE;
struct autofs_info *ino;
+ unsigned long timeout;
if (!root)
return NULL;
- now = jiffies;
timeout = sbi->exp_timeout;
- if (!autofs_direct_busy(mnt, root, timeout, do_now)) {
+ if (!autofs_direct_busy(mnt, root, timeout, how)) {
spin_lock(&sbi->fs_lock);
ino = autofs_dentry_ino(root);
/* No point expiring a pending mount */
@@ -321,7 +338,7 @@ struct dentry *autofs_expire_direct(struct super_block *sb,
ino->flags |= AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
synchronize_rcu();
- if (!autofs_direct_busy(mnt, root, timeout, do_now)) {
+ if (!autofs_direct_busy(mnt, root, timeout, how)) {
spin_lock(&sbi->fs_lock);
ino->flags |= AUTOFS_INF_EXPIRING;
init_completion(&ino->expire_complete);
@@ -346,10 +363,8 @@ out:
static struct dentry *should_expire(struct dentry *dentry,
struct vfsmount *mnt,
unsigned long timeout,
- int how)
+ unsigned int how)
{
- int do_now = how & AUTOFS_EXP_IMMEDIATE;
- int exp_leaves = how & AUTOFS_EXP_LEAVES;
struct autofs_info *ino = autofs_dentry_ino(dentry);
unsigned int ino_count;
@@ -367,22 +382,33 @@ static struct dentry *should_expire(struct dentry *dentry,
pr_debug("checking mountpoint %p %pd\n", dentry, dentry);
/* Can we umount this guy */
- if (autofs_mount_busy(mnt, dentry))
+ if (autofs_mount_busy(mnt, dentry, how))
return NULL;
+ /* This isn't a submount so if a forced expire
+ * has been requested, user space handles busy
+ * mounts */
+ if (how & AUTOFS_EXP_FORCED)
+ return dentry;
+
/* Can we expire this guy */
- if (autofs_can_expire(dentry, timeout, do_now))
+ if (autofs_can_expire(dentry, timeout, how))
return dentry;
return NULL;
}
if (d_really_is_positive(dentry) && d_is_symlink(dentry)) {
pr_debug("checking symlink %p %pd\n", dentry, dentry);
+
+ /* Forced expire, user space handles busy mounts */
+ if (how & AUTOFS_EXP_FORCED)
+ return dentry;
+
/*
* A symlink can't be "busy" in the usual sense so
* just check last used for expire timeout.
*/
- if (autofs_can_expire(dentry, timeout, do_now))
+ if (autofs_can_expire(dentry, timeout, how))
return dentry;
return NULL;
}
@@ -391,27 +417,33 @@ static struct dentry *should_expire(struct dentry *dentry,
return NULL;
/* Case 2: tree mount, expire iff entire tree is not busy */
- if (!exp_leaves) {
- /* Path walk currently on this dentry? */
- ino_count = atomic_read(&ino->count) + 1;
- if (d_count(dentry) > ino_count)
- return NULL;
+ if (!(how & AUTOFS_EXP_LEAVES)) {
+ /* Not a forced expire? */
+ if (!(how & AUTOFS_EXP_FORCED)) {
+ /* ref-walk currently on this dentry? */
+ ino_count = atomic_read(&ino->count) + 1;
+ if (d_count(dentry) > ino_count)
+ return NULL;
+ }
- if (!autofs_tree_busy(mnt, dentry, timeout, do_now))
+ if (!autofs_tree_busy(mnt, dentry, timeout, how))
return dentry;
/*
* Case 3: pseudo direct mount, expire individual leaves
* (autofs-4.1).
*/
} else {
- /* Path walk currently on this dentry? */
struct dentry *expired;
- ino_count = atomic_read(&ino->count) + 1;
- if (d_count(dentry) > ino_count)
- return NULL;
+ /* Not a forced expire? */
+ if (!(how & AUTOFS_EXP_FORCED)) {
+ /* ref-walk currently on this dentry? */
+ ino_count = atomic_read(&ino->count) + 1;
+ if (d_count(dentry) > ino_count)
+ return NULL;
+ }
- expired = autofs_check_leaves(mnt, dentry, timeout, do_now);
+ expired = autofs_check_leaves(mnt, dentry, timeout, how);
if (expired) {
if (expired == dentry)
dput(dentry);
@@ -427,10 +459,10 @@ static struct dentry *should_expire(struct dentry *dentry,
* - it is unused by any user process
* - it has been unused for exp_timeout time
*/
-struct dentry *autofs_expire_indirect(struct super_block *sb,
- struct vfsmount *mnt,
- struct autofs_sb_info *sbi,
- int how)
+static struct dentry *autofs_expire_indirect(struct super_block *sb,
+ struct vfsmount *mnt,
+ struct autofs_sb_info *sbi,
+ unsigned int how)
{
unsigned long timeout;
struct dentry *root = sb->s_root;
@@ -442,13 +474,10 @@ struct dentry *autofs_expire_indirect(struct super_block *sb,
if (!root)
return NULL;
- now = jiffies;
timeout = sbi->exp_timeout;
dentry = NULL;
while ((dentry = get_next_positive_subdir(dentry, root))) {
- int flags = how;
-
spin_lock(&sbi->fs_lock);
ino = autofs_dentry_ino(dentry);
if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
@@ -457,7 +486,7 @@ struct dentry *autofs_expire_indirect(struct super_block *sb,
}
spin_unlock(&sbi->fs_lock);
- expired = should_expire(dentry, mnt, timeout, flags);
+ expired = should_expire(dentry, mnt, timeout, how);
if (!expired)
continue;
@@ -470,7 +499,7 @@ struct dentry *autofs_expire_indirect(struct super_block *sb,
/* Make sure a reference is not taken on found if
* things have changed.
*/
- flags &= ~AUTOFS_EXP_LEAVES;
+ how &= ~AUTOFS_EXP_LEAVES;
found = should_expire(expired, mnt, timeout, how);
if (!found || found != expired)
/* Something has changed, continue */
@@ -575,7 +604,7 @@ int autofs_expire_run(struct super_block *sb,
spin_lock(&sbi->fs_lock);
ino = autofs_dentry_ino(dentry);
/* avoid rapid-fire expire attempts if expiry fails */
- ino->last_used = now;
+ ino->last_used = jiffies;
ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
@@ -584,15 +613,15 @@ int autofs_expire_run(struct super_block *sb,
}
int autofs_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
- struct autofs_sb_info *sbi, int when)
+ struct autofs_sb_info *sbi, unsigned int how)
{
struct dentry *dentry;
int ret = -EAGAIN;
if (autofs_type_trigger(sbi->type))
- dentry = autofs_expire_direct(sb, mnt, sbi, when);
+ dentry = autofs_expire_direct(sb, mnt, sbi, how);
else
- dentry = autofs_expire_indirect(sb, mnt, sbi, when);
+ dentry = autofs_expire_indirect(sb, mnt, sbi, how);
if (dentry) {
struct autofs_info *ino = autofs_dentry_ino(dentry);
@@ -605,7 +634,7 @@ int autofs_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
spin_lock(&sbi->fs_lock);
/* avoid rapid-fire expire attempts if expiry fails */
- ino->last_used = now;
+ ino->last_used = jiffies;
ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
@@ -622,10 +651,10 @@ int autofs_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
int autofs_expire_multi(struct super_block *sb, struct vfsmount *mnt,
struct autofs_sb_info *sbi, int __user *arg)
{
- int do_now = 0;
+ unsigned int how = 0;
- if (arg && get_user(do_now, arg))
+ if (arg && get_user(how, arg))
return -EFAULT;
- return autofs_do_expire_multi(sb, mnt, sbi, do_now);
+ return autofs_do_expire_multi(sb, mnt, sbi, how);
}
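
The expire rework above folds the ad-hoc do_now/exp_leaves ints into a single unsigned int how bitmask and adds AUTOFS_EXP_FORCED, under which the kernel skips busyness checks and leaves busy mounts to user space. The mask reaches autofs_do_expire_multi() through the existing expire ioctl; a hedged userspace sketch, assuming the flag names from this diff are exported in linux/auto_fs.h:

#include <sys/ioctl.h>
#include <linux/auto_fs.h>

/* Ask the automount fd to expire now, even if mounts look busy. */
static int expire_now(int ioctlfd)
{
	int how = AUTOFS_EXP_IMMEDIATE | AUTOFS_EXP_FORCED;

	return ioctl(ioctlfd, AUTOFS_IOC_EXPIRE_MULTI, &how);
}
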
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index b51980fc274e..846c052569dd 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -10,7 +10,6 @@
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
-#include <linux/magic.h>
#include "autofs_i.h"
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index a3d414150578..782e57b911ab 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -559,6 +559,13 @@ static int autofs_dir_symlink(struct inode *dir,
if (!autofs_oz_mode(sbi))
return -EACCES;
+ /* autofs_oz_mode() needs to allow path walks when the
+ * autofs mount is catatonic but the state of an autofs
+ * file system needs to be preserved over restarts.
+ */
+ if (sbi->catatonic)
+ return -EACCES;
+
BUG_ON(!ino);
autofs_clean_ino(ino);
@@ -612,9 +619,15 @@ static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry)
struct autofs_info *ino = autofs_dentry_ino(dentry);
struct autofs_info *p_ino;
- /* This allows root to remove symlinks */
- if (!autofs_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ if (!autofs_oz_mode(sbi))
+ return -EACCES;
+
+ /* autofs_oz_mode() needs to allow path walks when the
+ * autofs mount is catatonic but the state of an autofs
+ * file system needs to be preserved over restarts.
+ */
+ if (sbi->catatonic)
+ return -EACCES;
if (atomic_dec_and_test(&ino->count)) {
p_ino = autofs_dentry_ino(dentry->d_parent);
@@ -697,6 +710,13 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
if (!autofs_oz_mode(sbi))
return -EACCES;
+ /* autofs_oz_mode() needs to allow path walks when the
+ * autofs mount is catatonic but the state of an autofs
+ * file system needs to be preserved over restarts.
+ */
+ if (sbi->catatonic)
+ return -EACCES;
+
spin_lock(&sbi->lookup_lock);
if (!simple_empty(dentry)) {
spin_unlock(&sbi->lookup_lock);
@@ -735,6 +755,13 @@ static int autofs_dir_mkdir(struct inode *dir,
if (!autofs_oz_mode(sbi))
return -EACCES;
+ /* autofs_oz_mode() needs to allow path walks when the
+ * autofs mount is catatonic but the state of an autofs
+ * file system needs to be preserved over restarts.
+ */
+ if (sbi->catatonic)
+ return -EACCES;
+
pr_debug("dentry %p, creating %pd\n", dentry, dentry);
BUG_ON(!ino);
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 125e8bbd22a2..8035d2a44561 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -134,7 +134,7 @@ static int bad_inode_update_time(struct inode *inode, struct timespec64 *time,
static int bad_inode_atomic_open(struct inode *inode, struct dentry *dentry,
struct file *file, unsigned int open_flag,
- umode_t create_mode, int *opened)
+ umode_t create_mode)
{
return -EIO;
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 816cc921cf36..efae2fb0930a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1751,7 +1751,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
if (regset->core_note_type && regset->get &&
- (!regset->active || regset->active(t->task, regset))) {
+ (!regset->active || regset->active(t->task, regset) > 0)) {
int ret;
size_t size = regset_size(t->task, regset);
void *data = kmalloc(size, GFP_KERNEL);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 4b5fff31ef27..aa4a7a23ff99 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -205,7 +205,7 @@ static int load_misc_binary(struct linux_binprm *bprm)
goto error;
if (fmt->flags & MISC_FMT_OPEN_FILE) {
- interp_file = filp_clone_open(fmt->interp_file);
+ interp_file = file_clone_open(fmt->interp_file);
if (!IS_ERR(interp_file))
deny_write_access(interp_file);
} else {
diff --git a/fs/block_dev.c b/fs/block_dev.c
index aba25414231a..38b8ce05cbc7 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -666,7 +666,8 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
result = blk_queue_enter(bdev->bd_queue, 0);
if (result)
return result;
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
+ REQ_OP_READ);
blk_queue_exit(bdev->bd_queue);
return result;
}
@@ -704,7 +705,8 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
return result;
set_page_writeback(page);
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
+ REQ_OP_WRITE);
if (result) {
end_page_writeback(page);
} else {
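
The two hunks above track the API change that made ->rw_page() take a request op instead of a bool. A minimal sketch of how a driver might branch on the new argument; my_read_page() and my_write_page() are hypothetical helpers, not existing kernel functions:

static int my_rw_page(struct block_device *bdev, sector_t sector,
		      struct page *page, unsigned int op)
{
	if (op == REQ_OP_READ)
		return my_read_page(bdev, sector, page);
	if (op == REQ_OP_WRITE)
		return my_write_page(bdev, sector, page);
	return -EOPNOTSUPP;
}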
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 15e1dfef56a5..3b66c957ea6f 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -30,23 +30,22 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
name = XATTR_NAME_POSIX_ACL_DEFAULT;
break;
default:
- BUG();
+ return ERR_PTR(-EINVAL);
}
- size = btrfs_getxattr(inode, name, "", 0);
+ size = btrfs_getxattr(inode, name, NULL, 0);
if (size > 0) {
value = kzalloc(size, GFP_KERNEL);
if (!value)
return ERR_PTR(-ENOMEM);
size = btrfs_getxattr(inode, name, value, size);
}
- if (size > 0) {
+ if (size > 0)
acl = posix_acl_from_xattr(&init_user_ns, value, size);
- } else if (size == -ERANGE || size == -ENODATA || size == 0) {
+ else if (size == -ENODATA || size == 0)
acl = NULL;
- } else {
- acl = ERR_PTR(-EIO);
- }
+ else
+ acl = ERR_PTR(size);
kfree(value);
return acl;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 0a8e2e29a66b..ae750b1574a2 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -925,7 +925,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
type = btrfs_get_extent_inline_ref_type(leaf, iref,
BTRFS_REF_TYPE_ANY);
if (type == BTRFS_REF_TYPE_INVALID)
- return -EINVAL;
+ return -EUCLEAN;
offset = btrfs_extent_inline_ref_offset(leaf, iref);
@@ -1793,7 +1793,7 @@ static int get_extent_inline_ref(unsigned long *ptr,
*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
BTRFS_REF_TYPE_ANY);
if (*out_type == BTRFS_REF_TYPE_INVALID)
- return -EINVAL;
+ return -EUCLEAN;
*ptr += btrfs_extent_inline_ref_size(*out_type);
WARN_ON(*ptr > end);
@@ -2225,7 +2225,7 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
fspath = init_data_container(total_bytes);
if (IS_ERR(fspath))
- return (void *)fspath;
+ return ERR_CAST(fspath);
ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
if (!ifp) {
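
Both the acl.c and backref.c hunks lean on the kernel's error-pointer idiom from <linux/err.h>: a negative errno rides inside the returned pointer, and ERR_CAST() re-types it without losing the error, which is what init_ipath() now does instead of the raw (void *) cast. A minimal sketch using only generic kernel APIs (the example_* names are illustrative):

#include <linux/err.h>
#include <linux/slab.h>

static char *example_dup(const char *src, size_t len)
{
	char *buf = kmemdup(src, len, GFP_KERNEL);

	if (!buf)
		return ERR_PTR(-ENOMEM);	/* errno travels in the pointer */
	return buf;
}

static void *example_retype(const char *src, size_t len)
{
	char *buf = example_dup(src, len);

	if (IS_ERR(buf))
		return ERR_CAST(buf);	/* same errno, new pointer type */
	return buf;
}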
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 7e075343daa5..1343ac57b438 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -178,7 +178,7 @@ struct btrfs_inode {
struct btrfs_delayed_node *delayed_node;
/* File creation time. */
- struct timespec i_otime;
+ struct timespec64 i_otime;
/* Hook into fs_info->delayed_iputs */
struct list_head delayed_iput;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index a3fdb4fe967d..833cf3c35b4d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1539,7 +1539,12 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
}
device = multi->stripes[0].dev;
- block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev->bd_dev);
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
+ !device->bdev || !device->name)
+ block_ctx_out->dev = NULL;
+ else
+ block_ctx_out->dev = btrfsic_dev_state_lookup(
+ device->bdev->bd_dev);
block_ctx_out->dev_bytenr = multi->stripes[0].physical;
block_ctx_out->start = bytenr;
block_ctx_out->len = len;
@@ -1624,7 +1629,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
bio = btrfs_io_bio_alloc(num_pages - i);
bio_set_dev(bio, block_ctx->dev->bdev);
bio->bi_iter.bi_sector = dev_bytenr >> 9;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_opf = REQ_OP_READ;
for (j = i; j < num_pages; j++) {
ret = bio_add_page(bio, block_ctx->pagev[j],
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d3e447b45bf7..9bfa66592aa7 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -5,7 +5,6 @@
#include <linux/kernel.h>
#include <linux/bio.h>
-#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
@@ -14,10 +13,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
-#include <linux/mpage.h>
-#include <linux/swap.h>
#include <linux/writeback.h>
-#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
@@ -303,7 +299,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
struct bio *bio = NULL;
struct compressed_bio *cb;
unsigned long bytes_left;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
int pg_index = 0;
struct page *page;
u64 first_byte = disk_start;
@@ -342,9 +337,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
if (bio->bi_iter.bi_size)
- submit = io_tree->ops->merge_bio_hook(page, 0,
- PAGE_SIZE,
- bio, 0);
+ submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE, bio, 0);
page->mapping = NULL;
if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
@@ -613,7 +606,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
cb->len = bio->bi_iter.bi_size;
comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
- bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
+ comp_bio->bi_opf = REQ_OP_READ;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
refcount_set(&cb->pending_bios, 1);
@@ -626,9 +619,8 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
page->index = em_start >> PAGE_SHIFT;
if (comp_bio->bi_iter.bi_size)
- submit = tree->ops->merge_bio_hook(page, 0,
- PAGE_SIZE,
- comp_bio, 0);
+ submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE,
+ comp_bio, 0);
page->mapping = NULL;
if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
@@ -660,7 +652,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
}
comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
- bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
+ comp_bio->bi_opf = REQ_OP_READ;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 4bc326df472e..d436fb4c002e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -888,11 +888,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
btrfs_root_last_snapshot(&root->root_item) ||
btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
return 1;
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
- btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
- return 1;
-#endif
+
return 0;
}
@@ -3128,8 +3124,7 @@ again:
* higher levels
*
*/
-static void fixup_low_keys(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+static void fixup_low_keys(struct btrfs_path *path,
struct btrfs_disk_key *key, int level)
{
int i;
@@ -3181,7 +3176,7 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
btrfs_set_item_key(eb, &disk_key, slot);
btrfs_mark_buffer_dirty(eb);
if (slot == 0)
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
}
/*
@@ -3359,17 +3354,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
root_add_used(root, fs_info->nodesize);
- memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
btrfs_set_header_nritems(c, 1);
- btrfs_set_header_level(c, level);
- btrfs_set_header_bytenr(c, c->start);
- btrfs_set_header_generation(c, trans->transid);
- btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(c, root->root_key.objectid);
-
- write_extent_buffer_fsid(c, fs_info->fsid);
- write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
-
btrfs_set_node_key(c, &lower_key, 0);
btrfs_set_node_blockptr(c, 0, lower->start);
lower_gen = btrfs_header_generation(lower);
@@ -3498,15 +3483,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
return PTR_ERR(split);
root_add_used(root, fs_info->nodesize);
-
- memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
- btrfs_set_header_level(split, btrfs_header_level(c));
- btrfs_set_header_bytenr(split, split->start);
- btrfs_set_header_generation(split, trans->transid);
- btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(split, root->root_key.objectid);
- write_extent_buffer_fsid(split, fs_info->fsid);
- write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
+ ASSERT(btrfs_header_level(c) == level);
ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
if (ret) {
@@ -3945,7 +3922,7 @@ static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
clean_tree_block(fs_info, right);
btrfs_item_key(right, &disk_key, 0);
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
/* then fixup the leaf pointer in the path */
if (path->slots[0] < push_items) {
@@ -4292,15 +4269,6 @@ again:
root_add_used(root, fs_info->nodesize);
- memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
- btrfs_set_header_bytenr(right, right->start);
- btrfs_set_header_generation(right, trans->transid);
- btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(right, root->root_key.objectid);
- btrfs_set_header_level(right, 0);
- write_extent_buffer_fsid(right, fs_info->fsid);
- write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
-
if (split == 0) {
if (mid <= slot) {
btrfs_set_header_nritems(right, 0);
@@ -4320,7 +4288,7 @@ again:
path->nodes[0] = right;
path->slots[0] = 0;
if (path->slots[1] == 0)
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
}
/*
* We create a new leaf 'right' for the required ins_len and
@@ -4642,7 +4610,7 @@ void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
btrfs_set_item_key(leaf, &disk_key, slot);
if (slot == 0)
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
}
item = btrfs_item_nr(slot);
@@ -4744,7 +4712,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
if (path->slots[0] == 0) {
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
}
btrfs_unlock_up_safe(path, 1);
@@ -4886,7 +4854,6 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
int level, int slot)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *parent = path->nodes[level];
u32 nritems;
int ret;
@@ -4919,7 +4886,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_disk_key disk_key;
btrfs_node_key(parent, &disk_key, 0);
- fixup_low_keys(fs_info, path, &disk_key, level + 1);
+ fixup_low_keys(path, &disk_key, level + 1);
}
btrfs_mark_buffer_dirty(parent);
}
@@ -5022,7 +4989,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_item_key(leaf, &disk_key, 0);
- fixup_low_keys(fs_info, path, &disk_key, 1);
+ fixup_low_keys(path, &disk_key, 1);
}
/* delete the leaf if it is mostly empty */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 118346aceea9..53af9f5253f4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -55,8 +55,6 @@ struct btrfs_ordered_sum;
#define BTRFS_OLDEST_GENERATION 0ULL
-#define BTRFS_COMPAT_EXTENT_TREE_V0
-
/*
* the max metadata block size. This limit is somewhat artificial,
* but the memmove costs go through the roof for larger blocks.
@@ -86,6 +84,14 @@ static const int btrfs_csum_sizes[] = { 4 };
#define BTRFS_DIRTY_METADATA_THRESH SZ_32M
+/*
+ * Use a large batch size to reduce the overhead of metadata updates. On the
+ * reader side, we only read it when we are close to ENOSPC and the read
+ * overhead is mostly related to the number of CPUs, so it is OK to use an
+ * arbitrarily large value here.
+ */
+#define BTRFS_TOTAL_BYTES_PINNED_BATCH SZ_128M
+
#define BTRFS_MAX_EXTENT_SIZE SZ_128M
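
As a usage sketch of the new constant (assuming only the generic percpu_counter API plus this header, not other btrfs internals): writers fold their per-CPU delta into the shared count only once it crosses the batch, so hot paths stay cache-local, and readers pass the same batch so the comparison accounts for the per-CPU error bound:

#include <linux/percpu_counter.h>
#include <linux/types.h>

static void pin_bytes(struct percpu_counter *pinned, s64 bytes)
{
	/* Cheap per-CPU add until the local delta exceeds the 128M batch. */
	percpu_counter_add_batch(pinned, bytes,
				 BTRFS_TOTAL_BYTES_PINNED_BATCH);
}

static bool pinned_at_least(struct percpu_counter *pinned, s64 target)
{
	/* The same batch on the read side keeps the comparison sound. */
	return __percpu_counter_compare(pinned, target,
					BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0;
}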
@@ -342,8 +348,8 @@ struct btrfs_path {
sizeof(struct btrfs_item))
struct btrfs_dev_replace {
u64 replace_state; /* see #define above */
- u64 time_started; /* seconds since 1-Jan-1970 */
- u64 time_stopped; /* seconds since 1-Jan-1970 */
+ time64_t time_started; /* seconds since 1-Jan-1970 */
+ time64_t time_stopped; /* seconds since 1-Jan-1970 */
atomic64_t num_write_errors;
atomic64_t num_uncorrectable_read_errors;
@@ -359,8 +365,6 @@ struct btrfs_dev_replace {
struct btrfs_device *srcdev;
struct btrfs_device *tgtdev;
- pid_t lock_owner;
- atomic_t nesting_level;
struct mutex lock_finishing_cancel_unmount;
rwlock_t lock;
atomic_t read_locks;
@@ -1213,7 +1217,6 @@ struct btrfs_root {
u64 defrag_trans_start;
struct btrfs_key defrag_progress;
struct btrfs_key defrag_max;
- char *name;
/* the dirty list is only used by non-reference counted roots */
struct list_head dirty_list;
@@ -2428,32 +2431,6 @@ static inline u32 btrfs_file_extent_inline_item_len(
return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
-/* this returns the number of file bytes represented by the inline item.
- * If an item is compressed, this is the uncompressed size
- */
-static inline u32 btrfs_file_extent_inline_len(const struct extent_buffer *eb,
- int slot,
- const struct btrfs_file_extent_item *fi)
-{
- struct btrfs_map_token token;
-
- btrfs_init_map_token(&token);
- /*
- * return the space used on disk if this item isn't
- * compressed or encoded
- */
- if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 &&
- btrfs_token_file_extent_encryption(eb, fi, &token) == 0 &&
- btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) {
- return btrfs_file_extent_inline_item_len(eb,
- btrfs_item_nr(slot));
- }
-
- /* otherwise use the ram bytes field */
- return btrfs_token_file_extent_ram_bytes(eb, fi, &token);
-}
-
-
/* btrfs_dev_stats_item */
static inline u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
const struct btrfs_dev_stats_item *ptr,
@@ -2676,7 +2653,6 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
@@ -2716,15 +2692,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytes_used,
- u64 type, u64 chunk_offset, u64 size);
+ u64 bytes_used, u64 type, u64 chunk_offset,
+ u64 size);
void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 group_start,
- struct extent_map *em);
+ u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
@@ -2786,7 +2761,6 @@ void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
-void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush);
@@ -2803,8 +2777,7 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
-int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache);
+int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
@@ -2812,8 +2785,7 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes);
-int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 type);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
@@ -2822,10 +2794,10 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
int btrfs_start_write_no_snapshotting(struct btrfs_root *root);
void btrfs_end_write_no_snapshotting(struct btrfs_root *root);
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
-void check_system_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, const u64 type);
+void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
u64 start, u64 end);
+void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
@@ -3011,16 +2983,14 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
/* root-item.c */
-int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
- const char *name, int name_len);
-int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
- const char *name, int name_len);
+int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 sequence, const char *name,
+ int name_len);
+int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 *sequence, const char *name,
+ int name_len);
int btrfs_del_root(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, const struct btrfs_key *key);
+ const struct btrfs_key *key);
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *key,
struct btrfs_root_item *item);
@@ -3196,7 +3166,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags);
-void btrfs_set_range_writeback(void *private_data, u64 start, u64 end);
+void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end);
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
@@ -3247,8 +3217,9 @@ void btrfs_get_block_group_info(struct list_head *groups_list,
struct btrfs_ioctl_space_info *space);
void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_balance_args *bargs);
-ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
- struct file *dst_file, u64 dst_loff);
+int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff,
+ struct file *dst_file, loff_t dst_loff,
+ u64 olen);
/* file.c */
int __init btrfs_auto_defrag_init(void);
@@ -3452,7 +3423,7 @@ do { \
#ifdef CONFIG_BTRFS_ASSERT
__cold
-static inline void assfail(char *expr, char *file, int line)
+static inline void assfail(const char *expr, const char *file, int line)
{
pr_err("assertion failed: %s, file: %s, line: %d\n",
expr, file, line);
@@ -3465,6 +3436,13 @@ static inline void assfail(char *expr, char *file, int line)
#define ASSERT(expr) ((void)0)
#endif
+__cold
+static inline void btrfs_print_v0_err(struct btrfs_fs_info *fs_info)
+{
+ btrfs_err(fs_info,
+"Unsupported V0 extent filesystem detected. Aborting. Please re-create your filesystem with a newer kernel");
+}
+
__printf(5, 6)
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index fe6caa7e698b..f51b509f2d9b 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1222,7 +1222,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
struct btrfs_path *path;
@@ -1418,7 +1418,6 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
const char *name, int name_len,
struct btrfs_inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
@@ -1458,11 +1457,10 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
*/
BUG_ON(ret);
-
mutex_lock(&delayed_node->mutex);
ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
if (unlikely(ret)) {
- btrfs_err(fs_info,
+ btrfs_err(trans->fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
name_len, name, delayed_node->root->objectid,
delayed_node->inode_id, ret);
@@ -1495,7 +1493,6 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_inode *dir, u64 index)
{
struct btrfs_delayed_node *node;
@@ -1511,7 +1508,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
item_key.type = BTRFS_DIR_INDEX_KEY;
item_key.offset = index;
- ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
+ ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
+ &item_key);
if (!ret)
goto end;
@@ -1533,7 +1531,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
if (unlikely(ret)) {
- btrfs_err(fs_info,
+ btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->objectid, node->inode_id, ret);
BUG();
@@ -1837,7 +1835,7 @@ release_node:
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_delayed_node *delayed_node;
/*
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index ca7a97f3ab6b..33536cd681d4 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -86,14 +86,12 @@ static inline void btrfs_init_delayed_root(
}
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
const char *name, int name_len,
struct btrfs_inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index);
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_inode *dir, u64 index);
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode);
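
The fs_info parameter disappears from these prototypes because the transaction handle already carries it. A sketch of the resulting calling convention (example_op() is hypothetical):

static void example_op(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	/* One argument fewer at every call site, and no way for a caller
	 * to pass an fs_info that does not match the transaction. */
	btrfs_err(fs_info, "example diagnostic");
}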
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 03dec673d12a..62ff545ba1f7 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -709,13 +709,13 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
* to make sure the delayed ref is eventually processed before this
* transaction commits.
*/
-int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent,
u64 ref_root, int level, int action,
struct btrfs_delayed_extent_op *extent_op,
int *old_ref_mod, int *new_ref_mod)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_tree_ref *ref;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -730,27 +730,33 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
if (!ref)
return -ENOMEM;
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+ if (!head_ref) {
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+ return -ENOMEM;
+ }
+
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+ is_fstree(ref_root)) {
+ record = kmalloc(sizeof(*record), GFP_NOFS);
+ if (!record) {
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+ return -ENOMEM;
+ }
+ }
+
if (parent)
ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
else
ref_type = BTRFS_TREE_BLOCK_REF_KEY;
+
init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
ref_root, action, ref_type);
ref->root = ref_root;
ref->parent = parent;
ref->level = level;
- head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
- if (!head_ref)
- goto free_ref;
-
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
- is_fstree(ref_root)) {
- record = kmalloc(sizeof(*record), GFP_NOFS);
- if (!record)
- goto free_head_ref;
- }
-
init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
ref_root, 0, action, false, is_system);
head_ref->extent_op = extent_op;
@@ -779,25 +785,18 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
btrfs_qgroup_trace_extent_post(fs_info, record);
return 0;
-
-free_head_ref:
- kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
-free_ref:
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
-
- return -ENOMEM;
}
/*
* add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
*/
-int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
u64 parent, u64 ref_root,
u64 owner, u64 offset, u64 reserved, int action,
int *old_ref_mod, int *new_ref_mod)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_data_ref *ref;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
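
The reordering above takes every allocation before init_delayed_ref_common() runs, so the function either owns all of its objects or unwinds completely before touching shared state, and the goto ladder disappears. The shape of the pattern, as a hedged sketch with hypothetical caches:

static int alloc_pair(struct kmem_cache *cache_a, struct kmem_cache *cache_b,
		      void **a, void **b)
{
	*a = kmem_cache_alloc(cache_a, GFP_NOFS);
	if (!*a)
		return -ENOMEM;
	*b = kmem_cache_alloc(cache_b, GFP_NOFS);
	if (!*b) {
		/* Straight-line unwind: nothing else was initialized. */
		kmem_cache_free(cache_a, *a);
		return -ENOMEM;
	}
	return 0;
}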
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index ea1aecb6a50d..d9f2a4ebd5db 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -234,14 +234,12 @@ static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *hea
kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}
-int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent,
u64 ref_root, int level, int action,
struct btrfs_delayed_extent_op *extent_op,
int *old_ref_mod, int *new_ref_mod);
-int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
u64 parent, u64 ref_root,
u64 owner, u64 offset, u64 reserved, int action,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index e2ba0419297a..dec01970d8c5 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -6,14 +6,9 @@
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include <linux/blkdev.h>
-#include <linux/random.h>
-#include <linux/iocontext.h>
-#include <linux/capability.h>
#include <linux/kthread.h>
#include <linux/math64.h>
-#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
@@ -465,7 +460,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
* go to the tgtdev as well (refer to btrfs_map_block()).
*/
dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
- dev_replace->time_started = get_seconds();
+ dev_replace->time_started = ktime_get_real_seconds();
dev_replace->cursor_left = 0;
dev_replace->committed_cursor_left = 0;
dev_replace->cursor_left_last_write_of_item = 0;
@@ -511,7 +506,7 @@ leave:
dev_replace->srcdev = NULL;
dev_replace->tgtdev = NULL;
btrfs_dev_replace_write_unlock(dev_replace);
- btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ btrfs_destroy_dev_replace_tgtdev(tgt_device);
return ret;
}
@@ -618,7 +613,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
: BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
dev_replace->tgtdev = NULL;
dev_replace->srcdev = NULL;
- dev_replace->time_stopped = get_seconds();
+ dev_replace->time_stopped = ktime_get_real_seconds();
dev_replace->item_needs_writeback = 1;
/* replace old device with new one in mapping tree */
@@ -637,7 +632,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
btrfs_rm_dev_replace_blocked(fs_info);
if (tgt_device)
- btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ btrfs_destroy_dev_replace_tgtdev(tgt_device);
btrfs_rm_dev_replace_unblocked(fs_info);
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
@@ -663,7 +658,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
tgt_device->commit_total_bytes = src_device->commit_total_bytes;
tgt_device->commit_bytes_used = src_device->bytes_used;
- btrfs_assign_next_active_device(fs_info, src_device, tgt_device);
+ btrfs_assign_next_active_device(src_device, tgt_device);
list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
fs_info->fs_devices->rw_devices++;
@@ -672,11 +667,17 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
btrfs_rm_dev_replace_blocked(fs_info);
- btrfs_rm_dev_replace_remove_srcdev(fs_info, src_device);
+ btrfs_rm_dev_replace_remove_srcdev(src_device);
btrfs_rm_dev_replace_unblocked(fs_info);
/*
+ * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
+ * update the on-disk dev stats values during the transaction commit
+ */
+ atomic_inc(&tgt_device->dev_stats_ccnt);
+
+ /*
* this is again a consistent state where no dev_replace procedure
* is running, the target device is part of the filesystem, the
* source device is not part of the filesystem anymore and its 1st
@@ -807,7 +808,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
break;
}
dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
- dev_replace->time_stopped = get_seconds();
+ dev_replace->time_stopped = ktime_get_real_seconds();
dev_replace->item_needs_writeback = 1;
btrfs_dev_replace_write_unlock(dev_replace);
btrfs_scrub_cancel(fs_info);
@@ -826,7 +827,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
btrfs_dev_name(tgt_device));
if (tgt_device)
- btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ btrfs_destroy_dev_replace_tgtdev(tgt_device);
leave:
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
@@ -848,7 +849,7 @@ void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
dev_replace->replace_state =
BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
- dev_replace->time_stopped = get_seconds();
+ dev_replace->time_stopped = ktime_get_real_seconds();
dev_replace->item_needs_writeback = 1;
btrfs_info(fs_info, "suspending dev_replace for unmount");
break;
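
Together with the time64_t fields in the ctree.h hunk above, every get_seconds() call in this file becomes ktime_get_real_seconds(), keeping the stamps 64-bit on 32-bit architectures past 2038. A sketch of the pattern (the helper is hypothetical; the field is the real one from struct btrfs_dev_replace):

static void stamp_replace_start(struct btrfs_dev_replace *dev_replace)
{
	/* get_seconds() returned a native unsigned long, which wraps in
	 * 2038 on 32-bit; ktime_get_real_seconds() returns time64_t. */
	dev_replace->time_started = ktime_get_real_seconds();
}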
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 39e9766d1cbd..a678b07fcf01 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -160,8 +160,8 @@ second_insert:
}
btrfs_release_path(path);
- ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name,
- name_len, dir, &disk_key, type, index);
+ ret2 = btrfs_insert_delayed_dir_index(trans, name, name_len, dir,
+ &disk_key, type, index);
out_free:
btrfs_free_path(path);
if (ret)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 205092dc9390..5124c15705ce 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -5,8 +5,6 @@
#include <linux/fs.h>
#include <linux/blkdev.h>
-#include <linux/scatterlist.h>
-#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
@@ -54,7 +52,6 @@
static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
-static void free_fs_root(struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_fs_info *fs_info);
@@ -108,12 +105,9 @@ void __cold btrfs_end_io_wq_exit(void)
*/
struct async_submit_bio {
void *private_data;
- struct btrfs_fs_info *fs_info;
struct bio *bio;
extent_submit_bio_start_t *submit_bio_start;
- extent_submit_bio_done_t *submit_bio_done;
int mirror_num;
- unsigned long bio_flags;
/*
* bio_offset is optional, can be used if the pages in the bio
* can't tell us where in the file the bio should go
@@ -212,7 +206,7 @@ struct extent_map *btree_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
int ret;
@@ -615,8 +609,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
found_start = btrfs_header_bytenr(eb);
if (found_start != eb->start) {
- btrfs_err_rl(fs_info, "bad tree block start %llu %llu",
- found_start, eb->start);
+ btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
+ eb->start, found_start);
ret = -EIO;
goto err;
}
@@ -628,8 +622,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
}
found_level = btrfs_header_level(eb);
if (found_level >= BTRFS_MAX_LEVEL) {
- btrfs_err(fs_info, "bad tree block level %d",
- (int)btrfs_header_level(eb));
+ btrfs_err(fs_info, "bad tree block level %d on %llu",
+ (int)btrfs_header_level(eb), eb->start);
ret = -EIO;
goto err;
}
@@ -779,7 +773,7 @@ static void run_one_async_done(struct btrfs_work *work)
return;
}
- async->submit_bio_done(async->private_data, async->bio, async->mirror_num);
+ btrfs_submit_bio_done(async->private_data, async->bio, async->mirror_num);
}
static void run_one_async_free(struct btrfs_work *work)
@@ -793,8 +787,7 @@ static void run_one_async_free(struct btrfs_work *work)
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset, void *private_data,
- extent_submit_bio_start_t *submit_bio_start,
- extent_submit_bio_done_t *submit_bio_done)
+ extent_submit_bio_start_t *submit_bio_start)
{
struct async_submit_bio *async;
@@ -803,16 +796,13 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
return BLK_STS_RESOURCE;
async->private_data = private_data;
- async->fs_info = fs_info;
async->bio = bio;
async->mirror_num = mirror_num;
async->submit_bio_start = submit_bio_start;
- async->submit_bio_done = submit_bio_done;
btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
run_one_async_done, run_one_async_free);
- async->bio_flags = bio_flags;
async->bio_offset = bio_offset;
async->status = 0;
@@ -851,24 +841,6 @@ static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
return btree_csum_one_bio(bio);
}
-static blk_status_t btree_submit_bio_done(void *private_data, struct bio *bio,
- int mirror_num)
-{
- struct inode *inode = private_data;
- blk_status_t ret;
-
- /*
- * when we're called for a write, we're already in the async
- * submission context. Just jump into btrfs_map_bio
- */
- ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
- if (ret) {
- bio->bi_status = ret;
- bio_endio(bio);
- }
- return ret;
-}
-
static int check_async_write(struct btrfs_inode *bi)
{
if (atomic_read(&bi->sync_writers))
@@ -911,8 +883,7 @@ static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
*/
ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
bio_offset, private_data,
- btree_submit_bio_start,
- btree_submit_bio_done);
+ btree_submit_bio_start);
}
if (ret)
@@ -961,8 +932,9 @@ static int btree_writepages(struct address_space *mapping,
fs_info = BTRFS_I(mapping->host)->root->fs_info;
/* this is a bit racy, but that's ok */
- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
- BTRFS_DIRTY_METADATA_THRESH);
+ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH,
+ fs_info->dirty_metadata_batch);
if (ret < 0)
return 0;
}
@@ -1181,7 +1153,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
root->highest_objectid = 0;
root->nr_delalloc_inodes = 0;
root->nr_ordered_extents = 0;
- root->name = NULL;
root->inode_tree = RB_ROOT;
INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
root->block_rsv = NULL;
@@ -1292,15 +1263,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
goto fail;
}
- memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
- btrfs_set_header_bytenr(leaf, leaf->start);
- btrfs_set_header_generation(leaf, trans->transid);
- btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(leaf, objectid);
root->node = leaf;
-
- write_extent_buffer_fsid(leaf, fs_info->fsid);
- write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
btrfs_mark_buffer_dirty(leaf);
root->commit_root = btrfs_root_node(root);
@@ -1374,14 +1337,8 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
return ERR_CAST(leaf);
}
- memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
- btrfs_set_header_bytenr(leaf, leaf->start);
- btrfs_set_header_generation(leaf, trans->transid);
- btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
root->node = leaf;
- write_extent_buffer_fsid(root->node, fs_info->fsid);
btrfs_mark_buffer_dirty(root->node);
btrfs_tree_unlock(root->node);
return root;
@@ -1546,7 +1503,7 @@ int btrfs_init_fs_root(struct btrfs_root *root)
return 0;
fail:
- /* the caller is responsible to call free_fs_root */
+ /* The caller is responsible for calling btrfs_free_fs_root */
return ret;
}
@@ -1651,14 +1608,14 @@ again:
ret = btrfs_insert_fs_root(fs_info, root);
if (ret) {
if (ret == -EEXIST) {
- free_fs_root(root);
+ btrfs_free_fs_root(root);
goto again;
}
goto fail;
}
return root;
fail:
- free_fs_root(root);
+ btrfs_free_fs_root(root);
return ERR_PTR(ret);
}
@@ -1803,7 +1760,7 @@ static int transaction_kthread(void *arg)
struct btrfs_trans_handle *trans;
struct btrfs_transaction *cur;
u64 transid;
- unsigned long now;
+ time64_t now;
unsigned long delay;
bool cannot_commit;
@@ -1819,7 +1776,7 @@ static int transaction_kthread(void *arg)
goto sleep;
}
- now = get_seconds();
+ now = ktime_get_seconds();
if (cur->state < TRANS_STATE_BLOCKED &&
!test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
(now < cur->start_time ||
@@ -2196,8 +2153,6 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
- fs_info->dev_replace.lock_owner = 0;
- atomic_set(&fs_info->dev_replace.nesting_level, 0);
mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
rwlock_init(&fs_info->dev_replace.lock);
atomic_set(&fs_info->dev_replace.read_locks, 0);
@@ -3075,6 +3030,13 @@ retry_root_backup:
fs_info->generation = generation;
fs_info->last_trans_committed = generation;
+ ret = btrfs_verify_dev_extents(fs_info);
+ if (ret) {
+ btrfs_err(fs_info,
+ "failed to verify dev extents against chunks: %d",
+ ret);
+ goto fail_block_groups;
+ }
ret = btrfs_recover_balance(fs_info);
if (ret) {
btrfs_err(fs_info, "failed to recover balance: %d", ret);
@@ -3875,10 +3837,10 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
__btrfs_remove_free_space_cache(root->free_ino_pinned);
if (root->free_ino_ctl)
__btrfs_remove_free_space_cache(root->free_ino_ctl);
- free_fs_root(root);
+ btrfs_free_fs_root(root);
}
-static void free_fs_root(struct btrfs_root *root)
+void btrfs_free_fs_root(struct btrfs_root *root)
{
iput(root->ino_cache_inode);
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
@@ -3890,15 +3852,9 @@ static void free_fs_root(struct btrfs_root *root)
free_extent_buffer(root->commit_root);
kfree(root->free_ino_ctl);
kfree(root->free_ino_pinned);
- kfree(root->name);
btrfs_put_fs_root(root);
}
-void btrfs_free_fs_root(struct btrfs_root *root)
-{
- free_fs_root(root);
-}
-
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
u64 root_objectid = 0;
@@ -4104,10 +4060,10 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
* This is a fast path so only do this check if we have sanity tests
- * enabled. Normal people shouldn't be marking dummy buffers as dirty
+ * enabled. Normal people shouldn't be marking unmapped buffers as dirty
* outside of the sanity tests.
*/
- if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
+ if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
return;
#endif
root = BTRFS_I(buf->pages[0]->mapping->host)->root;
@@ -4150,8 +4106,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
if (flush_delayed)
btrfs_balance_delayed_items(fs_info);
- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
- BTRFS_DIRTY_METADATA_THRESH);
+ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH,
+ fs_info->dirty_metadata_batch);
if (ret > 0) {
balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
}
@@ -4563,21 +4520,11 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
return 0;
}
-static struct btrfs_fs_info *btree_fs_info(void *private_data)
-{
- struct inode *inode = private_data;
- return btrfs_sb(inode->i_sb);
-}
-
static const struct extent_io_ops btree_extent_io_ops = {
/* mandatory callbacks */
.submit_bio_hook = btree_submit_bio_hook,
.readpage_end_io_hook = btree_readpage_end_io_hook,
- /* note we're sharing with inode.c for the merge bio hook */
- .merge_bio_hook = btrfs_merge_bio_hook,
.readpage_io_failed_hook = btree_io_failed_hook,
- .set_range_writeback = btrfs_set_range_writeback,
- .tree_fs_info = btree_fs_info,
/* optional callbacks */
};
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 1a3d277b027b..4cccba22640f 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -120,8 +120,9 @@ blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset, void *private_data,
- extent_submit_bio_start_t *submit_bio_start,
- extent_submit_bio_done_t *submit_bio_done);
+ extent_submit_bio_start_t *submit_bio_start);
+blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
+ int mirror_num);
int btrfs_write_tree_block(struct extent_buffer *buf);
void btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3d9fe58c0080..de6f75f5547b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -52,24 +52,21 @@ enum {
};
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_node *node, u64 parent,
- u64 root_objectid, u64 owner_objectid,
- u64 owner_offset, int refs_to_drop,
- struct btrfs_delayed_extent_op *extra_op);
+ struct btrfs_delayed_ref_node *node, u64 parent,
+ u64 root_objectid, u64 owner_objectid,
+ u64 owner_offset, int refs_to_drop,
+ struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op);
-static int do_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 flags,
+static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
int force);
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
@@ -220,9 +217,9 @@ static int add_excluded_extent(struct btrfs_fs_info *fs_info,
return 0;
}
-static void free_excluded_extents(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache)
+static void free_excluded_extents(struct btrfs_block_group_cache *cache)
{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
u64 start, end;
start = cache->key.objectid;
@@ -234,9 +231,9 @@ static void free_excluded_extents(struct btrfs_fs_info *fs_info,
start, end, EXTENT_UPTODATE);
}
-static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache)
+static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
u64 bytenr;
u64 *logical;
int stripe_len;
@@ -558,7 +555,7 @@ static noinline void caching_thread(struct btrfs_work *work)
caching_ctl->progress = (u64)-1;
up_read(&fs_info->commit_root_sem);
- free_excluded_extents(fs_info, block_group);
+ free_excluded_extents(block_group);
mutex_unlock(&caching_ctl->mutex);
wake_up(&caching_ctl->wait);
@@ -666,7 +663,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
wake_up(&caching_ctl->wait);
if (ret == 1) {
put_caching_control(caching_ctl);
- free_excluded_extents(fs_info, cache);
+ free_excluded_extents(cache);
return 0;
}
} else {
@@ -758,7 +755,8 @@ static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
space_info = __find_space_info(fs_info, flags);
ASSERT(space_info);
- percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
+ percpu_counter_add_batch(&space_info->total_bytes_pinned, num_bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
}
/*
@@ -870,18 +868,16 @@ search_again:
num_refs = btrfs_extent_refs(leaf, ei);
extent_flags = btrfs_extent_flags(leaf, ei);
} else {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- struct btrfs_extent_item_v0 *ei0;
- BUG_ON(item_size != sizeof(*ei0));
- ei0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_item_v0);
- num_refs = btrfs_extent_refs_v0(leaf, ei0);
- /* FIXME: this isn't correct for data */
- extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
-#else
- BUG();
-#endif
+ ret = -EINVAL;
+ btrfs_print_v0_err(fs_info);
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(fs_info, ret, NULL);
+
+ goto out_free;
}
+
BUG_ON(num_refs == 0);
} else {
num_refs = 0;
@@ -1039,89 +1035,6 @@ out_free:
* tree block info structure.
*/
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
- u64 owner, u32 extra_size)
-{
- struct btrfs_root *root = fs_info->extent_root;
- struct btrfs_extent_item *item;
- struct btrfs_extent_item_v0 *ei0;
- struct btrfs_extent_ref_v0 *ref0;
- struct btrfs_tree_block_info *bi;
- struct extent_buffer *leaf;
- struct btrfs_key key;
- struct btrfs_key found_key;
- u32 new_size = sizeof(*item);
- u64 refs;
- int ret;
-
- leaf = path->nodes[0];
- BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
-
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- ei0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_item_v0);
- refs = btrfs_extent_refs_v0(leaf, ei0);
-
- if (owner == (u64)-1) {
- while (1) {
- if (path->slots[0] >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- return ret;
- BUG_ON(ret > 0); /* Corruption */
- leaf = path->nodes[0];
- }
- btrfs_item_key_to_cpu(leaf, &found_key,
- path->slots[0]);
- BUG_ON(key.objectid != found_key.objectid);
- if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
- path->slots[0]++;
- continue;
- }
- ref0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_ref_v0);
- owner = btrfs_ref_objectid_v0(leaf, ref0);
- break;
- }
- }
- btrfs_release_path(path);
-
- if (owner < BTRFS_FIRST_FREE_OBJECTID)
- new_size += sizeof(*bi);
-
- new_size -= sizeof(*ei0);
- ret = btrfs_search_slot(trans, root, &key, path,
- new_size + extra_size, 1);
- if (ret < 0)
- return ret;
- BUG_ON(ret); /* Corruption */
-
- btrfs_extend_item(fs_info, path, new_size);
-
- leaf = path->nodes[0];
- item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
- btrfs_set_extent_refs(leaf, item, refs);
- /* FIXME: get real generation */
- btrfs_set_extent_generation(leaf, item, 0);
- if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- btrfs_set_extent_flags(leaf, item,
- BTRFS_EXTENT_FLAG_TREE_BLOCK |
- BTRFS_BLOCK_FLAG_FULL_BACKREF);
- bi = (struct btrfs_tree_block_info *)(item + 1);
- /* FIXME: get first key of the block */
- memzero_extent_buffer(leaf, (unsigned long)bi, sizeof(*bi));
- btrfs_set_tree_block_level(leaf, bi, (int)owner);
- } else {
- btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
- }
- btrfs_mark_buffer_dirty(leaf);
- return 0;
-}
-#endif
-
/*
* is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
* is_data == BTRFS_REF_TYPE_DATA, data type is required,
@@ -1216,13 +1129,12 @@ static int match_extent_data_ref(struct extent_buffer *leaf,
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid,
u64 owner, u64 offset)
{
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *root = trans->fs_info->extent_root;
struct btrfs_key key;
struct btrfs_extent_data_ref *ref;
struct extent_buffer *leaf;
@@ -1251,17 +1163,6 @@ again:
if (parent) {
if (!ret)
return 0;
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- key.type = BTRFS_EXTENT_REF_V0_KEY;
- btrfs_release_path(path);
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0) {
- err = ret;
- goto fail;
- }
- if (!ret)
- return 0;
-#endif
goto fail;
}
@@ -1304,13 +1205,12 @@ fail:
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid, u64 owner,
u64 offset, int refs_to_add)
{
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *root = trans->fs_info->extent_root;
struct btrfs_key key;
struct extent_buffer *leaf;
u32 size;
@@ -1384,7 +1284,6 @@ fail:
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int refs_to_drop, int *last_ref)
{
@@ -1406,13 +1305,10 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
ref2 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
- struct btrfs_extent_ref_v0 *ref0;
- ref0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_ref_v0);
- num_refs = btrfs_ref_count_v0(leaf, ref0);
-#endif
+ } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+ btrfs_print_v0_err(trans->fs_info);
+ btrfs_abort_transaction(trans, -EINVAL);
+ return -EINVAL;
} else {
BUG();
}
@@ -1421,21 +1317,13 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
num_refs -= refs_to_drop;
if (num_refs == 0) {
- ret = btrfs_del_item(trans, fs_info->extent_root, path);
+ ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
*last_ref = 1;
} else {
if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- else {
- struct btrfs_extent_ref_v0 *ref0;
- ref0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_ref_v0);
- btrfs_set_ref_count_v0(leaf, ref0, num_refs);
- }
-#endif
btrfs_mark_buffer_dirty(leaf);
}
return ret;
@@ -1453,6 +1341,8 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path,
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+ BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
if (iref) {
/*
* If type is invalid, we should have bailed out earlier than
@@ -1475,13 +1365,6 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path,
ref2 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
- struct btrfs_extent_ref_v0 *ref0;
- ref0 = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_extent_ref_v0);
- num_refs = btrfs_ref_count_v0(leaf, ref0);
-#endif
} else {
WARN_ON(1);
}
@@ -1489,12 +1372,11 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path,
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid)
{
- struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_root *root = trans->fs_info->extent_root;
struct btrfs_key key;
int ret;
@@ -1510,20 +1392,10 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
ret = -ENOENT;
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (ret == -ENOENT && parent) {
- btrfs_release_path(path);
- key.type = BTRFS_EXTENT_REF_V0_KEY;
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret > 0)
- ret = -ENOENT;
- }
-#endif
return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 parent,
u64 root_objectid)
@@ -1540,7 +1412,7 @@ static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
key.offset = root_objectid;
}
- ret = btrfs_insert_empty_item(trans, fs_info->extent_root,
+ ret = btrfs_insert_empty_item(trans, trans->fs_info->extent_root,
path, &key, 0);
btrfs_release_path(path);
return ret;
@@ -1599,13 +1471,13 @@ static int find_next_key(struct btrfs_path *path, int level,
*/
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_extent_inline_ref **ref_ret,
u64 bytenr, u64 num_bytes,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int insert)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_key key;
struct extent_buffer *leaf;
@@ -1635,8 +1507,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
extra_size = -1;
/*
- * Owner is our parent level, so we can just add one to get the level
- * for the block we are interested in.
+ * Owner is our level, so we can just add one to get the level for the
+ * block we are interested in.
*/
if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
key.type = BTRFS_METADATA_ITEM_KEY;
@@ -1684,23 +1556,12 @@ again:
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (item_size < sizeof(*ei)) {
- if (!insert) {
- err = -ENOENT;
- goto out;
- }
- ret = convert_extent_item_v0(trans, fs_info, path, owner,
- extra_size);
- if (ret < 0) {
- err = ret;
- goto out;
- }
- leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ if (unlikely(item_size < sizeof(*ei))) {
+ err = -EINVAL;
+ btrfs_print_v0_err(fs_info);
+ btrfs_abort_transaction(trans, err);
+ goto out;
}
-#endif
- BUG_ON(item_size < sizeof(*ei));
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
flags = btrfs_extent_flags(leaf, ei);
@@ -1727,7 +1588,7 @@ again:
iref = (struct btrfs_extent_inline_ref *)ptr;
type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
if (type == BTRFS_REF_TYPE_INVALID) {
- err = -EINVAL;
+ err = -EUCLEAN;
goto out;
}
@@ -1863,7 +1724,6 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_extent_inline_ref **ref_ret,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -1871,9 +1731,9 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
{
int ret;
- ret = lookup_inline_extent_backref(trans, fs_info, path, ref_ret,
- bytenr, num_bytes, parent,
- root_objectid, owner, offset, 0);
+ ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
+ num_bytes, parent, root_objectid,
+ owner, offset, 0);
if (ret != -ENOENT)
return ret;
@@ -1881,12 +1741,11 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
*ref_ret = NULL;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- ret = lookup_tree_block_ref(trans, fs_info, path, bytenr,
- parent, root_objectid);
+ ret = lookup_tree_block_ref(trans, path, bytenr, parent,
+ root_objectid);
} else {
- ret = lookup_extent_data_ref(trans, fs_info, path, bytenr,
- parent, root_objectid, owner,
- offset);
+ ret = lookup_extent_data_ref(trans, path, bytenr, parent,
+ root_objectid, owner, offset);
}
return ret;
}
@@ -1895,14 +1754,14 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
* helper to update/remove inline back ref
*/
static noinline_for_stack
-void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
- struct btrfs_path *path,
+void update_inline_extent_backref(struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_mod,
struct btrfs_delayed_extent_op *extent_op,
int *last_ref)
{
- struct extent_buffer *leaf;
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_extent_item *ei;
struct btrfs_extent_data_ref *dref = NULL;
struct btrfs_shared_data_ref *sref = NULL;
@@ -1913,7 +1772,6 @@ void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
int type;
u64 refs;
- leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
@@ -1965,7 +1823,6 @@ void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner,
@@ -1975,15 +1832,15 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_extent_inline_ref *iref;
int ret;
- ret = lookup_inline_extent_backref(trans, fs_info, path, &iref,
- bytenr, num_bytes, parent,
- root_objectid, owner, offset, 1);
+ ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
+ num_bytes, parent, root_objectid,
+ owner, offset, 1);
if (ret == 0) {
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
- update_inline_extent_backref(fs_info, path, iref,
- refs_to_add, extent_op, NULL);
+ update_inline_extent_backref(path, iref, refs_to_add,
+ extent_op, NULL);
} else if (ret == -ENOENT) {
- setup_inline_extent_backref(fs_info, path, iref, parent,
+ setup_inline_extent_backref(trans->fs_info, path, iref, parent,
root_objectid, owner, offset,
refs_to_add, extent_op);
ret = 0;
@@ -1992,7 +1849,6 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 bytenr, u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add)
@@ -2000,18 +1856,17 @@ static int insert_extent_backref(struct btrfs_trans_handle *trans,
int ret;
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
BUG_ON(refs_to_add != 1);
- ret = insert_tree_block_ref(trans, fs_info, path, bytenr,
- parent, root_objectid);
+ ret = insert_tree_block_ref(trans, path, bytenr, parent,
+ root_objectid);
} else {
- ret = insert_extent_data_ref(trans, fs_info, path, bytenr,
- parent, root_objectid,
- owner, offset, refs_to_add);
+ ret = insert_extent_data_ref(trans, path, bytenr, parent,
+ root_objectid, owner, offset,
+ refs_to_add);
}
return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_drop, int is_data, int *last_ref)
@@ -2020,14 +1875,14 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
BUG_ON(!is_data && refs_to_drop != 1);
if (iref) {
- update_inline_extent_backref(fs_info, path, iref,
- -refs_to_drop, NULL, last_ref);
+ update_inline_extent_backref(path, iref, -refs_to_drop, NULL,
+ last_ref);
} else if (is_data) {
- ret = remove_extent_data_ref(trans, fs_info, path, refs_to_drop,
+ ret = remove_extent_data_ref(trans, path, refs_to_drop,
last_ref);
} else {
*last_ref = 1;
- ret = btrfs_del_item(trans, fs_info->extent_root, path);
+ ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
}
return ret;
}
@@ -2185,13 +2040,13 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
owner, offset, BTRFS_ADD_DELAYED_REF);
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+ ret = btrfs_add_delayed_tree_ref(trans, bytenr,
num_bytes, parent,
root_objectid, (int)owner,
BTRFS_ADD_DELAYED_REF, NULL,
&old_ref_mod, &new_ref_mod);
} else {
- ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+ ret = btrfs_add_delayed_data_ref(trans, bytenr,
num_bytes, parent,
root_objectid, owner, offset,
0, BTRFS_ADD_DELAYED_REF,
@@ -2207,8 +2062,41 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
return ret;
}
+/*
+ * __btrfs_inc_extent_ref - insert backreference for a given extent
+ *
+ * @trans: Handle of transaction
+ *
+ * @node: The delayed ref node used to get the bytenr/length for
+ * extent whose references are incremented.
+ *
+ * @parent: If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
+ * BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
+ * bytenr of the parent block. Since new extents are always
+ * created with indirect references, this will only be the case
+ * when relocating a shared extent. In that case, root_objectid
+ * will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
+ * be 0.
+ *
+ * @root_objectid: The id of the root where this modification has originated,
+ * this can be either one of the well-known metadata trees or
+ * the subvolume id which references this extent.
+ *
+ * @owner: For data extents it is the inode number of the owning file.
+ * For metadata extents this parameter holds the level in the
+ * tree of the extent.
+ *
+ * @offset: For metadata extents the offset is ignored and is currently
+ * always passed as 0. For data extents it is the file offset
+ * this extent belongs to.
+ *
+ * @refs_to_add: Number of references to add
+ *
+ * @extent_op: Pointer to a structure holding information necessary when
+ * updating a tree block's flags
+ *
+ */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add,
@@ -2230,10 +2118,9 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
path->reada = READA_FORWARD;
path->leave_spinning = 1;
/* this will setup the path even if it fails to insert the back ref */
- ret = insert_inline_extent_backref(trans, fs_info, path, bytenr,
- num_bytes, parent, root_objectid,
- owner, offset,
- refs_to_add, extent_op);
+ ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
+ parent, root_objectid, owner,
+ offset, refs_to_add, extent_op);
if ((ret < 0 && ret != -EAGAIN) || !ret)
goto out;
@@ -2256,8 +2143,8 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
path->reada = READA_FORWARD;
path->leave_spinning = 1;
/* now insert the actual backref */
- ret = insert_extent_backref(trans, fs_info, path, bytenr, parent,
- root_objectid, owner, offset, refs_to_add);
+ ret = insert_extent_backref(trans, path, bytenr, parent, root_objectid,
+ owner, offset, refs_to_add);
if (ret)
btrfs_abort_transaction(trans, ret);
out:
@@ -2266,7 +2153,6 @@ out:
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
@@ -2283,7 +2169,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
ins.type = BTRFS_EXTENT_ITEM_KEY;
ref = btrfs_delayed_node_to_data_ref(node);
- trace_run_delayed_data_ref(fs_info, node, ref, node->action);
+ trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_DATA_REF_KEY)
parent = ref->parent;
@@ -2292,17 +2178,16 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
if (extent_op)
flags |= extent_op->flags_to_set;
- ret = alloc_reserved_file_extent(trans, fs_info,
- parent, ref_root, flags,
- ref->objectid, ref->offset,
- &ins, node->ref_mod);
+ ret = alloc_reserved_file_extent(trans, parent, ref_root,
+ flags, ref->objectid,
+ ref->offset, &ins,
+ node->ref_mod);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
- ref_root, ref->objectid,
- ref->offset, node->ref_mod,
- extent_op);
+ ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
+ ref->objectid, ref->offset,
+ node->ref_mod, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, fs_info, node, parent,
+ ret = __btrfs_free_extent(trans, node, parent,
ref_root, ref->objectid,
ref->offset, node->ref_mod,
extent_op);
@@ -2331,10 +2216,10 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_extent_op *extent_op)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_key key;
struct btrfs_path *path;
struct btrfs_extent_item *ei;
@@ -2400,18 +2285,14 @@ again:
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (item_size < sizeof(*ei)) {
- ret = convert_extent_item_v0(trans, fs_info, path, (u64)-1, 0);
- if (ret < 0) {
- err = ret;
- goto out;
- }
- leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+
+ if (unlikely(item_size < sizeof(*ei))) {
+ err = -EINVAL;
+ btrfs_print_v0_err(fs_info);
+ btrfs_abort_transaction(trans, err);
+ goto out;
}
-#endif
- BUG_ON(item_size < sizeof(*ei));
+
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
__run_delayed_extent_op(extent_op, leaf, ei);
@@ -2422,7 +2303,6 @@ out:
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
@@ -2433,14 +2313,14 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
u64 ref_root = 0;
ref = btrfs_delayed_node_to_tree_ref(node);
- trace_run_delayed_tree_ref(fs_info, node, ref, node->action);
+ trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
parent = ref->parent;
ref_root = ref->root;
if (node->ref_mod != 1) {
- btrfs_err(fs_info,
+ btrfs_err(trans->fs_info,
"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
node->bytenr, node->ref_mod, node->action, ref_root,
parent);
@@ -2450,13 +2330,10 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
BUG_ON(!extent_op || !extent_op->update_flags);
ret = alloc_reserved_tree_block(trans, node, extent_op);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, fs_info, node,
- parent, ref_root,
- ref->level, 0, 1,
- extent_op);
+ ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
+ ref->level, 0, 1, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, fs_info, node,
- parent, ref_root,
+ ret = __btrfs_free_extent(trans, node, parent, ref_root,
ref->level, 0, 1, extent_op);
} else {
BUG();
@@ -2466,7 +2343,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
int insert_reserved)
@@ -2475,18 +2351,18 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
if (trans->aborted) {
if (insert_reserved)
- btrfs_pin_extent(fs_info, node->bytenr,
+ btrfs_pin_extent(trans->fs_info, node->bytenr,
node->num_bytes, 1);
return 0;
}
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
node->type == BTRFS_SHARED_BLOCK_REF_KEY)
- ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
+ ret = run_delayed_tree_ref(trans, node, extent_op,
insert_reserved);
else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
node->type == BTRFS_SHARED_DATA_REF_KEY)
- ret = run_delayed_data_ref(trans, fs_info, node, extent_op,
+ ret = run_delayed_data_ref(trans, node, extent_op,
insert_reserved);
else
BUG();
@@ -2528,7 +2404,6 @@ static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_ref
}
static int cleanup_extent_op(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_extent_op *extent_op = head->extent_op;
@@ -2542,21 +2417,22 @@ static int cleanup_extent_op(struct btrfs_trans_handle *trans,
return 0;
}
spin_unlock(&head->lock);
- ret = run_delayed_extent_op(trans, fs_info, head, extent_op);
+ ret = run_delayed_extent_op(trans, head, extent_op);
btrfs_free_delayed_extent_op(extent_op);
return ret ? ret : 1;
}
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
int ret;
delayed_refs = &trans->transaction->delayed_refs;
- ret = cleanup_extent_op(trans, fs_info, head);
+ ret = cleanup_extent_op(trans, head);
if (ret < 0) {
unselect_delayed_ref_head(delayed_refs, head);
btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
@@ -2598,8 +2474,9 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
flags = BTRFS_BLOCK_GROUP_METADATA;
space_info = __find_space_info(fs_info, flags);
ASSERT(space_info);
- percpu_counter_add(&space_info->total_bytes_pinned,
- -head->num_bytes);
+ percpu_counter_add_batch(&space_info->total_bytes_pinned,
+ -head->num_bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
if (head->is_data) {
spin_lock(&delayed_refs->lock);
@@ -2705,7 +2582,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
* up and move on to the next ref_head.
*/
if (!ref) {
- ret = cleanup_ref_head(trans, fs_info, locked_ref);
+ ret = cleanup_ref_head(trans, locked_ref);
if (ret > 0 ) {
/* We dropped our lock, we need to loop. */
ret = 0;
@@ -2752,7 +2629,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
locked_ref->extent_op = NULL;
spin_unlock(&locked_ref->lock);
- ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
+ ret = run_one_delayed_ref(trans, ref, extent_op,
must_insert_reserved);
btrfs_free_delayed_extent_op(extent_op);
@@ -3227,12 +3104,6 @@ static noinline int check_committed_ref(struct btrfs_root *root,
ret = 1;
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (item_size < sizeof(*ei)) {
- WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
- goto out;
- }
-#endif
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
if (item_size != sizeof(*ei) +
@@ -4060,11 +3931,7 @@ static void update_space_info(struct btrfs_fs_info *info, u64 flags,
struct btrfs_space_info *found;
int factor;
- if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))
- factor = 2;
- else
- factor = 1;
+ factor = btrfs_bg_type_to_factor(flags);
found = __find_space_info(info, flags);
ASSERT(found);
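
The same open-coded factor computation is removed again in can_overcommit, btrfs_account_ro_block_groups_free_space and btrfs_remove_block_group below. A sketch of what the consolidated helper presumably computes, reconstructed from the removed lines rather than from its actual definition (which lands elsewhere in this series):

	static int bg_type_to_factor_sketch(u64 flags)
	{
		/* DUP, RAID1 and RAID10 store two copies of every byte, so
		 * raw disk usage is twice the logical usage; all other
		 * profiles map 1:1.
		 */
		if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return 2;
		return 1;
	}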
@@ -4289,7 +4156,7 @@ again:
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = do_chunk_alloc(trans, fs_info, alloc_target,
+ ret = do_chunk_alloc(trans, alloc_target,
CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans);
if (ret < 0) {
@@ -4309,9 +4176,10 @@ again:
* allocation, and no removed chunk in current transaction,
* don't bother committing the transaction.
*/
- have_pinned_space = percpu_counter_compare(
+ have_pinned_space = __percpu_counter_compare(
&data_sinfo->total_bytes_pinned,
- used + bytes - data_sinfo->total_bytes);
+ used + bytes - data_sinfo->total_bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
spin_unlock(&data_sinfo->lock);
/* commit the current transaction and try again */
@@ -4358,7 +4226,7 @@ commit_trans:
data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
- return ret;
+ return 0;
}
int btrfs_check_data_free_space(struct inode *inode,
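
This hunk and the two in may_commit_transaction below switch reads from percpu_counter_compare() to __percpu_counter_compare() with the same BTRFS_TOTAL_BYTES_PINNED_BATCH that the add side now passes to percpu_counter_add_batch(). The point is that a batched per-CPU counter's folded value can be off by up to nr_cpus * batch, so a comparison must know the batch to decide when an approximate answer is safe. A rough model of that decision (hypothetical helper; the real logic lives in lib/percpu_counter.c):

	static int compare_sketch(s64 folded, int nr_cpus, s32 batch, s64 rhs)
	{
		s64 margin = (s64)nr_cpus * batch;	/* worst-case unfolded error */

		if (folded - rhs > margin)
			return 1;	/* definitely greater, no exact sum needed */
		if (rhs - folded > margin)
			return -1;	/* definitely smaller */
		/*
		 * Too close to call: the real __percpu_counter_compare()
		 * falls back to summing every CPU's local delta here.
		 */
		return 0;
	}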
@@ -4511,9 +4379,9 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
* for allocating a chunk, otherwise if it's false, reserve space necessary for
* removing a chunk.
*/
-void check_system_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 type)
+void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_space_info *info;
u64 left;
u64 thresh;
@@ -4552,7 +4420,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
* the paths we visit in the chunk tree (they were already COWed
* or created in the current transaction for example).
*/
- ret = btrfs_alloc_chunk(trans, fs_info, flags);
+ ret = btrfs_alloc_chunk(trans, flags);
}
if (!ret) {
@@ -4573,11 +4441,13 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
* - return 1 if it successfully allocates a chunk,
* - return errors including -ENOSPC otherwise.
*/
-static int do_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 flags, int force)
+static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
+ int force)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_space_info *space_info;
- int wait_for_alloc = 0;
+ bool wait_for_alloc = false;
+ bool should_alloc = false;
int ret = 0;
/* Don't re-enter if we're already allocating a chunk */
@@ -4587,45 +4457,44 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
space_info = __find_space_info(fs_info, flags);
ASSERT(space_info);
-again:
- spin_lock(&space_info->lock);
- if (force < space_info->force_alloc)
- force = space_info->force_alloc;
- if (space_info->full) {
- if (should_alloc_chunk(fs_info, space_info, force))
- ret = -ENOSPC;
- else
- ret = 0;
- spin_unlock(&space_info->lock);
- return ret;
- }
-
- if (!should_alloc_chunk(fs_info, space_info, force)) {
- spin_unlock(&space_info->lock);
- return 0;
- } else if (space_info->chunk_alloc) {
- wait_for_alloc = 1;
- } else {
- space_info->chunk_alloc = 1;
- }
-
- spin_unlock(&space_info->lock);
-
- mutex_lock(&fs_info->chunk_mutex);
+ do {
+ spin_lock(&space_info->lock);
+ if (force < space_info->force_alloc)
+ force = space_info->force_alloc;
+ should_alloc = should_alloc_chunk(fs_info, space_info, force);
+ if (space_info->full) {
+ /* No more free physical space */
+ if (should_alloc)
+ ret = -ENOSPC;
+ else
+ ret = 0;
+ spin_unlock(&space_info->lock);
+ return ret;
+ } else if (!should_alloc) {
+ spin_unlock(&space_info->lock);
+ return 0;
+ } else if (space_info->chunk_alloc) {
+ /*
+ * Someone is already allocating, so we need to block
+ * until that allocation finishes and then loop to
+ * recheck whether we should continue with our own allocation
+ * attempt.
+ */
+ wait_for_alloc = true;
+ spin_unlock(&space_info->lock);
+ mutex_lock(&fs_info->chunk_mutex);
+ mutex_unlock(&fs_info->chunk_mutex);
+ } else {
+ /* Proceed with allocation */
+ space_info->chunk_alloc = 1;
+ wait_for_alloc = false;
+ spin_unlock(&space_info->lock);
+ }
- /*
- * The chunk_mutex is held throughout the entirety of a chunk
- * allocation, so once we've acquired the chunk_mutex we know that the
- * other guy is done and we need to recheck and see if we should
- * allocate.
- */
- if (wait_for_alloc) {
- mutex_unlock(&fs_info->chunk_mutex);
- wait_for_alloc = 0;
cond_resched();
- goto again;
- }
+ } while (wait_for_alloc);
+ mutex_lock(&fs_info->chunk_mutex);
trans->allocating_chunk = true;
/*
@@ -4651,9 +4520,9 @@ again:
* Check if we have enough space in SYSTEM chunk because we may need
* to update devices.
*/
- check_system_chunk(trans, fs_info, flags);
+ check_system_chunk(trans, flags);
- ret = btrfs_alloc_chunk(trans, fs_info, flags);
+ ret = btrfs_alloc_chunk(trans, flags);
trans->allocating_chunk = false;
spin_lock(&space_info->lock);
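
A condensed view of the control flow the do { } while (wait_for_alloc) rewrite produces; waiting is expressed by taking and immediately dropping chunk_mutex, which the in-flight allocator holds for the entire allocation (restated from the hunk above, not additional code):

	for (;;) {
		spin_lock(&space_info->lock);
		/* full, or nothing to do: return under the lock, as above */
		if (space_info->chunk_alloc) {
			spin_unlock(&space_info->lock);
			mutex_lock(&fs_info->chunk_mutex);	/* sleep until allocator is done */
			mutex_unlock(&fs_info->chunk_mutex);	/* then re-evaluate from scratch */
			continue;
		}
		space_info->chunk_alloc = 1;	/* we become the allocator */
		spin_unlock(&space_info->lock);
		break;
	}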
@@ -4703,6 +4572,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info,
u64 space_size;
u64 avail;
u64 used;
+ int factor;
/* Don't overcommit when in mixed mode. */
if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
@@ -4737,10 +4607,8 @@ static int can_overcommit(struct btrfs_fs_info *fs_info,
* doesn't include the parity drive, so we don't have to
* change the math
*/
- if (profile & (BTRFS_BLOCK_GROUP_DUP |
- BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))
- avail >>= 1;
+ factor = btrfs_bg_type_to_factor(profile);
+ avail = div_u64(avail, factor);
/*
* If we aren't flushing all things, let us overcommit up to
@@ -4912,8 +4780,9 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
return 0;
/* See if there is enough pinned space to make this reservation */
- if (percpu_counter_compare(&space_info->total_bytes_pinned,
- bytes) >= 0)
+ if (__percpu_counter_compare(&space_info->total_bytes_pinned,
+ bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
goto commit;
/*
@@ -4930,8 +4799,9 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
bytes -= delayed_rsv->size;
spin_unlock(&delayed_rsv->lock);
- if (percpu_counter_compare(&space_info->total_bytes_pinned,
- bytes) < 0) {
+ if (__percpu_counter_compare(&space_info->total_bytes_pinned,
+ bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0) {
return -ENOSPC;
}
@@ -4984,7 +4854,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
ret = PTR_ERR(trans);
break;
}
- ret = do_chunk_alloc(trans, fs_info,
+ ret = do_chunk_alloc(trans,
btrfs_metadata_alloc_profile(fs_info),
CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans);
@@ -5659,11 +5529,6 @@ void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
kfree(rsv);
}
-void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
-{
- kfree(rsv);
-}
-
int btrfs_block_rsv_add(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush)
@@ -6019,7 +5884,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned nr_extents;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret = 0;
@@ -6092,7 +5957,7 @@ out_fail:
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
bool qgroup_free)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
spin_lock(&inode->lock);
@@ -6121,7 +5986,7 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
bool qgroup_free)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
unsigned num_extents;
spin_lock(&inode->lock);
@@ -6219,12 +6084,8 @@ static int update_block_group(struct btrfs_trans_handle *trans,
cache = btrfs_lookup_block_group(info, bytenr);
if (!cache)
return -ENOENT;
- if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
- BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))
- factor = 2;
- else
- factor = 1;
+ factor = btrfs_bg_type_to_factor(cache->flags);
+
/*
* If this block group has free space cache written out, we
* need to make sure to load it if we are removing space. This
@@ -6268,8 +6129,9 @@ static int update_block_group(struct btrfs_trans_handle *trans,
trace_btrfs_space_reservation(info, "pinned",
cache->space_info->flags,
num_bytes, 1);
- percpu_counter_add(&cache->space_info->total_bytes_pinned,
- num_bytes);
+ percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
+ num_bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
set_extent_dirty(info->pinned_extents,
bytenr, bytenr + num_bytes - 1,
GFP_NOFS | __GFP_NOFAIL);
@@ -6279,7 +6141,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
if (list_empty(&cache->dirty_list)) {
list_add_tail(&cache->dirty_list,
&trans->transaction->dirty_bgs);
- trans->transaction->num_dirty_bgs++;
+ trans->transaction->num_dirty_bgs++;
btrfs_get_block_group(cache);
}
spin_unlock(&trans->transaction->dirty_bgs_lock);
@@ -6290,16 +6152,8 @@ static int update_block_group(struct btrfs_trans_handle *trans,
* dirty list to avoid races between cleaner kthread and space
* cache writeout.
*/
- if (!alloc && old_val == 0) {
- spin_lock(&info->unused_bgs_lock);
- if (list_empty(&cache->bg_list)) {
- btrfs_get_block_group(cache);
- trace_btrfs_add_unused_block_group(cache);
- list_add_tail(&cache->bg_list,
- &info->unused_bgs);
- }
- spin_unlock(&info->unused_bgs_lock);
- }
+ if (!alloc && old_val == 0)
+ btrfs_mark_bg_unused(cache);
btrfs_put_block_group(cache);
total -= num_bytes;
@@ -6347,7 +6201,8 @@ static int pin_down_extent(struct btrfs_fs_info *fs_info,
trace_btrfs_space_reservation(fs_info, "pinned",
cache->space_info->flags, num_bytes, 1);
- percpu_counter_add(&cache->space_info->total_bytes_pinned, num_bytes);
+ percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
+ num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
set_extent_dirty(fs_info->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
return 0;
@@ -6711,7 +6566,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
trace_btrfs_space_reservation(fs_info, "pinned",
space_info->flags, len, 0);
space_info->max_extent_size = 0;
- percpu_counter_add(&space_info->total_bytes_pinned, -len);
+ percpu_counter_add_batch(&space_info->total_bytes_pinned,
+ -len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
if (cache->ro) {
space_info->bytes_readonly += len;
readonly = true;
@@ -6815,12 +6671,12 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
}
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *info,
- struct btrfs_delayed_ref_node *node, u64 parent,
- u64 root_objectid, u64 owner_objectid,
- u64 owner_offset, int refs_to_drop,
- struct btrfs_delayed_extent_op *extent_op)
+ struct btrfs_delayed_ref_node *node, u64 parent,
+ u64 root_objectid, u64 owner_objectid,
+ u64 owner_offset, int refs_to_drop,
+ struct btrfs_delayed_extent_op *extent_op)
{
+ struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_key key;
struct btrfs_path *path;
struct btrfs_root *extent_root = info->extent_root;
@@ -6852,9 +6708,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
if (is_data)
skinny_metadata = false;
- ret = lookup_extent_backref(trans, info, path, &iref,
- bytenr, num_bytes, parent,
- root_objectid, owner_objectid,
+ ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
+ parent, root_objectid, owner_objectid,
owner_offset);
if (ret == 0) {
extent_slot = path->slots[0];
@@ -6877,14 +6732,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
break;
extent_slot--;
}
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
- if (found_extent && item_size < sizeof(*ei))
- found_extent = 0;
-#endif
+
if (!found_extent) {
BUG_ON(iref);
- ret = remove_extent_backref(trans, info, path, NULL,
+ ret = remove_extent_backref(trans, path, NULL,
refs_to_drop,
is_data, &last_ref);
if (ret) {
@@ -6957,42 +6808,12 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, extent_slot);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (item_size < sizeof(*ei)) {
- BUG_ON(found_extent || extent_slot != path->slots[0]);
- ret = convert_extent_item_v0(trans, info, path, owner_objectid,
- 0);
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
-
- btrfs_release_path(path);
- path->leave_spinning = 1;
-
- key.objectid = bytenr;
- key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = num_bytes;
-
- ret = btrfs_search_slot(trans, extent_root, &key, path,
- -1, 1);
- if (ret) {
- btrfs_err(info,
- "umm, got %d back from search, was looking for %llu",
- ret, bytenr);
- btrfs_print_leaf(path->nodes[0]);
- }
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
-
- extent_slot = path->slots[0];
- leaf = path->nodes[0];
- item_size = btrfs_item_size_nr(leaf, extent_slot);
+ if (unlikely(item_size < sizeof(*ei))) {
+ ret = -EINVAL;
+ btrfs_print_v0_err(info);
+ btrfs_abort_transaction(trans, ret);
+ goto out;
}
-#endif
- BUG_ON(item_size < sizeof(*ei));
ei = btrfs_item_ptr(leaf, extent_slot,
struct btrfs_extent_item);
if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
@@ -7028,9 +6849,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
}
if (found_extent) {
- ret = remove_extent_backref(trans, info, path,
- iref, refs_to_drop,
- is_data, &last_ref);
+ ret = remove_extent_backref(trans, path, iref,
+ refs_to_drop, is_data,
+ &last_ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -7172,7 +6993,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
root->root_key.objectid,
btrfs_header_level(buf), 0,
BTRFS_DROP_DELAYED_REF);
- ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
+ ret = btrfs_add_delayed_tree_ref(trans, buf->start,
buf->len, parent,
root->root_key.objectid,
btrfs_header_level(buf),
@@ -7251,13 +7072,13 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
old_ref_mod = new_ref_mod = 0;
ret = 0;
} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+ ret = btrfs_add_delayed_tree_ref(trans, bytenr,
num_bytes, parent,
root_objectid, (int)owner,
BTRFS_DROP_DELAYED_REF, NULL,
&old_ref_mod, &new_ref_mod);
} else {
- ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+ ret = btrfs_add_delayed_data_ref(trans, bytenr,
num_bytes, parent,
root_objectid, owner, offset,
0, BTRFS_DROP_DELAYED_REF,
@@ -7534,7 +7355,7 @@ search:
* for the proper type.
*/
if (!block_group_bits(block_group, flags)) {
- u64 extra = BTRFS_BLOCK_GROUP_DUP |
- u64 extra = BTRFS_BLOCK_GROUP_DUP |
+ u64 extra = BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_RAID6 |
@@ -7738,7 +7559,7 @@ unclustered_alloc:
goto loop;
}
checks:
- search_start = ALIGN(offset, fs_info->stripesize);
+ search_start = round_up(offset, fs_info->stripesize);
/* move on to the next group */
if (search_start + num_bytes >
@@ -7750,7 +7571,6 @@ checks:
if (offset < search_start)
btrfs_add_free_space(block_group, offset,
search_start - offset);
- BUG_ON(offset > search_start);
ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
num_bytes, delalloc);
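
ALIGN() and round_up() yield identical results when the alignment is a power of two, such as stripesize; the switch only makes the rounding direction explicit at the call site. The shared arithmetic, as a sketch under that power-of-two assumption:

	#define round_up_sketch(x, a)	(((x) + (a) - 1) & ~((u64)(a) - 1))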
@@ -7826,8 +7646,7 @@ loop:
goto out;
}
- ret = do_chunk_alloc(trans, fs_info, flags,
- CHUNK_ALLOC_FORCE);
+ ret = do_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
/*
* If we can't allocate a new chunk we've already looped
@@ -8053,11 +7872,11 @@ int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
}
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
struct btrfs_key *ins, int ref_mod)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_extent_item *extent_item;
struct btrfs_extent_inline_ref *iref;
@@ -8231,7 +8050,6 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
@@ -8240,7 +8058,7 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
root->root_key.objectid, owner, offset,
BTRFS_ADD_DELAYED_EXTENT);
- ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
+ ret = btrfs_add_delayed_data_ref(trans, ins->objectid,
ins->offset, 0,
root->root_key.objectid, owner,
offset, ram_bytes,
@@ -8254,10 +8072,10 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
* space cache bits as well
*/
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_block_group_cache *block_group;
struct btrfs_space_info *space_info;
@@ -8285,15 +8103,15 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
- ret = alloc_reserved_file_extent(trans, fs_info, 0, root_objectid,
- 0, owner, offset, ins, 1);
+ ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
+ offset, ins, 1);
btrfs_put_block_group(block_group);
return ret;
}
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- u64 bytenr, int level)
+ u64 bytenr, int level, u64 owner)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *buf;
@@ -8302,7 +8120,6 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (IS_ERR(buf))
return buf;
- btrfs_set_header_generation(buf, trans->transid);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
clean_tree_block(fs_info, buf);
@@ -8311,6 +8128,14 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_set_lock_blocking(buf);
set_extent_buffer_uptodate(buf);
+ memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
+ btrfs_set_header_level(buf, level);
+ btrfs_set_header_bytenr(buf, buf->start);
+ btrfs_set_header_generation(buf, trans->transid);
+ btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
+ btrfs_set_header_owner(buf, owner);
+ write_extent_buffer_fsid(buf, fs_info->fsid);
+ write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
buf->log_index = root->log_transid % 2;
/*
@@ -8419,7 +8244,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (btrfs_is_testing(fs_info)) {
buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
- level);
+ level, root_objectid);
if (!IS_ERR(buf))
root->alloc_bytenr += blocksize;
return buf;
@@ -8435,7 +8260,8 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
if (ret)
goto out_unuse;
- buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
+ buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
+ root_objectid);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_free_reserved;
@@ -8467,7 +8293,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
root_objectid, level, 0,
BTRFS_ADD_DELAYED_EXTENT);
- ret = btrfs_add_delayed_tree_ref(fs_info, trans, ins.objectid,
+ ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
ins.offset, parent,
root_objectid, level,
BTRFS_ADD_DELAYED_EXTENT,
@@ -8499,7 +8325,6 @@ struct walk_control {
int keep_locks;
int reada_slot;
int reada_count;
- int for_reloc;
};
#define DROP_REFERENCE 1
@@ -8819,7 +8644,7 @@ skip:
}
if (need_account) {
- ret = btrfs_qgroup_trace_subtree(trans, root, next,
+ ret = btrfs_qgroup_trace_subtree(trans, next,
generation, level - 1);
if (ret) {
btrfs_err_rl(fs_info,
@@ -8919,7 +8744,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
else
ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
+ ret = btrfs_qgroup_trace_leaf_items(trans, eb);
if (ret) {
btrfs_err_rl(fs_info,
"error %d accounting leaf items. Quota is out of sync, rescan required.",
@@ -9136,7 +8961,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
wc->stage = DROP_REFERENCE;
wc->update_ref = update_ref;
wc->keep_locks = 0;
- wc->for_reloc = for_reloc;
wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
while (1) {
@@ -9199,7 +9023,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
if (err)
goto out_end_trans;
- ret = btrfs_del_root(trans, fs_info, &root->root_key);
+ ret = btrfs_del_root(trans, &root->root_key);
if (ret) {
btrfs_abort_transaction(trans, ret);
err = ret;
@@ -9302,7 +9126,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
wc->stage = DROP_REFERENCE;
wc->update_ref = 0;
wc->keep_locks = 1;
- wc->for_reloc = 1;
wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
while (1) {
@@ -9417,10 +9240,10 @@ out:
return ret;
}
-int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *cache)
+int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_trans_handle *trans;
u64 alloc_flags;
int ret;
@@ -9454,7 +9277,7 @@ again:
*/
alloc_flags = update_block_group_flags(fs_info, cache->flags);
if (alloc_flags != cache->flags) {
- ret = do_chunk_alloc(trans, fs_info, alloc_flags,
+ ret = do_chunk_alloc(trans, alloc_flags,
CHUNK_ALLOC_FORCE);
/*
* ENOSPC is allowed here, we may have enough space
@@ -9471,8 +9294,7 @@ again:
if (!ret)
goto out;
alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
- ret = do_chunk_alloc(trans, fs_info, alloc_flags,
- CHUNK_ALLOC_FORCE);
+ ret = do_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
ret = inc_block_group_ro(cache, 0);
@@ -9480,7 +9302,7 @@ out:
if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
alloc_flags = update_block_group_flags(fs_info, cache->flags);
mutex_lock(&fs_info->chunk_mutex);
- check_system_chunk(trans, fs_info, alloc_flags);
+ check_system_chunk(trans, alloc_flags);
mutex_unlock(&fs_info->chunk_mutex);
}
mutex_unlock(&fs_info->ro_block_group_mutex);
@@ -9489,12 +9311,11 @@ out:
return ret;
}
-int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 type)
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
{
- u64 alloc_flags = get_alloc_profile(fs_info, type);
+ u64 alloc_flags = get_alloc_profile(trans->fs_info, type);
- return do_chunk_alloc(trans, fs_info, alloc_flags, CHUNK_ALLOC_FORCE);
+ return do_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}
/*
@@ -9520,13 +9341,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
continue;
}
- if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10 |
- BTRFS_BLOCK_GROUP_DUP))
- factor = 2;
- else
- factor = 1;
-
+ factor = btrfs_bg_type_to_factor(block_group->flags);
free_bytes += (block_group->key.offset -
btrfs_block_group_used(&block_group->item)) *
factor;
@@ -9717,6 +9532,8 @@ static int find_first_block_group(struct btrfs_fs_info *fs_info,
int ret = 0;
struct btrfs_key found_key;
struct extent_buffer *leaf;
+ struct btrfs_block_group_item bg;
+ u64 flags;
int slot;
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
@@ -9751,8 +9568,32 @@ static int find_first_block_group(struct btrfs_fs_info *fs_info,
"logical %llu len %llu found bg but no related chunk",
found_key.objectid, found_key.offset);
ret = -ENOENT;
+ } else if (em->start != found_key.objectid ||
+ em->len != found_key.offset) {
+ btrfs_err(fs_info,
+ "block group %llu len %llu mismatch with chunk %llu len %llu",
+ found_key.objectid, found_key.offset,
+ em->start, em->len);
+ ret = -EUCLEAN;
} else {
- ret = 0;
+ read_extent_buffer(leaf, &bg,
+ btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bg));
+ flags = btrfs_block_group_flags(&bg) &
+ BTRFS_BLOCK_GROUP_TYPE_MASK;
+
+ if (flags != (em->map_lookup->type &
+ BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ btrfs_err(fs_info,
+"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
+ found_key.objectid,
+ found_key.offset, flags,
+ (BTRFS_BLOCK_GROUP_TYPE_MASK &
+ em->map_lookup->type));
+ ret = -EUCLEAN;
+ } else {
+ ret = 0;
+ }
}
free_extent_map(em);
goto out;
@@ -9847,7 +9688,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
*/
if (block_group->cached == BTRFS_CACHE_NO ||
block_group->cached == BTRFS_CACHE_ERROR)
- free_excluded_extents(info, block_group);
+ free_excluded_extents(block_group);
btrfs_remove_free_space_cache(block_group);
ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
@@ -10003,6 +9844,62 @@ btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
return cache;
}
+
+/*
+ * Iterate all chunks and verify that each of them has the corresponding block
+ * group
+ */
+static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+ struct extent_map *em;
+ struct btrfs_block_group_cache *bg;
+ u64 start = 0;
+ int ret = 0;
+
+ while (1) {
+ read_lock(&map_tree->map_tree.lock);
+ /*
+ * lookup_extent_mapping will return the first extent map
+ * intersecting the range, so setting @len to 1 is enough to
+ * get the first chunk.
+ */
+ em = lookup_extent_mapping(&map_tree->map_tree, start, 1);
+ read_unlock(&map_tree->map_tree.lock);
+ if (!em)
+ break;
+
+ bg = btrfs_lookup_block_group(fs_info, em->start);
+ if (!bg) {
+ btrfs_err(fs_info,
+ "chunk start=%llu len=%llu doesn't have corresponding block group",
+ em->start, em->len);
+ ret = -EUCLEAN;
+ free_extent_map(em);
+ break;
+ }
+ if (bg->key.objectid != em->start ||
+ bg->key.offset != em->len ||
+ (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
+ (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ btrfs_err(fs_info,
+"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
+ em->start, em->len,
+ em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
+ bg->key.objectid, bg->key.offset,
+ bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
+ ret = -EUCLEAN;
+ free_extent_map(em);
+ btrfs_put_block_group(bg);
+ break;
+ }
+ start = em->start + em->len;
+ free_extent_map(em);
+ btrfs_put_block_group(bg);
+ }
+ return ret;
+}
+
int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
struct btrfs_path *path;
@@ -10089,13 +9986,13 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
* info has super bytes accounted for, otherwise we'll think
* we have more space than we actually do.
*/
- ret = exclude_super_stripes(info, cache);
+ ret = exclude_super_stripes(cache);
if (ret) {
/*
* We may have excluded something, so call this just in
* case.
*/
- free_excluded_extents(info, cache);
+ free_excluded_extents(cache);
btrfs_put_block_group(cache);
goto error;
}
@@ -10110,14 +10007,14 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
if (found_key.offset == btrfs_block_group_used(&cache->item)) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
- free_excluded_extents(info, cache);
+ free_excluded_extents(cache);
} else if (btrfs_block_group_used(&cache->item) == 0) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
add_new_free_space(cache, found_key.objectid,
found_key.objectid +
found_key.offset);
- free_excluded_extents(info, cache);
+ free_excluded_extents(cache);
}
ret = btrfs_add_block_group_cache(info, cache);
@@ -10140,15 +10037,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
if (btrfs_chunk_readonly(info, cache->key.objectid)) {
inc_block_group_ro(cache, 1);
} else if (btrfs_block_group_used(&cache->item) == 0) {
- spin_lock(&info->unused_bgs_lock);
- /* Should always be true but just in case. */
- if (list_empty(&cache->bg_list)) {
- btrfs_get_block_group(cache);
- trace_btrfs_add_unused_block_group(cache);
- list_add_tail(&cache->bg_list,
- &info->unused_bgs);
- }
- spin_unlock(&info->unused_bgs_lock);
+ ASSERT(list_empty(&cache->bg_list));
+ btrfs_mark_bg_unused(cache);
}
}
@@ -10176,7 +10066,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
btrfs_add_raid_kobjects(info);
init_global_block_rsv(info);
- ret = 0;
+ ret = check_chunk_block_group_mappings(info);
error:
btrfs_free_path(path);
return ret;
@@ -10206,8 +10096,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
sizeof(item));
if (ret)
btrfs_abort_transaction(trans, ret);
- ret = btrfs_finish_chunk_alloc(trans, fs_info, key.objectid,
- key.offset);
+ ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
if (ret)
btrfs_abort_transaction(trans, ret);
add_block_group_free_space(trans, block_group);
@@ -10218,10 +10107,10 @@ next:
trans->can_flush_pending_bgs = can_flush_pending_bgs;
}
-int btrfs_make_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytes_used,
+int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
u64 type, u64 chunk_offset, u64 size)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group_cache *cache;
int ret;
@@ -10240,20 +10129,20 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
cache->needs_free_space = 1;
- ret = exclude_super_stripes(fs_info, cache);
+ ret = exclude_super_stripes(cache);
if (ret) {
/*
* We may have excluded something, so call this just in
* case.
*/
- free_excluded_extents(fs_info, cache);
+ free_excluded_extents(cache);
btrfs_put_block_group(cache);
return ret;
}
add_new_free_space(cache, chunk_offset, chunk_offset + size);
- free_excluded_extents(fs_info, cache);
+ free_excluded_extents(cache);
#ifdef CONFIG_BTRFS_DEBUG
if (btrfs_should_fragment_free_space(cache)) {
@@ -10311,9 +10200,9 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 group_start,
- struct extent_map *em)
+ u64 group_start, struct extent_map *em)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_path *path;
struct btrfs_block_group_cache *block_group;
@@ -10337,18 +10226,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* Free the reserved super bytes from this block group before
* remove it.
*/
- free_excluded_extents(fs_info, block_group);
+ free_excluded_extents(block_group);
btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
block_group->key.offset);
memcpy(&key, &block_group->key, sizeof(key));
index = btrfs_bg_flags_to_raid_index(block_group->flags);
- if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
- BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))
- factor = 2;
- else
- factor = 1;
+ factor = btrfs_bg_type_to_factor(block_group->flags);
/* make sure this block group isn't part of an allocation cluster */
cluster = &fs_info->data_alloc_cluster;
@@ -10687,7 +10571,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
/* Don't want to race with allocators so take the groups_sem */
down_write(&space_info->groups_sem);
spin_lock(&block_group->lock);
- if (block_group->reserved ||
+ if (block_group->reserved || block_group->pinned ||
btrfs_block_group_used(&block_group->item) ||
block_group->ro ||
list_is_singular(&block_group->list)) {
@@ -10764,8 +10648,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
space_info->bytes_pinned -= block_group->pinned;
space_info->bytes_readonly += block_group->pinned;
- percpu_counter_add(&space_info->total_bytes_pinned,
- -block_group->pinned);
+ percpu_counter_add_batch(&space_info->total_bytes_pinned,
+ -block_group->pinned,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
block_group->pinned = 0;
spin_unlock(&block_group->lock);
@@ -10782,8 +10667,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* Btrfs_remove_chunk will abort the transaction if things go
* horribly wrong.
*/
- ret = btrfs_remove_chunk(trans, fs_info,
- block_group->key.objectid);
+ ret = btrfs_remove_chunk(trans, block_group->key.objectid);
if (ret) {
if (trimming)
@@ -11066,3 +10950,16 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
!atomic_read(&root->will_be_snapshotted));
}
}
+
+void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+ spin_lock(&fs_info->unused_bgs_lock);
+ if (list_empty(&bg->bg_list)) {
+ btrfs_get_block_group(bg);
+ trace_btrfs_add_unused_block_group(bg);
+ list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+}
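
btrfs_mark_bg_unused() folds the two open-coded copies removed earlier in this patch (in update_block_group and btrfs_read_block_groups) into one idempotent helper: the reference taken before list_add_tail() is what keeps the block group alive until btrfs_delete_unused_bgs() processes the list, and the list_empty() test under unused_bgs_lock makes repeated calls harmless. Each call site now reduces to a single call, e.g. from update_block_group above:

	if (!alloc && old_val == 0)
		btrfs_mark_bg_unused(cache);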
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b3e45714d28f..4dd6faab02bb 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -140,14 +140,6 @@ static int add_extent_changeset(struct extent_state *state, unsigned bits,
static void flush_write_bio(struct extent_page_data *epd);
-static inline struct btrfs_fs_info *
-tree_fs_info(struct extent_io_tree *tree)
-{
- if (tree->ops)
- return tree->ops->tree_fs_info(tree->private_data);
- return NULL;
-}
-
int __init extent_io_init(void)
{
extent_state_cache = kmem_cache_create("btrfs_extent_state",
@@ -564,8 +556,10 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
- btrfs_panic(tree_fs_info(tree), err,
- "Locking error: Extent tree was modified by another thread while locked.");
+ struct inode *inode = tree->private_data;
+
+ btrfs_panic(btrfs_sb(inode->i_sb), err,
+ "locking error: extent tree was modified by another thread while locked");
}
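
extent_io_tree_panic() can do this because an extent_io tree's private_data points at the owning inode, removing the need for the tree_fs_info() indirection through tree->ops. btrfs_sb() is the usual superblock-to-private-info accessor, roughly (sketch; its definition is in ctree.h and unchanged here):

	static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
	{
		return sb->s_fs_info;
	}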
/*
@@ -1386,14 +1380,6 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
}
}
-/*
- * helper function to set both pages and extents in the tree writeback
- */
-static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
-{
- tree->ops->set_range_writeback(tree->private_data, start, end);
-}
-
/* find the first state struct with 'bits' set after 'start', and
* return it. tree->lock must be held. NULL will be returned if
* nothing was found after 'start'
@@ -2059,7 +2045,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int mirror_num)
{
u64 start = eb->start;
- unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
+ int i, num_pages = num_extent_pages(eb);
int ret = 0;
if (sb_rdonly(fs_info->sb))
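
The num_extent_pages() conversions here and below pass the buffer itself instead of (start, len), and the page count narrows from unsigned long to int. A plausible shape of the reworked helper (an assumption — the prototype change lives in extent_io.h, outside these hunks):

	static inline int num_extent_pages(const struct extent_buffer *eb)
	{
		return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
		       (eb->start >> PAGE_SHIFT);
	}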
@@ -2398,7 +2384,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
start - page_offset(page),
(int)phy_offset, failed_bio->bi_end_io,
NULL);
- bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
+ bio->bi_opf = REQ_OP_READ | read_mode;
btrfs_debug(btrfs_sb(inode->i_sb),
"Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
@@ -2790,8 +2776,8 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
else
contig = bio_end_sector(bio) == sector;
- if (tree->ops && tree->ops->merge_bio_hook(page, offset,
- page_size, bio, bio_flags))
+ if (tree->ops && btrfs_merge_bio_hook(page, offset, page_size,
+ bio, bio_flags))
can_merge = false;
if (prev_bio_flags != bio_flags || !contig || !can_merge ||
@@ -3116,7 +3102,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
for (index = 0; index < nr_pages; index++) {
__do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
- bio, 0, bio_flags, 0, prev_em_start);
+ bio, 0, bio_flags, REQ_RAHEAD, prev_em_start);
put_page(pages[index]);
}
}
@@ -3422,7 +3408,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
continue;
}
- set_range_writeback(tree, cur, cur + iosize - 1);
+ btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
if (!PageWriteback(page)) {
btrfs_err(BTRFS_I(inode)->root->fs_info,
"page %lu not writeback, cur %llu end %llu",
@@ -3538,7 +3524,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
struct btrfs_fs_info *fs_info,
struct extent_page_data *epd)
{
- unsigned long i, num_pages;
+ int i, num_pages;
int flush = 0;
int ret = 0;
@@ -3588,7 +3574,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
if (!ret)
return ret;
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
@@ -3712,13 +3698,13 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
u64 offset = eb->start;
u32 nritems;
- unsigned long i, num_pages;
+ int i, num_pages;
unsigned long start, end;
unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
int ret = 0;
clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
atomic_set(&eb->io_pages, num_pages);
/* set btree blocks beyond nritems with 0 to avoid stale content. */
@@ -4643,23 +4629,20 @@ int extent_buffer_under_io(struct extent_buffer *eb)
}
/*
- * Helper for releasing extent buffer page.
+ * Release all pages attached to the extent buffer.
*/
-static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
+static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
{
- unsigned long index;
- struct page *page;
- int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+ int i;
+ int num_pages;
+ int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
BUG_ON(extent_buffer_under_io(eb));
- index = num_extent_pages(eb->start, eb->len);
- if (index == 0)
- return;
+ num_pages = num_extent_pages(eb);
+ for (i = 0; i < num_pages; i++) {
+ struct page *page = eb->pages[i];
- do {
- index--;
- page = eb->pages[index];
if (!page)
continue;
if (mapped)
@@ -4691,7 +4674,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
/* One for when we allocated the page */
put_page(page);
- } while (index != 0);
+ }
}
/*
@@ -4699,7 +4682,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
*/
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
- btrfs_release_extent_buffer_page(eb);
+ btrfs_release_extent_buffer_pages(eb);
__free_extent_buffer(eb);
}
@@ -4743,10 +4726,10 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
- unsigned long i;
+ int i;
struct page *p;
struct extent_buffer *new;
- unsigned long num_pages = num_extent_pages(src->start, src->len);
+ int num_pages = num_extent_pages(src);
new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
if (new == NULL)
@@ -4766,7 +4749,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
}
set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
- set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
+ set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
return new;
}
@@ -4775,15 +4758,14 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, unsigned long len)
{
struct extent_buffer *eb;
- unsigned long num_pages;
- unsigned long i;
-
- num_pages = num_extent_pages(start, len);
+ int num_pages;
+ int i;
eb = __alloc_extent_buffer(fs_info, start, len);
if (!eb)
return NULL;
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
eb->pages[i] = alloc_page(GFP_NOFS);
if (!eb->pages[i])
@@ -4791,7 +4773,7 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
}
set_extent_buffer_uptodate(eb);
btrfs_set_header_nritems(eb, 0);
- set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+ set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
return eb;
err:
@@ -4843,11 +4825,11 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
static void mark_extent_buffer_accessed(struct extent_buffer *eb,
struct page *accessed)
{
- unsigned long num_pages, i;
+ int num_pages, i;
check_buffer_tree_ref(eb);
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
@@ -4944,8 +4926,8 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
unsigned long len = fs_info->nodesize;
- unsigned long num_pages = num_extent_pages(start, len);
- unsigned long i;
+ int num_pages;
+ int i;
unsigned long index = start >> PAGE_SHIFT;
struct extent_buffer *eb;
struct extent_buffer *exists = NULL;
@@ -4967,6 +4949,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
if (!eb)
return ERR_PTR(-ENOMEM);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++, index++) {
p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
if (!p) {
@@ -5009,8 +4992,11 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
uptodate = 0;
/*
- * see below about how we avoid a nasty race with release page
- * and why we unlock later
+ * We can't unlock the pages just yet since the extent buffer
+ * hasn't been properly inserted into the radix tree; this
+ * opens a race with btree_releasepage, which can free a page
+ * while we are still filling in all pages for the buffer and
+ * could crash.
*/
}
if (uptodate)
@@ -5039,21 +5025,12 @@ again:
set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
/*
- * there is a race where release page may have
- * tried to find this extent buffer in the radix
- * but failed. It will tell the VM it is safe to
- * reclaim the, and it will clear the page private bit.
- * We must make sure to set the page private bit properly
- * after the extent buffer is in the radix tree so
- * it doesn't get lost
+ * Now it's safe to unlock the pages because any calls to
+ * btree_releasepage will correctly detect that a page belongs to a
+ * live buffer and won't free it prematurely.
*/
- SetPageChecked(eb->pages[0]);
- for (i = 1; i < num_pages; i++) {
- p = eb->pages[i];
- ClearPageChecked(p);
- unlock_page(p);
- }
- unlock_page(eb->pages[0]);
+ for (i = 0; i < num_pages; i++)
+ unlock_page(eb->pages[i]);
return eb;
free_eb:
@@ -5075,9 +5052,10 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
__free_extent_buffer(eb);
}
-/* Expects to have eb->eb_lock already held */
static int release_extent_buffer(struct extent_buffer *eb)
{
+ lockdep_assert_held(&eb->refs_lock);
+
WARN_ON(atomic_read(&eb->refs) == 0);
if (atomic_dec_and_test(&eb->refs)) {
if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
@@ -5094,9 +5072,9 @@ static int release_extent_buffer(struct extent_buffer *eb)
}
/* Should be safe to release our pages at this point */
- btrfs_release_extent_buffer_page(eb);
+ btrfs_release_extent_buffer_pages(eb);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
+ if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
__free_extent_buffer(eb);
return 1;
}
@@ -5127,7 +5105,7 @@ void free_extent_buffer(struct extent_buffer *eb)
spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) == 2 &&
- test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
+ test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))
atomic_dec(&eb->refs);
if (atomic_read(&eb->refs) == 2 &&
@@ -5159,11 +5137,11 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
void clear_extent_buffer_dirty(struct extent_buffer *eb)
{
- unsigned long i;
- unsigned long num_pages;
+ int i;
+ int num_pages;
struct page *page;
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
@@ -5189,15 +5167,15 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
int set_extent_buffer_dirty(struct extent_buffer *eb)
{
- unsigned long i;
- unsigned long num_pages;
+ int i;
+ int num_pages;
int was_dirty = 0;
check_buffer_tree_ref(eb);
was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
WARN_ON(atomic_read(&eb->refs) == 0);
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
@@ -5208,12 +5186,12 @@ int set_extent_buffer_dirty(struct extent_buffer *eb)
void clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
- unsigned long i;
+ int i;
struct page *page;
- unsigned long num_pages;
+ int num_pages;
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (page)
@@ -5223,12 +5201,12 @@ void clear_extent_buffer_uptodate(struct extent_buffer *eb)
void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
- unsigned long i;
+ int i;
struct page *page;
- unsigned long num_pages;
+ int num_pages;
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
SetPageUptodate(page);
@@ -5238,13 +5216,13 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb)
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, int wait, int mirror_num)
{
- unsigned long i;
+ int i;
struct page *page;
int err;
int ret = 0;
int locked_pages = 0;
int all_uptodate = 1;
- unsigned long num_pages;
+ int num_pages;
unsigned long num_reads = 0;
struct bio *bio = NULL;
unsigned long bio_flags = 0;
@@ -5252,7 +5230,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
- num_pages = num_extent_pages(eb->start, eb->len);
+ num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
page = eb->pages[i];
if (wait == WAIT_NONE) {
@@ -5576,11 +5554,11 @@ void copy_extent_buffer_full(struct extent_buffer *dst,
struct extent_buffer *src)
{
int i;
- unsigned num_pages;
+ int num_pages;
ASSERT(dst->len == src->len);
- num_pages = num_extent_pages(dst->start, dst->len);
+ num_pages = num_extent_pages(dst);
for (i = 0; i < num_pages; i++)
copy_page(page_address(dst->pages[i]),
page_address(src->pages[i]));
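Side note on the unsigned long -> int conversions throughout this file: the page count of a metadata extent buffer is small and bounded, so a plain int is ample. A minimal userspace sketch of the bound (not kernel code; the constant mirrors ctree.h, 4K pages assumed):

#include <stdio.h>

#define BTRFS_MAX_METADATA_BLOCKSIZE 65536	/* largest allowed nodesize */
#define PAGE_SIZE 4096UL

int main(void)
{
	/* Worst case: a 64K node spans just 16 pages, far below INT_MAX. */
	printf("max pages per eb: %lu\n",
	       BTRFS_MAX_METADATA_BLOCKSIZE / PAGE_SIZE);
	return 0;
}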
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 0bfd4aeb822d..b4d03e677e1d 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -46,7 +46,7 @@
#define EXTENT_BUFFER_STALE 6
#define EXTENT_BUFFER_WRITEBACK 7
#define EXTENT_BUFFER_READ_ERR 8 /* read IO error */
-#define EXTENT_BUFFER_DUMMY 9
+#define EXTENT_BUFFER_UNMAPPED 9
#define EXTENT_BUFFER_IN_TREE 10
#define EXTENT_BUFFER_WRITE_ERR 11 /* write IO error */
@@ -92,9 +92,6 @@ typedef blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *
typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
struct bio *bio, u64 bio_offset);
-typedef blk_status_t (extent_submit_bio_done_t)(void *private_data,
- struct bio *bio, int mirror_num);
-
struct extent_io_ops {
/*
* The following callbacks must always be defined, the function
@@ -104,12 +101,7 @@ struct extent_io_ops {
int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
struct page *page, u64 start, u64 end,
int mirror);
- int (*merge_bio_hook)(struct page *page, unsigned long offset,
- size_t size, struct bio *bio,
- unsigned long bio_flags);
int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
- struct btrfs_fs_info *(*tree_fs_info)(void *private_data);
- void (*set_range_writeback)(void *private_data, u64 start, u64 end);
/*
* Optional hooks, called if the pointer is not NULL
@@ -440,10 +432,10 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
-static inline unsigned long num_extent_pages(u64 start, u64 len)
+static inline int num_extent_pages(const struct extent_buffer *eb)
{
- return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
- (start >> PAGE_SHIFT);
+ return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
+ (eb->start >> PAGE_SHIFT);
}
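A worked example of the new helper's arithmetic, as a standalone userspace sketch (4K pages assumed; the generic round_up macro stands in for the kernel's power-of-two version):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_SHIFT 12
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

static int num_pages(uint64_t start, uint64_t len)
{
	return (round_up(start + len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (start >> PAGE_SHIFT);
}

int main(void)
{
	/* A 16K node at a page-aligned start covers exactly 4 pages... */
	printf("%d\n", num_pages(0x1f000, 16384));	/* 4 */
	/* ...while an unaligned start would straddle a fifth page. */
	printf("%d\n", num_pages(0x1f800, 16384));	/* 5 */
	return 0;
}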
static inline void extent_buffer_get(struct extent_buffer *eb)
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index f9dd6d1836a3..ba74827beb32 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -922,7 +922,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
const bool new_inline,
struct extent_map *em)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf = path->nodes[0];
const int slot = path->slots[0];
@@ -942,7 +942,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
btrfs_file_extent_num_bytes(leaf, fi);
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
- size = btrfs_file_extent_inline_len(leaf, slot, fi);
+ size = btrfs_file_extent_ram_bytes(leaf, fi);
extent_end = ALIGN(extent_start + size,
fs_info->sectorsize);
}
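For inline extents, ram_bytes already records the uncompressed data length, which is why the per-slot inline_len helper could be dropped here and in the later hunks. A small sketch of the extent_end computation with made-up sizes (ALIGN as in the kernel, 4K sectors assumed):

#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t extent_start = 0;
	uint64_t ram_bytes = 1234;	/* uncompressed size of the inline data */
	uint64_t sectorsize = 4096;

	/* the inline extent is considered to cover a whole sector */
	printf("extent_end = %llu\n", (unsigned long long)
	       ALIGN(extent_start + ram_bytes, sectorsize));	/* 4096 */
	return 0;
}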
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 51e77d72068a..2be00e873e92 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -5,14 +5,11 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
-#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
-#include <linux/mpage.h>
#include <linux/falloc.h>
-#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
@@ -83,7 +80,7 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
struct inode_defrag *defrag)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct inode_defrag *entry;
struct rb_node **p;
struct rb_node *parent = NULL;
@@ -135,8 +132,8 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct inode_defrag *defrag;
u64 transid;
int ret;
@@ -185,7 +182,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
struct inode_defrag *defrag)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;
if (!__need_auto_defrag(fs_info))
@@ -833,8 +830,7 @@ next_slot:
btrfs_file_extent_num_bytes(leaf, fi);
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = key.offset +
- btrfs_file_extent_inline_len(leaf,
- path->slots[0], fi);
+ btrfs_file_extent_ram_bytes(leaf, fi);
} else {
/* can't happen */
BUG();
@@ -1133,7 +1129,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u64 start, u64 end)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct btrfs_path *path;
@@ -1470,7 +1466,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
u64 *lockstart, u64 *lockend,
struct extent_state **cached_state)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 start_pos;
u64 last_pos;
int i;
@@ -1526,7 +1522,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
size_t *write_bytes)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
struct btrfs_ordered_extent *ordered;
u64 lockstart, lockend;
@@ -1569,10 +1565,11 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
return ret;
}
-static noinline ssize_t __btrfs_buffered_write(struct file *file,
- struct iov_iter *i,
- loff_t pos)
+static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
+ struct iov_iter *i)
{
+ struct file *file = iocb->ki_filp;
+ loff_t pos = iocb->ki_pos;
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -1804,7 +1801,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- loff_t pos = iocb->ki_pos;
+ loff_t pos;
ssize_t written;
ssize_t written_buffered;
loff_t endbyte;
@@ -1815,8 +1812,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
if (written < 0 || !iov_iter_count(from))
return written;
- pos += written;
- written_buffered = __btrfs_buffered_write(file, from, pos);
+ pos = iocb->ki_pos;
+ written_buffered = btrfs_buffered_write(iocb, from);
if (written_buffered < 0) {
err = written_buffered;
goto out;
@@ -1953,7 +1950,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
if (iocb->ki_flags & IOCB_DIRECT) {
num_written = __btrfs_direct_write(iocb, from);
} else {
- num_written = __btrfs_buffered_write(file, from, pos);
+ num_written = btrfs_buffered_write(iocb, from);
if (num_written > 0)
iocb->ki_pos = pos + num_written;
if (clean_page)
@@ -2042,7 +2039,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
int ret = 0, err;
- bool full_sync = false;
u64 len;
/*
@@ -2066,96 +2062,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
inode_lock(inode);
atomic_inc(&root->log_batch);
- full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
+
/*
- * We might have have had more pages made dirty after calling
- * start_ordered_ops and before acquiring the inode's i_mutex.
+ * We have to do this here to avoid the priority inversion of waiting on
+ * IO of a lower priority task while holding a transaction open.
*/
- if (full_sync) {
- /*
- * For a full sync, we need to make sure any ordered operations
- * start and finish before we start logging the inode, so that
- * all extents are persisted and the respective file extent
- * items are in the fs/subvol btree.
- */
- ret = btrfs_wait_ordered_range(inode, start, len);
- } else {
- /*
- * Start any new ordered operations before starting to log the
- * inode. We will wait for them to finish in btrfs_sync_log().
- *
- * Right before acquiring the inode's mutex, we might have new
- * writes dirtying pages, which won't immediately start the
- * respective ordered operations - that is done through the
- * fill_delalloc callbacks invoked from the writepage and
- * writepages address space operations. So make sure we start
- * all ordered operations before starting to log our inode. Not
- * doing this means that while logging the inode, writeback
- * could start and invoke writepage/writepages, which would call
- * the fill_delalloc callbacks (cow_file_range,
- * submit_compressed_extents). These callbacks add first an
- * extent map to the modified list of extents and then create
- * the respective ordered operation, which means in
- * tree-log.c:btrfs_log_inode() we might capture all existing
- * ordered operations (with btrfs_get_logged_extents()) before
- * the fill_delalloc callback adds its ordered operation, and by
- * the time we visit the modified list of extent maps (with
- * btrfs_log_changed_extents()), we see and process the extent
- * map they created. We then use the extent map to construct a
- * file extent item for logging without waiting for the
- * respective ordered operation to finish - this file extent
- * item points to a disk location that might not have yet been
- * written to, containing random data - so after a crash a log
- * replay will make our inode have file extent items that point
- * to disk locations containing invalid data, as we returned
- * success to userspace without waiting for the respective
- * ordered operation to finish, because it wasn't captured by
- * btrfs_get_logged_extents().
- */
- ret = start_ordered_ops(inode, start, end);
- }
+ ret = btrfs_wait_ordered_range(inode, start, len);
if (ret) {
inode_unlock(inode);
goto out;
}
atomic_inc(&root->log_batch);
- /*
- * If the last transaction that changed this file was before the current
- * transaction and we have the full sync flag set in our inode, we can
- * bail out now without any syncing.
- *
- * Note that we can't bail out if the full sync flag isn't set. This is
- * because when the full sync flag is set we start all ordered extents
- * and wait for them to fully complete - when they complete they update
- * the inode's last_trans field through:
- *
- * btrfs_finish_ordered_io() ->
- * btrfs_update_inode_fallback() ->
- * btrfs_update_inode() ->
- * btrfs_set_inode_last_trans()
- *
- * So we are sure that last_trans is up to date and can do this check to
- * bail out safely. For the fast path, when the full sync flag is not
- * set in our inode, we can not do it because we start only our ordered
- * extents and don't wait for them to complete (that is when
- * btrfs_finish_ordered_io runs), so here at this point their last_trans
- * value might be less than or equals to fs_info->last_trans_committed,
- * and setting a speculative last_trans for an inode when a buffered
- * write is made (such as fs_info->generation + 1 for example) would not
- * be reliable since after setting the value and before fsync is called
- * any number of transactions can start and commit (transaction kthread
- * commits the current transaction periodically), and a transaction
- * commit does not start nor waits for ordered extents to complete.
- */
smp_mb();
if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
- (full_sync && BTRFS_I(inode)->last_trans <=
- fs_info->last_trans_committed) ||
- (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
- BTRFS_I(inode)->last_trans
- <= fs_info->last_trans_committed)) {
+ BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed) {
/*
* We've had everything committed since the last time we were
* modified so clear this flag in case it was set for whatever
@@ -2239,13 +2160,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
}
}
- if (!full_sync) {
- ret = btrfs_wait_ordered_range(inode, start, len);
- if (ret) {
- btrfs_end_transaction(trans);
- goto out;
- }
- }
ret = btrfs_commit_transaction(trans);
} else {
ret = btrfs_end_transaction(trans);
@@ -2310,7 +2224,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_path *path, u64 offset, u64 end)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index d5f80cb300be..0adf38b00fa0 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -71,10 +71,6 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
inode = btrfs_iget(fs_info->sb, &location, root, NULL);
if (IS_ERR(inode))
return inode;
- if (is_bad_inode(inode)) {
- iput(inode);
- return ERR_PTR(-ENOENT);
- }
mapping_set_gfp_mask(inode->i_mapping,
mapping_gfp_constraint(inode->i_mapping,
@@ -300,9 +296,9 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID)
check_crcs = 1;
- /* Make sure we can fit our crcs into the first page */
+ /* Make sure we can fit our crcs and generation into the first page */
if (write && check_crcs &&
- (num_pages * sizeof(u32)) >= PAGE_SIZE)
+ (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
return -ENOSPC;
memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
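The tightened check above accounts for the u64 generation stored in the first cache page alongside the per-page crcs. A quick sketch of where the cutoff lands with 4K pages (userspace model, not the kernel code):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long num_pages;

	/* one u32 crc per page plus one u64 generation must fit in page 0 */
	for (num_pages = 1021; num_pages <= 1023; num_pages++)
		printf("%lu pages: %s\n", num_pages,
		       num_pages * sizeof(uint32_t) + sizeof(uint64_t) > PAGE_SIZE
		       ? "-ENOSPC" : "ok");
	/* 1022 * 4 + 8 = 4096 still fits; 1023 * 4 + 8 = 4100 does not */
	return 0;
}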
@@ -547,7 +543,7 @@ static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
io_ctl_map_page(io_ctl, 0);
}
- memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
+ copy_page(io_ctl->cur, bitmap);
io_ctl_set_crc(io_ctl, io_ctl->index - 1);
if (io_ctl->index < io_ctl->num_pages)
io_ctl_map_page(io_ctl, 0);
@@ -607,7 +603,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
if (ret)
return ret;
- memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
+ copy_page(entry->bitmap, io_ctl->cur);
io_ctl_unmap_page(io_ctl);
return 0;
@@ -655,7 +651,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_free_space_ctl *ctl,
struct btrfs_path *path, u64 offset)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
struct btrfs_io_ctl io_ctl;
@@ -1123,13 +1119,10 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root,
{
int ret;
struct inode *inode = io_ctl->inode;
- struct btrfs_fs_info *fs_info;
if (!inode)
return 0;
- fs_info = btrfs_sb(inode->i_sb);
-
/* Flush the dirty pages in the cache file. */
ret = flush_dirty_cache(inode);
if (ret)
@@ -1145,7 +1138,7 @@ out:
BTRFS_I(inode)->generation = 0;
if (block_group) {
#ifdef DEBUG
- btrfs_err(fs_info,
+ btrfs_err(root->fs_info,
"failed to write free space cache for block group %llu",
block_group->key.objectid);
#endif
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index b5950aacd697..d6736595ec57 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -1236,7 +1236,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
if (ret)
goto abort;
- ret = btrfs_del_root(trans, fs_info, &free_space_root->root_key);
+ ret = btrfs_del_root(trans, &free_space_root->root_key);
if (ret)
goto abort;
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 12fcd8897c33..ffca2abf13d0 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -3,7 +3,6 @@
* Copyright (C) 2007 Oracle. All rights reserved.
*/
-#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
@@ -244,8 +243,6 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
return;
while (1) {
- bool add_to_ctl = true;
-
spin_lock(rbroot_lock);
n = rb_first(rbroot);
if (!n) {
@@ -257,15 +254,14 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
BUG_ON(info->bitmap); /* Logic error */
if (info->offset > root->ino_cache_progress)
- add_to_ctl = false;
- else if (info->offset + info->bytes > root->ino_cache_progress)
- count = root->ino_cache_progress - info->offset + 1;
+ count = 0;
else
- count = info->bytes;
+ count = min(root->ino_cache_progress - info->offset + 1,
+ info->bytes);
rb_erase(&info->offset_index, rbroot);
spin_unlock(rbroot_lock);
- if (add_to_ctl)
+ if (count)
__btrfs_add_free_space(root->fs_info, ctl,
info->offset, count);
kmem_cache_free(btrfs_free_space_cachep, info);
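The rewritten branch above folds the three cases into a single clamp: a range entirely beyond ino_cache_progress contributes nothing, and anything else is truncated at the progress mark. A sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

static uint64_t count_to_add(uint64_t progress, uint64_t offset, uint64_t bytes)
{
	uint64_t span;

	if (offset > progress)
		return 0;		/* whole range past the progress mark */
	span = progress - offset + 1;	/* portion at or below the mark */
	return span < bytes ? span : bytes;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)count_to_add(100, 90, 20)); /* 11 */
	printf("%llu\n", (unsigned long long)count_to_add(100, 90, 5));  /* 5 */
	printf("%llu\n", (unsigned long long)count_to_add(100, 120, 5)); /* 0 */
	return 0;
}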
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index eba61bcb9bb3..9357a19d2bff 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -14,17 +14,13 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
-#include <linux/mpage.h>
-#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
-#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
-#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
@@ -1443,8 +1439,7 @@ next_slot:
nocow = 1;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = found_key.offset +
- btrfs_file_extent_inline_len(leaf,
- path->slots[0], fi);
+ btrfs_file_extent_ram_bytes(leaf, fi);
extent_end = ALIGN(extent_end,
fs_info->sectorsize);
} else {
@@ -1752,7 +1747,7 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
void __btrfs_del_delalloc_inode(struct btrfs_root *root,
struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = root->fs_info;
if (!list_empty(&inode->delalloc_inodes)) {
list_del_init(&inode->delalloc_inodes);
@@ -1903,8 +1898,8 @@ static void btrfs_clear_bit_hook(void *private_data,
}
/*
- * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
- * we don't create bios that span stripes or chunks
+ * Merge bio hook: this must check the chunk tree to make sure we don't create
+ * bios that span stripes or chunks.
*
* return 1 if page cannot be merged to bio
* return 0 if page can be merged to bio
@@ -1962,7 +1957,7 @@ static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
* At IO completion time the cums attached on the ordered extent record
* are inserted into the btree
*/
-static blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
+blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
int mirror_num)
{
struct inode *inode = private_data;
@@ -2035,8 +2030,7 @@ static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
/* we're doing a write, do the async checksumming */
ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, bio_flags,
bio_offset, inode,
- btrfs_submit_bio_start,
- btrfs_submit_bio_done);
+ btrfs_submit_bio_start);
goto out;
} else if (!skip_sum) {
ret = btrfs_csum_one_bio(inode, bio, 0, 0);
@@ -3610,18 +3604,15 @@ static int btrfs_read_locked_inode(struct inode *inode)
filled = true;
path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto make_bad;
- }
+ if (!path)
+ return -ENOMEM;
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret) {
- if (ret > 0)
- ret = -ENOENT;
- goto make_bad;
+ btrfs_free_path(path);
+ return ret;
}
leaf = path->nodes[0];
@@ -3774,11 +3765,6 @@ cache_acl:
btrfs_sync_inode_flags_to_i_flags(inode);
return 0;
-
-make_bad:
- btrfs_free_path(path);
- make_bad_inode(inode);
- return ret;
}
/*
@@ -3984,7 +3970,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
goto err;
}
skip_backref:
- ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
+ ret = btrfs_delete_delayed_dir_index(trans, dir, index);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto err;
@@ -4087,11 +4073,10 @@ out:
}
static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *dir, u64 objectid,
- const char *name, int name_len)
+ struct inode *dir, u64 objectid,
+ const char *name, int name_len)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
@@ -4124,9 +4109,8 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- ret = btrfs_del_root_ref(trans, fs_info, objectid,
- root->root_key.objectid, dir_ino,
- &index, name, name_len);
+ ret = btrfs_del_root_ref(trans, objectid, root->root_key.objectid,
+ dir_ino, &index, name, name_len);
if (ret < 0) {
if (ret != -ENOENT) {
btrfs_abort_transaction(trans, ret);
@@ -4145,12 +4129,11 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- btrfs_release_path(path);
index = key.offset;
}
btrfs_release_path(path);
- ret = btrfs_delete_delayed_dir_index(trans, fs_info, BTRFS_I(dir), index);
+ ret = btrfs_delete_delayed_dir_index(trans, BTRFS_I(dir), index);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -4243,9 +4226,9 @@ again:
prev = node;
entry = rb_entry(node, struct btrfs_inode, rb_node);
- if (objectid < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+ if (objectid < btrfs_ino(entry))
node = node->rb_left;
- else if (objectid > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+ else if (objectid > btrfs_ino(entry))
node = node->rb_right;
else
break;
@@ -4253,7 +4236,7 @@ again:
if (!node) {
while (prev) {
entry = rb_entry(prev, struct btrfs_inode, rb_node);
- if (objectid <= btrfs_ino(BTRFS_I(&entry->vfs_inode))) {
+ if (objectid <= btrfs_ino(entry)) {
node = prev;
break;
}
@@ -4262,7 +4245,7 @@ again:
}
while (node) {
entry = rb_entry(node, struct btrfs_inode, rb_node);
- objectid = btrfs_ino(BTRFS_I(&entry->vfs_inode)) + 1;
+ objectid = btrfs_ino(entry) + 1;
inode = igrab(&entry->vfs_inode);
if (inode) {
spin_unlock(&root->inode_lock);
@@ -4343,10 +4326,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
btrfs_record_snapshot_destroy(trans, BTRFS_I(dir));
- ret = btrfs_unlink_subvol(trans, root, dir,
- dest->root_key.objectid,
- dentry->d_name.name,
- dentry->d_name.len);
+ ret = btrfs_unlink_subvol(trans, dir, dest->root_key.objectid,
+ dentry->d_name.name, dentry->d_name.len);
if (ret) {
err = ret;
btrfs_abort_transaction(trans, ret);
@@ -4441,7 +4422,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
return PTR_ERR(trans);
if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
- err = btrfs_unlink_subvol(trans, root, dir,
+ err = btrfs_unlink_subvol(trans, dir,
BTRFS_I(inode)->location.objectid,
dentry->d_name.name,
dentry->d_name.len);
@@ -4643,8 +4624,8 @@ search_again:
BTRFS_I(inode), leaf, fi,
found_key.offset);
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- item_end += btrfs_file_extent_inline_len(leaf,
- path->slots[0], fi);
+ item_end += btrfs_file_extent_ram_bytes(leaf,
+ fi);
trace_btrfs_truncate_show_fi_inline(
BTRFS_I(inode), leaf, fi, path->slots[0],
@@ -5615,9 +5596,9 @@ static void inode_tree_add(struct inode *inode)
parent = *p;
entry = rb_entry(parent, struct btrfs_inode, rb_node);
- if (ino < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+ if (ino < btrfs_ino(entry))
p = &parent->rb_left;
- else if (ino > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
+ else if (ino > btrfs_ino(entry))
p = &parent->rb_right;
else {
WARN_ON(!(entry->vfs_inode.i_state &
@@ -5708,16 +5689,21 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
int ret;
ret = btrfs_read_locked_inode(inode);
- if (!is_bad_inode(inode)) {
+ if (!ret) {
inode_tree_add(inode);
unlock_new_inode(inode);
if (new)
*new = 1;
} else {
- unlock_new_inode(inode);
- iput(inode);
- ASSERT(ret < 0);
- inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
+ iget_failed(inode);
+ /*
+ * ret > 0 can come from btrfs_search_slot called by
+ * btrfs_read_locked_inode; this means the inode item
+ * was not found.
+ */
+ if (ret > 0)
+ ret = -ENOENT;
+ inode = ERR_PTR(ret);
}
}
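The error path above maps a positive return from btrfs_search_slot (item not found) to -ENOENT before handing it to the caller. A tiny userspace model of that normalization (the stub is invented):

#include <stdio.h>
#include <errno.h>

/* stand-in for btrfs_search_slot: 0 = found, 1 = not found, <0 = error */
static int search_slot_stub(int found)
{
	return found ? 0 : 1;
}

int main(void)
{
	int ret = search_slot_stub(0);

	if (ret > 0)
		ret = -ENOENT;	/* a missing inode item becomes -ENOENT */
	printf("ret = %d\n", ret);	/* -2 on most systems */
	return 0;
}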
@@ -5745,7 +5731,7 @@ static struct inode *new_simple_dir(struct super_block *s,
inode->i_mtime = current_time(inode);
inode->i_atime = inode->i_mtime;
inode->i_ctime = inode->i_mtime;
- BTRFS_I(inode)->i_otime = timespec64_to_timespec(inode->i_mtime);
+ BTRFS_I(inode)->i_otime = inode->i_mtime;
return inode;
}
@@ -6027,32 +6013,6 @@ err:
return ret;
}
-int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_trans_handle *trans;
- int ret = 0;
- bool nolock = false;
-
- if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
- return 0;
-
- if (btrfs_fs_closing(root->fs_info) &&
- btrfs_is_free_space_inode(BTRFS_I(inode)))
- nolock = true;
-
- if (wbc->sync_mode == WB_SYNC_ALL) {
- if (nolock)
- trans = btrfs_join_transaction_nolock(root);
- else
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- ret = btrfs_commit_transaction(trans);
- }
- return ret;
-}
-
/*
* This is somewhat expensive, updating the tree every time the
* inode changes. But, it is most likely to find the inode in cache.
@@ -6335,8 +6295,10 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
location->type = BTRFS_INODE_ITEM_KEY;
ret = btrfs_insert_inode_locked(inode);
- if (ret < 0)
+ if (ret < 0) {
+ iput(inode);
goto fail;
+ }
path->leave_spinning = 1;
ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
@@ -6349,7 +6311,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
inode->i_mtime = current_time(inode);
inode->i_atime = inode->i_mtime;
inode->i_ctime = inode->i_mtime;
- BTRFS_I(inode)->i_otime = timespec64_to_timespec(inode->i_mtime);
+ BTRFS_I(inode)->i_otime = inode->i_mtime;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
@@ -6395,12 +6357,11 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
return inode;
fail_unlock:
- unlock_new_inode(inode);
+ discard_new_inode(inode);
fail:
if (dir && name)
BTRFS_I(dir)->index_cnt--;
btrfs_free_path(path);
- iput(inode);
return ERR_PTR(ret);
}
@@ -6419,7 +6380,6 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
const char *name, int name_len, int add_backref, u64 index)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
int ret = 0;
struct btrfs_key key;
struct btrfs_root *root = parent_inode->root;
@@ -6435,7 +6395,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
}
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
- ret = btrfs_add_root_ref(trans, fs_info, key.objectid,
+ ret = btrfs_add_root_ref(trans, key.objectid,
root->root_key.objectid, parent_ino,
index, name, name_len);
} else if (add_backref) {
@@ -6471,7 +6431,7 @@ fail_dir_item:
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
u64 local_index;
int err;
- err = btrfs_del_root_ref(trans, fs_info, key.objectid,
+ err = btrfs_del_root_ref(trans, key.objectid,
root->root_key.objectid, parent_ino,
&local_index, name, name_len);
@@ -6505,7 +6465,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
int err;
- int drop_inode = 0;
u64 objectid;
u64 index = 0;
@@ -6527,6 +6486,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_unlock;
}
@@ -6541,31 +6501,24 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
0, index);
- if (err) {
- goto out_unlock_inode;
- } else {
- btrfs_update_inode(trans, root, inode);
- d_instantiate_new(dentry, inode);
- }
+ if (err)
+ goto out_unlock;
+
+ btrfs_update_inode(trans, root, inode);
+ d_instantiate_new(dentry, inode);
out_unlock:
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
- if (drop_inode) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
return err;
-
-out_unlock_inode:
- drop_inode = 1;
- unlock_new_inode(inode);
- goto out_unlock;
-
}
static int btrfs_create(struct inode *dir, struct dentry *dentry,
@@ -6575,7 +6528,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
- int drop_inode_on_err = 0;
int err;
u64 objectid;
u64 index = 0;
@@ -6598,9 +6550,9 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_unlock;
}
- drop_inode_on_err = 1;
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
@@ -6613,33 +6565,28 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
err = btrfs_update_inode(trans, root, inode);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
0, index);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
d_instantiate_new(dentry, inode);
out_unlock:
btrfs_end_transaction(trans);
- if (err && drop_inode_on_err) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
btrfs_btree_balance_dirty(fs_info);
return err;
-
-out_unlock_inode:
- unlock_new_inode(inode);
- goto out_unlock;
-
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
@@ -6748,6 +6695,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
S_IFDIR | mode, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_fail;
}
@@ -6758,34 +6706,30 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_fail_inode;
+ goto out_fail;
btrfs_i_size_write(BTRFS_I(inode), 0);
err = btrfs_update_inode(trans, root, inode);
if (err)
- goto out_fail_inode;
+ goto out_fail;
err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
dentry->d_name.name,
dentry->d_name.len, 0, index);
if (err)
- goto out_fail_inode;
+ goto out_fail;
d_instantiate_new(dentry, inode);
drop_on_err = 0;
out_fail:
btrfs_end_transaction(trans);
- if (drop_on_err) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
btrfs_btree_balance_dirty(fs_info);
return err;
-
-out_fail_inode:
- unlock_new_inode(inode);
- goto out_fail;
}
static noinline int uncompress_inline(struct btrfs_path *path,
@@ -6847,7 +6791,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
size_t pg_offset, u64 start, u64 len,
int create)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;
int err = 0;
u64 extent_start = 0;
@@ -6943,7 +6887,8 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
extent_start);
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
- size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
+
+ size = btrfs_file_extent_ram_bytes(leaf, item);
extent_end = ALIGN(extent_start + size,
fs_info->sectorsize);
@@ -6994,7 +6939,7 @@ next:
if (new_inline)
goto out;
- size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
+ size = btrfs_file_extent_ram_bytes(leaf, item);
extent_offset = page_offset(page) + pg_offset - extent_start;
copy_size = min_t(u64, PAGE_SIZE - pg_offset,
size - extent_offset);
@@ -7865,7 +7810,7 @@ static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
isector >>= inode->i_sb->s_blocksize_bits;
bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
pgoff, isector, repair_endio, repair_arg);
- bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
+ bio->bi_opf = REQ_OP_READ | read_mode;
btrfs_debug(BTRFS_I(inode)->root->fs_info,
"repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
@@ -8299,8 +8244,7 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
if (write && async_submit) {
ret = btrfs_wq_submit_bio(fs_info, bio, 0, 0,
file_offset, inode,
- btrfs_submit_bio_start_direct_io,
- btrfs_submit_bio_done);
+ btrfs_submit_bio_start_direct_io);
goto err;
} else if (write) {
/*
@@ -9540,8 +9484,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* src is a subvolume */
if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
- ret = btrfs_unlink_subvol(trans, root, old_dir,
- root_objectid,
+ ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
old_dentry->d_name.name,
old_dentry->d_name.len);
} else { /* src is an inode */
@@ -9560,8 +9503,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
/* dest is a subvolume */
if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
- ret = btrfs_unlink_subvol(trans, dest, new_dir,
- root_objectid,
+ ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
new_dentry->d_name.name,
new_dentry->d_name.len);
} else { /* dest is an inode */
@@ -9821,7 +9763,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
- ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
+ ret = btrfs_unlink_subvol(trans, old_dir, root_objectid,
old_dentry->d_name.name,
old_dentry->d_name.len);
} else {
@@ -9843,8 +9785,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
root_objectid = BTRFS_I(new_inode)->location.objectid;
- ret = btrfs_unlink_subvol(trans, dest, new_dir,
- root_objectid,
+ ret = btrfs_unlink_subvol(trans, new_dir, root_objectid,
new_dentry->d_name.name,
new_dentry->d_name.len);
BUG_ON(new_inode->i_nlink == 0);
@@ -10115,7 +10056,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
struct btrfs_key key;
struct inode *inode = NULL;
int err;
- int drop_inode = 0;
u64 objectid;
u64 index = 0;
int name_len;
@@ -10148,6 +10088,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
objectid, S_IFLNK|S_IRWXUGO, &index);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
+ inode = NULL;
goto out_unlock;
}
@@ -10164,12 +10105,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
if (err)
- goto out_unlock_inode;
+ goto out_unlock;
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
- goto out_unlock_inode;
+ goto out_unlock;
}
key.objectid = btrfs_ino(BTRFS_I(inode));
key.offset = 0;
@@ -10179,7 +10120,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
datasize);
if (err) {
btrfs_free_path(path);
- goto out_unlock_inode;
+ goto out_unlock;
}
leaf = path->nodes[0];
ei = btrfs_item_ptr(leaf, path->slots[0],
@@ -10211,26 +10152,19 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (!err)
err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
BTRFS_I(inode), 0, index);
- if (err) {
- drop_inode = 1;
- goto out_unlock_inode;
- }
+ if (err)
+ goto out_unlock;
d_instantiate_new(dentry, inode);
out_unlock:
btrfs_end_transaction(trans);
- if (drop_inode) {
+ if (err && inode) {
inode_dec_link_count(inode);
- iput(inode);
+ discard_new_inode(inode);
}
btrfs_btree_balance_dirty(fs_info);
return err;
-
-out_unlock_inode:
- drop_inode = 1;
- unlock_new_inode(inode);
- goto out_unlock;
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -10439,14 +10373,14 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
ret = btrfs_init_inode_security(trans, inode, dir, NULL);
if (ret)
- goto out_inode;
+ goto out;
ret = btrfs_update_inode(trans, root, inode);
if (ret)
- goto out_inode;
+ goto out;
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (ret)
- goto out_inode;
+ goto out;
/*
* We set number of links to 0 in btrfs_new_inode(), and here we set
@@ -10456,21 +10390,15 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
* d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
*/
set_nlink(inode, 1);
- unlock_new_inode(inode);
d_tmpfile(dentry, inode);
+ unlock_new_inode(inode);
mark_inode_dirty(inode);
-
out:
btrfs_end_transaction(trans);
- if (ret)
- iput(inode);
+ if (ret && inode)
+ discard_new_inode(inode);
btrfs_btree_balance_dirty(fs_info);
return ret;
-
-out_inode:
- unlock_new_inode(inode);
- goto out;
-
}
__attribute__((const))
@@ -10479,12 +10407,6 @@ static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
return -EAGAIN;
}
-static struct btrfs_fs_info *iotree_fs_info(void *private_data)
-{
- struct inode *inode = private_data;
- return btrfs_sb(inode->i_sb);
-}
-
static void btrfs_check_extent_io_range(void *private_data, const char *caller,
u64 start, u64 end)
{
@@ -10499,9 +10421,9 @@ static void btrfs_check_extent_io_range(void *private_data, const char *caller,
}
}
-void btrfs_set_range_writeback(void *private_data, u64 start, u64 end)
+void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
- struct inode *inode = private_data;
+ struct inode *inode = tree->private_data;
unsigned long index = start >> PAGE_SHIFT;
unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;
@@ -10557,10 +10479,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
/* mandatory callbacks */
.submit_bio_hook = btrfs_submit_bio_hook,
.readpage_end_io_hook = btrfs_readpage_end_io_hook,
- .merge_bio_hook = btrfs_merge_bio_hook,
.readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
- .tree_fs_info = iotree_fs_info,
- .set_range_writeback = btrfs_set_range_writeback,
/* optional callbacks */
.fill_delalloc = run_delalloc_range,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b077544b5232..63600dc2ac4c 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5,23 +5,18 @@
#include <linux/kernel.h>
#include <linux/bio.h>
-#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
-#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
-#include <linux/mpage.h>
#include <linux/namei.h>
-#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
-#include <linux/bit_spinlock.h>
#include <linux/security.h>
#include <linux/xattr.h>
#include <linux/mm.h>
@@ -606,7 +601,7 @@ static noinline int create_subvol(struct inode *dir,
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
- ret = btrfs_qgroup_inherit(trans, fs_info, 0, objectid, inherit);
+ ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
if (ret)
goto fail;
@@ -616,14 +611,6 @@ static noinline int create_subvol(struct inode *dir,
goto fail;
}
- memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
- btrfs_set_header_bytenr(leaf, leaf->start);
- btrfs_set_header_generation(leaf, trans->transid);
- btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(leaf, objectid);
-
- write_extent_buffer_fsid(leaf, fs_info->fsid);
- write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
btrfs_mark_buffer_dirty(leaf);
inode_item = &root_item->inode;
@@ -711,8 +698,7 @@ static noinline int create_subvol(struct inode *dir,
ret = btrfs_update_inode(trans, root, dir);
BUG_ON(ret);
- ret = btrfs_add_root_ref(trans, fs_info,
- objectid, root->root_key.objectid,
+ ret = btrfs_add_root_ref(trans, objectid, root->root_key.objectid,
btrfs_ino(BTRFS_I(dir)), index, name, namelen);
BUG_ON(ret);
@@ -2507,8 +2493,8 @@ out:
static noinline int btrfs_ioctl_ino_lookup(struct file *file,
void __user *argp)
{
- struct btrfs_ioctl_ino_lookup_args *args;
- struct inode *inode;
+ struct btrfs_ioctl_ino_lookup_args *args;
+ struct inode *inode;
int ret = 0;
args = memdup_user(argp, sizeof(*args));
@@ -2941,8 +2927,14 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
ret = btrfs_defrag_root(root);
break;
case S_IFREG:
- if (!(file->f_mode & FMODE_WRITE)) {
- ret = -EINVAL;
+ /*
+ * Note that this does not check the file descriptor for write
+ * access. This prevents defragmenting executables that are
+ * running and allows defrag on files open in read-only mode.
+ */
+ if (!capable(CAP_SYS_ADMIN) &&
+ inode_permission(inode, MAY_WRITE)) {
+ ret = -EPERM;
goto out;
}
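The rule above: defragmenting a regular file requires write permission on the inode (or CAP_SYS_ADMIN), independent of how the file descriptor was opened. A stubbed userspace sketch of that decision (names ending in _stub are invented):

#include <stdio.h>

static int capable_stub(int admin)              { return admin; }
static int inode_permission_stub(int may_write) { return may_write ? 0 : -13; }

static int defrag_check(int admin, int may_write)
{
	/* inode permission, not fd mode, gates the operation */
	if (!capable_stub(admin) && inode_permission_stub(may_write))
		return -1;	/* -EPERM in the kernel */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       defrag_check(0, 1),	/* 0: caller may write the file */
	       defrag_check(1, 0),	/* 0: CAP_SYS_ADMIN override */
	       defrag_check(0, 0));	/* -1: denied */
	return 0;
}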
@@ -3165,10 +3157,8 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
di_args->total_bytes = btrfs_device_get_total_bytes(dev);
memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
if (dev->name) {
- struct rcu_string *name;
-
- name = rcu_dereference(dev->name);
- strncpy(di_args->path, name->str, sizeof(di_args->path) - 1);
+ strncpy(di_args->path, rcu_str_deref(dev->name),
+ sizeof(di_args->path) - 1);
di_args->path[sizeof(di_args->path) - 1] = 0;
} else {
di_args->path[0] = '\0';
@@ -3602,13 +3592,13 @@ out_unlock:
return ret;
}
-ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
- struct file *dst_file, u64 dst_loff)
+int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff,
+ struct file *dst_file, loff_t dst_loff,
+ u64 olen)
{
struct inode *src = file_inode(src_file);
struct inode *dst = file_inode(dst_file);
u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
- ssize_t res;
if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
/*
@@ -3619,10 +3609,7 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
return -EINVAL;
}
- res = btrfs_extent_same(src, loff, olen, dst, dst_loff);
- if (res)
- return res;
- return olen;
+ return btrfs_extent_same(src, src_loff, olen, dst, dst_loff);
}
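With the signature change above, btrfs_dedupe_file_range reports plain success or failure (0 or a negative errno) instead of echoing back the deduped length, leaving byte accounting to the caller. A stubbed sketch of the two conventions (the extent-same stub is invented):

#include <stdio.h>

static int extent_same_stub(long long olen)
{
	return olen ? 0 : -22;	/* pretend success for non-zero lengths */
}

/* old convention: length on success, negative errno on failure */
static long long dedupe_old(long long olen)
{
	int res = extent_same_stub(olen);
	return res ? res : olen;
}

/* new convention: 0 on success, negative errno on failure */
static int dedupe_new(long long olen)
{
	return extent_same_stub(olen);
}

int main(void)
{
	printf("old: %lld, new: %d\n", dedupe_old(4096), dedupe_new(4096));
	return 0;
}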
static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
@@ -5118,9 +5105,7 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_quota_ctl_args *sa;
- struct btrfs_trans_handle *trans = NULL;
int ret;
- int err;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -5136,28 +5121,19 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
}
down_write(&fs_info->subvol_sem);
- trans = btrfs_start_transaction(fs_info->tree_root, 2);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- goto out;
- }
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_ENABLE:
- ret = btrfs_quota_enable(trans, fs_info);
+ ret = btrfs_quota_enable(fs_info);
break;
case BTRFS_QUOTA_CTL_DISABLE:
- ret = btrfs_quota_disable(trans, fs_info);
+ ret = btrfs_quota_disable(fs_info);
break;
default:
ret = -EINVAL;
break;
}
- err = btrfs_commit_transaction(trans);
- if (err && !ret)
- ret = err;
-out:
kfree(sa);
up_write(&fs_info->subvol_sem);
drop_write:
@@ -5195,15 +5171,13 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
}
if (sa->assign) {
- ret = btrfs_add_qgroup_relation(trans, fs_info,
- sa->src, sa->dst);
+ ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst);
} else {
- ret = btrfs_del_qgroup_relation(trans, fs_info,
- sa->src, sa->dst);
+ ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst);
}
/* update qgroup status and info */
- err = btrfs_run_qgroups(trans, fs_info);
+ err = btrfs_run_qgroups(trans);
if (err < 0)
btrfs_handle_fs_error(fs_info, err,
"failed to update qgroup status and info");
@@ -5221,7 +5195,6 @@ drop_write:
static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_qgroup_create_args *sa;
struct btrfs_trans_handle *trans;
@@ -5253,9 +5226,9 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
}
if (sa->create) {
- ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid);
+ ret = btrfs_create_qgroup(trans, sa->qgroupid);
} else {
- ret = btrfs_remove_qgroup(trans, fs_info, sa->qgroupid);
+ ret = btrfs_remove_qgroup(trans, sa->qgroupid);
}
err = btrfs_end_transaction(trans);
@@ -5272,7 +5245,6 @@ drop_write:
static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_qgroup_limit_args *sa;
struct btrfs_trans_handle *trans;
@@ -5305,7 +5277,7 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
qgroupid = root->root_key.objectid;
}
- ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim);
+ ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
err = btrfs_end_transaction(trans);
if (err && !ret)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 2e1a1694a33d..0c4ef208b8b9 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -6,7 +6,6 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
-#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
@@ -421,129 +420,6 @@ out:
return ret == 0;
}
-/* Needs to either be called under a log transaction or the log_mutex */
-void btrfs_get_logged_extents(struct btrfs_inode *inode,
- struct list_head *logged_list,
- const loff_t start,
- const loff_t end)
-{
- struct btrfs_ordered_inode_tree *tree;
- struct btrfs_ordered_extent *ordered;
- struct rb_node *n;
- struct rb_node *prev;
-
- tree = &inode->ordered_tree;
- spin_lock_irq(&tree->lock);
- n = __tree_search(&tree->tree, end, &prev);
- if (!n)
- n = prev;
- for (; n; n = rb_prev(n)) {
- ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
- if (ordered->file_offset > end)
- continue;
- if (entry_end(ordered) <= start)
- break;
- if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
- continue;
- list_add(&ordered->log_list, logged_list);
- refcount_inc(&ordered->refs);
- }
- spin_unlock_irq(&tree->lock);
-}
-
-void btrfs_put_logged_extents(struct list_head *logged_list)
-{
- struct btrfs_ordered_extent *ordered;
-
- while (!list_empty(logged_list)) {
- ordered = list_first_entry(logged_list,
- struct btrfs_ordered_extent,
- log_list);
- list_del_init(&ordered->log_list);
- btrfs_put_ordered_extent(ordered);
- }
-}
-
-void btrfs_submit_logged_extents(struct list_head *logged_list,
- struct btrfs_root *log)
-{
- int index = log->log_transid % 2;
-
- spin_lock_irq(&log->log_extents_lock[index]);
- list_splice_tail(logged_list, &log->logged_list[index]);
- spin_unlock_irq(&log->log_extents_lock[index]);
-}
-
-void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *log, u64 transid)
-{
- struct btrfs_ordered_extent *ordered;
- int index = transid % 2;
-
- spin_lock_irq(&log->log_extents_lock[index]);
- while (!list_empty(&log->logged_list[index])) {
- struct inode *inode;
- ordered = list_first_entry(&log->logged_list[index],
- struct btrfs_ordered_extent,
- log_list);
- list_del_init(&ordered->log_list);
- inode = ordered->inode;
- spin_unlock_irq(&log->log_extents_lock[index]);
-
- if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
- !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
- u64 start = ordered->file_offset;
- u64 end = ordered->file_offset + ordered->len - 1;
-
- WARN_ON(!inode);
- filemap_fdatawrite_range(inode->i_mapping, start, end);
- }
- wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
- &ordered->flags));
-
- /*
- * In order to keep us from losing our ordered extent
- * information when committing the transaction we have to make
- * sure that any logged extents are completed when we go to
- * commit the transaction. To do this we simply increase the
- * current transactions pending_ordered counter and decrement it
- * when the ordered extent completes.
- */
- if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
- struct btrfs_ordered_inode_tree *tree;
-
- tree = &BTRFS_I(inode)->ordered_tree;
- spin_lock_irq(&tree->lock);
- if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
- set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
- atomic_inc(&trans->transaction->pending_ordered);
- }
- spin_unlock_irq(&tree->lock);
- }
- btrfs_put_ordered_extent(ordered);
- spin_lock_irq(&log->log_extents_lock[index]);
- }
- spin_unlock_irq(&log->log_extents_lock[index]);
-}
-
-void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
-{
- struct btrfs_ordered_extent *ordered;
- int index = transid % 2;
-
- spin_lock_irq(&log->log_extents_lock[index]);
- while (!list_empty(&log->logged_list[index])) {
- ordered = list_first_entry(&log->logged_list[index],
- struct btrfs_ordered_extent,
- log_list);
- list_del_init(&ordered->log_list);
- spin_unlock_irq(&log->log_extents_lock[index]);
- btrfs_put_ordered_extent(ordered);
- spin_lock_irq(&log->log_extents_lock[index]);
- }
- spin_unlock_irq(&log->log_extents_lock[index]);
-}
-
/*
* Used to drop a reference on an ordered extent. This will free
* the extent if the last reference is dropped.
@@ -913,20 +789,6 @@ out:
return entry;
}
-bool btrfs_have_ordered_extents_in_range(struct inode *inode,
- u64 file_offset,
- u64 len)
-{
- struct btrfs_ordered_extent *oe;
-
- oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
- if (oe) {
- btrfs_put_ordered_extent(oe);
- return true;
- }
- return false;
-}
-
/*
* lookup and return any extent before 'file_offset'. NULL is returned
* if none is found
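With btrfs_have_ordered_extents_in_range() removed, a caller that only needs a
yes/no answer open-codes the check on top of the remaining lookup helper. A
minimal sketch mirroring the deleted wrapper (the function name here is
hypothetical; note the reference taken by the lookup must be dropped):

/* Sketch: true if any ordered extent overlaps [file_offset, file_offset + len). */
static bool range_has_ordered_extent(struct btrfs_inode *inode,
				     u64 file_offset, u64 len)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_range(inode, file_offset, len);
	if (oe) {
		/* btrfs_lookup_ordered_range() took a reference; drop it. */
		btrfs_put_ordered_extent(oe);
		return true;
	}
	return false;
}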
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 3be443fb3001..02d813aaa261 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -54,15 +54,11 @@ struct btrfs_ordered_sum {
#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
* has done its due diligence in updating
* the isize. */
-#define BTRFS_ORDERED_LOGGED_CSUM 8 /* We've logged the csums on this ordered
- ordered extent */
-#define BTRFS_ORDERED_TRUNCATED 9 /* Set when we have to truncate an extent */
+#define BTRFS_ORDERED_TRUNCATED 8 /* Set when we have to truncate an extent */
-#define BTRFS_ORDERED_LOGGED 10 /* Set when we've waited on this ordered extent
- * in the logging code. */
-#define BTRFS_ORDERED_PENDING 11 /* We are waiting for this ordered extent to
+#define BTRFS_ORDERED_PENDING 9 /* We are waiting for this ordered extent to
* complete in the current transaction. */
-#define BTRFS_ORDERED_REGULAR 12 /* Regular IO for COW */
+#define BTRFS_ORDERED_REGULAR 10 /* Regular IO for COW */
struct btrfs_ordered_extent {
/* logical offset in the file */
@@ -182,9 +178,6 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
struct btrfs_inode *inode,
u64 file_offset,
u64 len);
-bool btrfs_have_ordered_extents_in_range(struct inode *inode,
- u64 file_offset,
- u64 len);
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
@@ -193,16 +186,6 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len);
u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len);
-void btrfs_get_logged_extents(struct btrfs_inode *inode,
- struct list_head *logged_list,
- const loff_t start,
- const loff_t end);
-void btrfs_put_logged_extents(struct list_head *logged_list);
-void btrfs_submit_logged_extents(struct list_head *logged_list,
- struct btrfs_root *log);
-void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *log, u64 transid);
-void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
int __init ordered_data_init(void);
void __cold ordered_data_exit(void);
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index a4e11cf04671..df49931ffe92 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -52,17 +52,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
u64 offset;
int ref_index = 0;
- if (item_size < sizeof(*ei)) {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- struct btrfs_extent_item_v0 *ei0;
- BUG_ON(item_size != sizeof(*ei0));
- ei0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_item_v0);
- pr_info("\t\textent refs %u\n",
- btrfs_extent_refs_v0(eb, ei0));
- return;
-#else
- BUG();
-#endif
+ if (unlikely(item_size < sizeof(*ei))) {
+ btrfs_print_v0_err(eb->fs_info);
+ btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
}
ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
@@ -133,20 +125,6 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
WARN_ON(ptr > end);
}
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static void print_extent_ref_v0(struct extent_buffer *eb, int slot)
-{
- struct btrfs_extent_ref_v0 *ref0;
-
- ref0 = btrfs_item_ptr(eb, slot, struct btrfs_extent_ref_v0);
- printk("\t\textent back ref root %llu gen %llu owner %llu num_refs %lu\n",
- btrfs_ref_root_v0(eb, ref0),
- btrfs_ref_generation_v0(eb, ref0),
- btrfs_ref_objectid_v0(eb, ref0),
- (unsigned long)btrfs_ref_count_v0(eb, ref0));
-}
-#endif
-
static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
u32 item_size)
{
@@ -267,8 +245,8 @@ void btrfs_print_leaf(struct extent_buffer *l)
struct btrfs_file_extent_item);
if (btrfs_file_extent_type(l, fi) ==
BTRFS_FILE_EXTENT_INLINE) {
- pr_info("\t\tinline extent data size %u\n",
- btrfs_file_extent_inline_len(l, i, fi));
+ pr_info("\t\tinline extent data size %llu\n",
+ btrfs_file_extent_ram_bytes(l, fi));
break;
}
pr_info("\t\textent data disk bytenr %llu nr %llu\n",
@@ -280,11 +258,8 @@ void btrfs_print_leaf(struct extent_buffer *l)
btrfs_file_extent_ram_bytes(l, fi));
break;
case BTRFS_EXTENT_REF_V0_KEY:
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- print_extent_ref_v0(l, i);
-#else
- BUG();
-#endif
+ btrfs_print_v0_err(fs_info);
+ btrfs_handle_fs_error(fs_info, -EINVAL, NULL);
break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i,
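Both print-tree.c call sites now handle V0 extent items at runtime via
btrfs_print_v0_err() and btrfs_handle_fs_error() instead of compiling support
in or out behind BTRFS_COMPAT_EXTENT_TREE_V0. The helper itself is defined
elsewhere in the series; a plausible minimal body, assuming it does nothing
beyond logging (the exact message wording is an assumption):

/* Sketch of the reporting helper used above; the message text is assumed. */
void btrfs_print_v0_err(struct btrfs_fs_info *fs_info)
{
	btrfs_err(fs_info,
"unsupported V0 extent item detected, please use a kernel that can convert it or re-create the filesystem");
}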
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index c25dc47210a3..4353bb69bb86 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -530,11 +530,11 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
fs_info->qgroup_ulist = NULL;
}
-static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *quota_root,
- u64 src, u64 dst)
+static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst)
{
int ret;
+ struct btrfs_root *quota_root = trans->fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
@@ -554,11 +554,11 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
return ret;
}
-static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *quota_root,
- u64 src, u64 dst)
+static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst)
{
int ret;
+ struct btrfs_root *quota_root = trans->fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
@@ -653,10 +653,10 @@ out:
return ret;
}
-static int del_qgroup_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *quota_root, u64 qgroupid)
+static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
int ret;
+ struct btrfs_root *quota_root = trans->fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
@@ -700,9 +700,9 @@ out:
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct btrfs_qgroup *qgroup)
{
+ struct btrfs_root *quota_root = trans->fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *l;
@@ -718,7 +718,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -742,9 +742,10 @@ out:
}
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct btrfs_qgroup *qgroup)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *quota_root = fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *l;
@@ -752,7 +753,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
int ret;
int slot;
- if (btrfs_is_testing(root->fs_info))
+ if (btrfs_is_testing(fs_info))
return 0;
key.objectid = 0;
@@ -763,7 +764,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -786,10 +787,10 @@ out:
return ret;
}
-static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- struct btrfs_root *root)
+static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *quota_root = fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *l;
@@ -805,7 +806,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -875,8 +876,7 @@ out:
return ret;
}
-int btrfs_quota_enable(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *quota_root;
struct btrfs_root *tree_root = fs_info->tree_root;
@@ -886,6 +886,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_qgroup *qgroup = NULL;
+ struct btrfs_trans_handle *trans = NULL;
int ret = 0;
int slot;
@@ -893,9 +894,25 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
if (fs_info->quota_root)
goto out;
+ /*
+ * 1 for quota root item
+ * 1 for BTRFS_QGROUP_STATUS item
+ *
+ * We also need 2*n items for the QGROUP_INFO/QGROUP_LIMIT items of
+ * each subvolume. However, those are not currently reserved since
+ * it would be a lot of overkill.
+ */
+ trans = btrfs_start_transaction(tree_root, 2);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
+
fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
if (!fs_info->qgroup_ulist) {
ret = -ENOMEM;
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -906,12 +923,14 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
BTRFS_QUOTA_TREE_OBJECTID);
if (IS_ERR(quota_root)) {
ret = PTR_ERR(quota_root);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
+ btrfs_abort_transaction(trans, ret);
goto out_free_root;
}
@@ -921,8 +940,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*ptr));
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
+ }
leaf = path->nodes[0];
ptr = btrfs_item_ptr(leaf, path->slots[0],
@@ -944,9 +965,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
if (ret > 0)
goto out_add_root;
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
-
+ }
while (1) {
slot = path->slots[0];
@@ -956,18 +978,23 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
if (found_key.type == BTRFS_ROOT_REF_KEY) {
ret = add_qgroup_item(trans, quota_root,
found_key.offset);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
+ }
qgroup = add_qgroup_rb(fs_info, found_key.offset);
if (IS_ERR(qgroup)) {
ret = PTR_ERR(qgroup);
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
}
ret = btrfs_next_item(tree_root, path);
- if (ret < 0)
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
+ }
if (ret)
break;
}
@@ -975,18 +1002,28 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
out_add_root:
btrfs_release_path(path);
ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
+ }
qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
if (IS_ERR(qgroup)) {
ret = PTR_ERR(qgroup);
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
spin_lock(&fs_info->qgroup_lock);
fs_info->quota_root = quota_root;
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
spin_unlock(&fs_info->qgroup_lock);
+
+ ret = btrfs_commit_transaction(trans);
+ if (ret) {
+ trans = NULL;
+ goto out_free_path;
+ }
+
ret = qgroup_rescan_init(fs_info, 0, 1);
if (!ret) {
qgroup_rescan_zero_tracking(fs_info);
@@ -1006,20 +1043,35 @@ out:
if (ret) {
ulist_free(fs_info->qgroup_ulist);
fs_info->qgroup_ulist = NULL;
+ if (trans)
+ btrfs_end_transaction(trans);
}
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
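btrfs_quota_enable() now owns its transaction: it starts one sized for the two
items it inserts, aborts on every error path, and commits before kicking off
the rescan. For callers this collapses the old start/call/commit sequence into
a single call; a hedged sketch (the caller shown is hypothetical, the real
ioctl plumbing is elided):

/*
 * Sketch: with the old signature a caller had to bracket the call itself,
 * roughly
 *
 *	trans = btrfs_start_transaction(fs_info->tree_root, 2);
 *	ret = btrfs_quota_enable(trans, fs_info);
 *	btrfs_commit_transaction(trans);
 *
 * whereas now the whole sequence is one call.
 */
static int quota_enable_example(struct btrfs_fs_info *fs_info)
{
	return btrfs_quota_enable(fs_info);
}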
-int btrfs_quota_disable(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *quota_root;
+ struct btrfs_trans_handle *trans = NULL;
int ret = 0;
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
goto out;
+
+ /*
+ * 1 for the root item
+ *
+ * We should also reserve enough items for the quota tree deletion
+ * in btrfs_clean_quota_tree(), but this is not done.
+ */
+ trans = btrfs_start_transaction(fs_info->tree_root, 1);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
@@ -1031,12 +1083,16 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
btrfs_free_qgroup_config(fs_info);
ret = btrfs_clean_quota_tree(trans, quota_root);
- if (ret)
- goto out;
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto end_trans;
+ }
- ret = btrfs_del_root(trans, fs_info, &quota_root->root_key);
- if (ret)
- goto out;
+ ret = btrfs_del_root(trans, &quota_root->root_key);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto end_trans;
+ }
list_del(&quota_root->dirty_list);
@@ -1048,6 +1104,9 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
free_extent_buffer(quota_root->node);
free_extent_buffer(quota_root->commit_root);
kfree(quota_root);
+
+end_trans:
+ ret = btrfs_end_transaction(trans);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
@@ -1177,9 +1236,10 @@ out:
return ret;
}
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
@@ -1216,13 +1276,13 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
}
}
- ret = add_qgroup_relation_item(trans, quota_root, src, dst);
+ ret = add_qgroup_relation_item(trans, src, dst);
if (ret)
goto out;
- ret = add_qgroup_relation_item(trans, quota_root, dst, src);
+ ret = add_qgroup_relation_item(trans, dst, src);
if (ret) {
- del_qgroup_relation_item(trans, quota_root, src, dst);
+ del_qgroup_relation_item(trans, src, dst);
goto out;
}
@@ -1240,9 +1300,10 @@ out:
return ret;
}
-static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
@@ -1276,8 +1337,8 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
ret = -ENOENT;
goto out;
exist:
- ret = del_qgroup_relation_item(trans, quota_root, src, dst);
- err = del_qgroup_relation_item(trans, quota_root, dst, src);
+ ret = del_qgroup_relation_item(trans, src, dst);
+ err = del_qgroup_relation_item(trans, dst, src);
if (err && !ret)
ret = err;
@@ -1290,21 +1351,22 @@ out:
return ret;
}
-int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret = 0;
mutex_lock(&fs_info->qgroup_ioctl_lock);
- ret = __del_qgroup_relation(trans, fs_info, src, dst);
+ ret = __del_qgroup_relation(trans, src, dst);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
-int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid)
+int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
int ret = 0;
@@ -1336,9 +1398,9 @@ out:
return ret;
}
-int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid)
+int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
struct btrfs_qgroup_list *list;
@@ -1362,16 +1424,15 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
goto out;
}
}
- ret = del_qgroup_item(trans, quota_root, qgroupid);
+ ret = del_qgroup_item(trans, qgroupid);
if (ret && ret != -ENOENT)
goto out;
while (!list_empty(&qgroup->groups)) {
list = list_first_entry(&qgroup->groups,
struct btrfs_qgroup_list, next_group);
- ret = __del_qgroup_relation(trans, fs_info,
- qgroupid,
- list->group->qgroupid);
+ ret = __del_qgroup_relation(trans, qgroupid,
+ list->group->qgroupid);
if (ret)
goto out;
}
@@ -1384,10 +1445,10 @@ out:
return ret;
}
-int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid,
+int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
struct btrfs_qgroup_limit *limit)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
int ret = 0;
@@ -1451,7 +1512,7 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
spin_unlock(&fs_info->qgroup_lock);
- ret = update_qgroup_limit_item(trans, quota_root, qgroup);
+ ret = update_qgroup_limit_item(trans, qgroup);
if (ret) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
btrfs_info(fs_info, "unable to update quota limit for %llu",
@@ -1519,10 +1580,10 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
return 0;
}
-int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
- gfp_t gfp_flag)
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ u64 num_bytes, gfp_t gfp_flag)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_qgroup_extent_record *record;
struct btrfs_delayed_ref_root *delayed_refs;
int ret;
@@ -1530,8 +1591,6 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
|| bytenr == 0 || num_bytes == 0)
return 0;
- if (WARN_ON(trans == NULL))
- return -EINVAL;
record = kmalloc(sizeof(*record), gfp_flag);
if (!record)
return -ENOMEM;
@@ -1552,9 +1611,9 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
}
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct extent_buffer *eb)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int nr = btrfs_header_nritems(eb);
int i, extent_type, ret;
struct btrfs_key key;
@@ -1584,8 +1643,8 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
- ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
- num_bytes, GFP_NOFS);
+ ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
+ GFP_NOFS);
if (ret)
return ret;
}
@@ -1655,11 +1714,10 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
}
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct extent_buffer *root_eb,
u64 root_gen, int root_level)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret = 0;
int level;
struct extent_buffer *eb = root_eb;
@@ -1678,7 +1736,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
}
if (root_level == 0) {
- ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
+ ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
goto out;
}
@@ -1736,8 +1794,7 @@ walk_down:
btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
- ret = btrfs_qgroup_trace_extent(trans, fs_info,
- child_bytenr,
+ ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
fs_info->nodesize,
GFP_NOFS);
if (ret)
@@ -1745,8 +1802,8 @@ walk_down:
}
if (level == 0) {
- ret = btrfs_qgroup_trace_leaf_items(trans,fs_info,
- path->nodes[level]);
+ ret = btrfs_qgroup_trace_leaf_items(trans,
+ path->nodes[level]);
if (ret)
goto out;
@@ -1981,12 +2038,11 @@ static int maybe_fs_roots(struct ulist *roots)
return is_fstree(unode->val);
}
-int
-btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 bytenr, u64 num_bytes,
- struct ulist *old_roots, struct ulist *new_roots)
+int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ u64 num_bytes, struct ulist *old_roots,
+ struct ulist *new_roots)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct ulist *qgroups = NULL;
struct ulist *tmp = NULL;
u64 seq;
@@ -2116,9 +2172,10 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
ulist_del(record->old_roots, qgroup_to_skip,
0);
}
- ret = btrfs_qgroup_account_extent(trans, fs_info,
- record->bytenr, record->num_bytes,
- record->old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(trans, record->bytenr,
+ record->num_bytes,
+ record->old_roots,
+ new_roots);
record->old_roots = NULL;
new_roots = NULL;
}
@@ -2136,9 +2193,9 @@ cleanup:
/*
* called from commit_transaction. Writes all changed qgroups to disk.
*/
-int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root = fs_info->quota_root;
int ret = 0;
@@ -2152,11 +2209,11 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
struct btrfs_qgroup, dirty);
list_del_init(&qgroup->dirty);
spin_unlock(&fs_info->qgroup_lock);
- ret = update_qgroup_info_item(trans, quota_root, qgroup);
+ ret = update_qgroup_info_item(trans, qgroup);
if (ret)
fs_info->qgroup_flags |=
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- ret = update_qgroup_limit_item(trans, quota_root, qgroup);
+ ret = update_qgroup_limit_item(trans, qgroup);
if (ret)
fs_info->qgroup_flags |=
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -2168,7 +2225,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
spin_unlock(&fs_info->qgroup_lock);
- ret = update_qgroup_status_item(trans, fs_info, quota_root);
+ ret = update_qgroup_status_item(trans);
if (ret)
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -2181,13 +2238,13 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
* cause a transaction abort so we take extra care here to only error
* when a readonly fs is a reasonable outcome.
*/
-int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
- struct btrfs_qgroup_inherit *inherit)
+int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
+ u64 objectid, struct btrfs_qgroup_inherit *inherit)
{
int ret = 0;
int i;
u64 *i_qgroups;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root = fs_info->quota_root;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
@@ -2229,22 +2286,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- if (srcid) {
- struct btrfs_root *srcroot;
- struct btrfs_key srckey;
-
- srckey.objectid = srcid;
- srckey.type = BTRFS_ROOT_ITEM_KEY;
- srckey.offset = (u64)-1;
- srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
- if (IS_ERR(srcroot)) {
- ret = PTR_ERR(srcroot);
- goto out;
- }
-
- level_size = fs_info->nodesize;
- }
-
/*
* add qgroup to all inherited groups
*/
@@ -2253,12 +2294,12 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
if (*i_qgroups == 0)
continue;
- ret = add_qgroup_relation_item(trans, quota_root,
- objectid, *i_qgroups);
+ ret = add_qgroup_relation_item(trans, objectid,
+ *i_qgroups);
if (ret && ret != -EEXIST)
goto out;
- ret = add_qgroup_relation_item(trans, quota_root,
- *i_qgroups, objectid);
+ ret = add_qgroup_relation_item(trans, *i_qgroups,
+ objectid);
if (ret && ret != -EEXIST)
goto out;
}
@@ -2281,7 +2322,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
dstgroup->rsv_excl = inherit->lim.rsv_excl;
- ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
+ ret = update_qgroup_limit_item(trans, dstgroup);
if (ret) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
btrfs_info(fs_info,
@@ -2301,6 +2342,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
* our counts don't go crazy, so at this point the only
* difference between the two roots should be the root node.
*/
+ level_size = fs_info->nodesize;
dstgroup->rfer = srcgroup->rfer;
dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
dstgroup->excl = level_size;
@@ -2598,10 +2640,10 @@ static bool is_last_leaf(struct btrfs_path *path)
* returns < 0 on error, 0 when more leafs are to be scanned.
* returns 1 when done.
*/
-static int
-qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
- struct btrfs_trans_handle *trans)
+static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_key found;
struct extent_buffer *scratch_leaf = NULL;
struct ulist *roots = NULL;
@@ -2669,8 +2711,8 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
if (ret < 0)
goto out;
/* For rescan, just pass old_roots as NULL */
- ret = btrfs_qgroup_account_extent(trans, fs_info,
- found.objectid, num_bytes, NULL, roots);
+ ret = btrfs_qgroup_account_extent(trans, found.objectid,
+ num_bytes, NULL, roots);
if (ret < 0)
goto out;
}
@@ -2716,7 +2758,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
err = -EINTR;
} else {
- err = qgroup_rescan_leaf(fs_info, path, trans);
+ err = qgroup_rescan_leaf(trans, path);
}
if (err > 0)
btrfs_commit_transaction(trans);
@@ -2751,7 +2793,7 @@ out:
err);
goto done;
}
- ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
+ ret = update_qgroup_status_item(trans);
if (ret < 0) {
err = ret;
btrfs_err(fs_info, "fail to update qgroup status: %d", err);
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index d60dd06445ce..54b8bb282c0e 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -141,24 +141,19 @@ struct btrfs_qgroup {
#define QGROUP_RELEASE (1<<1)
#define QGROUP_FREE (1<<2)
-int btrfs_quota_enable(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
-int btrfs_quota_disable(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info);
+int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
bool interruptible);
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 src, u64 dst);
-int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 src, u64 dst);
-int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid);
-int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid);
-int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 qgroupid,
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst);
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
+ u64 dst);
+int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
+int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
+int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
@@ -217,9 +212,8 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
* Return <0 for error, like memory allocation failure or invalid parameter
* (NULL trans)
*/
-int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
- gfp_t gfp_flag);
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ u64 num_bytes, gfp_t gfp_flag);
/*
* Inform qgroup to trace all leaf items of data
@@ -228,7 +222,6 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
* Return <0 for error (ENOMEM)
*/
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
/*
* Inform qgroup to trace a whole subtree, including all its child tree
@@ -241,20 +234,15 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
* Return <0 for error(ENOMEM or tree search error)
*/
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct extent_buffer *root_eb,
u64 root_gen, int root_level);
-int
-btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 bytenr, u64 num_bytes,
- struct ulist *old_roots, struct ulist *new_roots);
+int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ u64 num_bytes, struct ulist *old_roots,
+ struct ulist *new_roots);
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
-int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
-int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
- struct btrfs_qgroup_inherit *inherit);
+int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
+int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
+ u64 objectid, struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes,
enum btrfs_qgroup_rsv_type type);
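Every prototype change above follows one pattern: a function that already
receives a transaction handle can derive the fs_info, and from it the quota
root, by itself, so the redundant parameter is dropped. A minimal sketch of
the conversion, with a hypothetical example_op() standing in for the converted
functions:

/* Before: both arguments had to be passed, although they can never disagree. */
int example_op(struct btrfs_trans_handle *trans,
	       struct btrfs_fs_info *fs_info, u64 qgroupid);

/* After: one argument fewer, everything derived from the handle. */
int example_op(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;

	return update_one_qgroup(quota_root, qgroupid);	/* hypothetical */
}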
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 5e4ad134b9ad..df41d7049936 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -5,32 +5,19 @@
*/
#include <linux/sched.h>
-#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include <linux/blkdev.h>
-#include <linux/random.h>
-#include <linux/iocontext.h>
-#include <linux/capability.h>
-#include <linux/ratelimit.h>
-#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
-#include <asm/div64.h>
#include "ctree.h"
-#include "extent_map.h"
#include "disk-io.h"
-#include "transaction.h"
-#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
-#include "check-integrity.h"
-#include "rcu-string.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT 1
@@ -175,8 +162,6 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
-static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
-static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
@@ -185,7 +170,13 @@ static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
int need_check);
-static void async_scrub_parity(struct btrfs_raid_bio *rbio);
+static void scrub_parity_work(struct btrfs_work *work);
+
+static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
+{
+ btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
+ btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
+}
/*
* the stripe hash table is used for locking, and to collect
@@ -260,7 +251,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
s = kmap(rbio->bio_pages[i]);
d = kmap(rbio->stripe_pages[i]);
- memcpy(d, s, PAGE_SIZE);
+ copy_page(d, s);
kunmap(rbio->bio_pages[i]);
kunmap(rbio->stripe_pages[i]);
@@ -516,32 +507,21 @@ static void run_xor(void **pages, int src_cnt, ssize_t len)
}
/*
- * returns true if the bio list inside this rbio
- * covers an entire stripe (no rmw required).
- * Must be called with the bio list lock held, or
- * at a time when you know it is impossible to add
- * new bios into the list
+ * Returns true if the bio list inside this rbio covers an entire stripe (no
+ * rmw required).
*/
-static int __rbio_is_full(struct btrfs_raid_bio *rbio)
+static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
+ unsigned long flags;
unsigned long size = rbio->bio_list_bytes;
int ret = 1;
+ spin_lock_irqsave(&rbio->bio_list_lock, flags);
if (size != rbio->nr_data * rbio->stripe_len)
ret = 0;
-
BUG_ON(size > rbio->nr_data * rbio->stripe_len);
- return ret;
-}
-
-static int rbio_is_full(struct btrfs_raid_bio *rbio)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&rbio->bio_list_lock, flags);
- ret = __rbio_is_full(rbio);
spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
+
return ret;
}
@@ -812,16 +792,16 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
spin_unlock_irqrestore(&h->lock, flags);
if (next->operation == BTRFS_RBIO_READ_REBUILD)
- async_read_rebuild(next);
+ start_async_work(next, read_rebuild_work);
else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
steal_rbio(rbio, next);
- async_read_rebuild(next);
+ start_async_work(next, read_rebuild_work);
} else if (next->operation == BTRFS_RBIO_WRITE) {
steal_rbio(rbio, next);
- async_rmw_stripe(next);
+ start_async_work(next, rmw_work);
} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
steal_rbio(rbio, next);
- async_scrub_parity(next);
+ start_async_work(next, scrub_parity_work);
}
goto done_nolock;
@@ -1275,7 +1255,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
pointers);
} else {
/* raid5 */
- memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+ copy_page(pointers[nr_data], pointers[0]);
run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
}
@@ -1343,7 +1323,7 @@ write_data:
bio->bi_private = rbio;
bio->bi_end_io = raid_write_end_io;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE;
submit_bio(bio);
}
@@ -1508,20 +1488,6 @@ cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
-static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
-{
- btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
- btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
-}
-
-static void async_read_rebuild(struct btrfs_raid_bio *rbio)
-{
- btrfs_init_work(&rbio->work, btrfs_rmw_helper,
- read_rebuild_work, NULL, NULL);
-
- btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
-}
-
/*
* the stripe must be locked by the caller. It will
* unlock after all the writes are done
@@ -1599,7 +1565,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
bio->bi_private = rbio;
bio->bi_end_io = raid_rmw_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_opf = REQ_OP_READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
@@ -1652,7 +1618,7 @@ static int partial_stripe_write(struct btrfs_raid_bio *rbio)
ret = lock_stripe_add(rbio);
if (ret == 0)
- async_rmw_stripe(rbio);
+ start_async_work(rbio, rmw_work);
return 0;
}
@@ -1720,8 +1686,11 @@ static void run_plug(struct btrfs_plug_cb *plug)
list_del_init(&cur->plug_list);
if (rbio_is_full(cur)) {
+ int ret;
+
/* we have a full stripe, send it down */
- full_stripe_write(cur);
+ ret = full_stripe_write(cur);
+ BUG_ON(ret);
continue;
}
if (last) {
@@ -1941,9 +1910,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
BUG_ON(failb != -1);
pstripe:
/* Copy parity block into failed block to start with */
- memcpy(pointers[faila],
- pointers[rbio->nr_data],
- PAGE_SIZE);
+ copy_page(pointers[faila], pointers[rbio->nr_data]);
/* rearrange the pointer array */
p = pointers[faila];
@@ -2145,7 +2112,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
bio->bi_private = rbio;
bio->bi_end_io = raid_recover_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_opf = REQ_OP_READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
@@ -2448,7 +2415,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
pointers);
} else {
/* raid5 */
- memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+ copy_page(pointers[nr_data], pointers[0]);
run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
}
@@ -2456,7 +2423,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
parity = kmap(p);
if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
- memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
+ copy_page(parity, pointers[rbio->scrubp]);
else
/* Parity is right, needn't writeback */
bitmap_clear(rbio->dbitmap, pagenr, 1);
@@ -2517,7 +2484,7 @@ submit_write:
bio->bi_private = rbio;
bio->bi_end_io = raid_write_end_io;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE;
submit_bio(bio);
}
@@ -2699,7 +2666,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
bio->bi_private = rbio;
bio->bi_end_io = raid56_parity_scrub_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_opf = REQ_OP_READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
@@ -2728,18 +2695,10 @@ static void scrub_parity_work(struct btrfs_work *work)
raid56_parity_scrub_stripe(rbio);
}
-static void async_scrub_parity(struct btrfs_raid_bio *rbio)
-{
- btrfs_init_work(&rbio->work, btrfs_rmw_helper,
- scrub_parity_work, NULL, NULL);
-
- btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
-}
-
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
if (!lock_stripe_add(rbio))
- async_scrub_parity(rbio);
+ start_async_work(rbio, scrub_parity_work);
}
/* The following code is used for dev replace of a missing RAID 5/6 device. */
@@ -2781,5 +2740,5 @@ raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
if (!lock_stripe_add(rbio))
- async_read_rebuild(rbio);
+ start_async_work(rbio, read_rebuild_work);
}
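The three removed async_*() helpers differed only in the work function they
queued, which is exactly what start_async_work() parametrizes. The same file
also converts open-coded memcpy(..., PAGE_SIZE) calls to copy_page(); the
generic fallback makes the equivalence explicit (most architectures provide an
optimized routine instead):

/* Sketch: the generic definition that copy_page() falls back to. */
#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)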
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 40f1bcef394d..dec14b739b10 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -7,7 +7,6 @@
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
-#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
@@ -355,7 +354,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
dev = bbio->stripes[nzones].dev;
/* cannot read ahead on missing device. */
- if (!dev->bdev)
+ if (!dev->bdev)
continue;
zone = reada_find_zone(dev, logical, bbio);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 879b76fa881a..8783a1776540 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -586,29 +586,6 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
return btrfs_get_fs_root(fs_info, &key, false);
}
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static noinline_for_stack
-struct btrfs_root *find_tree_root(struct reloc_control *rc,
- struct extent_buffer *leaf,
- struct btrfs_extent_ref_v0 *ref0)
-{
- struct btrfs_root *root;
- u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
- u64 generation = btrfs_ref_generation_v0(leaf, ref0);
-
- BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);
-
- root = read_fs_root(rc->extent_root->fs_info, root_objectid);
- BUG_ON(IS_ERR(root));
-
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
- generation != btrfs_root_generation(&root->root_item))
- return NULL;
-
- return root;
-}
-#endif
-
static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
unsigned long *ptr, unsigned long *end)
@@ -621,12 +598,11 @@ int find_inline_backref(struct extent_buffer *leaf, int slot,
btrfs_item_key_to_cpu(leaf, &key, slot);
item_size = btrfs_item_size_nr(leaf, slot);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
- WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
+ btrfs_print_v0_err(leaf->fs_info);
+ btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
return 1;
}
-#endif
ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
WARN_ON(!(btrfs_extent_flags(leaf, ei) &
BTRFS_EXTENT_FLAG_TREE_BLOCK));
@@ -792,7 +768,7 @@ again:
type = btrfs_get_extent_inline_ref_type(eb, iref,
BTRFS_REF_TYPE_BLOCK);
if (type == BTRFS_REF_TYPE_INVALID) {
- err = -EINVAL;
+ err = -EUCLEAN;
goto out;
}
key.type = type;
@@ -811,29 +787,7 @@ again:
goto next;
}
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
- key.type == BTRFS_EXTENT_REF_V0_KEY) {
- if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
- struct btrfs_extent_ref_v0 *ref0;
- ref0 = btrfs_item_ptr(eb, path1->slots[0],
- struct btrfs_extent_ref_v0);
- if (key.objectid == key.offset) {
- root = find_tree_root(rc, eb, ref0);
- if (root && !should_ignore_root(root))
- cur->root = root;
- else
- list_add(&cur->list, &useless);
- break;
- }
- if (is_cowonly_root(btrfs_ref_root_v0(eb,
- ref0)))
- cur->cowonly = 1;
- }
-#else
- ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
-#endif
if (key.objectid == key.offset) {
/*
* only root blocks of reloc trees use
@@ -876,6 +830,12 @@ again:
edge->node[UPPER] = upper;
goto next;
+ } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+ err = -EINVAL;
+ btrfs_print_v0_err(rc->extent_root->fs_info);
+ btrfs_handle_fs_error(rc->extent_root->fs_info, err,
+ NULL);
+ goto out;
} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
goto next;
}
@@ -1321,18 +1281,19 @@ static void __del_reloc_root(struct btrfs_root *root)
struct mapping_node *node = NULL;
struct reloc_control *rc = fs_info->reloc_ctl;
- spin_lock(&rc->reloc_root_tree.lock);
- rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->node->start);
- if (rb_node) {
- node = rb_entry(rb_node, struct mapping_node, rb_node);
- rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+ if (rc) {
+ spin_lock(&rc->reloc_root_tree.lock);
+ rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+ root->node->start);
+ if (rb_node) {
+ node = rb_entry(rb_node, struct mapping_node, rb_node);
+ rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+ }
+ spin_unlock(&rc->reloc_root_tree.lock);
+ if (!node)
+ return;
+ BUG_ON((struct btrfs_root *)node->data != root);
}
- spin_unlock(&rc->reloc_root_tree.lock);
-
- if (!node)
- return;
- BUG_ON((struct btrfs_root *)node->data != root);
spin_lock(&fs_info->trans_lock);
list_del_init(&root->root_list);
@@ -1918,13 +1879,12 @@ again:
* and tree block numbers, if current trans doesn't free
* data reloc tree inode.
*/
- ret = btrfs_qgroup_trace_subtree(trans, src, parent,
+ ret = btrfs_qgroup_trace_subtree(trans, parent,
btrfs_header_generation(parent),
btrfs_header_level(parent));
if (ret < 0)
break;
- ret = btrfs_qgroup_trace_subtree(trans, dest,
- path->nodes[level],
+ ret = btrfs_qgroup_trace_subtree(trans, path->nodes[level],
btrfs_header_generation(path->nodes[level]),
btrfs_header_level(path->nodes[level]));
if (ret < 0)
@@ -3333,48 +3293,6 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
return 0;
}
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
-static int get_ref_objectid_v0(struct reloc_control *rc,
- struct btrfs_path *path,
- struct btrfs_key *extent_key,
- u64 *ref_objectid, int *path_change)
-{
- struct btrfs_key key;
- struct extent_buffer *leaf;
- struct btrfs_extent_ref_v0 *ref0;
- int ret;
- int slot;
-
- leaf = path->nodes[0];
- slot = path->slots[0];
- while (1) {
- if (slot >= btrfs_header_nritems(leaf)) {
- ret = btrfs_next_leaf(rc->extent_root, path);
- if (ret < 0)
- return ret;
- BUG_ON(ret > 0);
- leaf = path->nodes[0];
- slot = path->slots[0];
- if (path_change)
- *path_change = 1;
- }
- btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid != extent_key->objectid)
- return -ENOENT;
-
- if (key.type != BTRFS_EXTENT_REF_V0_KEY) {
- slot++;
- continue;
- }
- ref0 = btrfs_item_ptr(leaf, slot,
- struct btrfs_extent_ref_v0);
- *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
- break;
- }
- return 0;
-}
-#endif
-
/*
* helper to add a tree block to the list.
* the major work is getting the generation and level of the block
@@ -3407,23 +3325,12 @@ static int add_tree_block(struct reloc_control *rc,
level = (int)extent_key->offset;
}
generation = btrfs_extent_generation(eb, ei);
+ } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
+ btrfs_print_v0_err(eb->fs_info);
+ btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
+ return -EINVAL;
} else {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- u64 ref_owner;
- int ret;
-
- BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
- ret = get_ref_objectid_v0(rc, path, extent_key,
- &ref_owner, NULL);
- if (ret < 0)
- return ret;
- BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
- level = (int)ref_owner;
- /* FIXME: get real generation */
- generation = 0;
-#else
BUG();
-#endif
}
btrfs_release_path(path);
@@ -3563,11 +3470,8 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
key.offset = 0;
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
- if (IS_ERR(inode) || is_bad_inode(inode)) {
- if (!IS_ERR(inode))
- iput(inode);
+ if (IS_ERR(inode))
return -ENOENT;
- }
truncate:
ret = btrfs_check_trunc_cache_free_space(fs_info,
@@ -3781,12 +3685,7 @@ int add_data_references(struct reloc_control *rc,
eb = path->nodes[0];
ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
- ptr = end;
- else
-#endif
- ptr += sizeof(struct btrfs_extent_item);
+ ptr += sizeof(struct btrfs_extent_item);
while (ptr < end) {
iref = (struct btrfs_extent_inline_ref *)ptr;
@@ -3801,7 +3700,7 @@ int add_data_references(struct reloc_control *rc,
ret = find_data_references(rc, extent_key,
eb, dref, blocks);
} else {
- ret = -EINVAL;
+ ret = -EUCLEAN;
btrfs_err(rc->extent_root->fs_info,
"extent %llu slot %d has an invalid inline ref type",
eb->start, path->slots[0]);
@@ -3832,13 +3731,7 @@ int add_data_references(struct reloc_control *rc,
if (key.objectid != extent_key->objectid)
break;
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
- key.type == BTRFS_EXTENT_REF_V0_KEY) {
-#else
- BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
-#endif
ret = __add_tree_block(rc, key.offset, blocksize,
blocks);
} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
@@ -3846,6 +3739,10 @@ int add_data_references(struct reloc_control *rc,
struct btrfs_extent_data_ref);
ret = find_data_references(rc, extent_key,
eb, dref, blocks);
+ } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+ btrfs_print_v0_err(eb->fs_info);
+ btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
+ ret = -EINVAL;
} else {
ret = 0;
}
@@ -4084,41 +3981,13 @@ restart:
flags = btrfs_extent_flags(path->nodes[0], ei);
ret = check_extent_flags(flags);
BUG_ON(ret);
-
+ } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
+ err = -EINVAL;
+ btrfs_print_v0_err(trans->fs_info);
+ btrfs_abort_transaction(trans, err);
+ break;
} else {
-#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
- u64 ref_owner;
- int path_change = 0;
-
- BUG_ON(item_size !=
- sizeof(struct btrfs_extent_item_v0));
- ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
- &path_change);
- if (ret < 0) {
- err = ret;
- break;
- }
- if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
- flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
- else
- flags = BTRFS_EXTENT_FLAG_DATA;
-
- if (path_change) {
- btrfs_release_path(path);
-
- path->search_commit_root = 1;
- path->skip_locking = 1;
- ret = btrfs_search_slot(NULL, rc->extent_root,
- &key, path, 0, 0);
- if (ret < 0) {
- err = ret;
- break;
- }
- BUG_ON(ret > 0);
- }
-#else
BUG();
-#endif
}
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
@@ -4169,8 +4038,7 @@ restart:
}
}
if (trans && progress && err == -ENOSPC) {
- ret = btrfs_force_chunk_alloc(trans, fs_info,
- rc->block_group->flags);
+ ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
if (ret == 1) {
err = 0;
progress = 0;
@@ -4284,7 +4152,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
- BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
+ BUG_ON(IS_ERR(inode));
BTRFS_I(inode)->index_cnt = group->key.objectid;
err = btrfs_orphan_add(trans, BTRFS_I(inode));
@@ -4375,7 +4243,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
BUG_ON(!rc->block_group);
- ret = btrfs_inc_block_group_ro(fs_info, rc->block_group);
+ ret = btrfs_inc_block_group_ro(rc->block_group);
if (ret) {
err = ret;
goto out;
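The two is_bad_inode() checks disappear on the assumption that btrfs_iget()
now returns an ERR_PTR on failure rather than a half-constructed inode (that
conversion happens elsewhere in the series and is not shown here). A sketch of
the resulting pattern:

/* Sketch: inode lookup once btrfs_iget() reports failure via ERR_PTR. */
static struct inode *lookup_cache_inode(struct btrfs_fs_info *fs_info,
					struct btrfs_key *key,
					struct btrfs_root *root)
{
	struct inode *inode = btrfs_iget(fs_info->sb, key, root, NULL);

	/* No separate is_bad_inode()/iput() dance is needed anymore. */
	return IS_ERR(inode) ? NULL : inode;
}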
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index c451285976ac..65bda0682928 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -320,9 +320,9 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
/* drop the root item for 'key' from the tree root */
int btrfs_del_root(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, const struct btrfs_key *key)
+ const struct btrfs_key *key)
{
- struct btrfs_root *root = fs_info->tree_root;
+ struct btrfs_root *root = trans->fs_info->tree_root;
struct btrfs_path *path;
int ret;
@@ -341,13 +341,12 @@ out:
return ret;
}
-int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
- const char *name, int name_len)
+int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 *sequence, const char *name,
+ int name_len)
{
- struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *tree_root = trans->fs_info->tree_root;
struct btrfs_path *path;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
@@ -413,12 +412,11 @@ out:
*
* Will return 0, -ENOMEM, or anything from the CoW path
*/
-int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
- const char *name, int name_len)
+int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
+ u64 ref_id, u64 dirid, u64 sequence, const char *name,
+ int name_len)
{
- struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *tree_root = trans->fs_info->tree_root;
struct btrfs_key key;
int ret;
struct btrfs_path *path;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6702896cdb8f..3be1456b5116 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -188,32 +188,6 @@ struct scrub_ctx {
refcount_t refs;
};
-struct scrub_fixup_nodatasum {
- struct scrub_ctx *sctx;
- struct btrfs_device *dev;
- u64 logical;
- struct btrfs_root *root;
- struct btrfs_work work;
- int mirror_num;
-};
-
-struct scrub_nocow_inode {
- u64 inum;
- u64 offset;
- u64 root;
- struct list_head list;
-};
-
-struct scrub_copy_nocow_ctx {
- struct scrub_ctx *sctx;
- u64 logical;
- u64 len;
- int mirror_num;
- u64 physical_for_dev_replace;
- struct list_head inodes;
- struct btrfs_work work;
-};
-
struct scrub_warning {
struct btrfs_path *path;
u64 extent_item_size;
@@ -232,8 +206,6 @@ struct full_stripe_lock {
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
-static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
-static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
struct scrub_block *sblocks_for_recheck);
@@ -277,13 +249,6 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
-static int write_page_nocow(struct scrub_ctx *sctx,
- u64 physical_for_dev_replace, struct page *page);
-static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
- struct scrub_copy_nocow_ctx *ctx);
-static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
- int mirror_num, u64 physical_for_dev_replace);
-static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);
@@ -555,60 +520,6 @@ out:
return ret;
}
-/*
- * used for workers that require transaction commits (i.e., for the
- * NOCOW case)
- */
-static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
-{
- struct btrfs_fs_info *fs_info = sctx->fs_info;
-
- refcount_inc(&sctx->refs);
- /*
- * increment scrubs_running to prevent cancel requests from
- * completing as long as a worker is running. we must also
- * increment scrubs_paused to prevent deadlocking on pause
- * requests used for transactions commits (as the worker uses a
- * transaction context). it is safe to regard the worker
- * as paused for all matters practical. effectively, we only
- * avoid cancellation requests from completing.
- */
- mutex_lock(&fs_info->scrub_lock);
- atomic_inc(&fs_info->scrubs_running);
- atomic_inc(&fs_info->scrubs_paused);
- mutex_unlock(&fs_info->scrub_lock);
-
- /*
- * check if @scrubs_running=@scrubs_paused condition
- * inside wait_event() is not an atomic operation.
- * which means we may inc/dec @scrub_running/paused
- * at any time. Let's wake up @scrub_pause_wait as
- * much as we can to let commit transaction blocked less.
- */
- wake_up(&fs_info->scrub_pause_wait);
-
- atomic_inc(&sctx->workers_pending);
-}
-
-/* used for workers that require transaction commits */
-static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
-{
- struct btrfs_fs_info *fs_info = sctx->fs_info;
-
- /*
- * see scrub_pending_trans_workers_inc() why we're pretending
- * to be paused in the scrub counters
- */
- mutex_lock(&fs_info->scrub_lock);
- atomic_dec(&fs_info->scrubs_running);
- atomic_dec(&fs_info->scrubs_paused);
- mutex_unlock(&fs_info->scrub_lock);
- atomic_dec(&sctx->workers_pending);
- wake_up(&fs_info->scrub_pause_wait);
- wake_up(&sctx->list_wait);
- scrub_put_ctx(sctx);
-}
-
static void scrub_free_csums(struct scrub_ctx *sctx)
{
while (!list_empty(&sctx->csum_list)) {
@@ -882,194 +793,6 @@ out:
btrfs_free_path(path);
}
-static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
-{
- struct page *page = NULL;
- unsigned long index;
- struct scrub_fixup_nodatasum *fixup = fixup_ctx;
- int ret;
- int corrected = 0;
- struct btrfs_key key;
- struct inode *inode = NULL;
- struct btrfs_fs_info *fs_info;
- u64 end = offset + PAGE_SIZE - 1;
- struct btrfs_root *local_root;
- int srcu_index;
-
- key.objectid = root;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
-
- fs_info = fixup->root->fs_info;
- srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
-
- local_root = btrfs_read_fs_root_no_name(fs_info, &key);
- if (IS_ERR(local_root)) {
- srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
- return PTR_ERR(local_root);
- }
-
- key.type = BTRFS_INODE_ITEM_KEY;
- key.objectid = inum;
- key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
- srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- index = offset >> PAGE_SHIFT;
-
- page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
- if (!page) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (PageUptodate(page)) {
- if (PageDirty(page)) {
- /*
- * we need to write the data to the defect sector. the
- * data that was in that sector is not in memory,
- * because the page was modified. we must not write the
- * modified page to that sector.
- *
- * TODO: what could be done here: wait for the delalloc
- * runner to write out that page (might involve
- * COW) and see whether the sector is still
- * referenced afterwards.
- *
- * For the meantime, we'll treat this error
- * incorrectable, although there is a chance that a
- * later scrub will find the bad sector again and that
- * there's no dirty page in memory, then.
- */
- ret = -EIO;
- goto out;
- }
- ret = repair_io_failure(fs_info, inum, offset, PAGE_SIZE,
- fixup->logical, page,
- offset - page_offset(page),
- fixup->mirror_num);
- unlock_page(page);
- corrected = !ret;
- } else {
- /*
- * we need to get good data first. the general readpage path
- * will call repair_io_failure for us, we just have to make
- * sure we read the bad mirror.
- */
- ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
- EXTENT_DAMAGED);
- if (ret) {
- /* set_extent_bits should give proper error */
- WARN_ON(ret > 0);
- if (ret > 0)
- ret = -EFAULT;
- goto out;
- }
-
- ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
- btrfs_get_extent,
- fixup->mirror_num);
- wait_on_page_locked(page);
-
- corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
- end, EXTENT_DAMAGED, 0, NULL);
- if (!corrected)
- clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
- EXTENT_DAMAGED);
- }
-
-out:
- if (page)
- put_page(page);
-
- iput(inode);
-
- if (ret < 0)
- return ret;
-
- if (ret == 0 && corrected) {
- /*
- * we only need to call readpage for one of the inodes belonging
- * to this extent. so make iterate_extent_inodes stop
- */
- return 1;
- }
-
- return -EIO;
-}
-
-static void scrub_fixup_nodatasum(struct btrfs_work *work)
-{
- struct btrfs_fs_info *fs_info;
- int ret;
- struct scrub_fixup_nodatasum *fixup;
- struct scrub_ctx *sctx;
- struct btrfs_trans_handle *trans = NULL;
- struct btrfs_path *path;
- int uncorrectable = 0;
-
- fixup = container_of(work, struct scrub_fixup_nodatasum, work);
- sctx = fixup->sctx;
- fs_info = fixup->root->fs_info;
-
- path = btrfs_alloc_path();
- if (!path) {
- spin_lock(&sctx->stat_lock);
- ++sctx->stat.malloc_errors;
- spin_unlock(&sctx->stat_lock);
- uncorrectable = 1;
- goto out;
- }
-
- trans = btrfs_join_transaction(fixup->root);
- if (IS_ERR(trans)) {
- uncorrectable = 1;
- goto out;
- }
-
- /*
- * the idea is to trigger a regular read through the standard path. we
- * read a page from the (failed) logical address by specifying the
- * corresponding copynum of the failed sector. thus, that readpage is
- * expected to fail.
- * that is the point where on-the-fly error correction will kick in
- * (once it's finished) and rewrite the failed sector if a good copy
- * can be found.
- */
- ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
- scrub_fixup_readpage, fixup, false);
- if (ret < 0) {
- uncorrectable = 1;
- goto out;
- }
- WARN_ON(ret != 1);
-
- spin_lock(&sctx->stat_lock);
- ++sctx->stat.corrected_errors;
- spin_unlock(&sctx->stat_lock);
-
-out:
- if (trans && !IS_ERR(trans))
- btrfs_end_transaction(trans);
- if (uncorrectable) {
- spin_lock(&sctx->stat_lock);
- ++sctx->stat.uncorrectable_errors;
- spin_unlock(&sctx->stat_lock);
- btrfs_dev_replace_stats_inc(
- &fs_info->dev_replace.num_uncorrectable_read_errors);
- btrfs_err_rl_in_rcu(fs_info,
- "unable to fixup (nodatasum) error at logical %llu on dev %s",
- fixup->logical, rcu_str_deref(fixup->dev->name));
- }
-
- btrfs_free_path(path);
- kfree(fixup);
-
- scrub_pending_trans_workers_dec(sctx);
-}
-
static inline void scrub_get_recover(struct scrub_recover *recover)
{
refcount_inc(&recover->refs);
@@ -1264,42 +987,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
}
/*
- * NOTE: Even for nodatasum case, it's still possible that it's a
- * compressed data extent, thus scrub_fixup_nodatasum(), which write
- * inode page cache onto disk, could cause serious data corruption.
- *
- * So here we could only read from disk, and hope our recovery could
- * reach disk before the newer write.
- */
- if (0 && !is_metadata && !have_csum) {
- struct scrub_fixup_nodatasum *fixup_nodatasum;
-
- WARN_ON(sctx->is_dev_replace);
-
- /*
- * !is_metadata and !have_csum, this means that the data
- * might not be COWed, that it might be modified
- * concurrently. The general strategy to work on the
- * commit root does not help in the case when COW is not
- * used.
- */
- fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
- if (!fixup_nodatasum)
- goto did_not_correct_error;
- fixup_nodatasum->sctx = sctx;
- fixup_nodatasum->dev = dev;
- fixup_nodatasum->logical = logical;
- fixup_nodatasum->root = fs_info->extent_root;
- fixup_nodatasum->mirror_num = failed_mirror_index + 1;
- scrub_pending_trans_workers_inc(sctx);
- btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
- scrub_fixup_nodatasum, NULL, NULL);
- btrfs_queue_work(fs_info->scrub_workers,
- &fixup_nodatasum->work);
- goto out;
- }
-
- /*
* now build and submit the bios for the other mirrors, check
* checksums.
* First try to pick the mirror which is completely without I/O
@@ -1866,7 +1553,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
bio = btrfs_io_bio_alloc(1);
bio_set_dev(bio, page_bad->dev->bdev);
bio->bi_iter.bi_sector = page_bad->physical >> 9;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE;
ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
if (PAGE_SIZE != ret) {
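This hunk and the two that follow drop bio_set_op_attrs() in favor of assigning bi_opf directly. The helper was a thin wrapper, so with no extra flags the two forms are equivalent; a sketch of the equivalence (illustrative, not part of the patch):

	/* bio_set_op_attrs(bio, op, flags) expanded to
	 * bio->bi_opf = op | flags, hence with flags == 0: */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);	/* old */
	bio->bi_opf = REQ_OP_WRITE;		/* new, same result */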
@@ -1961,7 +1648,7 @@ again:
bio->bi_end_io = scrub_wr_bio_end_io;
bio_set_dev(bio, sbio->dev->bdev);
bio->bi_iter.bi_sector = sbio->physical >> 9;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE;
sbio->status = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical_for_dev_replace ||
@@ -2361,7 +2048,7 @@ again:
bio->bi_end_io = scrub_bio_end_io;
bio_set_dev(bio, sbio->dev->bdev);
bio->bi_iter.bi_sector = sbio->physical >> 9;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_opf = REQ_OP_READ;
sbio->status = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical ||
@@ -2800,17 +2487,10 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
have_csum = scrub_find_csum(sctx, logical, csum);
if (have_csum == 0)
++sctx->stat.no_csum;
- if (0 && sctx->is_dev_replace && !have_csum) {
- ret = copy_nocow_pages(sctx, logical, l,
- mirror_num,
- physical_for_dev_replace);
- goto behind_scrub_pages;
- }
}
ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
mirror_num, have_csum ? csum : NULL, 0,
physical_for_dev_replace);
-behind_scrub_pages:
if (ret)
return ret;
len -= l;
@@ -3863,7 +3543,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
* -> btrfs_scrub_pause()
*/
scrub_pause_on(fs_info);
- ret = btrfs_inc_block_group_ro(fs_info, cache);
+ ret = btrfs_inc_block_group_ro(cache);
if (!ret && is_dev_replace) {
/*
* If we are doing a device replace wait for any tasks
@@ -3982,14 +3662,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (!cache->removed && !cache->ro && cache->reserved == 0 &&
btrfs_block_group_used(&cache->item) == 0) {
spin_unlock(&cache->lock);
- spin_lock(&fs_info->unused_bgs_lock);
- if (list_empty(&cache->bg_list)) {
- btrfs_get_block_group(cache);
- trace_btrfs_add_unused_block_group(cache);
- list_add_tail(&cache->bg_list,
- &fs_info->unused_bgs);
- }
- spin_unlock(&fs_info->unused_bgs_lock);
+ btrfs_mark_bg_unused(cache);
} else {
spin_unlock(&cache->lock);
}
@@ -4072,10 +3745,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
if (!fs_info->scrub_wr_completion_workers)
goto fail_scrub_wr_completion_workers;
- fs_info->scrub_nocow_workers =
- btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
- if (!fs_info->scrub_nocow_workers)
- goto fail_scrub_nocow_workers;
fs_info->scrub_parity_workers =
btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
max_active, 2);
@@ -4086,8 +3755,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
return 0;
fail_scrub_parity_workers:
- btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
-fail_scrub_nocow_workers:
btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
btrfs_destroy_workqueue(fs_info->scrub_workers);
@@ -4100,7 +3767,6 @@ static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
if (--fs_info->scrub_workers_refcnt == 0) {
btrfs_destroy_workqueue(fs_info->scrub_workers);
btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
- btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
}
WARN_ON(fs_info->scrub_workers_refcnt < 0);
@@ -4113,7 +3779,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
struct scrub_ctx *sctx;
int ret;
struct btrfs_device *dev;
- struct rcu_string *name;
if (btrfs_fs_closing(fs_info))
return -EINVAL;
@@ -4167,11 +3832,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (!is_dev_replace && !readonly &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- rcu_read_lock();
- name = rcu_dereference(dev->name);
- btrfs_err(fs_info, "scrub: device %s is not writable",
- name->str);
- rcu_read_unlock();
+ btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
+ rcu_str_deref(dev->name));
return -EROFS;
}
@@ -4359,330 +4021,3 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
*extent_dev = bbio->stripes[0].dev;
btrfs_put_bbio(bbio);
}
-
-static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
- int mirror_num, u64 physical_for_dev_replace)
-{
- struct scrub_copy_nocow_ctx *nocow_ctx;
- struct btrfs_fs_info *fs_info = sctx->fs_info;
-
- nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
- if (!nocow_ctx) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
- return -ENOMEM;
- }
-
- scrub_pending_trans_workers_inc(sctx);
-
- nocow_ctx->sctx = sctx;
- nocow_ctx->logical = logical;
- nocow_ctx->len = len;
- nocow_ctx->mirror_num = mirror_num;
- nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
- btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
- copy_nocow_pages_worker, NULL, NULL);
- INIT_LIST_HEAD(&nocow_ctx->inodes);
- btrfs_queue_work(fs_info->scrub_nocow_workers,
- &nocow_ctx->work);
-
- return 0;
-}
-
-static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
-{
- struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
- struct scrub_nocow_inode *nocow_inode;
-
- nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
- if (!nocow_inode)
- return -ENOMEM;
- nocow_inode->inum = inum;
- nocow_inode->offset = offset;
- nocow_inode->root = root;
- list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
- return 0;
-}
-
-#define COPY_COMPLETE 1
-
-static void copy_nocow_pages_worker(struct btrfs_work *work)
-{
- struct scrub_copy_nocow_ctx *nocow_ctx =
- container_of(work, struct scrub_copy_nocow_ctx, work);
- struct scrub_ctx *sctx = nocow_ctx->sctx;
- struct btrfs_fs_info *fs_info = sctx->fs_info;
- struct btrfs_root *root = fs_info->extent_root;
- u64 logical = nocow_ctx->logical;
- u64 len = nocow_ctx->len;
- int mirror_num = nocow_ctx->mirror_num;
- u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
- int ret;
- struct btrfs_trans_handle *trans = NULL;
- struct btrfs_path *path;
- int not_written = 0;
-
- path = btrfs_alloc_path();
- if (!path) {
- spin_lock(&sctx->stat_lock);
- sctx->stat.malloc_errors++;
- spin_unlock(&sctx->stat_lock);
- not_written = 1;
- goto out;
- }
-
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans)) {
- not_written = 1;
- goto out;
- }
-
- ret = iterate_inodes_from_logical(logical, fs_info, path,
- record_inode_for_nocow, nocow_ctx, false);
- if (ret != 0 && ret != -ENOENT) {
- btrfs_warn(fs_info,
- "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
- logical, physical_for_dev_replace, len, mirror_num,
- ret);
- not_written = 1;
- goto out;
- }
-
- btrfs_end_transaction(trans);
- trans = NULL;
- while (!list_empty(&nocow_ctx->inodes)) {
- struct scrub_nocow_inode *entry;
- entry = list_first_entry(&nocow_ctx->inodes,
- struct scrub_nocow_inode,
- list);
- list_del_init(&entry->list);
- ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
- entry->root, nocow_ctx);
- kfree(entry);
- if (ret == COPY_COMPLETE) {
- ret = 0;
- break;
- } else if (ret) {
- break;
- }
- }
-out:
- while (!list_empty(&nocow_ctx->inodes)) {
- struct scrub_nocow_inode *entry;
- entry = list_first_entry(&nocow_ctx->inodes,
- struct scrub_nocow_inode,
- list);
- list_del_init(&entry->list);
- kfree(entry);
- }
- if (trans && !IS_ERR(trans))
- btrfs_end_transaction(trans);
- if (not_written)
- btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
- num_uncorrectable_read_errors);
-
- btrfs_free_path(path);
- kfree(nocow_ctx);
-
- scrub_pending_trans_workers_dec(sctx);
-}
-
-static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
- u64 logical)
-{
- struct extent_state *cached_state = NULL;
- struct btrfs_ordered_extent *ordered;
- struct extent_io_tree *io_tree;
- struct extent_map *em;
- u64 lockstart = start, lockend = start + len - 1;
- int ret = 0;
-
- io_tree = &inode->io_tree;
-
- lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
- ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
- if (ordered) {
- btrfs_put_ordered_extent(ordered);
- ret = 1;
- goto out_unlock;
- }
-
- em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- goto out_unlock;
- }
-
- /*
- * This extent does not actually cover the logical extent anymore,
- * move on to the next inode.
- */
- if (em->block_start > logical ||
- em->block_start + em->block_len < logical + len ||
- test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
- free_extent_map(em);
- ret = 1;
- goto out_unlock;
- }
- free_extent_map(em);
-
-out_unlock:
- unlock_extent_cached(io_tree, lockstart, lockend, &cached_state);
- return ret;
-}
-
-static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
- struct scrub_copy_nocow_ctx *nocow_ctx)
-{
- struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
- struct btrfs_key key;
- struct inode *inode;
- struct page *page;
- struct btrfs_root *local_root;
- struct extent_io_tree *io_tree;
- u64 physical_for_dev_replace;
- u64 nocow_ctx_logical;
- u64 len = nocow_ctx->len;
- unsigned long index;
- int srcu_index;
- int ret = 0;
- int err = 0;
-
- key.objectid = root;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
-
- srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
-
- local_root = btrfs_read_fs_root_no_name(fs_info, &key);
- if (IS_ERR(local_root)) {
- srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
- return PTR_ERR(local_root);
- }
-
- key.type = BTRFS_INODE_ITEM_KEY;
- key.objectid = inum;
- key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
- srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
- /* Avoid truncate/dio/punch hole.. */
- inode_lock(inode);
- inode_dio_wait(inode);
-
- physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
- io_tree = &BTRFS_I(inode)->io_tree;
- nocow_ctx_logical = nocow_ctx->logical;
-
- ret = check_extent_to_block(BTRFS_I(inode), offset, len,
- nocow_ctx_logical);
- if (ret) {
- ret = ret > 0 ? 0 : ret;
- goto out;
- }
-
- while (len >= PAGE_SIZE) {
- index = offset >> PAGE_SHIFT;
-again:
- page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
- if (!page) {
- btrfs_err(fs_info, "find_or_create_page() failed");
- ret = -ENOMEM;
- goto out;
- }
-
- if (PageUptodate(page)) {
- if (PageDirty(page))
- goto next_page;
- } else {
- ClearPageError(page);
- err = extent_read_full_page(io_tree, page,
- btrfs_get_extent,
- nocow_ctx->mirror_num);
- if (err) {
- ret = err;
- goto next_page;
- }
-
- lock_page(page);
- /*
- * If the page has been removed from the page cache,
- * the data on it is meaningless, because it may be
- * stale: the new data may have been written into a
- * new page in the page cache.
- */
- if (page->mapping != inode->i_mapping) {
- unlock_page(page);
- put_page(page);
- goto again;
- }
- if (!PageUptodate(page)) {
- ret = -EIO;
- goto next_page;
- }
- }
-
- ret = check_extent_to_block(BTRFS_I(inode), offset, len,
- nocow_ctx_logical);
- if (ret) {
- ret = ret > 0 ? 0 : ret;
- goto next_page;
- }
-
- err = write_page_nocow(nocow_ctx->sctx,
- physical_for_dev_replace, page);
- if (err)
- ret = err;
-next_page:
- unlock_page(page);
- put_page(page);
-
- if (ret)
- break;
-
- offset += PAGE_SIZE;
- physical_for_dev_replace += PAGE_SIZE;
- nocow_ctx_logical += PAGE_SIZE;
- len -= PAGE_SIZE;
- }
- ret = COPY_COMPLETE;
-out:
- inode_unlock(inode);
- iput(inode);
- return ret;
-}
-
-static int write_page_nocow(struct scrub_ctx *sctx,
- u64 physical_for_dev_replace, struct page *page)
-{
- struct bio *bio;
- struct btrfs_device *dev;
-
- dev = sctx->wr_tgtdev;
- if (!dev)
- return -EIO;
- if (!dev->bdev) {
- btrfs_warn_rl(dev->fs_info,
- "scrub write_page_nocow(bdev == NULL) is unexpected");
- return -EIO;
- }
- bio = btrfs_io_bio_alloc(1);
- bio->bi_iter.bi_size = 0;
- bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
- bio_set_dev(bio, dev->bdev);
- bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
- /* bio_add_page won't fail on a freshly allocated bio */
- bio_add_page(bio, page, PAGE_SIZE, 0);
-
- if (btrfsic_submit_bio_wait(bio)) {
- bio_put(bio);
- btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
- return -EIO;
- }
-
- bio_put(bio);
- return 0;
-}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index c47f62b19226..ba8950bfd9c7 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -100,6 +100,7 @@ struct send_ctx {
u64 cur_inode_rdev;
u64 cur_inode_last_extent;
u64 cur_inode_next_write_offset;
+ bool ignore_cur_inode;
u64 send_progress;
@@ -1500,7 +1501,7 @@ static int read_symlink(struct btrfs_root *root,
BUG_ON(compression);
off = btrfs_file_extent_inline_start(ei);
- len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);
+ len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
@@ -5006,6 +5007,15 @@ static int send_hole(struct send_ctx *sctx, u64 end)
u64 len;
int ret = 0;
+ /*
+ * A hole that starts at EOF or beyond it. Since we do not yet support
+ * fallocate (for extent preallocation and hole punching), sending a
+ * write of zeroes starting at EOF or beyond would later require issuing
+ * a truncate operation which would undo the write and achieve nothing.
+ */
+ if (offset >= sctx->cur_inode_size)
+ return 0;
+
if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
return send_update_extent(sctx, offset, end - offset);
@@ -5160,7 +5170,7 @@ static int clone_range(struct send_ctx *sctx,
ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
type = btrfs_file_extent_type(leaf, ei);
if (type == BTRFS_FILE_EXTENT_INLINE) {
- ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
+ ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
ext_len = PAGE_ALIGN(ext_len);
} else {
ext_len = btrfs_file_extent_num_bytes(leaf, ei);
@@ -5236,8 +5246,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
struct btrfs_file_extent_item);
type = btrfs_file_extent_type(path->nodes[0], ei);
if (type == BTRFS_FILE_EXTENT_INLINE) {
- len = btrfs_file_extent_inline_len(path->nodes[0],
- path->slots[0], ei);
+ len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
/*
* it is possible the inline item won't cover the whole page,
* but there may be items after this page. Make
@@ -5375,7 +5384,7 @@ static int is_extent_unchanged(struct send_ctx *sctx,
}
if (right_type == BTRFS_FILE_EXTENT_INLINE) {
- right_len = btrfs_file_extent_inline_len(eb, slot, ei);
+ right_len = btrfs_file_extent_ram_bytes(eb, ei);
right_len = PAGE_ALIGN(right_len);
} else {
right_len = btrfs_file_extent_num_bytes(eb, ei);
@@ -5496,8 +5505,7 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset)
struct btrfs_file_extent_item);
type = btrfs_file_extent_type(path->nodes[0], fi);
if (type == BTRFS_FILE_EXTENT_INLINE) {
- u64 size = btrfs_file_extent_inline_len(path->nodes[0],
- path->slots[0], fi);
+ u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
extent_end = ALIGN(key.offset + size,
sctx->send_root->fs_info->sectorsize);
} else {
@@ -5560,7 +5568,7 @@ static int range_is_hole_in_parent(struct send_ctx *sctx,
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, fi) ==
BTRFS_FILE_EXTENT_INLINE) {
- u64 size = btrfs_file_extent_inline_len(leaf, slot, fi);
+ u64 size = btrfs_file_extent_ram_bytes(leaf, fi);
extent_end = ALIGN(key.offset + size,
root->fs_info->sectorsize);
@@ -5606,8 +5614,7 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
struct btrfs_file_extent_item);
type = btrfs_file_extent_type(path->nodes[0], fi);
if (type == BTRFS_FILE_EXTENT_INLINE) {
- u64 size = btrfs_file_extent_inline_len(path->nodes[0],
- path->slots[0], fi);
+ u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
extent_end = ALIGN(key->offset + size,
sctx->send_root->fs_info->sectorsize);
} else {
@@ -5799,6 +5806,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
int pending_move = 0;
int refs_processed = 0;
+ if (sctx->ignore_cur_inode)
+ return 0;
+
ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
&refs_processed);
if (ret < 0)
@@ -5917,6 +5927,93 @@ out:
return ret;
}
+struct parent_paths_ctx {
+ struct list_head *refs;
+ struct send_ctx *sctx;
+};
+
+static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
+ void *ctx)
+{
+ struct parent_paths_ctx *ppctx = ctx;
+
+ return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
+ ppctx->refs);
+}
+
+/*
+ * Issue unlink operations for all paths of the current inode found in the
+ * parent snapshot.
+ */
+static int btrfs_unlink_all_paths(struct send_ctx *sctx)
+{
+ LIST_HEAD(deleted_refs);
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct parent_paths_ctx ctx;
+ int ret;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = sctx->cur_ino;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ ctx.refs = &deleted_refs;
+ ctx.sctx = sctx;
+
+ while (true) {
+ struct extent_buffer *eb = path->nodes[0];
+ int slot = path->slots[0];
+
+ if (slot >= btrfs_header_nritems(eb)) {
+ ret = btrfs_next_leaf(sctx->parent_root, path);
+ if (ret < 0)
+ goto out;
+ else if (ret > 0)
+ break;
+ continue;
+ }
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ if (key.objectid != sctx->cur_ino)
+ break;
+ if (key.type != BTRFS_INODE_REF_KEY &&
+ key.type != BTRFS_INODE_EXTREF_KEY)
+ break;
+
+ ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
+ record_parent_ref, &ctx);
+ if (ret < 0)
+ goto out;
+
+ path->slots[0]++;
+ }
+
+ while (!list_empty(&deleted_refs)) {
+ struct recorded_ref *ref;
+
+ ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
+ ret = send_unlink(sctx, ref->full_path);
+ if (ret < 0)
+ goto out;
+ fs_path_free(ref->full_path);
+ list_del(&ref->list);
+ kfree(ref);
+ }
+ ret = 0;
+out:
+ btrfs_free_path(path);
+ if (ret)
+ __free_recorded_refs(&deleted_refs);
+ return ret;
+}
+
static int changed_inode(struct send_ctx *sctx,
enum btrfs_compare_tree_result result)
{
@@ -5931,6 +6028,7 @@ static int changed_inode(struct send_ctx *sctx,
sctx->cur_inode_new_gen = 0;
sctx->cur_inode_last_extent = (u64)-1;
sctx->cur_inode_next_write_offset = 0;
+ sctx->ignore_cur_inode = false;
/*
* Set send_progress to current inode. This will tell all get_cur_xxx
@@ -5971,6 +6069,33 @@ static int changed_inode(struct send_ctx *sctx,
sctx->cur_inode_new_gen = 1;
}
+ /*
+ * Normally we do not find inodes with a link count of zero (orphans)
+ * because the most common case is to create a snapshot and use it
+ * for a send operation. However some less common use cases do hit
+ * this: sending a subvolume after turning it RO just after deleting
+ * all hard links of a file while holding an open file descriptor
+ * against it, or turning a RO snapshot RW, keeping an open file
+ * descriptor against a file, deleting the file and then turning the
+ * snapshot back to RO before using it for a send operation. So if
+ * we find such a case, ignore the inode and all its items completely
+ * if it's a new inode, or, if it's a changed inode, make sure all
+ * its previous paths (from the parent snapshot) are unlinked and all
+ * the other inode items are ignored.
+ */
+ if (result == BTRFS_COMPARE_TREE_NEW ||
+ result == BTRFS_COMPARE_TREE_CHANGED) {
+ u32 nlinks;
+
+ nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
+ if (nlinks == 0) {
+ sctx->ignore_cur_inode = true;
+ if (result == BTRFS_COMPARE_TREE_CHANGED)
+ ret = btrfs_unlink_all_paths(sctx);
+ goto out;
+ }
+ }
+
if (result == BTRFS_COMPARE_TREE_NEW) {
sctx->cur_inode_gen = left_gen;
sctx->cur_inode_new = 1;
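A hedged userspace illustration of the orphan scenario described in the comment above; the path is hypothetical and the subvolume RO/send steps are elided:

	#include <fcntl.h>
	#include <unistd.h>

	static void make_orphan(void)
	{
		/* Hold an fd open, drop the last hard link, then set
		 * the subvolume read-only and run send: the inode
		 * survives with nlink == 0 until the fd is closed,
		 * and the send code now skips it via
		 * ignore_cur_inode. */
		int fd = open("/mnt/subvol/file", O_RDWR);

		unlink("/mnt/subvol/file");
		/* ... make the subvolume RO and start the send ... */
		close(fd);
	}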
@@ -6309,15 +6434,17 @@ static int changed_cb(struct btrfs_path *left_path,
key->objectid == BTRFS_FREE_SPACE_OBJECTID)
goto out;
- if (key->type == BTRFS_INODE_ITEM_KEY)
+ if (key->type == BTRFS_INODE_ITEM_KEY) {
ret = changed_inode(sctx, result);
- else if (key->type == BTRFS_INODE_REF_KEY ||
- key->type == BTRFS_INODE_EXTREF_KEY)
- ret = changed_ref(sctx, result);
- else if (key->type == BTRFS_XATTR_ITEM_KEY)
- ret = changed_xattr(sctx, result);
- else if (key->type == BTRFS_EXTENT_DATA_KEY)
- ret = changed_extent(sctx, result);
+ } else if (!sctx->ignore_cur_inode) {
+ if (key->type == BTRFS_INODE_REF_KEY ||
+ key->type == BTRFS_INODE_EXTREF_KEY)
+ ret = changed_ref(sctx, result);
+ else if (key->type == BTRFS_XATTR_ITEM_KEY)
+ ret = changed_xattr(sctx, result);
+ else if (key->type == BTRFS_EXTENT_DATA_KEY)
+ ret = changed_extent(sctx, result);
+ }
out:
return ret;
@@ -6328,7 +6455,6 @@ static int full_send_tree(struct send_ctx *sctx)
int ret;
struct btrfs_root *send_root = sctx->send_root;
struct btrfs_key key;
- struct btrfs_key found_key;
struct btrfs_path *path;
struct extent_buffer *eb;
int slot;
@@ -6350,17 +6476,13 @@ static int full_send_tree(struct send_ctx *sctx)
while (1) {
eb = path->nodes[0];
slot = path->slots[0];
- btrfs_item_key_to_cpu(eb, &found_key, slot);
+ btrfs_item_key_to_cpu(eb, &key, slot);
- ret = changed_cb(path, NULL, &found_key,
+ ret = changed_cb(path, NULL, &key,
BTRFS_COMPARE_TREE_NEW, sctx);
if (ret < 0)
goto out;
- key.objectid = found_key.objectid;
- key.type = found_key.type;
- key.offset = found_key.offset + 1;
-
ret = btrfs_next_item(send_root, path);
if (ret < 0)
goto out;
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b7b4acb12833..4c13b737f568 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -3,7 +3,6 @@
* Copyright (C) 2007 Oracle. All rights reserved.
*/
-#include <linux/highmem.h>
#include <asm/unaligned.h>
#include "ctree.h"
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 81107ad49f3a..6601c9aa5e35 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -5,7 +5,6 @@
#include <linux/blkdev.h>
#include <linux/module.h>
-#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
@@ -15,8 +14,6 @@
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
-#include <linux/mpage.h>
-#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
@@ -468,9 +465,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_subvolrootid:
case Opt_device:
/*
- * These are parsed by btrfs_parse_subvol_options
- * and btrfs_parse_early_options
- * and can be happily ignored here.
+ * These are parsed by btrfs_parse_subvol_options or
+ * btrfs_parse_device_options and can be ignored here.
*/
break;
case Opt_nodatasum:
@@ -760,6 +756,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_recovery:
btrfs_warn(info,
"'recovery' is deprecated, use 'usebackuproot' instead");
+ /* fall through */
case Opt_usebackuproot:
btrfs_info(info,
"trying to use backup root at mount time");
@@ -885,13 +882,16 @@ out:
* All other options will be parsed much later in the mount process and
* only when we need to allocate a new super block.
*/
-static int btrfs_parse_early_options(const char *options, fmode_t flags,
- void *holder, struct btrfs_fs_devices **fs_devices)
+static int btrfs_parse_device_options(const char *options, fmode_t flags,
+ void *holder)
{
substring_t args[MAX_OPT_ARGS];
char *device_name, *opts, *orig, *p;
+ struct btrfs_device *device = NULL;
int error = 0;
+ lockdep_assert_held(&uuid_mutex);
+
if (!options)
return 0;
@@ -917,11 +917,13 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
error = -ENOMEM;
goto out;
}
- error = btrfs_scan_one_device(device_name,
- flags, holder, fs_devices);
+ device = btrfs_scan_one_device(device_name, flags,
+ holder);
kfree(device_name);
- if (error)
+ if (IS_ERR(device)) {
+ error = PTR_ERR(device);
goto out;
+ }
}
}
@@ -935,8 +937,8 @@ out:
*
* The value is later passed to mount_subvol()
*/
-static int btrfs_parse_subvol_options(const char *options, fmode_t flags,
- char **subvol_name, u64 *subvol_objectid)
+static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
+ u64 *subvol_objectid)
{
substring_t args[MAX_OPT_ARGS];
char *opts, *orig, *p;
@@ -948,7 +950,7 @@ static int btrfs_parse_subvol_options(const char *options, fmode_t flags,
/*
* strsep changes the string, duplicate it because
- * btrfs_parse_early_options gets called later
+ * btrfs_parse_device_options gets called later
*/
opts = kstrdup(options, GFP_KERNEL);
if (!opts)
@@ -1517,6 +1519,7 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
{
struct block_device *bdev = NULL;
struct super_block *s;
+ struct btrfs_device *device = NULL;
struct btrfs_fs_devices *fs_devices = NULL;
struct btrfs_fs_info *fs_info = NULL;
struct security_mnt_opts new_sec_opts;
@@ -1526,12 +1529,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
- error = btrfs_parse_early_options(data, mode, fs_type,
- &fs_devices);
- if (error) {
- return ERR_PTR(error);
- }
-
security_init_mnt_opts(&new_sec_opts);
if (data) {
error = parse_security_options(data, &new_sec_opts);
@@ -1539,10 +1536,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
return ERR_PTR(error);
}
- error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
- if (error)
- goto error_sec_opts;
-
/*
* Setup a dummy root and fs_info for test/set super. This is because
* we don't actually fill this stuff out until open_ctree, but we need
@@ -1555,8 +1548,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
goto error_sec_opts;
}
- fs_info->fs_devices = fs_devices;
-
fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
security_init_mnt_opts(&fs_info->security_opts);
@@ -1565,7 +1556,25 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
goto error_fs_info;
}
+ mutex_lock(&uuid_mutex);
+ error = btrfs_parse_device_options(data, mode, fs_type);
+ if (error) {
+ mutex_unlock(&uuid_mutex);
+ goto error_fs_info;
+ }
+
+ device = btrfs_scan_one_device(device_name, mode, fs_type);
+ if (IS_ERR(device)) {
+ mutex_unlock(&uuid_mutex);
+ error = PTR_ERR(device);
+ goto error_fs_info;
+ }
+
+ fs_devices = device->fs_devices;
+ fs_info->fs_devices = fs_devices;
+
error = btrfs_open_devices(fs_devices, mode, fs_type);
+ mutex_unlock(&uuid_mutex);
if (error)
goto error_fs_info;
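btrfs_scan_one_device() now returns the scanned btrfs_device (or an ERR_PTR) instead of an int plus a btrfs_fs_devices out-parameter, and every caller in this patch invokes it under uuid_mutex. A minimal sketch of the new calling convention, condensed from the hunk above:

	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	device = btrfs_scan_one_device(device_name, mode, fs_type);
	mutex_unlock(&uuid_mutex);
	if (IS_ERR(device))
		return PTR_ERR(device);
	/* the fs_devices handle is now derived from the device: */
	fs_devices = device->fs_devices;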
@@ -1650,8 +1659,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
- error = btrfs_parse_subvol_options(data, mode,
- &subvol_name, &subvol_objectid);
+ error = btrfs_parse_subvol_options(data, &subvol_name,
+ &subvol_objectid);
if (error) {
kfree(subvol_name);
return ERR_PTR(error);
@@ -2098,14 +2107,9 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
btrfs_account_ro_block_groups_free_space(found);
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
- if (!list_empty(&found->block_groups[i])) {
- switch (i) {
- case BTRFS_RAID_DUP:
- case BTRFS_RAID_RAID1:
- case BTRFS_RAID_RAID10:
- factor = 2;
- }
- }
+ if (!list_empty(&found->block_groups[i]))
+ factor = btrfs_bg_type_to_factor(
+ btrfs_raid_array[i].bg_flag);
}
}
@@ -2222,7 +2226,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct btrfs_ioctl_vol_args *vol;
- struct btrfs_fs_devices *fs_devices;
+ struct btrfs_device *device = NULL;
int ret = -ENOTTY;
if (!capable(CAP_SYS_ADMIN))
@@ -2234,15 +2238,24 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
- ret = btrfs_scan_one_device(vol->name, FMODE_READ,
- &btrfs_root_fs_type, &fs_devices);
+ mutex_lock(&uuid_mutex);
+ device = btrfs_scan_one_device(vol->name, FMODE_READ,
+ &btrfs_root_fs_type);
+ ret = PTR_ERR_OR_ZERO(device);
+ mutex_unlock(&uuid_mutex);
break;
case BTRFS_IOC_DEVICES_READY:
- ret = btrfs_scan_one_device(vol->name, FMODE_READ,
- &btrfs_root_fs_type, &fs_devices);
- if (ret)
+ mutex_lock(&uuid_mutex);
+ device = btrfs_scan_one_device(vol->name, FMODE_READ,
+ &btrfs_root_fs_type);
+ if (IS_ERR(device)) {
+ mutex_unlock(&uuid_mutex);
+ ret = PTR_ERR(device);
break;
- ret = !(fs_devices->num_devices == fs_devices->total_devices);
+ }
+ ret = !(device->fs_devices->num_devices ==
+ device->fs_devices->total_devices);
+ mutex_unlock(&uuid_mutex);
break;
case BTRFS_IOC_GET_SUPPORTED_FEATURES:
ret = btrfs_ioctl_get_supported_features((void __user*)arg);
@@ -2290,7 +2303,6 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
struct btrfs_fs_devices *cur_devices;
struct btrfs_device *dev, *first_dev = NULL;
struct list_head *head;
- struct rcu_string *name;
/*
* Lightweight locking of the devices. We should not need
@@ -2314,12 +2326,10 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
cur_devices = cur_devices->seed;
}
- if (first_dev) {
- name = rcu_dereference(first_dev->name);
- seq_escape(m, name->str, " \t\n\\");
- } else {
+ if (first_dev)
+ seq_escape(m, rcu_str_deref(first_dev->name), " \t\n\\");
+ else
WARN_ON(1);
- }
rcu_read_unlock();
return 0;
}
@@ -2331,7 +2341,6 @@ static const struct super_operations btrfs_super_ops = {
.sync_fs = btrfs_sync_fs,
.show_options = btrfs_show_options,
.show_devname = btrfs_show_devname,
- .write_inode = btrfs_write_inode,
.alloc_inode = btrfs_alloc_inode,
.destroy_inode = btrfs_destroy_inode,
.statfs = btrfs_statfs,
@@ -2369,7 +2378,7 @@ static __cold void btrfs_interface_exit(void)
static void __init btrfs_print_mod_info(void)
{
- pr_info("Btrfs loaded, crc32c=%s"
+ static const char options[] = ""
#ifdef CONFIG_BTRFS_DEBUG
", debug=on"
#endif
@@ -2382,8 +2391,8 @@ static void __init btrfs_print_mod_info(void)
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
", ref-verify=on"
#endif
- "\n",
- crc32c_impl());
+ ;
+ pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
}
static int __init init_btrfs_fs(void)
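The rewrite leans on C's compile-time concatenation of adjacent string literals: each enabled Kconfig symbol contributes its fragment to a single constant, and only the crc32c implementation remains a runtime argument. A standalone sketch, where CONFIG_DEBUG and the "generic" string are stand-ins:

	#include <stdio.h>

	#define CONFIG_DEBUG 1		/* stand-in Kconfig symbol */

	static const char options[] = ""
	#ifdef CONFIG_DEBUG
		", debug=on"
	#endif
		;

	int main(void)
	{
		printf("Btrfs loaded, crc32c=%s%s\n", "generic", options);
		return 0;
	}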
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 4a4e960c7c66..3717c864ba23 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -7,10 +7,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
-#include <linux/buffer_head.h>
#include <linux/kobject.h>
#include <linux/bug.h>
-#include <linux/genhd.h>
#include <linux/debugfs.h>
#include "ctree.h"
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index ace94db09d29..412b910b04cc 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -216,7 +216,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
btrfs_init_dummy_trans(&trans, fs_info);
test_msg("qgroup basic add");
- ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID);
+ ret = btrfs_create_qgroup(&trans, BTRFS_FS_TREE_OBJECTID);
if (ret) {
test_err("couldn't create a qgroup %d", ret);
return ret;
@@ -249,8 +249,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
- nodesize, old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
if (ret) {
test_err("couldn't account space for a qgroup %d", ret);
return ret;
@@ -285,8 +285,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
- nodesize, old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
if (ret) {
test_err("couldn't account space for a qgroup %d", ret);
return -EINVAL;
@@ -322,7 +322,7 @@ static int test_multiple_refs(struct btrfs_root *root,
* We have BTRFS_FS_TREE_OBJECTID created already from the
* previous test.
*/
- ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID);
+ ret = btrfs_create_qgroup(&trans, BTRFS_FIRST_FREE_OBJECTID);
if (ret) {
test_err("couldn't create a qgroup %d", ret);
return ret;
@@ -350,8 +350,8 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
- nodesize, old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
if (ret) {
test_err("couldn't account space for a qgroup %d", ret);
return ret;
@@ -385,8 +385,8 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
- nodesize, old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
if (ret) {
test_err("couldn't account space for a qgroup %d", ret);
return ret;
@@ -426,8 +426,8 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
}
- ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
- nodesize, old_roots, new_roots);
+ ret = btrfs_qgroup_account_extent(&trans, nodesize, nodesize, old_roots,
+ new_roots);
if (ret) {
test_err("couldn't account space for a qgroup %d", ret);
return ret;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index ff5f6c719976..3b84f5015029 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -241,7 +241,7 @@ loop:
refcount_set(&cur_trans->use_count, 2);
atomic_set(&cur_trans->pending_ordered, 0);
cur_trans->flags = 0;
- cur_trans->start_time = get_seconds();
+ cur_trans->start_time = ktime_get_seconds();
memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
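start_time moves from get_seconds(), which returns wall-clock seconds as an unsigned long (and overflows in 2038 on 32-bit), to ktime_get_seconds(), which returns a monotonic time64_t; the matching transaction.h hunk below changes the field type accordingly. A sketch of the age check this supports; the comparison itself is illustrative, though commit_interval is a real fs_info field:

	time64_t now = ktime_get_seconds();

	/* Monotonic seconds are immune to wall-clock jumps, and
	 * time64_t keeps the arithmetic valid past 2038: */
	if (now - cur_trans->start_time >= fs_info->commit_interval)
		; /* transaction is old enough, kick a commit */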
@@ -680,7 +680,7 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
trans = start_transaction(root, 0, TRANS_ATTACH,
BTRFS_RESERVE_NO_FLUSH, true);
- if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
+ if (trans == ERR_PTR(-ENOENT))
btrfs_wait_for_commit(root->fs_info, 0);
return trans;
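Since ERR_PTR(-ENOENT) is a constant pointer value, the paired IS_ERR()/PTR_ERR() test collapses into one direct comparison when checking for a single specific error; the same simplification recurs in the tree-log.c hunks below. Sketch of the equivalence:

	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)	/* old */
		btrfs_wait_for_commit(root->fs_info, 0);

	if (trans == ERR_PTR(-ENOENT))			/* new, equivalent */
		btrfs_wait_for_commit(root->fs_info, 0);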
@@ -1152,7 +1152,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
ret = btrfs_run_dev_replace(trans, fs_info);
if (ret)
return ret;
- ret = btrfs_run_qgroups(trans, fs_info);
+ ret = btrfs_run_qgroups(trans);
if (ret)
return ret;
@@ -1355,8 +1355,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
goto out;
/* Now qgroup are all updated, we can inherit it to new qgroups */
- ret = btrfs_qgroup_inherit(trans, fs_info,
- src->root_key.objectid, dst_objectid,
+ ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
inherit);
if (ret < 0)
goto out;
@@ -1574,7 +1573,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
/*
* insert root back/forward references
*/
- ret = btrfs_add_root_ref(trans, fs_info, objectid,
+ ret = btrfs_add_root_ref(trans, objectid,
parent_root->root_key.objectid,
btrfs_ino(BTRFS_I(parent_inode)), index,
dentry->d_name.name, dentry->d_name.len);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 94439482a0ec..4cbb1b55387d 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -48,7 +48,7 @@ struct btrfs_transaction {
int aborted;
struct list_head list;
struct extent_io_tree dirty_pages;
- unsigned long start_time;
+ time64_t start_time;
wait_queue_head_t writer_wait;
wait_queue_head_t commit_wait;
wait_queue_head_t pending_wait;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 8d40e7dd8c30..db835635372f 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -19,6 +19,7 @@
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
+#include "volumes.h"
/*
* Error message should follow the following format:
@@ -353,6 +354,102 @@ static int check_dir_item(struct btrfs_fs_info *fs_info,
return 0;
}
+__printf(4, 5)
+__cold
+static void block_group_err(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
+ key.objectid, key.offset, &vaf);
+ va_end(args);
+}
+
+static int check_block_group_item(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_block_group_item bgi;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u64 flags;
+ u64 type;
+
+ /*
+ * Here we don't really care about alignment since the extent allocator
+ * can handle it. We care more about the size: if one block group is
+ * larger than the maximum size, it must be some obvious corruption.
+ */
+ if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group size, have %llu expect (0, %llu]",
+ key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
+ return -EUCLEAN;
+ }
+
+ if (item_size != sizeof(bgi)) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid item size, have %u expect %zu",
+ item_size, sizeof(bgi));
+ return -EUCLEAN;
+ }
+
+ read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bgi));
+ if (btrfs_block_group_chunk_objectid(&bgi) !=
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group chunk objectid, have %llu expect %llu",
+ btrfs_block_group_chunk_objectid(&bgi),
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ return -EUCLEAN;
+ }
+
+ if (btrfs_block_group_used(&bgi) > key->offset) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group used, have %llu expect [0, %llu)",
+ btrfs_block_group_used(&bgi), key->offset);
+ return -EUCLEAN;
+ }
+
+ flags = btrfs_block_group_flags(&bgi);
+ if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
+ block_group_err(fs_info, leaf, slot,
+"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
+ flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
+ hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
+ return -EUCLEAN;
+ }
+
+ type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
+ if (type != BTRFS_BLOCK_GROUP_DATA &&
+ type != BTRFS_BLOCK_GROUP_METADATA &&
+ type != BTRFS_BLOCK_GROUP_SYSTEM &&
+ type != (BTRFS_BLOCK_GROUP_METADATA |
+ BTRFS_BLOCK_GROUP_DATA)) {
+ block_group_err(fs_info, leaf, slot,
+"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llu or 0x%llx",
+ type, hweight64(type),
+ BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
+ BTRFS_BLOCK_GROUP_SYSTEM,
+ BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
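The profile check counts set bits instead of enumerating every valid combination, so any two RAID/DUP profile bits set at once are rejected by a single test. An illustrative fragment (the flag combination is chosen for the example; the constants are the kernel's):

	u64 flags = BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP;

	/* two profile bits set -> hweight64() > 1 -> corrupt item */
	if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1)
		return -EUCLEAN;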
/*
* Common point to switch the item-specific validation.
*/
@@ -374,6 +471,9 @@ static int check_leaf_item(struct btrfs_fs_info *fs_info,
case BTRFS_XATTR_ITEM_KEY:
ret = check_dir_item(fs_info, leaf, key, slot);
break;
+ case BTRFS_BLOCK_GROUP_ITEM_KEY:
+ ret = check_block_group_item(fs_info, leaf, key, slot);
+ break;
}
return ret;
}
@@ -396,9 +496,22 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
* skip this check for relocation trees.
*/
if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
+ u64 owner = btrfs_header_owner(leaf);
struct btrfs_root *check_root;
- key.objectid = btrfs_header_owner(leaf);
+ /* These trees must never be empty */
+ if (owner == BTRFS_ROOT_TREE_OBJECTID ||
+ owner == BTRFS_CHUNK_TREE_OBJECTID ||
+ owner == BTRFS_EXTENT_TREE_OBJECTID ||
+ owner == BTRFS_DEV_TREE_OBJECTID ||
+ owner == BTRFS_FS_TREE_OBJECTID ||
+ owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ generic_err(fs_info, leaf, 0,
+ "invalid root, root %llu must never be empty",
+ owner);
+ return -EUCLEAN;
+ }
+ key.objectid = owner;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f8220ec02036..1650dc44a5e3 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -545,12 +545,8 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
- if (IS_ERR(inode)) {
+ if (IS_ERR(inode))
inode = NULL;
- } else if (is_bad_inode(inode)) {
- iput(inode);
- inode = NULL;
- }
return inode;
}
@@ -597,7 +593,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
nbytes = 0;
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
- size = btrfs_file_extent_inline_len(eb, slot, item);
+ size = btrfs_file_extent_ram_bytes(eb, item);
nbytes = btrfs_file_extent_ram_bytes(eb, item);
extent_end = ALIGN(start + size,
fs_info->sectorsize);
@@ -685,7 +681,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* as the owner of the file extent changed from log tree
* (doesn't affect qgroup) to fs/file tree(affects qgroup)
*/
- ret = btrfs_qgroup_trace_extent(trans, fs_info,
+ ret = btrfs_qgroup_trace_extent(trans,
btrfs_file_extent_disk_bytenr(eb, item),
btrfs_file_extent_disk_num_bytes(eb, item),
GFP_NOFS);
@@ -715,7 +711,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* allocation tree
*/
ret = btrfs_alloc_logged_file_extent(trans,
- fs_info,
root->root_key.objectid,
key->objectid, offset, &ins);
if (ret)
@@ -1291,6 +1286,46 @@ again:
return ret;
}
+static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
+ const u8 ref_type, const char *name,
+ const int namelen)
+{
+ struct btrfs_key key;
+ struct btrfs_path *path;
+ const u64 parent_id = btrfs_ino(BTRFS_I(dir));
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = btrfs_ino(BTRFS_I(inode));
+ key.type = ref_type;
+ if (key.type == BTRFS_INODE_REF_KEY)
+ key.offset = parent_id;
+ else
+ key.offset = btrfs_extref_hash(parent_id, name, namelen);
+
+ ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (ret > 0) {
+ ret = 0;
+ goto out;
+ }
+ if (key.type == BTRFS_INODE_EXTREF_KEY)
+ ret = btrfs_find_name_in_ext_backref(path->nodes[0],
+ path->slots[0], parent_id,
+ name, namelen, NULL);
+ else
+ ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
+ name, namelen, NULL);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
/*
* replay one inode back reference item found in the log tree.
* eb, slot and key refer to the buffer and key found in the log tree.
@@ -1400,6 +1435,32 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
}
}
+ /*
+ * If a reference item already exists for this inode
+ * with the same parent and name, but different index,
+ * drop it and the corresponding directory index entries
+ * from the parent before adding the new reference item
+ * and dir index entries, otherwise we would fail with
+ * -EEXIST returned from btrfs_add_link() below.
+ */
+ ret = btrfs_inode_ref_exists(inode, dir, key->type,
+ name, namelen);
+ if (ret > 0) {
+ ret = btrfs_unlink_inode(trans, root,
+ BTRFS_I(dir),
+ BTRFS_I(inode),
+ name, namelen);
+ /*
+ * If we dropped the link count to 0, bump it so
+ * that later the iput() on the inode will not
+ * free it. We will fix up the link count later.
+ */
+ if (!ret && inode->i_nlink == 0)
+ inc_nlink(inode);
+ }
+ if (ret < 0)
+ goto out;
+
/* insert our name */
ret = btrfs_add_link(trans, BTRFS_I(dir),
BTRFS_I(inode),
@@ -2120,7 +2181,7 @@ again:
dir_key->offset,
name, name_len, 0);
}
- if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
+ if (!log_di || log_di == ERR_PTR(-ENOENT)) {
btrfs_dir_item_key_to_cpu(eb, di, &location);
btrfs_release_path(path);
btrfs_release_path(log_path);
@@ -2933,7 +2994,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
/* bail out if we need to do a full commit */
if (btrfs_need_log_full_commit(fs_info, trans)) {
ret = -EAGAIN;
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
goto out;
}
@@ -2951,7 +3011,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (ret) {
blk_finish_plug(&plug);
btrfs_abort_transaction(trans, ret);
- btrfs_free_logged_extents(log, log_transid);
btrfs_set_log_full_commit(fs_info, trans);
mutex_unlock(&root->log_mutex);
goto out;
@@ -3002,7 +3061,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out;
}
btrfs_wait_tree_log_extents(log, mark);
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out;
@@ -3020,7 +3078,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (atomic_read(&log_root_tree->log_commit[index2])) {
blk_finish_plug(&plug);
ret = btrfs_wait_tree_log_extents(log, mark);
- btrfs_wait_logged_extents(trans, log, log_transid);
wait_log_commit(log_root_tree,
root_log_ctx.log_transid);
mutex_unlock(&log_root_tree->log_mutex);
@@ -3045,7 +3102,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (btrfs_need_log_full_commit(fs_info, trans)) {
blk_finish_plug(&plug);
btrfs_wait_tree_log_extents(log, mark);
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out_wake_log_root;
@@ -3058,7 +3114,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (ret) {
btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret);
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
@@ -3068,11 +3123,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
EXTENT_NEW | EXTENT_DIRTY);
if (ret) {
btrfs_set_log_full_commit(fs_info, trans);
- btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
- btrfs_wait_logged_extents(trans, log, log_transid);
btrfs_set_super_log_root(fs_info->super_for_commit,
log_root_tree->node->start);
@@ -3159,14 +3212,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
}
- /*
- * We may have short-circuited the log tree with the full commit logic
- * and left ordered extents on our list, so clear these out to keep us
- * from leaking inodes and memory.
- */
- btrfs_free_logged_extents(log, 0);
- btrfs_free_logged_extents(log, 1);
-
free_extent_buffer(log->node);
kfree(log);
}
@@ -3756,7 +3801,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
int start_slot, int nr, int inode_only,
u64 logged_isize)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = trans->fs_info;
unsigned long src_offset;
unsigned long dst_offset;
struct btrfs_root *log = inode->root->log_root;
@@ -3937,9 +3982,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
struct btrfs_file_extent_item);
if (btrfs_file_extent_type(src, extent) ==
BTRFS_FILE_EXTENT_INLINE) {
- len = btrfs_file_extent_inline_len(src,
- src_path->slots[0],
- extent);
+ len = btrfs_file_extent_ram_bytes(src, extent);
*last_extent = ALIGN(key.offset + len,
fs_info->sectorsize);
} else {
@@ -4004,7 +4047,7 @@ fill_holes:
extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(src, extent) ==
BTRFS_FILE_EXTENT_INLINE) {
- len = btrfs_file_extent_inline_len(src, i, extent);
+ len = btrfs_file_extent_ram_bytes(src, extent);
extent_end = ALIGN(key.offset + len,
fs_info->sectorsize);
} else {
@@ -4078,131 +4121,32 @@ static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
return 0;
}
-static int wait_ordered_extents(struct btrfs_trans_handle *trans,
- struct inode *inode,
- struct btrfs_root *root,
- const struct extent_map *em,
- const struct list_head *logged_list,
- bool *ordered_io_error)
+static int log_extent_csums(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode,
+ struct btrfs_root *log_root,
+ const struct extent_map *em)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_ordered_extent *ordered;
- struct btrfs_root *log = root->log_root;
- u64 mod_start = em->mod_start;
- u64 mod_len = em->mod_len;
- const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
u64 csum_offset;
u64 csum_len;
LIST_HEAD(ordered_sums);
int ret = 0;
- *ordered_io_error = false;
-
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
+ if (inode->flags & BTRFS_INODE_NODATASUM ||
+ test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
em->block_start == EXTENT_MAP_HOLE)
return 0;
- /*
- * Wait for any ordered extent that covers our extent map. If it
- * finishes without an error, first check and see if our csums are on
- * our outstanding ordered extents.
- */
- list_for_each_entry(ordered, logged_list, log_list) {
- struct btrfs_ordered_sum *sum;
-
- if (!mod_len)
- break;
-
- if (ordered->file_offset + ordered->len <= mod_start ||
- mod_start + mod_len <= ordered->file_offset)
- continue;
-
- if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
- !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
- !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
- const u64 start = ordered->file_offset;
- const u64 end = ordered->file_offset + ordered->len - 1;
-
- WARN_ON(ordered->inode != inode);
- filemap_fdatawrite_range(inode->i_mapping, start, end);
- }
-
- wait_event(ordered->wait,
- (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
- test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
-
- if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
- /*
- * Clear the AS_EIO/AS_ENOSPC flags from the inode's
- * i_mapping flags, so that the next fsync won't get
- * an outdated io error too.
- */
- filemap_check_errors(inode->i_mapping);
- *ordered_io_error = true;
- break;
- }
- /*
- * We are going to copy all the csums on this ordered extent, so
- * go ahead and adjust mod_start and mod_len in case this
- * ordered extent has already been logged.
- */
- if (ordered->file_offset > mod_start) {
- if (ordered->file_offset + ordered->len >=
- mod_start + mod_len)
- mod_len = ordered->file_offset - mod_start;
- /*
- * If we have this case
- *
- * |--------- logged extent ---------|
- * |----- ordered extent ----|
- *
- * Just don't mess with mod_start and mod_len, we'll
- * just end up logging more csums than we need and it
- * will be ok.
- */
- } else {
- if (ordered->file_offset + ordered->len <
- mod_start + mod_len) {
- mod_len = (mod_start + mod_len) -
- (ordered->file_offset + ordered->len);
- mod_start = ordered->file_offset +
- ordered->len;
- } else {
- mod_len = 0;
- }
- }
-
- if (skip_csum)
- continue;
-
- /*
- * To keep us from looping for the above case of an ordered
- * extent that falls inside of the logged extent.
- */
- if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
- &ordered->flags))
- continue;
-
- list_for_each_entry(sum, &ordered->list, list) {
- ret = btrfs_csum_file_blocks(trans, log, sum);
- if (ret)
- break;
- }
- }
-
- if (*ordered_io_error || !mod_len || ret || skip_csum)
- return ret;
-
+ /* If we're compressed we have to save the entire range of csums. */
if (em->compress_type) {
csum_offset = 0;
csum_len = max(em->block_len, em->orig_block_len);
} else {
- csum_offset = mod_start - em->start;
- csum_len = mod_len;
+ csum_offset = em->mod_start - em->start;
+ csum_len = em->mod_len;
}
/* block start is already adjusted for the file extent offset. */
- ret = btrfs_lookup_csums_range(fs_info->csum_root,
+ ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
em->block_start + csum_offset,
em->block_start + csum_offset +
csum_len - 1, &ordered_sums, 0);
@@ -4214,7 +4158,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
struct btrfs_ordered_sum,
list);
if (!ret)
- ret = btrfs_csum_file_blocks(trans, log, sums);
+ ret = btrfs_csum_file_blocks(trans, log_root, sums);
list_del(&sums->list);
kfree(sums);
}
@@ -4226,7 +4170,6 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, struct btrfs_root *root,
const struct extent_map *em,
struct btrfs_path *path,
- const struct list_head *logged_list,
struct btrfs_log_ctx *ctx)
{
struct btrfs_root *log = root->log_root;
@@ -4238,18 +4181,11 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
u64 block_len;
int ret;
int extent_inserted = 0;
- bool ordered_io_err = false;
- ret = wait_ordered_extents(trans, &inode->vfs_inode, root, em,
- logged_list, &ordered_io_err);
+ ret = log_extent_csums(trans, inode, log, em);
if (ret)
return ret;
- if (ordered_io_err) {
- ctx->io_err = -EIO;
- return ctx->io_err;
- }
-
btrfs_init_map_token(&token);
ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
@@ -4424,7 +4360,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_inode *inode,
struct btrfs_path *path,
- struct list_head *logged_list,
struct btrfs_log_ctx *ctx,
const u64 start,
const u64 end)
@@ -4480,20 +4415,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
}
list_sort(NULL, &extents, extent_cmp);
- btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
- /*
- * Some ordered extents started by fsync might have completed
- * before we could collect them into the list logged_list, which
- * means they're gone, not in our logged_list nor in the inode's
- * ordered tree. We want the application/user space to know an
- * error happened while attempting to persist file data so that
- * it can take proper action. If such error happened, we leave
- * without writing to the log tree and the fsync must report the
- * file data write error and not commit the current transaction.
- */
- ret = filemap_check_errors(inode->vfs_inode.i_mapping);
- if (ret)
- ctx->io_err = ret;
process:
while (!list_empty(&extents)) {
em = list_entry(extents.next, struct extent_map, list);
@@ -4512,8 +4433,7 @@ process:
write_unlock(&tree->lock);
- ret = log_one_extent(trans, inode, root, em, path, logged_list,
- ctx);
+ ret = log_one_extent(trans, inode, root, em, path, ctx);
write_lock(&tree->lock);
clear_em_logging(tree, em);
free_extent_map(em);
@@ -4712,9 +4632,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
if (btrfs_file_extent_type(leaf, extent) ==
BTRFS_FILE_EXTENT_INLINE) {
- len = btrfs_file_extent_inline_len(leaf,
- path->slots[0],
- extent);
+ len = btrfs_file_extent_ram_bytes(leaf, extent);
ASSERT(len == i_size ||
(len == fs_info->sectorsize &&
btrfs_file_extent_compression(leaf, extent) !=
@@ -4898,7 +4816,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct btrfs_key min_key;
struct btrfs_key max_key;
struct btrfs_root *log = root->log_root;
- LIST_HEAD(logged_list);
u64 last_extent = 0;
int err = 0;
int ret;
@@ -5094,8 +5011,7 @@ again:
* we don't need to do more work nor fallback to
* a transaction commit.
*/
- if (IS_ERR(other_inode) &&
- PTR_ERR(other_inode) == -ENOENT) {
+ if (other_inode == ERR_PTR(-ENOENT)) {
goto next_key;
} else if (IS_ERR(other_inode)) {
err = PTR_ERR(other_inode);
@@ -5235,7 +5151,7 @@ log_extents:
}
if (fast_search) {
ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
- &logged_list, ctx, start, end);
+ ctx, start, end);
if (ret) {
err = ret;
goto out_unlock;
@@ -5286,10 +5202,6 @@ log_extents:
inode->last_log_commit = inode->last_sub_trans;
spin_unlock(&inode->lock);
out_unlock:
- if (unlikely(err))
- btrfs_put_logged_extents(&logged_list);
- else
- btrfs_submit_logged_extents(&logged_list, log);
mutex_unlock(&inode->log_mutex);
btrfs_free_path(path);
@@ -5585,7 +5497,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode,
struct btrfs_log_ctx *ctx)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
struct btrfs_path *path;
struct btrfs_key key;
@@ -6120,7 +6032,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, struct btrfs_inode *old_dir,
struct dentry *parent)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_fs_info *fs_info = trans->fs_info;
/*
* this will force the logging code to walk the dentry chain
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1da162928d1a..da86706123ff 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -8,15 +8,12 @@
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
-#include <linux/iocontext.h>
-#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
-#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
@@ -634,44 +631,48 @@ static void pending_bios_fn(struct btrfs_work *work)
* devices.
*/
static void btrfs_free_stale_devices(const char *path,
- struct btrfs_device *skip_dev)
+ struct btrfs_device *skip_device)
{
- struct btrfs_fs_devices *fs_devs, *tmp_fs_devs;
- struct btrfs_device *dev, *tmp_dev;
+ struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
+ struct btrfs_device *device, *tmp_device;
- list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, fs_list) {
-
- if (fs_devs->opened)
+ list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
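+		/* Serialize against device_list_add() while pruning this fs_devices. */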
+ mutex_lock(&fs_devices->device_list_mutex);
+ if (fs_devices->opened) {
+ mutex_unlock(&fs_devices->device_list_mutex);
continue;
+ }
- list_for_each_entry_safe(dev, tmp_dev,
- &fs_devs->devices, dev_list) {
+ list_for_each_entry_safe(device, tmp_device,
+ &fs_devices->devices, dev_list) {
int not_found = 0;
- if (skip_dev && skip_dev == dev)
+ if (skip_device && skip_device == device)
continue;
- if (path && !dev->name)
+ if (path && !device->name)
continue;
rcu_read_lock();
if (path)
- not_found = strcmp(rcu_str_deref(dev->name),
+ not_found = strcmp(rcu_str_deref(device->name),
path);
rcu_read_unlock();
if (not_found)
continue;
/* delete the stale device */
- if (fs_devs->num_devices == 1) {
- btrfs_sysfs_remove_fsid(fs_devs);
- list_del(&fs_devs->fs_list);
- free_fs_devices(fs_devs);
+ fs_devices->num_devices--;
+ list_del(&device->dev_list);
+ btrfs_free_device(device);
+
+ if (fs_devices->num_devices == 0)
break;
- } else {
- fs_devs->num_devices--;
- list_del(&dev->dev_list);
- btrfs_free_device(dev);
- }
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+ if (fs_devices->num_devices == 0) {
+ btrfs_sysfs_remove_fsid(fs_devices);
+ list_del(&fs_devices->fs_list);
+ free_fs_devices(fs_devices);
}
}
}
@@ -750,7 +751,8 @@ error_brelse:
 * error pointer on failure
*/
static noinline struct btrfs_device *device_list_add(const char *path,
- struct btrfs_super_block *disk_super)
+ struct btrfs_super_block *disk_super,
+ bool *new_device_added)
{
struct btrfs_device *device;
struct btrfs_fs_devices *fs_devices;
@@ -764,21 +766,26 @@ static noinline struct btrfs_device *device_list_add(const char *path,
if (IS_ERR(fs_devices))
return ERR_CAST(fs_devices);
+ mutex_lock(&fs_devices->device_list_mutex);
list_add(&fs_devices->fs_list, &fs_uuids);
device = NULL;
} else {
+ mutex_lock(&fs_devices->device_list_mutex);
device = find_device(fs_devices, devid,
disk_super->dev_item.uuid);
}
if (!device) {
- if (fs_devices->opened)
+ if (fs_devices->opened) {
+ mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-EBUSY);
+ }
device = btrfs_alloc_device(NULL, &devid,
disk_super->dev_item.uuid);
if (IS_ERR(device)) {
+ mutex_unlock(&fs_devices->device_list_mutex);
/* we can safely leave the fs_devices entry around */
return device;
}
@@ -786,17 +793,16 @@ static noinline struct btrfs_device *device_list_add(const char *path,
name = rcu_string_strdup(path, GFP_NOFS);
if (!name) {
btrfs_free_device(device);
+ mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-ENOMEM);
}
rcu_assign_pointer(device->name, name);
- mutex_lock(&fs_devices->device_list_mutex);
list_add_rcu(&device->dev_list, &fs_devices->devices);
fs_devices->num_devices++;
- mutex_unlock(&fs_devices->device_list_mutex);
device->fs_devices = fs_devices;
- btrfs_free_stale_devices(path, device);
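+	/*
+	 * Stale-device cleanup is deferred to the caller, which must run it
+	 * without device_list_mutex held.
+	 */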
+ *new_device_added = true;
if (disk_super->label[0])
pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
@@ -840,12 +846,15 @@ static noinline struct btrfs_device *device_list_add(const char *path,
* with larger generation number or the last-in if
 * generations are equal.
*/
+ mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-EEXIST);
}
name = rcu_string_strdup(path, GFP_NOFS);
- if (!name)
+ if (!name) {
+ mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-ENOMEM);
+ }
rcu_string_free(device->name);
rcu_assign_pointer(device->name, name);
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
@@ -865,6 +874,7 @@ static noinline struct btrfs_device *device_list_add(const char *path,
fs_devices->total_devices = btrfs_super_num_devices(disk_super);
+ mutex_unlock(&fs_devices->device_list_mutex);
return device;
}
@@ -1004,7 +1014,7 @@ static void btrfs_close_bdev(struct btrfs_device *device)
blkdev_put(device->bdev, device->mode);
}
-static void btrfs_prepare_close_one_device(struct btrfs_device *device)
+static void btrfs_close_one_device(struct btrfs_device *device)
{
struct btrfs_fs_devices *fs_devices = device->fs_devices;
struct btrfs_device *new_device;
@@ -1022,6 +1032,8 @@ static void btrfs_prepare_close_one_device(struct btrfs_device *device)
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
fs_devices->missing_devices--;
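+	/*
+	 * Release the block device now; the btrfs_device struct itself is
+	 * only freed after an RCU grace period below.
+	 */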
+ btrfs_close_bdev(device);
+
new_device = btrfs_alloc_device(NULL, &device->devid,
device->uuid);
BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
@@ -1035,39 +1047,23 @@ static void btrfs_prepare_close_one_device(struct btrfs_device *device)
list_replace_rcu(&device->dev_list, &new_device->dev_list);
new_device->fs_devices = device->fs_devices;
+
+ call_rcu(&device->rcu, free_device_rcu);
}
static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device, *tmp;
- struct list_head pending_put;
-
- INIT_LIST_HEAD(&pending_put);
if (--fs_devices->opened > 0)
return 0;
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
- btrfs_prepare_close_one_device(device);
- list_add(&device->dev_list, &pending_put);
+ btrfs_close_one_device(device);
}
mutex_unlock(&fs_devices->device_list_mutex);
- /*
- * btrfs_show_devname() is using the device_list_mutex,
- * sometimes call to blkdev_put() leads vfs calling
- * into this func. So do put outside of device_list_mutex,
- * as of now.
- */
- while (!list_empty(&pending_put)) {
- device = list_first_entry(&pending_put,
- struct btrfs_device, dev_list);
- list_del(&device->dev_list);
- btrfs_close_bdev(device);
- call_rcu(&device->rcu, free_device_rcu);
- }
-
WARN_ON(fs_devices->open_devices);
WARN_ON(fs_devices->rw_devices);
fs_devices->opened = 0;
@@ -1146,7 +1142,8 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
{
int ret;
- mutex_lock(&uuid_mutex);
+ lockdep_assert_held(&uuid_mutex);
+
mutex_lock(&fs_devices->device_list_mutex);
if (fs_devices->opened) {
fs_devices->opened++;
@@ -1156,7 +1153,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
ret = open_fs_devices(fs_devices, flags, holder);
}
mutex_unlock(&fs_devices->device_list_mutex);
- mutex_unlock(&uuid_mutex);
return ret;
}
@@ -1217,16 +1213,18 @@ static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
* and we are not allowed to call set_blocksize during the scan. The superblock
* is read via pagecache
*/
-int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
- struct btrfs_fs_devices **fs_devices_ret)
+struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
+ void *holder)
{
struct btrfs_super_block *disk_super;
- struct btrfs_device *device;
+ bool new_device_added = false;
+ struct btrfs_device *device = NULL;
struct block_device *bdev;
struct page *page;
- int ret = 0;
u64 bytenr;
+ lockdep_assert_held(&uuid_mutex);
+
/*
* we would like to check all the supers, but that would make
* a btrfs mount succeed after a mkfs from a different FS.
@@ -1238,112 +1236,25 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
bdev = blkdev_get_by_path(path, flags, holder);
if (IS_ERR(bdev))
- return PTR_ERR(bdev);
+ return ERR_CAST(bdev);
if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
- ret = -EINVAL;
+ device = ERR_PTR(-EINVAL);
goto error_bdev_put;
}
- mutex_lock(&uuid_mutex);
- device = device_list_add(path, disk_super);
- if (IS_ERR(device))
- ret = PTR_ERR(device);
- else
- *fs_devices_ret = device->fs_devices;
- mutex_unlock(&uuid_mutex);
+ device = device_list_add(path, disk_super, &new_device_added);
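+	/* device_list_add() has dropped device_list_mutex, so stale devices can be pruned safely here. */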
+ if (!IS_ERR(device)) {
+ if (new_device_added)
+ btrfs_free_stale_devices(path, device);
+ }
btrfs_release_disk_super(page);
error_bdev_put:
blkdev_put(bdev, flags);
- return ret;
-}
-
-/* helper to account the used device space in the range */
-int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
- u64 end, u64 *length)
-{
- struct btrfs_key key;
- struct btrfs_root *root = device->fs_info->dev_root;
- struct btrfs_dev_extent *dev_extent;
- struct btrfs_path *path;
- u64 extent_end;
- int ret;
- int slot;
- struct extent_buffer *l;
-
- *length = 0;
-
- if (start >= device->total_bytes ||
- test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
- return 0;
-
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- path->reada = READA_FORWARD;
-
- key.objectid = device->devid;
- key.offset = start;
- key.type = BTRFS_DEV_EXTENT_KEY;
-
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
- goto out;
- if (ret > 0) {
- ret = btrfs_previous_item(root, path, key.objectid, key.type);
- if (ret < 0)
- goto out;
- }
-
- while (1) {
- l = path->nodes[0];
- slot = path->slots[0];
- if (slot >= btrfs_header_nritems(l)) {
- ret = btrfs_next_leaf(root, path);
- if (ret == 0)
- continue;
- if (ret < 0)
- goto out;
-
- break;
- }
- btrfs_item_key_to_cpu(l, &key, slot);
-
- if (key.objectid < device->devid)
- goto next;
-
- if (key.objectid > device->devid)
- break;
-
- if (key.type != BTRFS_DEV_EXTENT_KEY)
- goto next;
-
- dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
- extent_end = key.offset + btrfs_dev_extent_length(l,
- dev_extent);
- if (key.offset <= start && extent_end > end) {
- *length = end - start + 1;
- break;
- } else if (key.offset <= start && extent_end > start)
- *length += extent_end - start;
- else if (key.offset > start && extent_end <= end)
- *length += extent_end - key.offset;
- else if (key.offset > start && key.offset <= end) {
- *length += end - key.offset + 1;
- break;
- } else if (key.offset > end)
- break;
-
-next:
- path->slots[0]++;
- }
- ret = 0;
-out:
- btrfs_free_path(path);
- return ret;
+ return device;
}
static int contains_pending_extent(struct btrfs_transaction *transaction,
@@ -1755,10 +1666,8 @@ error:
* the btrfs_device struct should be fully filled in
*/
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
- struct btrfs_root *root = fs_info->chunk_root;
int ret;
struct btrfs_path *path;
struct btrfs_dev_item *dev_item;
@@ -1774,8 +1683,8 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
- ret = btrfs_insert_empty_item(trans, root, path, &key,
- sizeof(*dev_item));
+ ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
+ &key, sizeof(*dev_item));
if (ret)
goto out;
@@ -1800,7 +1709,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
ptr = btrfs_device_uuid(dev_item);
write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
ptr = btrfs_device_fsid(dev_item);
- write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_FSID_SIZE);
+ write_extent_buffer(leaf, trans->fs_info->fsid, ptr, BTRFS_FSID_SIZE);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
@@ -1924,9 +1833,10 @@ static struct btrfs_device * btrfs_find_next_active_device(
 * where this function is called, there should always be another device (or
* this_dev) which is active.
*/
-void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
- struct btrfs_device *device, struct btrfs_device *this_dev)
+void btrfs_assign_next_active_device(struct btrfs_device *device,
+ struct btrfs_device *this_dev)
{
+ struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_device *next_device;
if (this_dev)
@@ -2029,11 +1939,14 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
cur_devices->num_devices--;
cur_devices->total_devices--;
+ /* Update total_devices of the parent fs_devices if it's seed */
+ if (cur_devices != fs_devices)
+ fs_devices->total_devices--;
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
cur_devices->missing_devices--;
- btrfs_assign_next_active_device(fs_info, device, NULL);
+ btrfs_assign_next_active_device(device, NULL);
if (device->bdev) {
cur_devices->open_devices--;
@@ -2084,12 +1997,11 @@ error_undo:
goto out;
}
-void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *srcdev)
+void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
struct btrfs_fs_devices *fs_devices;
- lockdep_assert_held(&fs_info->fs_devices->device_list_mutex);
+ lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
/*
* in case of fs with no seed, srcdev->fs_devices will point
@@ -2151,10 +2063,9 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
}
}
-void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *tgtdev)
+void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
- struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
WARN_ON(!tgtdev);
mutex_lock(&fs_devices->device_list_mutex);
@@ -2166,7 +2077,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
fs_devices->num_devices--;
- btrfs_assign_next_active_device(fs_info, tgtdev, NULL);
+ btrfs_assign_next_active_device(tgtdev, NULL);
list_del_rcu(&tgtdev->dev_list);
@@ -2297,7 +2208,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&seed_devices->alloc_list);
mutex_init(&seed_devices->device_list_mutex);
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_devices->device_list_mutex);
list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
synchronize_rcu);
list_for_each_entry(device, &seed_devices->devices, dev_list)
@@ -2317,7 +2228,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
generate_random_uuid(fs_devices->fsid);
memcpy(fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_devices->device_list_mutex);
super_flags = btrfs_super_flags(disk_super) &
~BTRFS_SUPER_FLAG_SEEDING;
@@ -2407,15 +2318,16 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
struct block_device *bdev;
- struct list_head *devices;
struct super_block *sb = fs_info->sb;
struct rcu_string *name;
- u64 tmp;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ u64 orig_super_total_bytes;
+ u64 orig_super_num_devices;
int seeding_dev = 0;
int ret = 0;
bool unlocked = false;
- if (sb_rdonly(sb) && !fs_info->fs_devices->seeding)
+ if (sb_rdonly(sb) && !fs_devices->seeding)
return -EROFS;
bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
@@ -2423,7 +2335,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
if (IS_ERR(bdev))
return PTR_ERR(bdev);
- if (fs_info->fs_devices->seeding) {
+ if (fs_devices->seeding) {
seeding_dev = 1;
down_write(&sb->s_umount);
mutex_lock(&uuid_mutex);
@@ -2431,18 +2343,16 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
filemap_write_and_wait(bdev->bd_inode->i_mapping);
- devices = &fs_info->fs_devices->devices;
-
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
- list_for_each_entry(device, devices, dev_list) {
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
if (device->bdev == bdev) {
ret = -EEXIST;
mutex_unlock(
- &fs_info->fs_devices->device_list_mutex);
+ &fs_devices->device_list_mutex);
goto error;
}
}
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_devices->device_list_mutex);
device = btrfs_alloc_device(fs_info, NULL, NULL);
if (IS_ERR(device)) {
@@ -2491,33 +2401,34 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
}
- device->fs_devices = fs_info->fs_devices;
+ device->fs_devices = fs_devices;
- mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_devices->device_list_mutex);
mutex_lock(&fs_info->chunk_mutex);
- list_add_rcu(&device->dev_list, &fs_info->fs_devices->devices);
- list_add(&device->dev_alloc_list,
- &fs_info->fs_devices->alloc_list);
- fs_info->fs_devices->num_devices++;
- fs_info->fs_devices->open_devices++;
- fs_info->fs_devices->rw_devices++;
- fs_info->fs_devices->total_devices++;
- fs_info->fs_devices->total_rw_bytes += device->total_bytes;
+ list_add_rcu(&device->dev_list, &fs_devices->devices);
+ list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
+ fs_devices->num_devices++;
+ fs_devices->open_devices++;
+ fs_devices->rw_devices++;
+ fs_devices->total_devices++;
+ fs_devices->total_rw_bytes += device->total_bytes;
atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
if (!blk_queue_nonrot(q))
- fs_info->fs_devices->rotating = 1;
+ fs_devices->rotating = 1;
- tmp = btrfs_super_total_bytes(fs_info->super_copy);
+ orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
btrfs_set_super_total_bytes(fs_info->super_copy,
- round_down(tmp + device->total_bytes, fs_info->sectorsize));
+ round_down(orig_super_total_bytes + device->total_bytes,
+ fs_info->sectorsize));
- tmp = btrfs_super_num_devices(fs_info->super_copy);
- btrfs_set_super_num_devices(fs_info->super_copy, tmp + 1);
+ orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
+ btrfs_set_super_num_devices(fs_info->super_copy,
+ orig_super_num_devices + 1);
/* add sysfs device entry */
- btrfs_sysfs_add_device_link(fs_info->fs_devices, device);
+ btrfs_sysfs_add_device_link(fs_devices, device);
/*
* we've got more storage, clear any full flags on the space
@@ -2526,7 +2437,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
btrfs_clear_space_info_full(fs_info);
mutex_unlock(&fs_info->chunk_mutex);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&fs_devices->device_list_mutex);
if (seeding_dev) {
mutex_lock(&fs_info->chunk_mutex);
@@ -2538,7 +2449,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
}
- ret = btrfs_add_dev_item(trans, fs_info, device);
+ ret = btrfs_add_dev_item(trans, device);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
@@ -2558,7 +2469,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
*/
snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
fs_info->fsid);
- if (kobject_rename(&fs_info->fs_devices->fsid_kobj, fsid_buf))
+ if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
btrfs_warn(fs_info,
"sysfs: failed to create fsid for sprout");
}
@@ -2593,7 +2504,23 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
return ret;
error_sysfs:
- btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
+ btrfs_sysfs_rm_device_link(fs_devices, device);
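+	/* Roll back the in-memory registration and superblock accounting done above. */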
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&fs_info->chunk_mutex);
+ list_del_rcu(&device->dev_list);
+ list_del(&device->dev_alloc_list);
+ fs_info->fs_devices->num_devices--;
+ fs_info->fs_devices->open_devices--;
+ fs_info->fs_devices->rw_devices--;
+ fs_info->fs_devices->total_devices--;
+ fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
+ atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
+ btrfs_set_super_total_bytes(fs_info->super_copy,
+ orig_super_total_bytes);
+ btrfs_set_super_num_devices(fs_info->super_copy,
+ orig_super_num_devices);
+ mutex_unlock(&fs_info->chunk_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
if (seeding_dev)
sb->s_flags |= SB_RDONLY;
@@ -2697,9 +2624,9 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
return btrfs_update_device(trans, device);
}
-static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 chunk_offset)
+static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = fs_info->chunk_root;
int ret;
struct btrfs_path *path;
@@ -2808,9 +2735,9 @@ static struct extent_map *get_chunk_map(struct btrfs_fs_info *fs_info,
return em;
}
-int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 chunk_offset)
+int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct extent_map *em;
struct map_lookup *map;
u64 dev_extent_len = 0;
@@ -2829,7 +2756,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
map = em->map_lookup;
mutex_lock(&fs_info->chunk_mutex);
- check_system_chunk(trans, fs_info, map->type);
+ check_system_chunk(trans, map->type);
mutex_unlock(&fs_info->chunk_mutex);
/*
@@ -2869,7 +2796,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
mutex_unlock(&fs_devices->device_list_mutex);
- ret = btrfs_free_chunk(trans, fs_info, chunk_offset);
+ ret = btrfs_free_chunk(trans, chunk_offset);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -2885,7 +2812,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
}
- ret = btrfs_remove_block_group(trans, fs_info, chunk_offset, em);
+ ret = btrfs_remove_block_group(trans, chunk_offset, em);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -2950,7 +2877,7 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
* step two, delete the device extents and the
* chunk tree entries
*/
- ret = btrfs_remove_chunk(trans, fs_info, chunk_offset);
+ ret = btrfs_remove_chunk(trans, chunk_offset);
btrfs_end_transaction(trans);
return ret;
}
@@ -3059,7 +2986,7 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_force_chunk_alloc(trans, fs_info,
+ ret = btrfs_force_chunk_alloc(trans,
BTRFS_BLOCK_GROUP_DATA);
btrfs_end_transaction(trans);
if (ret < 0)
@@ -4692,7 +4619,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (type & BTRFS_BLOCK_GROUP_DATA) {
max_stripe_size = SZ_1G;
- max_chunk_size = 10 * max_stripe_size;
+ max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
if (!devs_max)
devs_max = BTRFS_MAX_DEVS(info);
} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
@@ -4900,7 +4827,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
refcount_inc(&em->refs);
write_unlock(&em_tree->lock);
- ret = btrfs_make_block_group(trans, info, 0, type, start, num_bytes);
+ ret = btrfs_make_block_group(trans, 0, type, start, num_bytes);
if (ret)
goto error_del_extent;
@@ -4934,9 +4861,9 @@ error:
}
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 chunk_offset, u64 chunk_size)
+ u64 chunk_offset, u64 chunk_size)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_root *chunk_root = fs_info->chunk_root;
struct btrfs_key key;
@@ -5038,13 +4965,12 @@ out:
* require modifying the chunk tree. This division is important for the
* bootstrap process of adding storage to a seed btrfs.
*/
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 type)
+int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
u64 chunk_offset;
- lockdep_assert_held(&fs_info->chunk_mutex);
- chunk_offset = find_next_chunk(fs_info);
+ lockdep_assert_held(&trans->fs_info->chunk_mutex);
+ chunk_offset = find_next_chunk(trans->fs_info);
return __btrfs_alloc_chunk(trans, chunk_offset, type);
}
@@ -5175,7 +5101,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
/*
 * There could be two corrupted data stripes, so we need
 * to retry in a loop in order to rebuild the correct data.
- *
+ *
* Fail a stripe at a time on every retry except the
* stripe under reconstruction.
*/
@@ -6187,21 +6113,11 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
btrfs_io_bio(bio)->stripe_index = dev_nr;
bio->bi_end_io = btrfs_end_bio;
bio->bi_iter.bi_sector = physical >> 9;
-#ifdef DEBUG
- {
- struct rcu_string *name;
-
- rcu_read_lock();
- name = rcu_dereference(dev->name);
- btrfs_debug(fs_info,
- "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
- bio_op(bio), bio->bi_opf,
- (u64)bio->bi_iter.bi_sector,
- (u_long)dev->bdev->bd_dev, name->str, dev->devid,
- bio->bi_iter.bi_size);
- rcu_read_unlock();
- }
-#endif
+ btrfs_debug_in_rcu(fs_info,
+ "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
+ bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
+ (u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
+ bio->bi_iter.bi_size);
bio_set_dev(bio, dev->bdev);
btrfs_bio_counter_inc_noblocked(fs_info);
@@ -6403,6 +6319,8 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
u16 num_stripes;
u16 sub_stripes;
u64 type;
+ u64 features;
+ bool mixed = false;
length = btrfs_chunk_length(leaf, chunk);
stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
@@ -6441,6 +6359,32 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
btrfs_chunk_type(leaf, chunk));
return -EIO;
}
+
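+	/*
+	 * Chunk type sanity: a chunk must have at least one type bit set,
+	 * SYSTEM cannot be combined with DATA or METADATA, and DATA|METADATA
+	 * together requires the mixed block groups feature.
+	 */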
+ if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
+ btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
+ return -EIO;
+ }
+
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
+ (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
+ btrfs_err(fs_info,
+ "system chunk with data or metadata type: 0x%llx", type);
+ return -EIO;
+ }
+
+ features = btrfs_super_incompat_flags(fs_info->super_copy);
+ if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+ mixed = true;
+
+ if (!mixed) {
+ if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
+ (type & BTRFS_BLOCK_GROUP_DATA)) {
+ btrfs_err(fs_info,
+ "mixed chunk type in non-mixed mode: 0x%llx", type);
+ return -EIO;
+ }
+ }
+
if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
(type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
(type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
@@ -6527,6 +6471,7 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
map->type = btrfs_chunk_type(leaf, chunk);
map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ map->verified_stripes = 0;
for (i = 0; i < num_stripes; i++) {
map->stripes[i].physical =
btrfs_stripe_offset_nr(leaf, chunk, i);
@@ -6563,10 +6508,14 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
write_lock(&map_tree->map_tree.lock);
ret = add_extent_mapping(&map_tree->map_tree, em, 0);
write_unlock(&map_tree->map_tree.lock);
- BUG_ON(ret); /* Tree corruption */
+ if (ret < 0) {
+ btrfs_err(fs_info,
+ "failed to add chunk map, start=%llu len=%llu: %d",
+ em->start, em->len, ret);
+ }
free_extent_map(em);
- return 0;
+ return ret;
}
static void fill_device_from_item(struct extent_buffer *leaf,
@@ -7108,9 +7057,9 @@ out:
}
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *dev_root = fs_info->dev_root;
struct btrfs_path *path;
struct btrfs_key key;
@@ -7203,7 +7152,7 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
*/
smp_rmb();
- ret = update_dev_stat_item(trans, fs_info, device);
+ ret = update_dev_stat_item(trans, device);
if (!ret)
atomic_sub(stats_cnt, &device->dev_stats_ccnt);
}
@@ -7382,3 +7331,197 @@ void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
fs_devices = fs_devices->seed;
}
}
+
+/*
+ * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
+ */
+int btrfs_bg_type_to_factor(u64 flags)
+{
+ if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID10))
+ return 2;
+ return 1;
+}
+
+static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
+{
+ int index = btrfs_bg_flags_to_raid_index(type);
+ int ncopies = btrfs_raid_array[index].ncopies;
+ int data_stripes;
+
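+	/*
+	 * RAID5/6 dedicate one/two stripes to parity; the other profiles
+	 * store ncopies identical copies, so divide the stripe count by
+	 * ncopies to get the number of data stripes.
+	 */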
+ switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ case BTRFS_BLOCK_GROUP_RAID5:
+ data_stripes = num_stripes - 1;
+ break;
+ case BTRFS_BLOCK_GROUP_RAID6:
+ data_stripes = num_stripes - 2;
+ break;
+ default:
+ data_stripes = num_stripes / ncopies;
+ break;
+ }
+ return div_u64(chunk_len, data_stripes);
+}
+
+static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
+ u64 chunk_offset, u64 devid,
+ u64 physical_offset, u64 physical_len)
+{
+ struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
+ struct extent_map *em;
+ struct map_lookup *map;
+ u64 stripe_len;
+ bool found = false;
+ int ret = 0;
+ int i;
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, chunk_offset, 1);
+ read_unlock(&em_tree->lock);
+
+ if (!em) {
+ btrfs_err(fs_info,
+"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
+ physical_offset, devid);
+ ret = -EUCLEAN;
+ goto out;
+ }
+
+ map = em->map_lookup;
+ stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
+ if (physical_len != stripe_len) {
+ btrfs_err(fs_info,
+"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
+ physical_offset, devid, em->start, physical_len,
+ stripe_len);
+ ret = -EUCLEAN;
+ goto out;
+ }
+
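+	/*
+	 * Match this dev extent to one stripe of the chunk; verified_stripes
+	 * also catches duplicate dev extents for the same stripe.
+	 */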
+ for (i = 0; i < map->num_stripes; i++) {
+ if (map->stripes[i].dev->devid == devid &&
+ map->stripes[i].physical == physical_offset) {
+ found = true;
+ if (map->verified_stripes >= map->num_stripes) {
+ btrfs_err(fs_info,
+ "too many dev extents for chunk %llu found",
+ em->start);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ map->verified_stripes++;
+ break;
+ }
+ }
+ if (!found) {
+ btrfs_err(fs_info,
+ "dev extent physical offset %llu devid %llu has no corresponding chunk",
+ physical_offset, devid);
+ ret = -EUCLEAN;
+ }
+out:
+ free_extent_map(em);
+ return ret;
+}
+
+static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
+{
+ struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
+ struct extent_map *em;
+ struct rb_node *node;
+ int ret = 0;
+
+ read_lock(&em_tree->lock);
+ for (node = rb_first(&em_tree->map); node; node = rb_next(node)) {
+ em = rb_entry(node, struct extent_map, rb_node);
+ if (em->map_lookup->num_stripes !=
+ em->map_lookup->verified_stripes) {
+ btrfs_err(fs_info,
+ "chunk %llu has missing dev extent, have %d expect %d",
+ em->start, em->map_lookup->verified_stripes,
+ em->map_lookup->num_stripes);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+out:
+ read_unlock(&em_tree->lock);
+ return ret;
+}
+
+/*
+ * Ensure that all dev extents are mapped to correct chunk, otherwise
+ * later chunk allocation/free would cause unexpected behavior.
+ *
+ * NOTE: This iterates through the whole device tree, which should be roughly
+ * the same size as the chunk tree, so it slightly increases mount time.
+ */
+int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_path *path;
+ struct btrfs_root *root = fs_info->dev_root;
+ struct btrfs_key key;
+ int ret = 0;
+
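+	/* Dev extents are keyed by devid, and the first valid devid is 1. */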
+ key.objectid = 1;
+ key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ path->reada = READA_FORWARD;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+ ret = btrfs_next_item(root, path);
+ if (ret < 0)
+ goto out;
+ /* No dev extents at all? Not good */
+ if (ret > 0) {
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+ while (1) {
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_dev_extent *dext;
+ int slot = path->slots[0];
+ u64 chunk_offset;
+ u64 physical_offset;
+ u64 physical_len;
+ u64 devid;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (key.type != BTRFS_DEV_EXTENT_KEY)
+ break;
+ devid = key.objectid;
+ physical_offset = key.offset;
+
+ dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
+ chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
+ physical_len = btrfs_dev_extent_length(leaf, dext);
+
+ ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
+ physical_offset, physical_len);
+ if (ret < 0)
+ goto out;
+ ret = btrfs_next_item(root, path);
+ if (ret < 0)
+ goto out;
+ if (ret > 0) {
+ ret = 0;
+ break;
+ }
+ }
+
+ /* Ensure all chunks have corresponding dev extents */
+ ret = verify_chunk_dev_extent_mapping(fs_info);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 5139ec8daf4c..23e9285d88de 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -11,6 +11,8 @@
#include <linux/btrfs.h>
#include "async-thread.h"
+#define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
+
extern struct mutex uuid_mutex;
#define BTRFS_STRIPE_LEN SZ_64K
@@ -343,6 +345,7 @@ struct map_lookup {
u64 stripe_len;
int num_stripes;
int sub_stripes;
+ int verified_stripes; /* For mount time dev extent verification */
struct btrfs_bio_stripe stripes[];
};
@@ -382,8 +385,6 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
}
}
-int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
- u64 end, u64 *length);
void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
@@ -396,20 +397,19 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
u64 physical, u64 **logical, int *naddrs, int *stripe_len);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 type);
+int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
-int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
- struct btrfs_fs_devices **fs_devices_ret);
+struct btrfs_device *btrfs_scan_one_device(const char *path,
+ fmode_t flags, void *holder);
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
-void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
- struct btrfs_device *device, struct btrfs_device *this_dev);
+void btrfs_assign_next_active_device(struct btrfs_device *device,
+ struct btrfs_device *this_dev);
int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
const char *device_path,
struct btrfs_device **device);
@@ -453,22 +453,18 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
-void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *srcdev);
+void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
struct btrfs_device *srcdev);
-void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *tgtdev);
+void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- u64 chunk_offset, u64 chunk_size);
-int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 chunk_offset);
+ u64 chunk_offset, u64 chunk_size);
+int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
int index)
@@ -560,4 +556,7 @@ void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
struct btrfs_device *failing_dev);
+int btrfs_bg_type_to_factor(u64 flags);
+int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
+
#endif
diff --git a/fs/buffer.c b/fs/buffer.c
index cabc045f483d..4cc679d5bf58 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -45,6 +45,7 @@
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
+#include <linux/sched/mm.h>
#include <trace/events/block.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
@@ -813,12 +814,16 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
bool retry)
{
struct buffer_head *bh, *head;
- gfp_t gfp = GFP_NOFS;
+ gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
long offset;
+ struct mem_cgroup *memcg;
if (retry)
gfp |= __GFP_NOFAIL;
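+	/* Attribute the buffer_head allocations below to the memcg that owns the page. */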
+ memcg = get_mem_cgroup_from_page(page);
+ memalloc_use_memcg(memcg);
+
head = NULL;
offset = PAGE_SIZE;
while ((offset -= size) >= 0) {
@@ -835,6 +840,9 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
/* Link the buffer to its page */
set_bh_page(bh, page, offset);
}
+out:
+ memalloc_unuse_memcg();
+ mem_cgroup_put(memcg);
return head;
/*
* In case anything failed, we just free everything we got.
@@ -848,7 +856,7 @@ no_grow:
} while (head);
}
- return NULL;
+ goto out;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
@@ -1900,15 +1908,16 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
break;
case IOMAP_UNWRITTEN:
/*
- * For unwritten regions, we always need to ensure that
- * sub-block writes cause the regions in the block we are not
- * writing to are zeroed. Set the buffer as new to ensure this.
+ * For unwritten regions, we always need to ensure that regions
+ * in the block we are not writing to are zeroed. Mark the
+ * buffer as new to ensure this.
*/
set_buffer_new(bh);
set_buffer_unwritten(bh);
/* FALLTHRU */
case IOMAP_MAPPED:
- if (offset >= i_size_read(inode))
+ if ((iomap->flags & IOMAP_F_NEW) ||
+ offset >= i_size_read(inode))
set_buffer_new(bh);
bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
inode->i_blkbits;
@@ -2076,6 +2085,40 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
}
EXPORT_SYMBOL(block_write_begin);
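+/*
+ * Common tail of generic_write_end(): update i_size while the page is still
+ * locked, then unlock and release the page and dirty the inode outside of
+ * the page lock.
+ */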
+int __generic_write_end(struct inode *inode, loff_t pos, unsigned copied,
+ struct page *page)
+{
+ loff_t old_size = inode->i_size;
+ bool i_size_changed = false;
+
+ /*
+ * No need to use i_size_read() here, the i_size cannot change under us
+ * because we hold i_rwsem.
+ *
+ * But it's important to update i_size while still holding page lock:
+ * page writeout could otherwise come in and zero beyond i_size.
+ */
+ if (pos + copied > inode->i_size) {
+ i_size_write(inode, pos + copied);
+ i_size_changed = true;
+ }
+
+ unlock_page(page);
+ put_page(page);
+
+ if (old_size < pos)
+ pagecache_isize_extended(inode, old_size, pos);
+ /*
+ * Don't mark the inode dirty under page lock. First, it unnecessarily
+ * makes the holding time of page lock longer. Second, it forces lock
+ * ordering of page lock and transaction start for journaling
+ * filesystems.
+ */
+ if (i_size_changed)
+ mark_inode_dirty(inode);
+ return copied;
+}
+
int block_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
@@ -2116,39 +2159,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- struct inode *inode = mapping->host;
- loff_t old_size = inode->i_size;
- int i_size_changed = 0;
-
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-
- /*
- * No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_mutex.
- *
- * But it's important to update i_size while still holding page lock:
- * page writeout could otherwise come in and zero beyond i_size.
- */
- if (pos+copied > inode->i_size) {
- i_size_write(inode, pos+copied);
- i_size_changed = 1;
- }
-
- unlock_page(page);
- put_page(page);
-
- if (old_size < pos)
- pagecache_isize_extended(inode, old_size, pos);
- /*
- * Don't mark the inode dirty under page lock. First, it unnecessarily
- * makes the holding time of page lock longer. Second, it forces lock
- * ordering of page lock and transaction start for journaling
- * filesystems.
- */
- if (i_size_changed)
- mark_inode_dirty(inode);
-
- return copied;
+ return __generic_write_end(mapping->host, pos, copied, page);
}
EXPORT_SYMBOL(generic_write_end);
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 59cb307b15fb..027408d55aee 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -45,6 +45,7 @@ static inline void ceph_set_cached_acl(struct inode *inode,
struct posix_acl *ceph_get_acl(struct inode *inode, int type)
{
int size;
+ unsigned int retry_cnt = 0;
const char *name;
char *value = NULL;
struct posix_acl *acl;
@@ -60,6 +61,7 @@ struct posix_acl *ceph_get_acl(struct inode *inode, int type)
BUG();
}
+retry:
size = __ceph_getxattr(inode, name, "", 0);
if (size > 0) {
value = kzalloc(size, GFP_NOFS);
@@ -68,12 +70,22 @@ struct posix_acl *ceph_get_acl(struct inode *inode, int type)
size = __ceph_getxattr(inode, name, value, size);
}
- if (size > 0)
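+	/*
+	 * A racing setxattr grew the value between the size probe and the
+	 * read; re-probe and retry, bounded at 10 attempts.
+	 */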
+ if (size == -ERANGE && retry_cnt < 10) {
+ retry_cnt++;
+ kfree(value);
+ value = NULL;
+ goto retry;
+ }
+
+ if (size > 0) {
acl = posix_acl_from_xattr(&init_user_ns, value, size);
- else if (size == -ERANGE || size == -ENODATA || size == 0)
+ } else if (size == -ENODATA || size == 0) {
acl = NULL;
- else
+ } else {
+ pr_err_ratelimited("get acl %llx.%llx failed, err=%d\n",
+ ceph_vinop(inode), size);
acl = ERR_PTR(-EIO);
+ }
kfree(value);
@@ -89,6 +101,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
const char *name = NULL;
char *value = NULL;
struct iattr newattrs;
+ struct timespec64 old_ctime = inode->i_ctime;
umode_t new_mode = inode->i_mode, old_mode = inode->i_mode;
switch (type) {
@@ -133,7 +146,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (new_mode != old_mode) {
newattrs.ia_ctime = current_time(inode);
newattrs.ia_mode = new_mode;
- newattrs.ia_valid = ATTR_MODE;
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
ret = __ceph_setattr(inode, &newattrs);
if (ret)
goto out_free;
@@ -142,8 +155,9 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
ret = __ceph_setxattr(inode, name, value, size, 0);
if (ret) {
if (new_mode != old_mode) {
+ newattrs.ia_ctime = old_ctime;
newattrs.ia_mode = old_mode;
- newattrs.ia_valid = ATTR_MODE;
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
__ceph_setattr(inode, &newattrs);
}
goto out_free;
@@ -171,10 +185,10 @@ int ceph_pre_init_acls(struct inode *dir, umode_t *mode,
return err;
if (acl) {
- int ret = posix_acl_equiv_mode(acl, mode);
- if (ret < 0)
+ err = posix_acl_equiv_mode(acl, mode);
+ if (err < 0)
goto out_err;
- if (ret == 0) {
+ if (err == 0) {
posix_acl_release(acl);
acl = NULL;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 292b3d72d725..9c332a6f6667 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -574,7 +574,6 @@ static u64 get_writepages_data_length(struct inode *inode,
*/
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
- struct timespec ts;
struct inode *inode;
struct ceph_inode_info *ci;
struct ceph_fs_client *fsc;
@@ -625,12 +624,11 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
set_page_writeback(page);
- ts = timespec64_to_timespec(inode->i_mtime);
err = ceph_osdc_writepages(&fsc->client->osdc, ceph_vino(inode),
&ci->i_layout, snapc, page_off, len,
ceph_wbc.truncate_seq,
ceph_wbc.truncate_size,
- &ts, &page, 1);
+ &inode->i_mtime, &page, 1);
if (err < 0) {
struct writeback_control tmp_wbc;
if (!wbc)
@@ -1134,7 +1132,7 @@ new_request:
pages = NULL;
}
- req->r_mtime = timespec64_to_timespec(inode->i_mtime);
+ req->r_mtime = inode->i_mtime;
rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
BUG_ON(rc);
req = NULL;
@@ -1431,7 +1429,7 @@ static void ceph_restore_sigs(sigset_t *oldset)
/*
* vm ops
*/
-static int ceph_filemap_fault(struct vm_fault *vmf)
+static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
@@ -1439,8 +1437,9 @@ static int ceph_filemap_fault(struct vm_fault *vmf)
struct ceph_file_info *fi = vma->vm_file->private_data;
struct page *pinned_page = NULL;
loff_t off = vmf->pgoff << PAGE_SHIFT;
- int want, got, ret;
+ int want, got, err;
sigset_t oldset;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
ceph_block_sigs(&oldset);
@@ -1452,8 +1451,8 @@ static int ceph_filemap_fault(struct vm_fault *vmf)
want = CEPH_CAP_FILE_CACHE;
got = 0;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
- if (ret < 0)
+ err = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
+ if (err < 0)
goto out_restore;
dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
@@ -1465,16 +1464,17 @@ static int ceph_filemap_fault(struct vm_fault *vmf)
ceph_add_rw_context(fi, &rw_ctx);
ret = filemap_fault(vmf);
ceph_del_rw_context(fi, &rw_ctx);
+ dout("filemap_fault %p %llu~%zd drop cap refs %s ret %x\n",
+ inode, off, (size_t)PAGE_SIZE,
+ ceph_cap_string(got), ret);
} else
- ret = -EAGAIN;
+ err = -EAGAIN;
- dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
- inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
if (pinned_page)
put_page(pinned_page);
ceph_put_cap_refs(ci, got);
- if (ret != -EAGAIN)
+ if (err != -EAGAIN)
goto out_restore;
/* read inline data */
@@ -1482,7 +1482,6 @@ static int ceph_filemap_fault(struct vm_fault *vmf)
/* does not support inline data > PAGE_SIZE */
ret = VM_FAULT_SIGBUS;
} else {
- int ret1;
struct address_space *mapping = inode->i_mapping;
struct page *page = find_or_create_page(mapping, 0,
mapping_gfp_constraint(mapping,
@@ -1491,32 +1490,32 @@ static int ceph_filemap_fault(struct vm_fault *vmf)
ret = VM_FAULT_OOM;
goto out_inline;
}
- ret1 = __ceph_do_getattr(inode, page,
+ err = __ceph_do_getattr(inode, page,
CEPH_STAT_CAP_INLINE_DATA, true);
- if (ret1 < 0 || off >= i_size_read(inode)) {
+ if (err < 0 || off >= i_size_read(inode)) {
unlock_page(page);
put_page(page);
- if (ret1 < 0)
- ret = ret1;
+ if (err == -ENOMEM)
+ ret = VM_FAULT_OOM;
else
ret = VM_FAULT_SIGBUS;
goto out_inline;
}
- if (ret1 < PAGE_SIZE)
- zero_user_segment(page, ret1, PAGE_SIZE);
+ if (err < PAGE_SIZE)
+ zero_user_segment(page, err, PAGE_SIZE);
else
flush_dcache_page(page);
SetPageUptodate(page);
vmf->page = page;
ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
- dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
+ dout("filemap_fault %p %llu~%zd read inline data ret %x\n",
inode, off, (size_t)PAGE_SIZE, ret);
}
out_restore:
ceph_restore_sigs(&oldset);
- if (ret < 0)
- ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ if (err < 0)
+ ret = vmf_error(err);
return ret;
}
@@ -1524,7 +1523,7 @@ out_restore:
/*
* Reuse write_begin here for simplicity.
*/
-static int ceph_page_mkwrite(struct vm_fault *vmf)
+static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
@@ -1535,8 +1534,9 @@ static int ceph_page_mkwrite(struct vm_fault *vmf)
loff_t off = page_offset(page);
loff_t size = i_size_read(inode);
size_t len;
- int want, got, ret;
+ int want, got, err;
sigset_t oldset;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
@@ -1550,10 +1550,10 @@ static int ceph_page_mkwrite(struct vm_fault *vmf)
lock_page(page);
locked_page = page;
}
- ret = ceph_uninline_data(vma->vm_file, locked_page);
+ err = ceph_uninline_data(vma->vm_file, locked_page);
if (locked_page)
unlock_page(locked_page);
- if (ret < 0)
+ if (err < 0)
goto out_free;
}
@@ -1570,9 +1570,9 @@ static int ceph_page_mkwrite(struct vm_fault *vmf)
want = CEPH_CAP_FILE_BUFFER;
got = 0;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
+ err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
&got, NULL);
- if (ret < 0)
+ if (err < 0)
goto out_free;
dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
@@ -1590,13 +1590,13 @@ static int ceph_page_mkwrite(struct vm_fault *vmf)
break;
}
- ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
- if (ret >= 0) {
+ err = ceph_update_writeable_page(vma->vm_file, off, len, page);
+ if (err >= 0) {
/* success. we'll keep the page locked. */
set_page_dirty(page);
ret = VM_FAULT_LOCKED;
}
- } while (ret == -EAGAIN);
+ } while (err == -EAGAIN);
if (ret == VM_FAULT_LOCKED ||
ci->i_inline_version != CEPH_INLINE_NONE) {
@@ -1610,14 +1610,14 @@ static int ceph_page_mkwrite(struct vm_fault *vmf)
__mark_inode_dirty(inode, dirty);
}
- dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n",
+ dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
inode, off, len, ceph_cap_string(got), ret);
ceph_put_cap_refs(ci, got);
out_free:
ceph_restore_sigs(&oldset);
ceph_free_cap_flush(prealloc_cf);
- if (ret < 0)
- ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+ if (err < 0)
+ ret = vmf_error(err);
return ret;
}
@@ -1734,7 +1734,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
goto out;
}
- req->r_mtime = timespec64_to_timespec(inode->i_mtime);
+ req->r_mtime = inode->i_mtime;
err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
if (!err)
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1776,7 +1776,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
goto out_put;
}
- req->r_mtime = timespec64_to_timespec(inode->i_mtime);
+ req->r_mtime = inode->i_mtime;
err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
if (!err)
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1937,7 +1937,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
0, false, true);
err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
- wr_req->r_mtime = timespec64_to_timespec(ci->vfs_inode.i_mtime);
+ wr_req->r_mtime = ci->vfs_inode.i_mtime;
err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
if (!err)
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 362900e42424..1bf3502bdd6f 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -25,8 +25,9 @@
#include "cache.h"
struct ceph_aux_inode {
- u64 version;
- struct timespec mtime;
+ u64 version;
+ u64 mtime_sec;
+ u64 mtime_nsec;
};
struct fscache_netfs ceph_cache_netfs = {
@@ -130,7 +131,8 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux(
memset(&aux, 0, sizeof(aux));
aux.version = ci->i_version;
- aux.mtime = timespec64_to_timespec(inode->i_mtime);
+ aux.mtime_sec = inode->i_mtime.tv_sec;
+ aux.mtime_nsec = inode->i_mtime.tv_nsec;
if (memcmp(data, &aux, sizeof(aux)) != 0)
return FSCACHE_CHECKAUX_OBSOLETE;
@@ -163,7 +165,8 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
if (!ci->fscache) {
memset(&aux, 0, sizeof(aux));
aux.version = ci->i_version;
- aux.mtime = timespec64_to_timespec(inode->i_mtime);
+ aux.mtime_sec = inode->i_mtime.tv_sec;
+ aux.mtime_nsec = inode->i_mtime.tv_nsec;
ci->fscache = fscache_acquire_cookie(fsc->fscache,
&ceph_fscache_inode_object_def,
&ci->i_vino, sizeof(ci->i_vino),
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 990258cbd836..dd7dfdd2ba13 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -156,6 +156,37 @@ void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
spin_unlock(&mdsc->caps_list_lock);
}
+static void __ceph_unreserve_caps(struct ceph_mds_client *mdsc, int nr_caps)
+{
+ struct ceph_cap *cap;
+ int i;
+
+ if (nr_caps) {
+ BUG_ON(mdsc->caps_reserve_count < nr_caps);
+ mdsc->caps_reserve_count -= nr_caps;
+ if (mdsc->caps_avail_count >=
+ mdsc->caps_reserve_count + mdsc->caps_min_count) {
+ mdsc->caps_total_count -= nr_caps;
+ for (i = 0; i < nr_caps; i++) {
+ cap = list_first_entry(&mdsc->caps_list,
+ struct ceph_cap, caps_item);
+ list_del(&cap->caps_item);
+ kmem_cache_free(ceph_cap_cachep, cap);
+ }
+ } else {
+ mdsc->caps_avail_count += nr_caps;
+ }
+
+ dout("%s: caps %d = %d used + %d resv + %d avail\n",
+ __func__,
+ mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
+ mdsc->caps_reserve_count +
+ mdsc->caps_avail_count);
+ }
+}
+
/*
* Called under mdsc->mutex.
*/
@@ -167,6 +198,7 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
int have;
int alloc = 0;
int max_caps;
+ int err = 0;
bool trimmed = false;
struct ceph_mds_session *s;
LIST_HEAD(newcaps);
@@ -233,9 +265,14 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
ctx, need, have + alloc);
- goto out_nomem;
+ err = -ENOMEM;
+ break;
+ }
+
+ if (!err) {
+ BUG_ON(have + alloc != need);
+ ctx->count = need;
}
- BUG_ON(have + alloc != need);
spin_lock(&mdsc->caps_list_lock);
mdsc->caps_total_count += alloc;
@@ -245,77 +282,26 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
mdsc->caps_reserve_count +
mdsc->caps_avail_count);
+
+ if (err)
+ __ceph_unreserve_caps(mdsc, have + alloc);
+
spin_unlock(&mdsc->caps_list_lock);
- ctx->count = need;
dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
ctx, mdsc->caps_total_count, mdsc->caps_use_count,
mdsc->caps_reserve_count, mdsc->caps_avail_count);
- return 0;
-
-out_nomem:
-
- spin_lock(&mdsc->caps_list_lock);
- mdsc->caps_avail_count += have;
- mdsc->caps_reserve_count -= have;
-
- while (!list_empty(&newcaps)) {
- cap = list_first_entry(&newcaps,
- struct ceph_cap, caps_item);
- list_del(&cap->caps_item);
-
- /* Keep some preallocated caps around (ceph_min_count), to
- * avoid lots of free/alloc churn. */
- if (mdsc->caps_avail_count >=
- mdsc->caps_reserve_count + mdsc->caps_min_count) {
- kmem_cache_free(ceph_cap_cachep, cap);
- } else {
- mdsc->caps_avail_count++;
- mdsc->caps_total_count++;
- list_add(&cap->caps_item, &mdsc->caps_list);
- }
- }
-
- BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
- mdsc->caps_reserve_count +
- mdsc->caps_avail_count);
- spin_unlock(&mdsc->caps_list_lock);
- return -ENOMEM;
+ return err;
}
-int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
+void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx)
{
- int i;
- struct ceph_cap *cap;
-
dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
- if (ctx->count) {
- spin_lock(&mdsc->caps_list_lock);
- BUG_ON(mdsc->caps_reserve_count < ctx->count);
- mdsc->caps_reserve_count -= ctx->count;
- if (mdsc->caps_avail_count >=
- mdsc->caps_reserve_count + mdsc->caps_min_count) {
- mdsc->caps_total_count -= ctx->count;
- for (i = 0; i < ctx->count; i++) {
- cap = list_first_entry(&mdsc->caps_list,
- struct ceph_cap, caps_item);
- list_del(&cap->caps_item);
- kmem_cache_free(ceph_cap_cachep, cap);
- }
- } else {
- mdsc->caps_avail_count += ctx->count;
- }
- ctx->count = 0;
- dout("unreserve caps %d = %d used + %d resv + %d avail\n",
- mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
- BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
- mdsc->caps_reserve_count +
- mdsc->caps_avail_count);
- spin_unlock(&mdsc->caps_list_lock);
- }
- return 0;
+ spin_lock(&mdsc->caps_list_lock);
+ __ceph_unreserve_caps(mdsc, ctx->count);
+ ctx->count = 0;
+ spin_unlock(&mdsc->caps_list_lock);
}
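
The refactor funnels both the ENOMEM unwind inside ceph_reserve_caps() and the normal ceph_unreserve_caps() path through one helper that must run under caps_list_lock. Reduced to its essential lock/helper split (names generic, not buildable on its own):

	/* __release() requires p->lock; every caller takes it first. */
	static void __release(struct pool *p, int nr);

	void pool_unreserve(struct pool *p, struct reservation *ctx)
	{
		spin_lock(&p->lock);
		__release(p, ctx->count);
		ctx->count = 0;		/* reservation fully returned */
		spin_unlock(&p->lock);
	}
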
struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
@@ -1125,7 +1111,7 @@ struct cap_msg_args {
u64 flush_tid, oldest_flush_tid, size, max_size;
u64 xattr_version;
struct ceph_buffer *xattr_buf;
- struct timespec atime, mtime, ctime;
+ struct timespec64 atime, mtime, ctime;
int op, caps, wanted, dirty;
u32 seq, issue_seq, mseq, time_warp_seq;
u32 flags;
@@ -1146,7 +1132,7 @@ static int send_cap_msg(struct cap_msg_args *arg)
struct ceph_msg *msg;
void *p;
size_t extra_len;
- struct timespec zerotime = {0};
+ struct timespec64 zerotime = {0};
struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc;
dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
@@ -1186,9 +1172,9 @@ static int send_cap_msg(struct cap_msg_args *arg)
fc->size = cpu_to_le64(arg->size);
fc->max_size = cpu_to_le64(arg->max_size);
- ceph_encode_timespec(&fc->mtime, &arg->mtime);
- ceph_encode_timespec(&fc->atime, &arg->atime);
- ceph_encode_timespec(&fc->ctime, &arg->ctime);
+ ceph_encode_timespec64(&fc->mtime, &arg->mtime);
+ ceph_encode_timespec64(&fc->atime, &arg->atime);
+ ceph_encode_timespec64(&fc->ctime, &arg->ctime);
fc->time_warp_seq = cpu_to_le32(arg->time_warp_seq);
fc->uid = cpu_to_le32(from_kuid(&init_user_ns, arg->uid));
@@ -1237,7 +1223,7 @@ static int send_cap_msg(struct cap_msg_args *arg)
* We just zero these out for now, as the MDS ignores them unless
* the requisite feature flags are set (which we don't do yet).
*/
- ceph_encode_timespec(p, &zerotime);
+ ceph_encode_timespec64(p, &zerotime);
p += sizeof(struct ceph_timespec);
ceph_encode_64(&p, 0);
@@ -1360,9 +1346,9 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
arg.xattr_buf = NULL;
}
- arg.mtime = timespec64_to_timespec(inode->i_mtime);
- arg.atime = timespec64_to_timespec(inode->i_atime);
- arg.ctime = timespec64_to_timespec(inode->i_ctime);
+ arg.mtime = inode->i_mtime;
+ arg.atime = inode->i_atime;
+ arg.ctime = inode->i_ctime;
arg.op = op;
arg.caps = cap->implemented;
@@ -3148,11 +3134,11 @@ static void handle_cap_grant(struct inode *inode,
}
if (newcaps & CEPH_CAP_ANY_RD) {
- struct timespec mtime, atime, ctime;
+ struct timespec64 mtime, atime, ctime;
/* ctime/mtime/atime? */
- ceph_decode_timespec(&mtime, &grant->mtime);
- ceph_decode_timespec(&atime, &grant->atime);
- ceph_decode_timespec(&ctime, &grant->ctime);
+ ceph_decode_timespec64(&mtime, &grant->mtime);
+ ceph_decode_timespec64(&atime, &grant->atime);
+ ceph_decode_timespec64(&ctime, &grant->ctime);
ceph_fill_file_time(inode, extra_info->issued,
le32_to_cpu(grant->time_warp_seq),
&ctime, &mtime, &atime);
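
All of the ceph_encode_timespec()/ceph_decode_timespec() call sites above move to 64-bit variants so the kernel-side representation is timespec64 end to end. Assuming the wire format remains a pair of little-endian 32-bit fields (as struct ceph_timespec has historically been), the conversions are roughly:

	static void encode_ts64(struct ceph_timespec *wire,
				const struct timespec64 *ts)
	{
		/* wire format is still 32-bit; seconds truncate on encode */
		wire->tv_sec = cpu_to_le32((u32)ts->tv_sec);
		wire->tv_nsec = cpu_to_le32((u32)ts->tv_nsec);
	}

	static void decode_ts64(struct timespec64 *ts,
				const struct ceph_timespec *wire)
	{
		ts->tv_sec = (time64_t)le32_to_cpu(wire->tv_sec);
		ts->tv_nsec = (long)le32_to_cpu(wire->tv_nsec);
	}
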
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 036ac0f3a393..82928cea0209 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -827,12 +827,14 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry,
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
- if (ceph_quota_is_max_files_exceeded(dir))
- return -EDQUOT;
+ if (ceph_quota_is_max_files_exceeded(dir)) {
+ err = -EDQUOT;
+ goto out;
+ }
err = ceph_pre_init_acls(dir, &mode, &acls);
if (err < 0)
- return err;
+ goto out;
dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
dir, dentry, mode, rdev);
@@ -883,8 +885,10 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
- if (ceph_quota_is_max_files_exceeded(dir))
- return -EDQUOT;
+ if (ceph_quota_is_max_files_exceeded(dir)) {
+ err = -EDQUOT;
+ goto out;
+ }
dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
@@ -1393,7 +1397,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
" rfiles: %20lld\n"
" rsubdirs: %20lld\n"
"rbytes: %20lld\n"
- "rctime: %10ld.%09ld\n",
+ "rctime: %10lld.%09ld\n",
ci->i_files + ci->i_subdirs,
ci->i_files,
ci->i_subdirs,
@@ -1401,8 +1405,8 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
ci->i_rfiles,
ci->i_rsubdirs,
ci->i_rbytes,
- (long)ci->i_rctime.tv_sec,
- (long)ci->i_rctime.tv_nsec);
+ ci->i_rctime.tv_sec,
+ ci->i_rctime.tv_nsec);
}
if (*ppos >= dfi->dir_info_len)
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ad0bed99b1d5..92ab20433682 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -429,8 +429,7 @@ out:
* file or symlink, return 1 so the VFS can retry.
*/
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t mode,
- int *opened)
+ struct file *file, unsigned flags, umode_t mode)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -507,9 +506,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
dout("atomic_open finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
ceph_init_inode_acls(d_inode(dentry), &acls);
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
}
- err = finish_open(file, dentry, ceph_open, opened);
+ err = finish_open(file, dentry, ceph_open);
}
out_req:
if (!req->r_err && req->r_target_inode)
@@ -721,7 +720,7 @@ struct ceph_aio_request {
struct list_head osd_reqs;
unsigned num_reqs;
atomic_t pending_reqs;
- struct timespec mtime;
+ struct timespec64 mtime;
struct ceph_cap_flush *prealloc_cf;
};
@@ -923,7 +922,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
int num_pages = 0;
int flags;
int ret;
- struct timespec mtime = timespec64_to_timespec(current_time(inode));
+ struct timespec64 mtime = current_time(inode);
size_t count = iov_iter_count(iter);
loff_t pos = iocb->ki_pos;
bool write = iov_iter_rw(iter) == WRITE;
@@ -1131,7 +1130,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
int flags;
int ret;
bool check_caps = false;
- struct timespec mtime = timespec64_to_timespec(current_time(inode));
+ struct timespec64 mtime = current_time(inode);
size_t count = iov_iter_count(from);
if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
@@ -1384,12 +1383,12 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_osd_client *osdc =
- &ceph_sb_to_client(inode->i_sb)->client->osdc;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_cap_flush *prealloc_cf;
ssize_t count, written = 0;
int err, want, got;
loff_t pos;
+ loff_t limit = max(i_size_read(inode), fsc->max_file_size);
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
@@ -1415,6 +1414,13 @@ retry_snap:
goto out;
pos = iocb->ki_pos;
+ if (unlikely(pos >= limit)) {
+ err = -EFBIG;
+ goto out;
+ } else {
+ iov_iter_truncate(from, limit - pos);
+ }
+
count = iov_iter_count(from);
if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
err = -EDQUOT;
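
The new limit check fails outright only when the write starts at or beyond the cap; a write that merely crosses it is silently shortened, mirroring how s_maxbytes is normally enforced by the generic write checks. The clamp, sketched with limit computed from i_size and fsc->max_file_size as above:

	if (pos >= limit)
		return -EFBIG;			/* nothing at all fits */
	iov_iter_truncate(from, limit - pos);	/* partial write up to the cap */
	count = iov_iter_count(from);		/* recompute after truncation */
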
@@ -1436,7 +1442,7 @@ retry_snap:
}
/* FIXME: not complete since it doesn't account for being at quota */
- if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
+ if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
err = -ENOSPC;
goto out;
}
@@ -1526,7 +1532,7 @@ retry_snap:
}
if (written >= 0) {
- if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
+ if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
iocb->ki_flags |= IOCB_DSYNC;
written = generic_write_sync(iocb, written);
}
@@ -1547,6 +1553,7 @@ out_unlocked:
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
loff_t i_size;
loff_t ret;
@@ -1591,7 +1598,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
break;
}
- ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
+ ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
out:
inode_unlock(inode);
@@ -1663,7 +1670,7 @@ static int ceph_zero_partial_object(struct inode *inode,
goto out;
}
- req->r_mtime = timespec64_to_timespec(inode->i_mtime);
+ req->r_mtime = inode->i_mtime;
ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
if (!ret) {
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1728,8 +1735,7 @@ static long ceph_fallocate(struct file *file, int mode,
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_osd_client *osdc =
- &ceph_inode_to_client(inode)->client->osdc;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_cap_flush *prealloc_cf;
int want, got = 0;
int dirty;
@@ -1737,6 +1743,9 @@ static long ceph_fallocate(struct file *file, int mode,
loff_t endoff = 0;
loff_t size;
+ if ((offset + length) > max(i_size_read(inode), fsc->max_file_size))
+ return -EFBIG;
+
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
@@ -1760,7 +1769,7 @@ static long ceph_fallocate(struct file *file, int mode,
goto unlock;
}
- if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
+ if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL) &&
!(mode & FALLOC_FL_PUNCH_HOLE)) {
ret = -ENOSPC;
goto unlock;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index a866be999216..ebc7bdaed2d0 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -658,13 +658,10 @@ int ceph_fill_file_size(struct inode *inode, int issued,
}
void ceph_fill_file_time(struct inode *inode, int issued,
- u64 time_warp_seq, struct timespec *ctime,
- struct timespec *mtime, struct timespec *atime)
+ u64 time_warp_seq, struct timespec64 *ctime,
+ struct timespec64 *mtime, struct timespec64 *atime)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct timespec64 ctime64 = timespec_to_timespec64(*ctime);
- struct timespec64 mtime64 = timespec_to_timespec64(*mtime);
- struct timespec64 atime64 = timespec_to_timespec64(*atime);
int warn = 0;
if (issued & (CEPH_CAP_FILE_EXCL|
@@ -673,39 +670,39 @@ void ceph_fill_file_time(struct inode *inode, int issued,
CEPH_CAP_AUTH_EXCL|
CEPH_CAP_XATTR_EXCL)) {
if (ci->i_version == 0 ||
- timespec64_compare(&ctime64, &inode->i_ctime) > 0) {
+ timespec64_compare(ctime, &inode->i_ctime) > 0) {
dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
- (long long)inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
- (long long)ctime->tv_sec, ctime->tv_nsec);
- inode->i_ctime = ctime64;
+ inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
+ ctime->tv_sec, ctime->tv_nsec);
+ inode->i_ctime = *ctime;
}
if (ci->i_version == 0 ||
ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
/* the MDS did a utimes() */
dout("mtime %lld.%09ld -> %lld.%09ld "
"tw %d -> %d\n",
- (long long)inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
- (long long)mtime->tv_sec, mtime->tv_nsec,
+ inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
+ mtime->tv_sec, mtime->tv_nsec,
ci->i_time_warp_seq, (int)time_warp_seq);
- inode->i_mtime = mtime64;
- inode->i_atime = atime64;
+ inode->i_mtime = *mtime;
+ inode->i_atime = *atime;
ci->i_time_warp_seq = time_warp_seq;
} else if (time_warp_seq == ci->i_time_warp_seq) {
/* nobody did utimes(); take the max */
- if (timespec64_compare(&mtime64, &inode->i_mtime) > 0) {
+ if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
- (long long)inode->i_mtime.tv_sec,
+ inode->i_mtime.tv_sec,
inode->i_mtime.tv_nsec,
- (long long)mtime->tv_sec, mtime->tv_nsec);
- inode->i_mtime = mtime64;
+ mtime->tv_sec, mtime->tv_nsec);
+ inode->i_mtime = *mtime;
}
- if (timespec64_compare(&atime64, &inode->i_atime) > 0) {
+ if (timespec64_compare(atime, &inode->i_atime) > 0) {
dout("atime %lld.%09ld -> %lld.%09ld inc\n",
- (long long)inode->i_atime.tv_sec,
+ inode->i_atime.tv_sec,
inode->i_atime.tv_nsec,
- (long long)atime->tv_sec, atime->tv_nsec);
- inode->i_atime = atime64;
+ atime->tv_sec, atime->tv_nsec);
+ inode->i_atime = *atime;
}
} else if (issued & CEPH_CAP_FILE_EXCL) {
/* we did a utimes(); ignore mds values */
@@ -715,9 +712,9 @@ void ceph_fill_file_time(struct inode *inode, int issued,
} else {
/* we have no write|excl caps; whatever the MDS says is true */
if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
- inode->i_ctime = ctime64;
- inode->i_mtime = mtime64;
- inode->i_atime = atime64;
+ inode->i_ctime = *ctime;
+ inode->i_mtime = *mtime;
+ inode->i_atime = *atime;
ci->i_time_warp_seq = time_warp_seq;
} else {
warn = 1;
@@ -743,7 +740,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
struct ceph_mds_reply_inode *info = iinfo->in;
struct ceph_inode_info *ci = ceph_inode(inode);
int issued, new_issued, info_caps;
- struct timespec mtime, atime, ctime;
+ struct timespec64 mtime, atime, ctime;
struct ceph_buffer *xattr_blob = NULL;
struct ceph_string *pool_ns = NULL;
struct ceph_cap *new_cap = NULL;
@@ -823,9 +820,9 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
/* be careful with mtime, atime, size */
- ceph_decode_timespec(&atime, &info->atime);
- ceph_decode_timespec(&mtime, &info->mtime);
- ceph_decode_timespec(&ctime, &info->ctime);
+ ceph_decode_timespec64(&atime, &info->atime);
+ ceph_decode_timespec64(&mtime, &info->mtime);
+ ceph_decode_timespec64(&ctime, &info->ctime);
ceph_fill_file_time(inode, issued,
le32_to_cpu(info->time_warp_seq),
&ctime, &mtime, &atime);
@@ -872,7 +869,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
ci->i_rbytes = le64_to_cpu(info->rbytes);
ci->i_rfiles = le64_to_cpu(info->rfiles);
ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
- ceph_decode_timespec(&ci->i_rctime, &info->rctime);
+ ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
}
}
@@ -1954,7 +1951,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
int err = 0;
int inode_dirty_flags = 0;
bool lock_snap_rwsem = false;
- struct timespec ts;
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
@@ -2030,8 +2026,8 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
if (ia_valid & ATTR_ATIME) {
dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
- (long long)inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
- (long long)attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
+ inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
+ attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
if (issued & CEPH_CAP_FILE_EXCL) {
ci->i_time_warp_seq++;
inode->i_atime = attr->ia_atime;
@@ -2043,8 +2039,8 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
dirtied |= CEPH_CAP_FILE_WR;
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
!timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
- ts = timespec64_to_timespec(attr->ia_atime);
- ceph_encode_timespec(&req->r_args.setattr.atime, &ts);
+ ceph_encode_timespec64(&req->r_args.setattr.atime,
+ &attr->ia_atime);
mask |= CEPH_SETATTR_ATIME;
release |= CEPH_CAP_FILE_SHARED |
CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
@@ -2052,8 +2048,8 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
}
if (ia_valid & ATTR_MTIME) {
dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
- (long long)inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
- (long long)attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
+ inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
+ attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
if (issued & CEPH_CAP_FILE_EXCL) {
ci->i_time_warp_seq++;
inode->i_mtime = attr->ia_mtime;
@@ -2065,8 +2061,8 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
dirtied |= CEPH_CAP_FILE_WR;
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
!timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
- ts = timespec64_to_timespec(attr->ia_mtime);
- ceph_encode_timespec(&req->r_args.setattr.mtime, &ts);
+ ceph_encode_timespec64(&req->r_args.setattr.mtime,
+ &attr->ia_mtime);
mask |= CEPH_SETATTR_MTIME;
release |= CEPH_CAP_FILE_SHARED |
CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
@@ -2097,8 +2093,8 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
- (long long)inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
- (long long)attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
+ inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
+ attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
only ? "ctime only" : "ignored");
if (only) {
/*
@@ -2140,7 +2136,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
req->r_inode_drop = release;
req->r_args.setattr.mask = cpu_to_le32(mask);
req->r_num_caps = 1;
- req->r_stamp = timespec64_to_timespec(attr->ia_ctime);
+ req->r_stamp = attr->ia_ctime;
err = ceph_mdsc_do_request(mdsc, NULL, req);
}
dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
@@ -2161,6 +2157,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
int err;
if (ceph_snap(inode) != CEPH_NOSNAP)
@@ -2171,6 +2168,10 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
return err;
if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size > max(inode->i_size, fsc->max_file_size))
+ return -EFBIG;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
return -EDQUOT;
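
ceph_fill_file_time() arbitrates between locally dirtied times and MDS-supplied ones using time_warp_seq: a newer sequence means some client ran utimes() through the MDS, an equal sequence means take the maximum, and when no write/excl caps are held the MDS values always win. A sketch of the core decision, with caps and locking elided:

	static void fill_mtime(struct inode *inode, u32 warp, u32 *my_warp,
			       const struct timespec64 *mtime)
	{
		if (ceph_seq_cmp(warp, *my_warp) > 0) {
			inode->i_mtime = *mtime;	/* MDS saw a utimes() */
			*my_warp = warp;
		} else if (warp == *my_warp &&
			   timespec64_compare(mtime, &inode->i_mtime) > 0) {
			inode->i_mtime = *mtime;	/* no warp; take the max */
		}
		/* older warp: our locally dirtied time wins, keep i_mtime */
	}
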
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index dc8bc664a871..bc43c822426a 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -902,6 +902,27 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
return msg;
}
+static void encode_supported_features(void **p, void *end)
+{
+ static const unsigned char bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
+ static const size_t count = ARRAY_SIZE(bits);
+
+ if (count > 0) {
+ size_t i;
+ size_t size = ((size_t)bits[count - 1] + 64) / 64 * 8;
+
+ BUG_ON(*p + 4 + size > end);
+ ceph_encode_32(p, size);
+ memset(*p, 0, size);
+ for (i = 0; i < count; i++)
+ ((unsigned char*)(*p))[i / 8] |= 1 << (bits[i] % 8);
+ *p += size;
+ } else {
+ BUG_ON(*p + 4 > end);
+ ceph_encode_32(p, 0);
+ }
+}
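
encode_supported_features() turns the CEPHFS_FEATURES_CLIENT_SUPPORTED bit list into a length-prefixed bitmap, padded to a multiple of 8 bytes by rounding the highest bit up to the next 64-bit word. Note the hunk above indexes the byte by i / 8, which is equivalent only while the feature list is dense from bit 0; the sketch below indexes by the bit value itself, which also handles sparse lists, and is testable in userspace:

	#include <stdint.h>
	#include <string.h>

	/* Encode feature bit numbers (ascending, count > 0) into buf;
	 * returns bytes used, always a multiple of 8. */
	static size_t encode_feature_bits(const unsigned char *bits,
					  size_t count, unsigned char *buf)
	{
		size_t size = ((size_t)bits[count - 1] + 64) / 64 * 8;
		size_t i;

		memset(buf, 0, size);
		for (i = 0; i < count; i++)
			buf[bits[i] / 8] |= 1u << (bits[i] % 8);
		return size;
	}
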
+
/*
* session message, specialization for CEPH_SESSION_REQUEST_OPEN
* to include additional client metadata fields.
@@ -911,11 +932,11 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
struct ceph_msg *msg;
struct ceph_mds_session_head *h;
int i = -1;
- int metadata_bytes = 0;
+ int extra_bytes = 0;
int metadata_key_count = 0;
struct ceph_options *opt = mdsc->fsc->client->options;
struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
- void *p;
+ void *p, *end;
const char* metadata[][2] = {
{"hostname", mdsc->nodename},
@@ -926,21 +947,26 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
};
/* Calculate serialized length of metadata */
- metadata_bytes = 4; /* map length */
+ extra_bytes = 4; /* map length */
for (i = 0; metadata[i][0]; ++i) {
- metadata_bytes += 8 + strlen(metadata[i][0]) +
+ extra_bytes += 8 + strlen(metadata[i][0]) +
strlen(metadata[i][1]);
metadata_key_count++;
}
+ /* supported features */
+ extra_bytes += 4 + 8;
/* Allocate the message */
- msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
GFP_NOFS, false);
if (!msg) {
pr_err("create_session_msg ENOMEM creating msg\n");
return NULL;
}
- h = msg->front.iov_base;
+ p = msg->front.iov_base;
+ end = p + msg->front.iov_len;
+
+ h = p;
h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
h->seq = cpu_to_le64(seq);
@@ -950,11 +976,11 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
*
* ClientSession messages with metadata are v2
*/
- msg->hdr.version = cpu_to_le16(2);
+ msg->hdr.version = cpu_to_le16(3);
msg->hdr.compat_version = cpu_to_le16(1);
/* The write pointer, following the session_head structure */
- p = msg->front.iov_base + sizeof(*h);
+ p += sizeof(*h);
/* Number of entries in the map */
ceph_encode_32(&p, metadata_key_count);
@@ -972,6 +998,10 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
p += val_len;
}
+ encode_supported_features(&p, end);
+ msg->front.iov_len = p - msg->front.iov_base;
+ msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
+
return msg;
}
@@ -1779,6 +1809,7 @@ struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
+ struct timespec64 ts;
if (!req)
return ERR_PTR(-ENOMEM);
@@ -1797,7 +1828,8 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
init_completion(&req->r_safe_completion);
INIT_LIST_HEAD(&req->r_unsafe_item);
- req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran);
+ ktime_get_coarse_real_ts64(&ts);
+ req->r_stamp = timespec64_trunc(ts, mdsc->fsc->sb->s_time_gran);
req->r_op = op;
req->r_direct_mode = mode;
@@ -2094,7 +2126,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
/* time stamp */
{
struct ceph_timespec ts;
- ceph_encode_timespec(&ts, &req->r_stamp);
+ ceph_encode_timespec64(&ts, &req->r_stamp);
ceph_encode_copy(&p, &ts, sizeof(ts));
}
@@ -2187,7 +2219,7 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
p = msg->front.iov_base + req->r_request_release_offset;
{
struct ceph_timespec ts;
- ceph_encode_timespec(&ts, &req->r_stamp);
+ ceph_encode_timespec64(&ts, &req->r_stamp);
ceph_encode_copy(&p, &ts, sizeof(ts));
}
@@ -2225,7 +2257,7 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
/*
* send request, or put it on the appropriate wait list.
*/
-static int __do_request(struct ceph_mds_client *mdsc,
+static void __do_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
struct ceph_mds_session *session = NULL;
@@ -2235,7 +2267,7 @@ static int __do_request(struct ceph_mds_client *mdsc,
if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
__unregister_request(mdsc, req);
- goto out;
+ return;
}
if (req->r_timeout &&
@@ -2258,7 +2290,7 @@ static int __do_request(struct ceph_mds_client *mdsc,
if (mdsc->mdsmap->m_epoch == 0) {
dout("do_request no mdsmap, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
- goto finish;
+ return;
}
if (!(mdsc->fsc->mount_options->flags &
CEPH_MOUNT_OPT_MOUNTWAIT) &&
@@ -2276,7 +2308,7 @@ static int __do_request(struct ceph_mds_client *mdsc,
ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
dout("do_request no mds or not active, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
- goto out;
+ return;
}
/* get, open session */
@@ -2326,8 +2358,7 @@ finish:
complete_request(mdsc, req);
__unregister_request(mdsc, req);
}
-out:
- return err;
+ return;
}
/*
@@ -2748,7 +2779,7 @@ static void handle_session(struct ceph_mds_session *session,
int wake = 0;
/* decode */
- if (msg->front.iov_len != sizeof(*h))
+ if (msg->front.iov_len < sizeof(*h))
goto bad;
op = le32_to_cpu(h->op);
seq = le64_to_cpu(h->seq);
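
Relaxing the length check from != to < lets older clients accept session messages that newer MDS versions extend with trailing fields, while still rejecting anything too short to hold the fixed header. The pattern, sketched:

	struct ceph_mds_session_head *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;	/* truncated: cannot even read the header */
	/* bytes beyond *h are newer optional fields; safely ignored */
	op = le32_to_cpu(h->op);
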
@@ -2958,15 +2989,12 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
rec.v2.flock_len = (__force __le32)
((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
} else {
- struct timespec ts;
rec.v1.cap_id = cpu_to_le64(cap->cap_id);
rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v1.issued = cpu_to_le32(cap->issued);
rec.v1.size = cpu_to_le64(inode->i_size);
- ts = timespec64_to_timespec(inode->i_mtime);
- ceph_encode_timespec(&rec.v1.mtime, &ts);
- ts = timespec64_to_timespec(inode->i_atime);
- ceph_encode_timespec(&rec.v1.atime, &ts);
+ ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
+ ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v1.pathbase = cpu_to_le64(pathbase);
}
@@ -3378,10 +3406,10 @@ static void handle_lease(struct ceph_mds_client *mdsc,
vino.ino = le64_to_cpu(h->ino);
vino.snap = CEPH_NOSNAP;
seq = le32_to_cpu(h->seq);
- dname.name = (void *)h + sizeof(*h) + sizeof(u32);
- dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
- if (dname.len != get_unaligned_le32(h+1))
+ dname.len = get_unaligned_le32(h + 1);
+ if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
goto bad;
+ dname.name = (void *)(h + 1) + sizeof(u32);
/* lookup inode */
inode = ceph_find_inode(sb, vino);
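
The handle_lease() fix reads the length prefix first and validates it against the message size before deriving the name pointer, rather than deriving the length from iov_len and comparing afterward; that ordering prevents an out-of-bounds read when the two disagree. Sketched generically (struct hdr is a hypothetical stand-in for the message header):

	/* Parse a u32-length-prefixed string that follows header *h. */
	static int parse_name(const u8 *msg_end, const struct hdr *h,
			      const char **name, u32 *len)
	{
		*len = get_unaligned_le32(h + 1);
		if ((const u8 *)(h + 1) + sizeof(u32) + *len > msg_end)
			return -EINVAL;	/* claimed length overruns the buffer */
		*name = (const char *)(h + 1) + sizeof(u32);
		return 0;
	}
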
@@ -3644,8 +3672,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
init_rwsem(&mdsc->pool_perm_rwsem);
mdsc->pool_perm_tree = RB_ROOT;
- strncpy(mdsc->nodename, utsname()->nodename,
- sizeof(mdsc->nodename) - 1);
+ strscpy(mdsc->nodename, utsname()->nodename,
+ sizeof(mdsc->nodename));
return 0;
}
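
strncpy() leaves the destination unterminated when the source fills it, which is why the old copy above reserved a byte and relied on the zeroed allocation; strscpy() always NUL-terminates and reports truncation instead. A hedged contrast:

	char nodename[64];

	/* old: reserve the last byte by hand, truncation is silent */
	strncpy(nodename, src, sizeof(nodename) - 1);
	nodename[sizeof(nodename) - 1] = '\0';

	/* new: always terminated; returns -E2BIG if src did not fit */
	if (strscpy(nodename, src, sizeof(nodename)) < 0)
		pr_debug("nodename truncated\n");
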
@@ -4019,7 +4047,8 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
} else {
mdsc->mdsmap = newmap; /* first mds map */
}
- mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
+ mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
+ MAX_LFS_FILESIZE);
__wake_requests(mdsc, &mdsc->waiting_for_map);
ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
@@ -4155,6 +4184,16 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
return auth;
}
+static int add_authorizer_challenge(struct ceph_connection *con,
+ void *challenge_buf, int challenge_buf_len)
+{
+ struct ceph_mds_session *s = con->private;
+ struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
+
+ return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
+ challenge_buf, challenge_buf_len);
+}
static int verify_authorizer_reply(struct ceph_connection *con)
{
@@ -4218,6 +4257,7 @@ static const struct ceph_connection_operations mds_con_ops = {
.put = con_put,
.dispatch = dispatch,
.get_authorizer = get_authorizer,
+ .add_authorizer_challenge = add_authorizer_challenge,
.verify_authorizer_reply = verify_authorizer_reply,
.invalidate_authorizer = invalidate_authorizer,
.peer_reset = peer_reset,
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 2ec3b5b35067..32fcce0d4d3c 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -16,6 +16,18 @@
#include <linux/ceph/mdsmap.h>
#include <linux/ceph/auth.h>
+/* The first 8 bits are reserved for old ceph releases */
+#define CEPHFS_FEATURE_MIMIC 8
+
+#define CEPHFS_FEATURES_ALL { \
+ 0, 1, 2, 3, 4, 5, 6, 7, \
+ CEPHFS_FEATURE_MIMIC, \
+}
+
+#define CEPHFS_FEATURES_CLIENT_SUPPORTED CEPHFS_FEATURES_ALL
+#define CEPHFS_FEATURES_CLIENT_REQUIRED {}
+
+
/*
* Some lock dependencies:
*
@@ -229,7 +241,7 @@ struct ceph_mds_request {
int r_fmode; /* file mode, if expecting cap */
kuid_t r_uid;
kgid_t r_gid;
- struct timespec r_stamp;
+ struct timespec64 r_stamp;
/* for choosing which mds to send this request to */
int r_direct_mode;
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 242bfa5c0539..32d4f13784ba 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -48,7 +48,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
struct inode *inode;
struct ceph_inode_info *ci;
- if (msg->front.iov_len != sizeof(*h)) {
+ if (msg->front.iov_len < sizeof(*h)) {
pr_err("%s corrupt message mds%d len %d\n", __func__,
session->s_mds, (int)msg->front.iov_len);
ceph_msg_dump(msg);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index af81555c14fd..041c27ea8de1 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -594,9 +594,9 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
BUG_ON(capsnap->writing);
capsnap->size = inode->i_size;
- capsnap->mtime = timespec64_to_timespec(inode->i_mtime);
- capsnap->atime = timespec64_to_timespec(inode->i_atime);
- capsnap->ctime = timespec64_to_timespec(inode->i_ctime);
+ capsnap->mtime = inode->i_mtime;
+ capsnap->atime = inode->i_atime;
+ capsnap->ctime = inode->i_ctime;
capsnap->time_warp_seq = ci->i_time_warp_seq;
capsnap->truncate_size = ci->i_truncate_size;
capsnap->truncate_seq = ci->i_truncate_seq;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 95a3b3ac9b6e..43ca3b763875 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -219,8 +219,7 @@ static int parse_fsopt_token(char *c, void *private)
if (token < Opt_last_int) {
ret = match_int(&argstr[0], &intval);
if (ret < 0) {
- pr_err("bad mount option arg (not int) "
- "at '%s'\n", c);
+ pr_err("bad option arg (not int) at '%s'\n", c);
return ret;
}
dout("got int token %d val %d\n", token, intval);
@@ -941,11 +940,12 @@ static int ceph_set_super(struct super_block *s, void *data)
dout("set_super %p data %p\n", s, data);
s->s_flags = fsc->mount_options->sb_flags;
- s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
+ s->s_maxbytes = MAX_LFS_FILESIZE;
s->s_xattr = ceph_xattr_handlers;
s->s_fs_info = fsc;
fsc->sb = s;
+ fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */
s->s_op = &ceph_super_ops;
s->s_d_op = &ceph_dentry_ops;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index a7077a0c989f..582e28fd1b7b 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -98,6 +98,7 @@ struct ceph_fs_client {
unsigned long mount_state;
int min_caps; /* min caps i added */
+ loff_t max_file_size;
struct ceph_mds_client *mdsc;
@@ -193,7 +194,7 @@ struct ceph_cap_snap {
u64 xattr_version;
u64 size;
- struct timespec mtime, atime, ctime;
+ struct timespec64 mtime, atime, ctime;
u64 time_warp_seq;
u64 truncate_size;
u32 truncate_seq;
@@ -307,7 +308,7 @@ struct ceph_inode_info {
char *i_symlink;
/* for dirs */
- struct timespec i_rctime;
+ struct timespec64 i_rctime;
u64 i_rbytes, i_rfiles, i_rsubdirs;
u64 i_files, i_subdirs;
@@ -655,7 +656,7 @@ extern void ceph_caps_finalize(struct ceph_mds_client *mdsc);
extern void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta);
extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx, int need);
-extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
+extern void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx);
extern void ceph_reservation_status(struct ceph_fs_client *client,
int *total, int *avail, int *used,
@@ -857,8 +858,9 @@ extern struct inode *ceph_get_snapdir(struct inode *parent);
extern int ceph_fill_file_size(struct inode *inode, int issued,
u32 truncate_seq, u64 truncate_size, u64 size);
extern void ceph_fill_file_time(struct inode *inode, int issued,
- u64 time_warp_seq, struct timespec *ctime,
- struct timespec *mtime, struct timespec *atime);
+ u64 time_warp_seq, struct timespec64 *ctime,
+ struct timespec64 *mtime,
+ struct timespec64 *atime);
extern int ceph_fill_trace(struct super_block *sb,
struct ceph_mds_request *req);
extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
@@ -1025,8 +1027,7 @@ extern const struct file_operations ceph_file_fops;
extern int ceph_renew_caps(struct inode *inode);
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t mode,
- int *opened);
+ struct file *file, unsigned flags, umode_t mode);
extern int ceph_release(struct inode *inode, struct file *filp);
extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
char *data, size_t len);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 5bc8edb4c2a6..5cc8b94f8206 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -224,8 +224,8 @@ static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
size_t size)
{
- return snprintf(val, size, "%ld.09%ld", (long)ci->i_rctime.tv_sec,
- (long)ci->i_rctime.tv_nsec);
+ return snprintf(val, size, "%lld.09%ld", ci->i_rctime.tv_sec,
+ ci->i_rctime.tv_nsec);
}
/* quotas */
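
Printing a timespec64 needs %lld for tv_sec and a zero-padded %09ld for tv_nsec; without the % the "09" is emitted literally and the nanoseconds lose their padding (compare the rctime format used by ceph_read_dir above). A quick illustration:

	struct timespec64 ts = { .tv_sec = 5, .tv_nsec = 7 };

	printk("%lld.%09ld\n", ts.tv_sec, ts.tv_nsec);	/* "5.000000007" */
	printk("%lld.09%ld\n", ts.tv_sec, ts.tv_nsec);	/* "5.097" -- wrong */
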
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 5f132d59dfc2..35c83fe7dba0 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -16,24 +16,28 @@ config CIFS
select CRYPTO_DES
help
This is the client VFS module for the SMB3 family of NAS protocols,
- as well as for earlier dialects such as SMB2.1, SMB2 and the
+ (including support for the most recent, most secure dialect SMB3.1.1)
+ as well as for earlier dialects such as SMB2.1, SMB2 and the older
Common Internet File System (CIFS) protocol. CIFS was the successor
to the original dialect, the Server Message Block (SMB) protocol, the
native file sharing mechanism for most early PC operating systems.
- The SMB3 protocol is supported by most modern operating systems and
- NAS appliances (e.g. Samba, Windows 8, Windows 2012, MacOS).
+ The SMB3 protocol is supported by most modern operating systems
+ and NAS appliances (e.g. Samba, Windows 10, Windows Server 2016,
+ MacOS) and even in the cloud (e.g. Microsoft Azure).
The older CIFS protocol was included in Windows NT4, 2000 and XP (and
later) as well by Samba (which provides excellent CIFS and SMB3
- server support for Linux and many other operating systems). Limited
- support for OS/2 and Windows ME and similar very old servers is
- provided as well.
+ server support for Linux and many other operating systems). Use of
+ dialects older than SMB2.1 is often discouraged on public networks.
+ This module also provides limited support for OS/2 and Windows ME
+ and similar very old servers.
- The cifs module provides an advanced network file system client
+ This module provides an advanced network file system client
for mounting to SMB3 (and CIFS) compliant servers. It includes
support for DFS (hierarchical name space), secure per-user
- session establishment via Kerberos or NTLM or NTLMv2,
- safe distributed caching (oplock), optional packet
+ session establishment via Kerberos or NTLM or NTLMv2, RDMA
+ (smbdirect), advanced security features, per-share encryption,
+ directory leases, safe distributed caching (oplock), optional packet
signing, Unicode and other internationalization improvements.
In general, the default dialects, SMB3 and later, enable better
@@ -43,18 +47,11 @@ config CIFS
than SMB3 mounts. SMB2/SMB3 mount options are also
slightly simpler (compared to CIFS) due to protocol improvements.
- If you need to mount to Samba, Macs or Windows from this machine, say Y.
-
-config CIFS_STATS
- bool "CIFS statistics"
- depends on CIFS
- help
- Enabling this option will cause statistics for each server share
- mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
+ If you need to mount to Samba, Azure, Macs or Windows from this machine, say Y.
config CIFS_STATS2
bool "Extended statistics"
- depends on CIFS_STATS
+ depends on CIFS
help
Enabling this option will allow more detailed statistics on SMB
request timing to be displayed in /proc/fs/cifs/DebugData and also
@@ -66,9 +63,24 @@ config CIFS_STATS2
Unless you are a developer or are doing network performance analysis
or tuning, say N.
+config CIFS_ALLOW_INSECURE_LEGACY
+ bool "Support legacy servers which use less secure dialects"
+ depends on CIFS
+ default y
+ help
+ Modern dialects, SMB2.1 and later (including SMB3 and 3.1.1), have
+ additional security features, including protection against
+ man-in-the-middle attacks and stronger crypto hashes, so the use
+ of legacy dialects (SMB1/CIFS and SMB2.0) is discouraged.
+
+ Disabling this option prevents users from using vers=1.0 or vers=2.0
+ on mounts with cifs.ko.
+
+ If unsure, say Y.
+
config CIFS_WEAK_PW_HASH
bool "Support legacy servers which use weaker LANMAN security"
- depends on CIFS
+ depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY
help
Modern CIFS servers including Samba and most Windows versions
(since 1997) support stronger NTLM (and even NTLMv2 and Kerberos)
@@ -186,15 +198,6 @@ config CIFS_NFSD_EXPORT
help
Allows NFS server to export a CIFS mounted share (nfsd over cifs)
-config CIFS_SMB311
- bool "SMB3.1.1 network file system support"
- depends on CIFS
- select CRYPTO_SHA512
-
- help
- This enables support for the newest, and most secure dialect, SMB3.11.
- If unsure, say Y
-
config CIFS_SMB_DIRECT
bool "SMB Direct support (Experimental)"
depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index e1553d1e0e50..b7420e605b28 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -128,8 +128,10 @@ fscache_checkaux cifs_fscache_inode_check_aux(void *cookie_netfs_data,
memset(&auxdata, 0, sizeof(auxdata));
auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time = timespec64_to_timespec(cifsi->vfs_inode.i_mtime);
- auxdata.last_change_time = timespec64_to_timespec(cifsi->vfs_inode.i_ctime);
+ auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
if (memcmp(data, &auxdata, datalen) != 0)
return FSCACHE_CHECKAUX_OBSOLETE;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index bfe999505815..f1fbea947fef 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -160,25 +160,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
seq_printf(m, "Features:");
#ifdef CONFIG_CIFS_DFS_UPCALL
- seq_printf(m, " dfs");
+ seq_printf(m, " DFS");
#endif
#ifdef CONFIG_CIFS_FSCACHE
- seq_printf(m, " fscache");
+ seq_printf(m, ",FSCACHE");
+#endif
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ seq_printf(m, ",SMB_DIRECT");
+#endif
+#ifdef CONFIG_CIFS_STATS2
+ seq_printf(m, ",STATS2");
+#else
+ seq_printf(m, ",STATS");
+#endif
+#ifdef CONFIG_CIFS_DEBUG2
+ seq_printf(m, ",DEBUG2");
+#elif defined(CONFIG_CIFS_DEBUG)
+ seq_printf(m, ",DEBUG");
+#endif
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ seq_printf(m, ",ALLOW_INSECURE_LEGACY");
#endif
#ifdef CONFIG_CIFS_WEAK_PW_HASH
- seq_printf(m, " lanman");
+ seq_printf(m, ",WEAK_PW_HASH");
#endif
#ifdef CONFIG_CIFS_POSIX
- seq_printf(m, " posix");
+ seq_printf(m, ",CIFS_POSIX");
#endif
#ifdef CONFIG_CIFS_UPCALL
- seq_printf(m, " spnego");
+ seq_printf(m, ",UPCALL(SPNEGO)");
#endif
#ifdef CONFIG_CIFS_XATTR
- seq_printf(m, " xattr");
+ seq_printf(m, ",XATTR");
#endif
#ifdef CONFIG_CIFS_ACL
- seq_printf(m, " acl");
+ seq_printf(m, ",ACL");
#endif
seq_putc(m, '\n');
seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
@@ -259,10 +275,9 @@ skip_rdma:
server->credits, server->dialect);
if (server->sign)
seq_printf(m, " signed");
-#ifdef CONFIG_CIFS_SMB311
if (server->posix_ext_supported)
seq_printf(m, " posix");
-#endif /* 3.1.1 */
+
i++;
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
@@ -350,7 +365,6 @@ skip_rdma:
return 0;
}
-#ifdef CONFIG_CIFS_STATS
static ssize_t cifs_stats_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos)
{
@@ -364,13 +378,23 @@ static ssize_t cifs_stats_proc_write(struct file *file,
rc = kstrtobool_from_user(buffer, count, &bv);
if (rc == 0) {
#ifdef CONFIG_CIFS_STATS2
+ int i;
+
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */
+ spin_lock(&GlobalMid_Lock);
+ GlobalMaxActiveXid = 0;
+ GlobalCurrentXid = 0;
+ spin_unlock(&GlobalMid_Lock);
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+#ifdef CONFIG_CIFS_STATS2
+ for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++)
+ atomic_set(&server->smb2slowcmd[i], 0);
+#endif /* CONFIG_CIFS_STATS2 */
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
@@ -379,6 +403,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
struct cifs_tcon,
tcon_list);
atomic_set(&tcon->num_smbs_sent, 0);
+ spin_lock(&tcon->stat_lock);
+ tcon->bytes_read = 0;
+ tcon->bytes_written = 0;
+ spin_unlock(&tcon->stat_lock);
if (server->ops->clear_stats)
server->ops->clear_stats(tcon);
}
@@ -395,13 +423,15 @@ static ssize_t cifs_stats_proc_write(struct file *file,
static int cifs_stats_proc_show(struct seq_file *m, void *v)
{
int i;
+#ifdef CONFIG_CIFS_STATS2
+ int j;
+#endif /* STATS2 */
struct list_head *tmp1, *tmp2, *tmp3;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- seq_printf(m,
- "Resources in use\nCIFS Session: %d\n",
+ seq_printf(m, "Resources in use\nCIFS Session: %d\n",
sesInfoAllocCount.counter);
seq_printf(m, "Share (unique mount targets): %d\n",
tconInfoAllocCount.counter);
@@ -430,6 +460,13 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+#ifdef CONFIG_CIFS_STATS2
+ for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
+ if (atomic_read(&server->smb2slowcmd[j]))
+ seq_printf(m, "%d slow responses from %s for command %d\n",
+ atomic_read(&server->smb2slowcmd[j]),
+ server->hostname, j);
+#endif /* STATS2 */
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
smb_ses_list);
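
The new smb2slowcmd[] array keeps one atomic counter per SMB2 opcode so the Stats file can report how many responses took over a second, with no locking on the hot path. A sketch of how such a counter might be bumped at completion and then reported (the recording site is elsewhere in the series; the timestamp field and timing details are assumptions based on the CONFIG_CIFS_STATS2 bookkeeping):

	/* on response completion, assuming mid->when_sent is recorded */
	if (time_after(jiffies, mid->when_sent + HZ))
		atomic_inc(&server->smb2slowcmd[le16_to_cpu(shdr->Command)]);

	/* in the /proc/fs/cifs/Stats show path */
	for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
		if (atomic_read(&server->smb2slowcmd[j]))
			seq_printf(m, "%d slow responses from %s for command %d\n",
				   atomic_read(&server->smb2slowcmd[j]),
				   server->hostname, j);
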
@@ -466,7 +503,6 @@ static const struct file_operations cifs_stats_proc_fops = {
.release = single_release,
.write = cifs_stats_proc_write,
};
-#endif /* STATS */
#ifdef CONFIG_CIFS_SMB_DIRECT
#define PROC_FILE_DEFINE(name) \
@@ -524,9 +560,7 @@ cifs_proc_init(void)
proc_create_single("DebugData", 0, proc_fs_cifs,
cifs_debug_data_proc_show);
-#ifdef CONFIG_CIFS_STATS
proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_fops);
-#endif /* STATS */
proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_fops);
proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_fops);
proc_create("LinuxExtensionsEnabled", 0644, proc_fs_cifs,
@@ -564,9 +598,7 @@ cifs_proc_clean(void)
remove_proc_entry("DebugData", proc_fs_cifs);
remove_proc_entry("cifsFYI", proc_fs_cifs);
remove_proc_entry("traceSMB", proc_fs_cifs);
-#ifdef CONFIG_CIFS_STATS
remove_proc_entry("Stats", proc_fs_cifs);
-#endif
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index ee2a8ec70056..85b31cfa2f3c 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -83,7 +83,13 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
kaddr = (char *) kmap(rqst->rq_pages[i]) + offset;
- crypto_shash_update(shash, kaddr, len);
+ rc = crypto_shash_update(shash, kaddr, len);
+ if (rc) {
+ cifs_dbg(VFS, "%s: Could not update with payload\n",
+ __func__);
+ kunmap(rqst->rq_pages[i]);
+ return rc;
+ }
kunmap(rqst->rq_pages[i]);
}
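
The signing fix checks crypto_shash_update() per page and, crucially, still unmaps the page before bailing out; every kmap() must be paired with a kunmap() on all exit paths. An equivalent, slightly tighter shape of the loop:

	for (i = 0; i < rqst->rq_npages; i++) {
		/* offset/len computed per page, as in the surrounding code */
		kaddr = (char *)kmap(rqst->rq_pages[i]) + offset;
		rc = crypto_shash_update(shash, kaddr, len);
		kunmap(rqst->rq_pages[i]);	/* unmap on success and failure */
		if (rc)
			return rc;
	}
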
@@ -452,7 +458,7 @@ find_timestamp(struct cifs_ses *ses)
unsigned char *blobptr;
unsigned char *blobend;
struct ntlmssp2_name *attrptr;
- struct timespec ts;
+ struct timespec64 ts;
if (!ses->auth_key.len || !ses->auth_key.response)
return 0;
@@ -477,7 +483,7 @@ find_timestamp(struct cifs_ses *ses)
blobptr += attrsize; /* advance attr value */
}
- ktime_get_real_ts(&ts);
+ ktime_get_real_ts64(&ts);
return cpu_to_le64(cifs_UnixTimeToNT(ts));
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d5aa7ae917bf..7065426b3280 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -139,6 +139,9 @@ cifs_read_super(struct super_block *sb)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
sb->s_flags |= SB_POSIXACL;
+ if (tcon->snapshot_time)
+ sb->s_flags |= SB_RDONLY;
+
if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
sb->s_maxbytes = MAX_LFS_FILESIZE;
else
@@ -209,14 +212,16 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
xid = get_xid();
- /*
- * PATH_MAX may be too long - it would presumably be total path,
- * but note that some servers (includinng Samba 3) have a shorter
- * maximum path.
- *
- * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
- */
- buf->f_namelen = PATH_MAX;
+ if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
+ buf->f_namelen =
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
+ else
+ buf->f_namelen = PATH_MAX;
+
+ buf->f_fsid.val[0] = tcon->vol_serial_number;
+ /* use part of the create time for more randomness, see man statfs */
+ buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
+
buf->f_files = 0; /* undefined */
buf->f_ffree = 0; /* unlimited */
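
cifs_statfs() now prefers the server-reported maximum path component length (from FILE_SYSTEM_ATTRIBUTE_INFO) and falls back to PATH_MAX only when the server reported nothing, and it fabricates a reasonably unique f_fsid from the volume serial number plus the volume create time. Condensed:

	__u32 max_comp =
		le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);

	buf->f_namelen = max_comp ? max_comp : PATH_MAX;
	buf->f_fsid.val[0] = tcon->vol_serial_number;
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
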
@@ -427,7 +432,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
else if (tcon->ses->user_name)
seq_show_option(s, "username", tcon->ses->user_name);
- if (tcon->ses->domainName)
+ if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
seq_show_option(s, "domain", tcon->ses->domainName);
if (srcaddr->sa_family != AF_UNSPEC) {
@@ -481,20 +486,12 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",persistenthandles");
else if (tcon->use_resilient)
seq_puts(s, ",resilienthandles");
-
-#ifdef CONFIG_CIFS_SMB311
if (tcon->posix_extensions)
seq_puts(s, ",posix");
else if (tcon->unix_ext)
seq_puts(s, ",unix");
else
seq_puts(s, ",nounix");
-#else
- if (tcon->unix_ext)
- seq_puts(s, ",unix");
- else
- seq_puts(s, ",nounix");
-#endif /* SMB311 */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
seq_puts(s, ",posixpaths");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
@@ -546,6 +543,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",wsize=%u", cifs_sb->wsize);
seq_printf(s, ",echo_interval=%lu",
tcon->ses->server->echo_interval / HZ);
+ if (tcon->snapshot_time)
+ seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
/* convert actimeo and display it in seconds */
seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 5f0231803431..f047e87871a1 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -65,8 +65,7 @@ extern struct inode *cifs_root_iget(struct super_block *);
extern int cifs_create(struct inode *, struct dentry *, umode_t,
bool excl);
extern int cifs_atomic_open(struct inode *, struct dentry *,
- struct file *, unsigned, umode_t,
- int *);
+ struct file *, unsigned, umode_t);
extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
unsigned int);
extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
@@ -149,5 +148,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.12"
+#define CIFS_VERSION "2.13"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index c923c7854027..0c9ab62c3df4 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -76,6 +76,9 @@
#define SMB_ECHO_INTERVAL_MAX 600
#define SMB_ECHO_INTERVAL_DEFAULT 60
+/* maximum number of PDUs in one compound */
+#define MAX_COMPOUND 5
+
/*
* Default number of credits to keep available for SMB3.
* This value is chosen somewhat arbitrarily. The Windows client
@@ -191,9 +194,7 @@ enum smb_version {
Smb_21,
Smb_30,
Smb_302,
-#ifdef CONFIG_CIFS_SMB311
Smb_311,
-#endif /* SMB311 */
Smb_3any,
Smb_default,
Smb_version_err
@@ -456,13 +457,11 @@ struct smb_version_operations {
long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t,
loff_t);
/* init transform request - used for encryption for now */
- int (*init_transform_rq)(struct TCP_Server_Info *, struct smb_rqst *,
- struct smb_rqst *);
- /* free transform request */
- void (*free_transform_rq)(struct smb_rqst *);
+ int (*init_transform_rq)(struct TCP_Server_Info *, int num_rqst,
+ struct smb_rqst *, struct smb_rqst *);
int (*is_transform_hdr)(void *buf);
int (*receive_transform)(struct TCP_Server_Info *,
- struct mid_q_entry **);
+ struct mid_q_entry **, char **, int *);
enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
enum securityEnum);
int (*next_header)(char *);
@@ -684,15 +683,14 @@ struct TCP_Server_Info {
#ifdef CONFIG_CIFS_STATS2
atomic_t in_send; /* requests trying to send */
atomic_t num_waiters; /* blocked waiting to get in sendrecv */
-#endif
+ atomic_t smb2slowcmd[NUMBER_OF_SMB2_COMMANDS]; /* count resps > 1 sec */
+#endif /* STATS2 */
unsigned int max_read;
unsigned int max_write;
-#ifdef CONFIG_CIFS_SMB311
__le16 cipher_type;
/* save initial negprot hash */
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
bool posix_ext_supported;
-#endif /* 3.1.1 */
struct delayed_work reconnect; /* reconnect workqueue job */
struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
unsigned long echo_interval;
@@ -886,9 +884,7 @@ struct cifs_ses {
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
-#ifdef CONFIG_CIFS_SMB311
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
-#endif /* 3.1.1 */
/*
* Network interfaces available on the server this session is
@@ -913,6 +909,7 @@ cap_unix(struct cifs_ses *ses)
struct cached_fid {
bool is_valid:1; /* Do we have a useable root fid */
+ struct kref refcount;
struct cifs_fid *fid;
struct mutex fid_mutex;
struct cifs_tcon *tcon;
@@ -936,7 +933,6 @@ struct cifs_tcon {
__u32 tid; /* The 4 byte tree id */
__u16 Flags; /* optional support bits */
enum statusEnum tidStatus;
-#ifdef CONFIG_CIFS_STATS
atomic_t num_smbs_sent;
union {
struct {
@@ -967,24 +963,9 @@ struct cifs_tcon {
atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
} smb2_stats;
} stats;
-#ifdef CONFIG_CIFS_STATS2
- unsigned long long time_writes;
- unsigned long long time_reads;
- unsigned long long time_opens;
- unsigned long long time_deletes;
- unsigned long long time_closes;
- unsigned long long time_mkdirs;
- unsigned long long time_rmdirs;
- unsigned long long time_renames;
- unsigned long long time_t2renames;
- unsigned long long time_ffirst;
- unsigned long long time_fnext;
- unsigned long long time_fclose;
-#endif /* CONFIG_CIFS_STATS2 */
__u64 bytes_read;
__u64 bytes_written;
spinlock_t stat_lock; /* protects the two fields above */
-#endif /* CONFIG_CIFS_STATS */
FILE_SYSTEM_DEVICE_INFO fsDevInfo;
FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
FILE_SYSTEM_UNIX_INFO fsUnixInfo;
@@ -997,9 +978,7 @@ struct cifs_tcon {
bool seal:1; /* transport encryption for this mounted share */
bool unix_ext:1; /* if false disable Linux extensions to CIFS protocol
for this mount even if server would support */
-#ifdef CONFIG_CIFS_SMB311
bool posix_extensions; /* if true SMB3.11 posix extensions enabled */
-#endif /* CIFS_311 */
bool local_lease:1; /* check leases (only) on local system not remote */
bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
bool broken_sparse_sup; /* if server or share does not support sparse */
@@ -1046,6 +1025,7 @@ struct tcon_link {
};
extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
+extern void smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst);
static inline struct cifs_tcon *
tlink_tcon(struct tcon_link *tlink)
@@ -1352,7 +1332,6 @@ convert_delimiter(char *path, char delim)
*pos = delim;
}
-#ifdef CONFIG_CIFS_STATS
#define cifs_stats_inc atomic_inc
static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
@@ -1372,13 +1351,6 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
tcon->bytes_read += bytes;
spin_unlock(&tcon->stat_lock);
}
-#else
-
-#define cifs_stats_inc(field) do {} while (0)
-#define cifs_stats_bytes_written(tcon, bytes) do {} while (0)
-#define cifs_stats_bytes_read(tcon, bytes) do {} while (0)
-
-#endif
/*
@@ -1544,9 +1516,9 @@ struct cifs_fattr {
dev_t cf_rdev;
unsigned int cf_nlink;
unsigned int cf_dtype;
- struct timespec cf_atime;
- struct timespec cf_mtime;
- struct timespec cf_ctime;
+ struct timespec64 cf_atime;
+ struct timespec64 cf_mtime;
+ struct timespec64 cf_ctime;
};
static inline void free_dfs_info_param(struct dfs_info3_param *param)
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1890f534c88b..20adda4de83b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -94,6 +94,10 @@ extern int cifs_call_async(struct TCP_Server_Info *server,
extern int cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
struct smb_rqst *rqst, int *resp_buf_type,
const int flags, struct kvec *resp_iov);
+extern int compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ const int flags, const int num_rqst,
+ struct smb_rqst *rqst, int *resp_buf_type,
+ struct kvec *resp_iov);
extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
@@ -143,9 +147,9 @@ extern enum securityEnum select_sectype(struct TCP_Server_Info *server,
enum securityEnum requested);
extern int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp);
-extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
-extern u64 cifs_UnixTimeToNT(struct timespec);
-extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
+extern struct timespec64 cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
+extern u64 cifs_UnixTimeToNT(struct timespec64);
+extern struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
int offset);
extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
extern int cifs_get_writer(struct cifsInodeInfo *cinode);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 93408eab92e7..dc2f4cf08fe9 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -508,13 +508,13 @@ decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
* this requirement.
*/
int val, seconds, remain, result;
- struct timespec ts;
- unsigned long utc = ktime_get_real_seconds();
+ struct timespec64 ts;
+ time64_t utc = ktime_get_real_seconds();
ts = cnvrtDosUnixTm(rsp->SrvTime.Date,
rsp->SrvTime.Time, 0);
- cifs_dbg(FYI, "SrvTime %d sec since 1970 (utc: %d) diff: %d\n",
- (int)ts.tv_sec, (int)utc,
- (int)(utc - ts.tv_sec));
+ cifs_dbg(FYI, "SrvTime %lld sec since 1970 (utc: %lld) diff: %lld\n",
+ ts.tv_sec, utc,
+ utc - ts.tv_sec);
val = (int)(utc - ts.tv_sec);
seconds = abs(val);
result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
@@ -4082,7 +4082,7 @@ QInfRetry:
if (rc) {
cifs_dbg(FYI, "Send error in QueryInfo = %d\n", rc);
} else if (data) {
- struct timespec ts;
+ struct timespec64 ts;
__u32 time = le32_to_cpu(pSMBr->last_write_time);
/* decode response */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5df2c0698cda..c832a8a1970a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -303,10 +303,8 @@ static const match_table_t cifs_smb_version_tokens = {
{ Smb_21, SMB21_VERSION_STRING },
{ Smb_30, SMB30_VERSION_STRING },
{ Smb_302, SMB302_VERSION_STRING },
-#ifdef CONFIG_CIFS_SMB311
{ Smb_311, SMB311_VERSION_STRING },
{ Smb_311, ALT_SMB311_VERSION_STRING },
-#endif /* SMB311 */
{ Smb_3any, SMB3ANY_VERSION_STRING },
{ Smb_default, SMBDEFAULT_VERSION_STRING },
{ Smb_version_err, NULL }
@@ -350,6 +348,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->max_read = 0;
cifs_dbg(FYI, "Reconnecting tcp session\n");
+ trace_smb3_reconnect(server->CurrentMid, server->hostname);
/* before reconnecting the tcp session, mark the smb session (uid)
and the tid bad so they are not used until reconnected */
@@ -851,13 +850,14 @@ cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
static int
cifs_demultiplex_thread(void *p)
{
- int length;
+ int i, num_mids, length;
struct TCP_Server_Info *server = p;
unsigned int pdu_length;
unsigned int next_offset;
char *buf = NULL;
struct task_struct *task_to_wake = NULL;
- struct mid_q_entry *mid_entry;
+ struct mid_q_entry *mids[MAX_COMPOUND];
+ char *bufs[MAX_COMPOUND];
current->flags |= PF_MEMALLOC;
cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
@@ -924,58 +924,75 @@ next_pdu:
server->pdu_size = next_offset;
}
- mid_entry = NULL;
+ memset(mids, 0, sizeof(mids));
+ memset(bufs, 0, sizeof(bufs));
+ num_mids = 0;
+
if (server->ops->is_transform_hdr &&
server->ops->receive_transform &&
server->ops->is_transform_hdr(buf)) {
length = server->ops->receive_transform(server,
- &mid_entry);
+ mids,
+ bufs,
+ &num_mids);
} else {
- mid_entry = server->ops->find_mid(server, buf);
+ mids[0] = server->ops->find_mid(server, buf);
+ bufs[0] = buf;
+ if (mids[0])
+ num_mids = 1;
- if (!mid_entry || !mid_entry->receive)
- length = standard_receive3(server, mid_entry);
+ if (!mids[0] || !mids[0]->receive)
+ length = standard_receive3(server, mids[0]);
else
- length = mid_entry->receive(server, mid_entry);
+ length = mids[0]->receive(server, mids[0]);
}
if (length < 0) {
- if (mid_entry)
- cifs_mid_q_entry_release(mid_entry);
+ for (i = 0; i < num_mids; i++)
+ if (mids[i])
+ cifs_mid_q_entry_release(mids[i]);
continue;
}
if (server->large_buf)
buf = server->bigbuf;
+
server->lstrp = jiffies;
- if (mid_entry != NULL) {
- mid_entry->resp_buf_size = server->pdu_size;
- if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
- mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
- server->ops->handle_cancelled_mid)
- server->ops->handle_cancelled_mid(
- mid_entry->resp_buf,
- server);
- if (!mid_entry->multiRsp || mid_entry->multiEnd)
- mid_entry->callback(mid_entry);
+ for (i = 0; i < num_mids; i++) {
+ if (mids[i] != NULL) {
+ mids[i]->resp_buf_size = server->pdu_size;
+ if ((mids[i]->mid_flags & MID_WAIT_CANCELLED) &&
+ mids[i]->mid_state == MID_RESPONSE_RECEIVED &&
+ server->ops->handle_cancelled_mid)
+ server->ops->handle_cancelled_mid(
+ mids[i]->resp_buf,
+ server);
- cifs_mid_q_entry_release(mid_entry);
- } else if (server->ops->is_oplock_break &&
- server->ops->is_oplock_break(buf, server)) {
- cifs_dbg(FYI, "Received oplock break\n");
- } else {
- cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
- atomic_read(&midCount));
- cifs_dump_mem("Received Data is: ", buf,
- HEADER_SIZE(server));
+ if (!mids[i]->multiRsp || mids[i]->multiEnd)
+ mids[i]->callback(mids[i]);
+
+ cifs_mid_q_entry_release(mids[i]);
+ } else if (server->ops->is_oplock_break &&
+ server->ops->is_oplock_break(bufs[i],
+ server)) {
+ cifs_dbg(FYI, "Received oplock break\n");
+ } else {
+ cifs_dbg(VFS, "No task to wake, unknown frame "
+ "received! NumMids %d\n",
+ atomic_read(&midCount));
+ cifs_dump_mem("Received Data is: ", bufs[i],
+ HEADER_SIZE(server));
#ifdef CONFIG_CIFS_DEBUG2
- if (server->ops->dump_detail)
- server->ops->dump_detail(buf, server);
- cifs_dump_mids(server);
+ if (server->ops->dump_detail)
+ server->ops->dump_detail(bufs[i],
+ server);
+ cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
+ }
}
+
if (pdu_length > server->pdu_size) {
if (!allocate_buffers(server))
continue;
@@ -1174,6 +1191,7 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol, bool is_smb3)
substring_t args[MAX_OPT_ARGS];
switch (match_token(value, cifs_smb_version_tokens, args)) {
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
case Smb_1:
if (disable_legacy_dialects) {
cifs_dbg(VFS, "mount with legacy dialect disabled\n");
@@ -1198,6 +1216,14 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol, bool is_smb3)
vol->ops = &smb20_operations;
vol->vals = &smb20_values;
break;
+#else
+ case Smb_1:
+ cifs_dbg(VFS, "vers=1.0 (cifs) mount not permitted when legacy dialects disabled\n");
+ return 1;
+ case Smb_20:
+ cifs_dbg(VFS, "vers=2.0 mount not permitted when legacy dialects disabled\n");
+ return 1;
+#endif /* CIFS_ALLOW_INSECURE_LEGACY */
case Smb_21:
vol->ops = &smb21_operations;
vol->vals = &smb21_values;
@@ -1210,12 +1236,10 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol, bool is_smb3)
vol->ops = &smb30_operations; /* currently identical with 3.0 */
vol->vals = &smb302_values;
break;
-#ifdef CONFIG_CIFS_SMB311
case Smb_311:
vol->ops = &smb311_operations;
vol->vals = &smb311_values;
break;
-#endif /* SMB311 */
case Smb_3any:
vol->ops = &smb30_operations; /* currently identical with 3.0 */
vol->vals = &smb3any_values;
@@ -3030,15 +3054,17 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
}
}
-#ifdef CONFIG_CIFS_SMB311
- if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
- if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
+ if (volume_info->linux_ext) {
+ if (ses->server->posix_ext_supported) {
tcon->posix_extensions = true;
printk_once(KERN_WARNING
"SMB3.11 POSIX Extensions are experimental\n");
+ } else {
+ cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions.\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
}
}
-#endif /* 311 */
/*
* BB Do we need to wrap session_mutex around this TCon call and Unix
@@ -3992,11 +4018,9 @@ try_mount_again:
goto remote_path_check;
}
-#ifdef CONFIG_CIFS_SMB311
/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
if (tcon->posix_extensions)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-#endif /* SMB3.11 */
/* tell server which Unix caps we support */
if (cap_unix(tcon->ses)) {
@@ -4459,11 +4483,10 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
goto out;
}
-#ifdef CONFIG_CIFS_SMB311
/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
if (tcon->posix_extensions)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-#endif /* SMB3.11 */
+
if (cap_unix(ses))
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index ddae52bd1993..3713d22b95a7 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -465,8 +465,7 @@ out_err:
int
cifs_atomic_open(struct inode *inode, struct dentry *direntry,
- struct file *file, unsigned oflags, umode_t mode,
- int *opened)
+ struct file *file, unsigned oflags, umode_t mode)
{
int rc;
unsigned int xid;
@@ -539,9 +538,9 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
}
if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
- rc = finish_open(file, direntry, generic_file_open, opened);
+ rc = finish_open(file, direntry, generic_file_open);
if (rc) {
if (server->ops->close)
server->ops->close(xid, tcon, &fid);
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 85145a763021..ea6ace9c2417 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -129,8 +129,10 @@ static void cifs_fscache_acquire_inode_cookie(struct cifsInodeInfo *cifsi,
memset(&auxdata, 0, sizeof(auxdata));
auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time = timespec64_to_timespec(cifsi->vfs_inode.i_mtime);
- auxdata.last_change_time = timespec64_to_timespec(cifsi->vfs_inode.i_ctime);
+ auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
cifsi->fscache =
fscache_acquire_cookie(tcon->fscache,
@@ -166,8 +168,10 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
if (cifsi->fscache) {
memset(&auxdata, 0, sizeof(auxdata));
auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time = timespec64_to_timespec(cifsi->vfs_inode.i_mtime);
- auxdata.last_change_time = timespec64_to_timespec(cifsi->vfs_inode.i_ctime);
+ auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
+ auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
+ auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
fscache_relinquish_cookie(cifsi->fscache, &auxdata, false);
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index c7e3ac251e16..8c0862e41306 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -31,9 +31,11 @@
* Auxiliary data attached to CIFS inode within the cache
*/
struct cifs_fscache_inode_auxdata {
- struct timespec last_write_time;
- struct timespec last_change_time;
- u64 eof;
+ u64 last_write_time_sec;
+ u64 last_change_time_sec;
+ u32 last_write_time_nsec;
+ u32 last_change_time_nsec;
+ u64 eof;
};
/*
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index a2cfb33e85c1..d32eaa4b2437 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -95,7 +95,6 @@ static void
cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
- struct timespec ts;
cifs_dbg(FYI, "%s: revalidating inode %llu\n",
__func__, cifs_i->uniqueid);
@@ -114,8 +113,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
}
/* revalidate if mtime or size have changed */
- ts = timespec64_to_timespec(inode->i_mtime);
- if (timespec_equal(&ts, &fattr->cf_mtime) &&
+ if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) &&
cifs_i->server_eof == fattr->cf_eof) {
cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
__func__, cifs_i->uniqueid);
@@ -164,9 +162,9 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
cifs_revalidate_cache(inode, fattr);
spin_lock(&inode->i_lock);
- inode->i_atime = timespec_to_timespec64(fattr->cf_atime);
- inode->i_mtime = timespec_to_timespec64(fattr->cf_mtime);
- inode->i_ctime = timespec_to_timespec64(fattr->cf_ctime);
+ inode->i_atime = fattr->cf_atime;
+ inode->i_mtime = fattr->cf_mtime;
+ inode->i_ctime = fattr->cf_ctime;
inode->i_rdev = fattr->cf_rdev;
cifs_nlink_fattr_to_inode(inode, fattr);
inode->i_uid = fattr->cf_uid;
@@ -327,8 +325,8 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
fattr->cf_uid = cifs_sb->mnt_uid;
fattr->cf_gid = cifs_sb->mnt_gid;
- ktime_get_real_ts(&fattr->cf_mtime);
- fattr->cf_mtime = timespec_trunc(fattr->cf_mtime, sb->s_time_gran);
+ ktime_get_real_ts64(&fattr->cf_mtime);
+ fattr->cf_mtime = timespec64_trunc(fattr->cf_mtime, sb->s_time_gran);
fattr->cf_atime = fattr->cf_ctime = fattr->cf_mtime;
fattr->cf_nlink = 2;
fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL;
@@ -604,8 +602,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
if (info->LastAccessTime)
fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
else {
- ktime_get_real_ts(&fattr->cf_atime);
- fattr->cf_atime = timespec_trunc(fattr->cf_atime, sb->s_time_gran);
+ ktime_get_real_ts64(&fattr->cf_atime);
+ fattr->cf_atime = timespec64_trunc(fattr->cf_atime, sb->s_time_gran);
}
fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
@@ -1122,17 +1120,19 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
if (!server->ops->set_file_info)
return -ENOSYS;
+ info_buf.Pad = 0;
+
if (attrs->ia_valid & ATTR_ATIME) {
set_time = true;
info_buf.LastAccessTime =
- cpu_to_le64(cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_atime)));
+ cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_atime));
} else
info_buf.LastAccessTime = 0;
if (attrs->ia_valid & ATTR_MTIME) {
set_time = true;
info_buf.LastWriteTime =
- cpu_to_le64(cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_mtime)));
+ cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_mtime));
} else
info_buf.LastWriteTime = 0;
@@ -1145,7 +1145,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
if (set_time && (attrs->ia_valid & ATTR_CTIME)) {
cifs_dbg(FYI, "CIFS - CTIME changed\n");
info_buf.ChangeTime =
- cpu_to_le64(cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_ctime)));
+ cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
} else
info_buf.ChangeTime = 0;
@@ -1577,14 +1577,12 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
server = tcon->ses->server;
-#ifdef CONFIG_CIFS_SMB311
if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
cifs_sb);
d_drop(direntry); /* for time being always refresh inode info */
goto mkdir_out;
}
-#endif /* SMB311 */
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
@@ -2071,8 +2069,8 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
/* old CIFS Unix Extensions doesn't return create time */
if (CIFS_I(inode)->createtime) {
stat->result_mask |= STATX_BTIME;
- stat->btime = timespec_to_timespec64(
- cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime)));
+ stat->btime =
+ cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime));
}
stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED);
@@ -2278,17 +2276,17 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
args->gid = INVALID_GID; /* no change */
if (attrs->ia_valid & ATTR_ATIME)
- args->atime = cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_atime));
+ args->atime = cifs_UnixTimeToNT(attrs->ia_atime);
else
args->atime = NO_CHANGE_64;
if (attrs->ia_valid & ATTR_MTIME)
- args->mtime = cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_mtime));
+ args->mtime = cifs_UnixTimeToNT(attrs->ia_mtime);
else
args->mtime = NO_CHANGE_64;
if (attrs->ia_valid & ATTR_CTIME)
- args->ctime = cifs_UnixTimeToNT(timespec64_to_timespec(attrs->ia_ctime));
+ args->ctime = cifs_UnixTimeToNT(attrs->ia_ctime);
else
args->ctime = NO_CHANGE_64;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index de41f96aba49..2148b0f60e5e 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -396,7 +396,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_io_parms io_parms;
int buf_type = CIFS_NO_BUFFER;
__le16 *utf16_path;
- __u8 oplock = SMB2_OPLOCK_LEVEL_II;
+ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct smb2_file_all_info *pfile_info = NULL;
oparms.tcon = tcon;
@@ -459,7 +459,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_io_parms io_parms;
int create_options = CREATE_NOT_DIR;
__le16 *utf16_path;
- __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct kvec iov[2];
if (backup_cred(cifs_sb))
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 53e8362cbc4a..dacb2c05674c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -122,9 +122,7 @@ tconInfoAlloc(void)
mutex_init(&ret_buf->crfid.fid_mutex);
ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
GFP_KERNEL);
-#ifdef CONFIG_CIFS_STATS
spin_lock_init(&ret_buf->stat_lock);
-#endif
}
return ret_buf;
}
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index d7ad0dfe4e68..fdd908e4a26b 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -918,10 +918,10 @@ smbCalcSize(void *buf, struct TCP_Server_Info *server)
* Convert the NT UTC (based 1601-01-01, in hundred nanosecond units)
* into Unix UTC (based 1970-01-01, in seconds).
*/
-struct timespec
+struct timespec64
cifs_NTtimeToUnix(__le64 ntutc)
{
- struct timespec ts;
+ struct timespec64 ts;
/* BB what about the timezone? BB */
/* Subtract the NTFS time offset, then convert to 1s intervals. */
@@ -935,12 +935,12 @@ cifs_NTtimeToUnix(__le64 ntutc)
*/
if (t < 0) {
abs_t = -t;
- ts.tv_nsec = (long)(do_div(abs_t, 10000000) * 100);
+ ts.tv_nsec = (time64_t)(do_div(abs_t, 10000000) * 100);
ts.tv_nsec = -ts.tv_nsec;
ts.tv_sec = -abs_t;
} else {
abs_t = t;
- ts.tv_nsec = (long)do_div(abs_t, 10000000) * 100;
+ ts.tv_nsec = (time64_t)do_div(abs_t, 10000000) * 100;
ts.tv_sec = abs_t;
}
@@ -949,7 +949,7 @@ cifs_NTtimeToUnix(__le64 ntutc)
/* Convert the Unix UTC into NT UTC. */
u64
-cifs_UnixTimeToNT(struct timespec t)
+cifs_UnixTimeToNT(struct timespec64 t)
{
/* Convert to 100ns intervals and then add the NTFS time offset. */
return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET;
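For reference, the conversion in both directions is plain unit arithmetic: NT time counts 100ns intervals since 1601-01-01, so Unix-to-NT multiplies seconds by 10^7, adds the nanoseconds as 100ns ticks, and adds the offset of 369 years plus 89 leap days between the two epochs. A standalone sketch of the same math, not part of the patch (userspace C; positive times only, whereas cifs_NTtimeToUnix() above also handles pre-1970 values via do_div and explicit sign handling):

#include <stdio.h>
#include <stdint.h>

/* 100ns intervals between 1601-01-01 and 1970-01-01: 369 years + 89 leap days */
#define NTFS_TIME_OFFSET ((uint64_t)(369 * 365 + 89) * 24 * 3600 * 10000000ULL)

static uint64_t unix_to_nt(int64_t sec, long nsec)
{
        return (uint64_t)sec * 10000000 + nsec / 100 + NTFS_TIME_OFFSET;
}

static void nt_to_unix(uint64_t nt, int64_t *sec, long *nsec)
{
        uint64_t t = nt - NTFS_TIME_OFFSET;

        *sec  = t / 10000000;           /* whole seconds */
        *nsec = (t % 10000000) * 100;   /* remaining 100ns ticks */
}

int main(void)
{
        int64_t sec;
        long nsec;

        nt_to_unix(unix_to_nt(1534291200, 500), &sec, &nsec); /* round trip */
        printf("%lld.%09ld\n", (long long)sec, nsec); /* 1534291200.000000500 */
        return 0;
}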
@@ -959,10 +959,11 @@ static const int total_days_of_prev_months[] = {
0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334
};
-struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
+struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
{
- struct timespec ts;
- int sec, min, days, month, year;
+ struct timespec64 ts;
+ time64_t sec;
+ int min, days, month, year;
u16 date = le16_to_cpu(le_date);
u16 time = le16_to_cpu(le_time);
SMB_TIME *st = (SMB_TIME *)&time;
@@ -973,7 +974,7 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
sec = 2 * st->TwoSeconds;
min = st->Minutes;
if ((sec > 59) || (min > 59))
- cifs_dbg(VFS, "illegal time min %d sec %d\n", min, sec);
+ cifs_dbg(VFS, "illegal time min %d sec %lld\n", min, sec);
sec += (min * 60);
sec += 60 * 60 * st->Hours;
if (st->Hours > 24)
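cnvrtDosUnixTm() decodes the legacy DOS two-second-resolution time word: 5 bits of seconds/2, 6 bits of minutes, 5 bits of hours (SMB_DATE packs day, month and year-since-1980 the same way). A standalone sketch of the time-word decode, assuming the little-endian bitfield layout of SMB_TIME in cifspdu.h:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* mirrors SMB_TIME from cifspdu.h (assumes little-endian bitfield layout) */
struct smb_time {
        uint16_t two_seconds:5; /* 0..29: seconds / 2 */
        uint16_t minutes:6;     /* 0..59 */
        uint16_t hours:5;       /* 0..23 */
};

int main(void)
{
        uint16_t word = 0x542d; /* 10h, 33m, 13 two-second units */
        struct smb_time t;

        memcpy(&t, &word, sizeof(t)); /* as the driver's (SMB_TIME *)&time */
        printf("%02u:%02u:%02u\n", (unsigned)t.hours, (unsigned)t.minutes,
               2 * (unsigned)t.two_seconds); /* prints 10:33:26 */
        return 0;
}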
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 8b0502cd39af..aa23c00367ec 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
goto setup_ntlmv2_ret;
}
*pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
+ if (!*pbuffer) {
+ rc = -ENOMEM;
+ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
+ *buflen = 0;
+ goto setup_ntlmv2_ret;
+ }
sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 646dcd149de1..378151e09e91 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -624,7 +624,6 @@ cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
static void
cifs_clear_stats(struct cifs_tcon *tcon)
{
-#ifdef CONFIG_CIFS_STATS
atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
@@ -646,13 +645,11 @@ cifs_clear_stats(struct cifs_tcon *tcon)
atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
-#endif
}
static void
cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
-#ifdef CONFIG_CIFS_STATS
seq_printf(m, " Oplocks breaks: %d",
atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
seq_printf(m, "\nReads: %d Bytes: %llu",
@@ -684,7 +681,6 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
atomic_read(&tcon->stats.cifs_stats.num_ffirst),
atomic_read(&tcon->stats.cifs_stats.num_fnext),
atomic_read(&tcon->stats.cifs_stats.num_fclose));
-#endif
}
static void
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index d01ad706d7fc..1eef1791d0c4 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -120,7 +120,9 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
break;
}
- if (use_cached_root_handle == false)
+ if (use_cached_root_handle)
+ close_shroot(&tcon->crfid);
+ else
rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
if (tmprc)
rc = tmprc;
@@ -281,7 +283,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
int rc;
if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
- (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
+ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
(buf->Attributes == 0))
return 0; /* would be a no op, no sense sending this */
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 3ff7cec2da81..db0453660ff6 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -93,7 +93,6 @@ static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
/* SMB2_OPLOCK_BREAK */ cpu_to_le16(24)
};
-#ifdef CONFIG_CIFS_SMB311
static __u32 get_neg_ctxt_len(struct smb2_sync_hdr *hdr, __u32 len,
__u32 non_ctxlen)
{
@@ -127,7 +126,6 @@ static __u32 get_neg_ctxt_len(struct smb2_sync_hdr *hdr, __u32 len,
/* length of negcontexts including pad from end of sec blob to them */
return (len - nc_offset) + size_of_pad_before_neg_ctxts;
}
-#endif /* CIFS_SMB311 */
int
smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
@@ -222,10 +220,9 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
clc_len = smb2_calc_size(buf, srvr);
-#ifdef CONFIG_CIFS_SMB311
if (shdr->Command == SMB2_NEGOTIATE)
clc_len += get_neg_ctxt_len(shdr, len, clc_len);
-#endif /* SMB311 */
+
if (len != clc_len) {
cifs_dbg(FYI, "Calculated size %u length %u mismatch mid %llu\n",
clc_len, len, mid);
@@ -241,6 +238,13 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
return 0;
/*
+ * Some Windows servers (win2016) will also pad the final
+ * PDU in a compound to 8 bytes.
+ */
+ if (((clc_len + 7) & ~7) == len)
+ return 0;
+
+ /*
* MacOS server pads after SMB2.1 write response with 3 bytes
* of junk. Other servers match RFC1001 len to actual
* SMB2/SMB3 frame length (header + smb2 response specific data)
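The new Win2016 case accepts a frame whose received length equals the calculated length rounded up to the next 8-byte boundary; ((clc_len + 7) & ~7) is the usual power-of-two round-up. A tiny standalone check of that arithmetic, not part of the patch:

#include <stdio.h>
#include <stdbool.h>

/* true iff len equals clc_len rounded up to a multiple of 8 */
static bool final_pdu_pad_ok(unsigned int clc_len, unsigned int len)
{
        return ((clc_len + 7) & ~7u) == len;
}

int main(void)
{
        printf("%d\n", final_pdu_pad_ok(73, 80)); /* 1: 73 rounds up to 80 */
        printf("%d\n", final_pdu_pad_ok(80, 80)); /* 1: already aligned */
        printf("%d\n", final_pdu_pad_ok(73, 88)); /* 0: padded past 8 */
        return 0;
}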
@@ -451,15 +455,13 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
/* Windows doesn't allow paths beginning with \ */
if (from[0] == '\\')
start_of_path = from + 1;
-#ifdef CONFIG_CIFS_SMB311
+
/* SMB311 POSIX extensions paths do not include leading slash */
else if (cifs_sb_master_tlink(cifs_sb) &&
cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
(from[0] == '/')) {
start_of_path = from + 1;
- }
-#endif /* 311 */
- else
+ } else
start_of_path = from;
to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
@@ -759,7 +761,6 @@ smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
return 0;
}
-#ifdef CONFIG_CIFS_SMB311
/**
* smb311_update_preauth_hash - update @ses hash with the packet data in @iov
*
@@ -821,4 +822,3 @@ smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec)
return 0;
}
-#endif
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index ea92a38b2f08..247a98e6c856 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -444,7 +444,11 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
NULL /* no data input */, 0 /* no data input */,
(char **)&out_buf, &ret_data_len);
- if (rc != 0) {
+ if (rc == -EOPNOTSUPP) {
+ cifs_dbg(FYI,
+ "server does not support query network interfaces\n");
+ goto out;
+ } else if (rc != 0) {
cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
goto out;
}
@@ -466,21 +470,36 @@ out:
return rc;
}
-void
-smb2_cached_lease_break(struct work_struct *work)
+static void
+smb2_close_cached_fid(struct kref *ref)
{
- struct cached_fid *cfid = container_of(work,
- struct cached_fid, lease_break);
- mutex_lock(&cfid->fid_mutex);
+ struct cached_fid *cfid = container_of(ref, struct cached_fid,
+ refcount);
+
if (cfid->is_valid) {
cifs_dbg(FYI, "clear cached root file handle\n");
SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
cfid->fid->volatile_fid);
cfid->is_valid = false;
}
+}
+
+void close_shroot(struct cached_fid *cfid)
+{
+ mutex_lock(&cfid->fid_mutex);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
mutex_unlock(&cfid->fid_mutex);
}
+void
+smb2_cached_lease_break(struct work_struct *work)
+{
+ struct cached_fid *cfid = container_of(work,
+ struct cached_fid, lease_break);
+
+ close_shroot(cfid);
+}
+
/*
* Open the directory at the root of a share
*/
@@ -495,6 +514,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
if (tcon->crfid.is_valid) {
cifs_dbg(FYI, "found a cached root file handle\n");
memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
+ kref_get(&tcon->crfid.refcount);
mutex_unlock(&tcon->crfid.fid_mutex);
return 0;
}
@@ -511,6 +531,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
tcon->crfid.tcon = tcon;
tcon->crfid.is_valid = true;
+ kref_init(&tcon->crfid.refcount);
+ kref_get(&tcon->crfid.refcount);
}
mutex_unlock(&tcon->crfid.fid_mutex);
return rc;
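open_shroot()/close_shroot() turn the cached root handle into a standard kref-managed object: the first open takes two references (one owned by the cache itself, one by the returning caller), every later cache hit takes one more, and the handle is only sent an SMB2_close when the last reference is put. A rough userspace analog of the idiom, with hypothetical names and a plain atomic standing in for struct kref:

#include <stdatomic.h>
#include <stdio.h>

struct cached_handle {
        atomic_int refcount;
        int is_valid;
};

static void handle_get(struct cached_handle *h)
{
        atomic_fetch_add(&h->refcount, 1);
}

static void handle_put(struct cached_handle *h)
{
        /* last put closes the underlying handle, as
           smb2_close_cached_fid() does via SMB2_close() */
        if (atomic_fetch_sub(&h->refcount, 1) == 1 && h->is_valid) {
                h->is_valid = 0;
                puts("closing cached root handle");
        }
}

int main(void)
{
        /* one ref for the cache, one for the first opener */
        struct cached_handle h = { 2, 1 };

        handle_get(&h);         /* second opener hits the cache */
        handle_put(&h);         /* openers done ... */
        handle_put(&h);
        handle_put(&h);         /* cache itself lets go: closed here */
        return 0;
}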
@@ -549,9 +571,14 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
FS_DEVICE_INFORMATION);
SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+ FS_VOLUME_INFORMATION);
+ SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
if (no_cached_open)
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ else
+ close_shroot(&tcon->crfid);
+
return;
}
@@ -873,13 +900,11 @@ smb2_can_echo(struct TCP_Server_Info *server)
static void
smb2_clear_stats(struct cifs_tcon *tcon)
{
-#ifdef CONFIG_CIFS_STATS
int i;
for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
}
-#endif
}
static void
@@ -918,67 +943,58 @@ smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
static void
smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
-#ifdef CONFIG_CIFS_STATS
atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
- seq_printf(m, "\nNegotiates: %d sent %d failed",
- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
- seq_printf(m, "\nSessionSetups: %d sent %d failed",
- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
- seq_printf(m, "\nLogoffs: %d sent %d failed",
- atomic_read(&sent[SMB2_LOGOFF_HE]),
- atomic_read(&failed[SMB2_LOGOFF_HE]));
- seq_printf(m, "\nTreeConnects: %d sent %d failed",
+
+ /*
+ * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
+ * totals (requests sent) since those SMBs are per-session not per tcon
+ */
+ seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
+ (long long)(tcon->bytes_read),
+ (long long)(tcon->bytes_written));
+ seq_printf(m, "\nTreeConnects: %d total %d failed",
atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
- seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
+ seq_printf(m, "\nTreeDisconnects: %d total %d failed",
atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
- seq_printf(m, "\nCreates: %d sent %d failed",
+ seq_printf(m, "\nCreates: %d total %d failed",
atomic_read(&sent[SMB2_CREATE_HE]),
atomic_read(&failed[SMB2_CREATE_HE]));
- seq_printf(m, "\nCloses: %d sent %d failed",
+ seq_printf(m, "\nCloses: %d total %d failed",
atomic_read(&sent[SMB2_CLOSE_HE]),
atomic_read(&failed[SMB2_CLOSE_HE]));
- seq_printf(m, "\nFlushes: %d sent %d failed",
+ seq_printf(m, "\nFlushes: %d total %d failed",
atomic_read(&sent[SMB2_FLUSH_HE]),
atomic_read(&failed[SMB2_FLUSH_HE]));
- seq_printf(m, "\nReads: %d sent %d failed",
+ seq_printf(m, "\nReads: %d total %d failed",
atomic_read(&sent[SMB2_READ_HE]),
atomic_read(&failed[SMB2_READ_HE]));
- seq_printf(m, "\nWrites: %d sent %d failed",
+ seq_printf(m, "\nWrites: %d total %d failed",
atomic_read(&sent[SMB2_WRITE_HE]),
atomic_read(&failed[SMB2_WRITE_HE]));
- seq_printf(m, "\nLocks: %d sent %d failed",
+ seq_printf(m, "\nLocks: %d total %d failed",
atomic_read(&sent[SMB2_LOCK_HE]),
atomic_read(&failed[SMB2_LOCK_HE]));
- seq_printf(m, "\nIOCTLs: %d sent %d failed",
+ seq_printf(m, "\nIOCTLs: %d total %d failed",
atomic_read(&sent[SMB2_IOCTL_HE]),
atomic_read(&failed[SMB2_IOCTL_HE]));
- seq_printf(m, "\nCancels: %d sent %d failed",
- atomic_read(&sent[SMB2_CANCEL_HE]),
- atomic_read(&failed[SMB2_CANCEL_HE]));
- seq_printf(m, "\nEchos: %d sent %d failed",
- atomic_read(&sent[SMB2_ECHO_HE]),
- atomic_read(&failed[SMB2_ECHO_HE]));
- seq_printf(m, "\nQueryDirectories: %d sent %d failed",
+ seq_printf(m, "\nQueryDirectories: %d total %d failed",
atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
- seq_printf(m, "\nChangeNotifies: %d sent %d failed",
+ seq_printf(m, "\nChangeNotifies: %d total %d failed",
atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
- seq_printf(m, "\nQueryInfos: %d sent %d failed",
+ seq_printf(m, "\nQueryInfos: %d total %d failed",
atomic_read(&sent[SMB2_QUERY_INFO_HE]),
atomic_read(&failed[SMB2_QUERY_INFO_HE]));
- seq_printf(m, "\nSetInfos: %d sent %d failed",
+ seq_printf(m, "\nSetInfos: %d total %d failed",
atomic_read(&sent[SMB2_SET_INFO_HE]),
atomic_read(&failed[SMB2_SET_INFO_HE]));
seq_printf(m, "\nOplockBreaks: %d sent %d failed",
atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
-#endif
}
static void
@@ -1353,6 +1369,13 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
}
+/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS in UTF-16: 48 bytes plus a 2-byte null */
+#define GMT_TOKEN_SIZE 50
+
+/*
+ * Input buffer contains (empty) struct smb_snapshot array with size filled in
+ * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
+ */
static int
smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *cfile, void __user *ioc_buf)
@@ -1382,14 +1405,27 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
kfree(retbuf);
return rc;
}
- if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
- rc = -ERANGE;
- kfree(retbuf);
- return rc;
- }
- if (ret_data_len > snapshot_in.snapshot_array_size)
- ret_data_len = snapshot_in.snapshot_array_size;
+ /*
+ * Check for min size, i.e. not large enough to fit even one GMT
+ * token (snapshot). On the first ioctl some callers pass in a
+ * smaller size (or zero) simply to learn the size of the array,
+ * so that on the second try they can allocate enough memory to
+ * hold all of the snapshot GMT tokens.
+ */
+ if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
+ ret_data_len = sizeof(struct smb_snapshot_array);
+
+ /*
+ * We return struct SRV_SNAPSHOT_ARRAY, followed by
+ * the snapshot array of 50-byte GMT tokens, each
+ * representing an available previous version of the data
+ */
+ if (ret_data_len > (snapshot_in.snapshot_array_size +
+ sizeof(struct smb_snapshot_array)))
+ ret_data_len = snapshot_in.snapshot_array_size +
+ sizeof(struct smb_snapshot_array);
if (copy_to_user(ioc_buf, retbuf, ret_data_len))
rc = -EFAULT;
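The relaxed size check supports a two-pass calling convention from user space: probe with a zero (or too-small) snapshot_array_size to learn how much room the GMT tokens need, then retry with a buffer of that size. A hedged userspace sketch of that pattern; it assumes the CIFS_ENUMERATE_SNAPSHOTS ioctl number and struct smb_snapshot_array layout from fs/cifs/cifs_ioctl.h, redeclared here rather than included:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/types.h>

struct smb_snapshot_array {
        __u32 number_of_snapshots;
        __u32 number_of_snapshots_returned;
        __u32 snapshot_array_size;
        /* 50-byte GMT tokens follow */
} __attribute__((packed));

#define CIFS_IOCTL_MAGIC 0xCF
#define CIFS_ENUMERATE_SNAPSHOTS \
        _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)

static int enum_snapshots(int fd)
{
        struct smb_snapshot_array probe = { 0 };
        struct smb_snapshot_array *full;
        size_t len;

        /* pass 1: zero-sized array just reports the needed size */
        if (ioctl(fd, CIFS_ENUMERATE_SNAPSHOTS, &probe) < 0)
                return -1;

        /* pass 2: room for the header plus all GMT tokens */
        len = sizeof(*full) + probe.snapshot_array_size;
        full = calloc(1, len);
        if (!full)
                return -1;
        full->snapshot_array_size = probe.snapshot_array_size;

        if (ioctl(fd, CIFS_ENUMERATE_SNAPSHOTS, full) < 0) {
                free(full);
                return -1;
        }
        printf("%u snapshots returned\n", full->number_of_snapshots_returned);
        free(full);
        return 0;
}

int main(int argc, char **argv)
{
        int fd;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                return 1;
        return enum_snapshots(fd) ? 1 : 0;
}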
@@ -1487,7 +1523,11 @@ smb2_is_session_expired(char *buf)
shdr->Status != STATUS_USER_SESSION_DELETED)
return false;
+ trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
+ le16_to_cpu(shdr->Command),
+ le64_to_cpu(shdr->MessageId));
cifs_dbg(FYI, "Session expired or deleted\n");
+
return true;
}
@@ -1504,16 +1544,140 @@ smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
CIFS_CACHE_READ(cinode) ? 1 : 0);
}
+static void
+smb2_set_related(struct smb_rqst *rqst)
+{
+ struct smb2_sync_hdr *shdr;
+
+ shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
+ shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
+}
+
+char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
+
+static void
+smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+{
+ struct smb2_sync_hdr *shdr;
+ unsigned long len = smb_rqst_len(server, rqst);
+
+ /* SMB headers in a compound are 8 byte aligned. */
+ if (len & 7) {
+ rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
+ rqst->rq_iov[rqst->rq_nvec].iov_len = 8 - (len & 7);
+ rqst->rq_nvec++;
+ len = smb_rqst_len(server, rqst);
+ }
+
+ shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
+ shdr->NextCommand = cpu_to_le32(len);
+}
+
static int
smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
struct kstatfs *buf)
{
+ struct smb2_query_info_rsp *rsp;
+ struct smb2_fs_full_size_info *info = NULL;
+ struct smb_rqst rqst[3];
+ int resp_buftype[3];
+ struct kvec rsp_iov[3];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qi_iov[1];
+ struct kvec close_iov[1];
+ struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = ses->server;
+ __le16 srch_path = 0; /* Null - open root of share */
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ struct cifs_open_parms oparms;
+ struct cifs_fid fid;
+ int flags = 0;
+ int rc;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(rqst, 0, sizeof(rqst));
+ memset(resp_buftype, 0, sizeof(resp_buftype));
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+ memset(&open_iov, 0, sizeof(open_iov));
+ rqst[0].rq_iov = open_iov;
+ rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+
+ oparms.tcon = tcon;
+ oparms.desired_access = FILE_READ_ATTRIBUTES;
+ oparms.disposition = FILE_OPEN;
+ oparms.create_options = 0;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+
+ rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &srch_path);
+ if (rc)
+ goto qfs_exit;
+ smb2_set_next_command(server, &rqst[0]);
+
+ memset(&qi_iov, 0, sizeof(qi_iov));
+ rqst[1].rq_iov = qi_iov;
+ rqst[1].rq_nvec = 1;
+
+ rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+ FS_FULL_SIZE_INFORMATION,
+ SMB2_O_INFO_FILESYSTEM, 0,
+ sizeof(struct smb2_fs_full_size_info));
+ if (rc)
+ goto qfs_exit;
+ smb2_set_next_command(server, &rqst[1]);
+ smb2_set_related(&rqst[1]);
+
+ memset(&close_iov, 0, sizeof(close_iov));
+ rqst[2].rq_iov = close_iov;
+ rqst[2].rq_nvec = 1;
+
+ rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
+ if (rc)
+ goto qfs_exit;
+ smb2_set_related(&rqst[2]);
+
+ rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ resp_buftype, rsp_iov);
+ if (rc)
+ goto qfs_exit;
+
+ rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ buf->f_type = SMB2_MAGIC_NUMBER;
+ info = (struct smb2_fs_full_size_info *)(
+ le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
+ rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength),
+ &rsp_iov[1],
+ sizeof(struct smb2_fs_full_size_info));
+ if (!rc)
+ smb2_copy_fs_info_to_kstatfs(info, buf);
+
+qfs_exit:
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_info_free(&rqst[1]);
+ SMB2_close_free(&rqst[2]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+ return rc;
+}
+
+static int
+smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+ struct kstatfs *buf)
+{
int rc;
__le16 srch_path = 0; /* Null - open root of share */
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_open_parms oparms;
struct cifs_fid fid;
+ if (!tcon->posix_extensions)
+ return smb2_queryfs(xid, tcon, buf);
+
oparms.tcon = tcon;
oparms.desired_access = FILE_READ_ATTRIBUTES;
oparms.disposition = FILE_OPEN;
@@ -1524,9 +1688,10 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
if (rc)
return rc;
+
+ rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid, buf);
buf->f_type = SMB2_MAGIC_NUMBER;
- rc = SMB2_QFS_info(xid, tcon, fid.persistent_fid, fid.volatile_fid,
- buf);
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
return rc;
}
@@ -1700,7 +1865,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
&resp_buftype);
if (!rc || !err_iov.iov_base) {
rc = -ENOENT;
- goto querty_exit;
+ goto free_path;
}
err_buf = err_iov.iov_base;
@@ -1741,6 +1906,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
querty_exit:
free_rsp_buf(resp_buftype, err_buf);
+ free_path:
kfree(utf16_path);
return rc;
}
@@ -2326,35 +2492,51 @@ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
-/* Assumes:
- * rqst->rq_iov[0] is transform header
- * rqst->rq_iov[1+] data to be encrypted/decrypted
+/* Assumes the first rqst has a transform header as the first iov.
+ * I.e.
+ * rqst[0].rq_iov[0] is transform header
+ * rqst[0].rq_iov[1+] data to be encrypted/decrypted
+ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
*/
static struct scatterlist *
-init_sg(struct smb_rqst *rqst, u8 *sign)
+init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
- unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages + 1;
- unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
+ unsigned int sg_len;
struct scatterlist *sg;
unsigned int i;
unsigned int j;
+ unsigned int idx = 0;
+ int skip;
+
+ sg_len = 1;
+ for (i = 0; i < num_rqst; i++)
+ sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
if (!sg)
return NULL;
sg_init_table(sg, sg_len);
- smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 20, assoc_data_len);
- for (i = 1; i < rqst->rq_nvec; i++)
- smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
- rqst->rq_iov[i].iov_len);
- for (j = 0; i < sg_len - 1; i++, j++) {
- unsigned int len, offset;
+ for (i = 0; i < num_rqst; i++) {
+ for (j = 0; j < rqst[i].rq_nvec; j++) {
+ /*
+ * The first rqst has a transform header where the
+ * first 20 bytes are not part of the encrypted blob
+ */
+ skip = (i == 0) && (j == 0) ? 20 : 0;
+ smb2_sg_set_buf(&sg[idx++],
+ rqst[i].rq_iov[j].iov_base + skip,
+ rqst[i].rq_iov[j].iov_len - skip);
+ }
- rqst_page_get_length(rqst, j, &len, &offset);
- sg_set_page(&sg[i], rqst->rq_pages[j], len, offset);
+ for (j = 0; j < rqst[i].rq_npages; j++) {
+ unsigned int len, offset;
+
+ rqst_page_get_length(&rqst[i], j, &len, &offset);
+ sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
+ }
}
- smb2_sg_set_buf(&sg[sg_len - 1], sign, SMB2_SIGNATURE_SIZE);
+ smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
return sg;
}
@@ -2386,10 +2568,11 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
* untouched.
*/
static int
-crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
+crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *rqst, int enc)
{
struct smb2_transform_hdr *tr_hdr =
- (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base;
+ (struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
int rc = 0;
struct scatterlist *sg;
@@ -2440,7 +2623,7 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
crypt_len += SMB2_SIGNATURE_SIZE;
}
- sg = init_sg(rqst, sign);
+ sg = init_sg(num_rqst, rqst, sign);
if (!sg) {
cifs_dbg(VFS, "%s: Failed to init sg", __func__);
rc = -ENOMEM;
@@ -2477,103 +2660,98 @@ free_req:
return rc;
}
+void
+smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
+{
+ int i, j;
+
+ for (i = 0; i < num_rqst; i++) {
+ if (rqst[i].rq_pages) {
+ for (j = rqst[i].rq_npages - 1; j >= 0; j--)
+ put_page(rqst[i].rq_pages[j]);
+ kfree(rqst[i].rq_pages);
+ }
+ }
+}
+
+/*
+ * This function will initialize new_rq and encrypt the content.
+ * The first entry, new_rq[0], only contains a single iov which contains
+ * a smb2_transform_hdr and is pre-allocated by the caller.
+ * This function then populates new_rq[1+] with the content from old_rq[0+].
+ *
+ * The end result is an array of smb_rqst structures where the first structure
+ * only contains a single iov for the transform header which we then can pass
+ * to crypt_message().
+ *
+ * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
+ * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
+ */
static int
-smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
- struct smb_rqst *old_rq)
+smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *new_rq, struct smb_rqst *old_rq)
{
- struct kvec *iov;
struct page **pages;
- struct smb2_transform_hdr *tr_hdr;
- unsigned int npages = old_rq->rq_npages;
- unsigned int orig_len;
- int i;
+ struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
+ unsigned int npages;
+ unsigned int orig_len = 0;
+ int i, j;
int rc = -ENOMEM;
- pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- return rc;
-
- new_rq->rq_pages = pages;
- new_rq->rq_offset = old_rq->rq_offset;
- new_rq->rq_npages = old_rq->rq_npages;
- new_rq->rq_pagesz = old_rq->rq_pagesz;
- new_rq->rq_tailsz = old_rq->rq_tailsz;
-
- for (i = 0; i < npages; i++) {
- pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
- if (!pages[i])
- goto err_free_pages;
- }
-
- iov = kmalloc_array(old_rq->rq_nvec + 1, sizeof(struct kvec),
- GFP_KERNEL);
- if (!iov)
- goto err_free_pages;
+ for (i = 1; i < num_rqst; i++) {
+ npages = old_rq[i - 1].rq_npages;
+ pages = kmalloc_array(npages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages)
+ goto err_free;
+
+ new_rq[i].rq_pages = pages;
+ new_rq[i].rq_npages = npages;
+ new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
+ new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
+ new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
+ new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
+ new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
+
+ orig_len += smb_rqst_len(server, &old_rq[i - 1]);
+
+ for (j = 0; j < npages; j++) {
+ pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+ if (!pages[j])
+ goto err_free;
+ }
- /* copy all iovs from the old */
- memcpy(&iov[1], &old_rq->rq_iov[0],
- sizeof(struct kvec) * old_rq->rq_nvec);
+ /* copy pages from the old */
+ for (j = 0; j < npages; j++) {
+ char *dst, *src;
+ unsigned int offset, len;
- new_rq->rq_iov = iov;
- new_rq->rq_nvec = old_rq->rq_nvec + 1;
+ rqst_page_get_length(&new_rq[i], j, &len, &offset);
- tr_hdr = kmalloc(sizeof(struct smb2_transform_hdr), GFP_KERNEL);
- if (!tr_hdr)
- goto err_free_iov;
+ dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
+ src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
- orig_len = smb_rqst_len(server, old_rq);
+ memcpy(dst, src, len);
+ kunmap(new_rq[i].rq_pages[j]);
+ kunmap(old_rq[i - 1].rq_pages[j]);
+ }
+ }
- /* fill the 2nd iov with a transform header */
+ /* fill the 1st iov with a transform header */
fill_transform_hdr(tr_hdr, orig_len, old_rq);
- new_rq->rq_iov[0].iov_base = tr_hdr;
- new_rq->rq_iov[0].iov_len = sizeof(struct smb2_transform_hdr);
-
- /* copy pages form the old */
- for (i = 0; i < npages; i++) {
- char *dst, *src;
- unsigned int offset, len;
-
- rqst_page_get_length(new_rq, i, &len, &offset);
-
- dst = (char *) kmap(new_rq->rq_pages[i]) + offset;
- src = (char *) kmap(old_rq->rq_pages[i]) + offset;
- memcpy(dst, src, len);
- kunmap(new_rq->rq_pages[i]);
- kunmap(old_rq->rq_pages[i]);
- }
-
- rc = crypt_message(server, new_rq, 1);
+ rc = crypt_message(server, num_rqst, new_rq, 1);
cifs_dbg(FYI, "encrypt message returned %d", rc);
if (rc)
- goto err_free_tr_hdr;
+ goto err_free;
return rc;
-err_free_tr_hdr:
- kfree(tr_hdr);
-err_free_iov:
- kfree(iov);
-err_free_pages:
- for (i = i - 1; i >= 0; i--)
- put_page(pages[i]);
- kfree(pages);
+err_free:
+ smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
return rc;
}
-static void
-smb3_free_transform_rq(struct smb_rqst *rqst)
-{
- int i = rqst->rq_npages - 1;
-
- for (; i >= 0; i--)
- put_page(rqst->rq_pages[i]);
- kfree(rqst->rq_pages);
- /* free transform header */
- kfree(rqst->rq_iov[0].iov_base);
- kfree(rqst->rq_iov);
-}
-
static int
smb3_is_transform_hdr(void *buf)
{
@@ -2603,7 +2781,7 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
rqst.rq_pagesz = PAGE_SIZE;
rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
- rc = crypt_message(server, &rqst, 0);
+ rc = crypt_message(server, 1, &rqst, 0);
cifs_dbg(FYI, "decrypt message returned %d\n", rc);
if (rc)
@@ -2878,13 +3056,20 @@ discard_data:
static int
receive_encrypted_standard(struct TCP_Server_Info *server,
- struct mid_q_entry **mid)
+ struct mid_q_entry **mids, char **bufs,
+ int *num_mids)
{
- int length;
+ int ret, length;
char *buf = server->smallbuf;
+ char *tmpbuf;
+ struct smb2_sync_hdr *shdr;
unsigned int pdu_length = server->pdu_size;
unsigned int buf_size;
struct mid_q_entry *mid_entry;
+ int next_is_large;
+ char *next_buffer = NULL;
+
+ *num_mids = 0;
/* switch to large buffer if too big for a small one */
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
@@ -2905,24 +3090,61 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
if (length)
return length;
+ next_is_large = server->large_buf;
+ one_more:
+ shdr = (struct smb2_sync_hdr *)buf;
+ if (shdr->NextCommand) {
+ if (next_is_large) {
+ tmpbuf = server->bigbuf;
+ next_buffer = (char *)cifs_buf_get();
+ } else {
+ tmpbuf = server->smallbuf;
+ next_buffer = (char *)cifs_small_buf_get();
+ }
+ memcpy(next_buffer,
+ tmpbuf + le32_to_cpu(shdr->NextCommand),
+ pdu_length - le32_to_cpu(shdr->NextCommand));
+ }
+
mid_entry = smb2_find_mid(server, buf);
if (mid_entry == NULL)
cifs_dbg(FYI, "mid not found\n");
else {
cifs_dbg(FYI, "mid found\n");
mid_entry->decrypted = true;
+ mid_entry->resp_buf_size = server->pdu_size;
}
- *mid = mid_entry;
+ if (*num_mids >= MAX_COMPOUND) {
+ cifs_dbg(VFS, "too many PDUs in compound\n");
+ return -1;
+ }
+ bufs[*num_mids] = buf;
+ mids[(*num_mids)++] = mid_entry;
if (mid_entry && mid_entry->handle)
- return mid_entry->handle(server, mid_entry);
+ ret = mid_entry->handle(server, mid_entry);
+ else
+ ret = cifs_handle_standard(server, mid_entry);
- return cifs_handle_standard(server, mid_entry);
+ if (ret == 0 && shdr->NextCommand) {
+ pdu_length -= le32_to_cpu(shdr->NextCommand);
+ server->large_buf = next_is_large;
+ if (next_is_large)
+ server->bigbuf = next_buffer;
+ else
+ server->smallbuf = next_buffer;
+
+ buf += le32_to_cpu(shdr->NextCommand);
+ goto one_more;
+ }
+
+ return ret;
}
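receive_encrypted_standard() now decompounds a decrypted buffer by hopping through NextCommand: each SMB2 sync header stores the 8-byte-aligned offset of the next PDU (the little-endian u32 at byte offset 20 of the 64-byte header), and 0 marks the final one. A standalone sketch of that walk, minus the buffer copying/swapping and mid lookup (assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR 64  /* wire size of the SMB2 sync header */

/* NextCommand: little-endian u32 at byte offset 20; 0 if last PDU */
static uint32_t next_command(const unsigned char *pdu)
{
        uint32_t v;

        memcpy(&v, pdu + 20, sizeof(v));
        return v;
}

static void set_next_command(unsigned char *pdu, uint32_t off)
{
        memcpy(pdu + 20, &off, sizeof(off));
}

/* the same walk the one_more loop above performs */
static int count_pdus(const unsigned char *buf, size_t len)
{
        size_t off = 0;
        int n = 0;

        while (off < len) {
                uint32_t next = next_command(buf + off);

                n++;
                if (!next)
                        break;
                off += next;
        }
        return n;
}

int main(void)
{
        unsigned char buf[3 * HDR] = { 0 };

        /* three bare headers chained together; the last terminates */
        set_next_command(buf, HDR);
        set_next_command(buf + HDR, HDR);

        printf("%d PDUs\n", count_pdus(buf, sizeof(buf))); /* 3 PDUs */
        return 0;
}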
static int
-smb3_receive_transform(struct TCP_Server_Info *server, struct mid_q_entry **mid)
+smb3_receive_transform(struct TCP_Server_Info *server,
+ struct mid_q_entry **mids, char **bufs, int *num_mids)
{
char *buf = server->smallbuf;
unsigned int pdu_length = server->pdu_size;
@@ -2945,10 +3167,11 @@ smb3_receive_transform(struct TCP_Server_Info *server, struct mid_q_entry **mid)
return -ECONNABORTED;
}
+ /* TODO: add support for compounds containing READ. */
if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
- return receive_encrypted_read(server, mid);
+ return receive_encrypted_read(server, &mids[0]);
- return receive_encrypted_standard(server, mid);
+ return receive_encrypted_standard(server, mids, bufs, num_mids);
}
int
@@ -3250,7 +3473,6 @@ struct smb_version_operations smb30_operations = {
.fallocate = smb3_fallocate,
.enum_snapshots = smb3_enum_snapshots,
.init_transform_rq = smb3_init_transform_rq,
- .free_transform_rq = smb3_free_transform_rq,
.is_transform_hdr = smb3_is_transform_hdr,
.receive_transform = smb3_receive_transform,
.get_dfs_refer = smb2_get_dfs_refer,
@@ -3267,7 +3489,6 @@ struct smb_version_operations smb30_operations = {
.next_header = smb2_next_header,
};
-#ifdef CONFIG_CIFS_SMB311
struct smb_version_operations smb311_operations = {
.compare_fids = smb2_compare_fids,
.setup_request = smb2_setup_request,
@@ -3335,7 +3556,7 @@ struct smb_version_operations smb311_operations = {
.is_status_pending = smb2_is_status_pending,
.is_session_expired = smb2_is_session_expired,
.oplock_response = smb2_oplock_response,
- .queryfs = smb2_queryfs,
+ .queryfs = smb311_queryfs,
.mand_lock = smb2_mand_lock,
.mand_unlock_range = smb2_unlock_range,
.push_mand_locks = smb2_push_mandatory_locks,
@@ -3357,7 +3578,6 @@ struct smb_version_operations smb311_operations = {
.fallocate = smb3_fallocate,
.enum_snapshots = smb3_enum_snapshots,
.init_transform_rq = smb3_init_transform_rq,
- .free_transform_rq = smb3_free_transform_rq,
.is_transform_hdr = smb3_is_transform_hdr,
.receive_transform = smb3_receive_transform,
.get_dfs_refer = smb2_get_dfs_refer,
@@ -3366,9 +3586,13 @@ struct smb_version_operations smb311_operations = {
.query_all_EAs = smb2_query_eas,
.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
+#ifdef CONFIG_CIFS_ACL
+ .get_acl = get_smb2_acl,
+ .get_acl_by_fid = get_smb2_acl_by_fid,
+ .set_acl = set_smb2_acl,
+#endif /* CIFS_ACL */
.next_header = smb2_next_header,
};
-#endif /* CIFS_SMB311 */
struct smb_version_values smb20_values = {
.version_string = SMB20_VERSION_STRING,
@@ -3496,7 +3720,6 @@ struct smb_version_values smb302_values = {
.create_lease_size = sizeof(struct create_lease_v2),
};
-#ifdef CONFIG_CIFS_SMB311
struct smb_version_values smb311_values = {
.version_string = SMB311_VERSION_STRING,
.protocol_id = SMB311_PROT_ID,
@@ -3517,4 +3740,3 @@ struct smb_version_values smb311_values = {
.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
.create_lease_size = sizeof(struct create_lease_v2),
};
-#endif /* SMB311 */
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 3c92678cb45b..5740aa809be6 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -80,7 +80,7 @@ static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};
-static int smb3_encryption_required(const struct cifs_tcon *tcon)
+int smb3_encryption_required(const struct cifs_tcon *tcon)
{
if (!tcon)
return 0;
@@ -360,17 +360,15 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
total_len);
if (tcon != NULL) {
-#ifdef CONFIG_CIFS_STATS2
uint16_t com_code = le16_to_cpu(smb2_command);
cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
-#endif
cifs_stats_inc(&tcon->num_smbs_sent);
}
return rc;
}
-#ifdef CONFIG_CIFS_SMB311
+
/* offset is sizeof smb2_negotiate_req but rounded up to 8 bytes */
#define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) */
@@ -585,13 +583,6 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
return 0;
}
-#else
-static void assemble_neg_contexts(struct smb2_negotiate_req *req,
- unsigned int *total_len)
-{
- return;
-}
-#endif /* SMB311 */
/*
*
@@ -636,10 +627,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
return rc;
req->sync_hdr.SessionId = 0;
-#ifdef CONFIG_CIFS_SMB311
+
memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
-#endif
if (strcmp(ses->server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
@@ -741,10 +731,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
-#ifdef CONFIG_CIFS_SMB311
else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
-#endif /* SMB311 */
else {
cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
le16_to_cpu(rsp->DialectRevision));
@@ -753,9 +741,6 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
}
server->dialect = le16_to_cpu(rsp->DialectRevision);
- /* BB: add check that dialect was valid given dialect(s) we asked for */
-
-#ifdef CONFIG_CIFS_SMB311
/*
* Keep a copy of the hash after negprot. This hash will be
* the starting hash value for all sessions made from this
@@ -763,7 +748,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
*/
memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
-#endif
+
/* SMB2 only has an extended negflavor */
server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
/* set it to the maximum buffer size value we can send with 1 credit */
@@ -804,7 +789,6 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
rc = -EIO;
}
-#ifdef CONFIG_CIFS_SMB311
if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
if (rsp->NegotiateContextCount)
rc = smb311_decode_neg_context(rsp, server,
@@ -812,7 +796,6 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
else
cifs_dbg(VFS, "Missing expected negotiate contexts\n");
}
-#endif /* CONFIG_CIFS_SMB311 */
neg_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
@@ -1373,13 +1356,11 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
sess_data->nls_cp = (struct nls_table *) nls_cp;
sess_data->previous_session = ses->Suid;
-#ifdef CONFIG_CIFS_SMB311
/*
* Initialize the session hash with the server one.
*/
memcpy(ses->preauth_sha_hash, ses->server->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
-#endif
while (sess_data->func)
sess_data->func(sess_data);
@@ -1875,6 +1856,51 @@ add_durable_context(struct kvec *iov, unsigned int *num_iovec,
return 0;
}
+/* See MS-SMB2 2.2.13.2.7 */
+static struct crt_twarp_ctxt *
+create_twarp_buf(__u64 timewarp)
+{
+ struct crt_twarp_ctxt *buf;
+
+ buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->ccontext.DataOffset = cpu_to_le16(offsetof
+ (struct crt_twarp_ctxt, Timestamp));
+ buf->ccontext.DataLength = cpu_to_le32(8);
+ buf->ccontext.NameOffset = cpu_to_le16(offsetof
+ (struct crt_twarp_ctxt, Name));
+ buf->ccontext.NameLength = cpu_to_le16(4);
+ /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
+ buf->Name[0] = 'T';
+ buf->Name[1] = 'W';
+ buf->Name[2] = 'r';
+ buf->Name[3] = 'p';
+ buf->Timestamp = cpu_to_le64(timewarp);
+ return buf;
+}
+
+/* See MS-SMB2 2.2.13.2.7 */
+static int
+add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
+{
+ struct smb2_create_req *req = iov[0].iov_base;
+ unsigned int num = *num_iovec;
+
+ iov[num].iov_base = create_twarp_buf(timewarp);
+ if (iov[num].iov_base == NULL)
+ return -ENOMEM;
+ iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
+ if (!req->CreateContextsOffset)
+ req->CreateContextsOffset = cpu_to_le32(
+ sizeof(struct smb2_create_req) +
+ iov[num - 1].iov_len);
+ le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
+ *num_iovec = num + 1;
+ return 0;
+}
+
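The timewarp ("TWrp") create context carries an NT-format timestamp identifying which snapshot an open should target; on a snapshot mount the value comes from a previously enumerated GMT token. A hedged userspace sketch of that token-to-timestamp conversion (gmt_token_to_nt is a hypothetical helper, not part of the patch; strptime and timegm are glibc/BSD extensions):

#define _GNU_SOURCE     /* strptime(), timegm() */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NTFS_TIME_OFFSET ((uint64_t)(369 * 365 + 89) * 24 * 3600 * 10000000ULL)

/* turn a snapshot GMT token into the NT-format value that
 * create_twarp_buf() above stores in crt_twarp_ctxt.Timestamp */
static int gmt_token_to_nt(const char *token, uint64_t *nt)
{
        struct tm tm = { 0 };

        if (!strptime(token, "@GMT-%Y.%m.%d-%H.%M.%S", &tm))
                return -1;
        *nt = (uint64_t)timegm(&tm) * 10000000 + NTFS_TIME_OFFSET;
        return 0;
}

int main(void)
{
        uint64_t nt;

        if (!gmt_token_to_nt("@GMT-2018.06.01-10.33.27", &nt))
                printf("Timestamp = %llu\n", (unsigned long long)nt);
        return 0;
}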
static int
alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
const char *treename, const __le16 *path)
@@ -1920,7 +1946,6 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
return 0;
}
-#ifdef CONFIG_CIFS_SMB311
int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
umode_t mode, struct cifs_tcon *tcon,
const char *full_path,
@@ -1928,7 +1953,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
{
struct smb_rqst rqst;
struct smb2_create_req *req;
- struct smb2_create_rsp *rsp;
+ struct smb2_create_rsp *rsp = NULL;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[3]; /* make sure at least one for each open context */
@@ -1943,27 +1968,31 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
char *pc_buf = NULL;
int flags = 0;
unsigned int total_len;
- __le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
-
- if (!path)
- return -ENOMEM;
+ __le16 *utf16_path = NULL;
cifs_dbg(FYI, "mkdir\n");
+ /* resource #1: path allocation */
+ utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+ if (!utf16_path)
+ return -ENOMEM;
+
if (ses && (ses->server))
server = ses->server;
- else
- return -EIO;
+ else {
+ rc = -EIO;
+ goto err_free_path;
+ }
+ /* resource #2: request */
rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
-
if (rc)
- return rc;
+ goto err_free_path;
+
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
-
req->ImpersonationLevel = IL_IMPERSONATION;
req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
/* File attributes ignored on open (used in create though) */
@@ -1992,50 +2021,44 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
&name_len,
- tcon->treeName, path);
- if (rc) {
- cifs_small_buf_release(req);
- return rc;
- }
+ tcon->treeName, utf16_path);
+ if (rc)
+ goto err_free_req;
+
req->NameLength = cpu_to_le16(name_len * 2);
uni_path_len = copy_size;
- path = copy_path;
+ /* free before overwriting resource */
+ kfree(utf16_path);
+ utf16_path = copy_path;
} else {
- uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+ uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
/* MUST set path len (NameLength) to 0 opening root of share */
req->NameLength = cpu_to_le16(uni_path_len - 2);
if (uni_path_len % 8 != 0) {
copy_size = roundup(uni_path_len, 8);
copy_path = kzalloc(copy_size, GFP_KERNEL);
if (!copy_path) {
- cifs_small_buf_release(req);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_free_req;
}
- memcpy((char *)copy_path, (const char *)path,
+ memcpy((char *)copy_path, (const char *)utf16_path,
uni_path_len);
uni_path_len = copy_size;
- path = copy_path;
+ /* free before overwriting resource */
+ kfree(utf16_path);
+ utf16_path = copy_path;
}
}
iov[1].iov_len = uni_path_len;
- iov[1].iov_base = path;
+ iov[1].iov_base = utf16_path;
req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
if (tcon->posix_extensions) {
- if (n_iov > 2) {
- struct create_context *ccontext =
- (struct create_context *)iov[n_iov-1].iov_base;
- ccontext->Next =
- cpu_to_le32(iov[n_iov-1].iov_len);
- }
-
+ /* resource #3: posix buf */
rc = add_posix_context(iov, &n_iov, mode);
- if (rc) {
- cifs_small_buf_release(req);
- kfree(copy_path);
- return rc;
- }
+ if (rc)
+ goto err_free_req;
pc_buf = iov[n_iov-1].iov_base;
}
@@ -2044,73 +2067,57 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
rqst.rq_iov = iov;
rqst.rq_nvec = n_iov;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
- &rsp_iov);
-
- cifs_small_buf_release(req);
- rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
-
- if (rc != 0) {
+ /* resource #4: response buffer */
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ if (rc) {
cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
- CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
- goto smb311_mkdir_exit;
- } else
- trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
- ses->Suid, CREATE_NOT_FILE,
- FILE_WRITE_ATTRIBUTES);
+ CREATE_NOT_FILE,
+ FILE_WRITE_ATTRIBUTES, rc);
+ goto err_free_rsp_buf;
+ }
+
+ rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+ trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
+ ses->Suid, CREATE_NOT_FILE,
+ FILE_WRITE_ATTRIBUTES);
SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
	/* Eventually save off posix specific response info and timestamps */
-smb311_mkdir_exit:
- kfree(copy_path);
- kfree(pc_buf);
+err_free_rsp_buf:
free_rsp_buf(resp_buftype, rsp);
+ kfree(pc_buf);
+err_free_req:
+ cifs_small_buf_release(req);
+err_free_path:
+ kfree(utf16_path);
return rc;
-
}
-#endif /* SMB311 */
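The rewrite above replaces repeated per-call-site cleanup with the kernel's reverse-order error-label convention: each err_* label releases exactly the resources acquired before the failing step, so every exit path frees everything exactly once. A minimal sketch of the pattern, with placeholder helpers:

/* Illustrative pattern only -- alloc_a/alloc_b/do_work are placeholders */
static int example(void)
{
	void *a, *b;
	int rc;

	a = alloc_a();			/* resource #1 */
	if (!a)
		return -ENOMEM;

	b = alloc_b();			/* resource #2 */
	if (!b) {
		rc = -ENOMEM;
		goto err_free_a;
	}

	rc = do_work(a, b);

	free_b(b);			/* unwind in reverse order */
err_free_a:
	free_a(a);
	return rc;
}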
int
-SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
- __u8 *oplock, struct smb2_file_all_info *buf,
- struct kvec *err_iov, int *buftype)
+SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
+ struct cifs_open_parms *oparms, __le16 *path)
{
- struct smb_rqst rqst;
+ struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_create_req *req;
- struct smb2_create_rsp *rsp;
- struct TCP_Server_Info *server;
- struct cifs_tcon *tcon = oparms->tcon;
- struct cifs_ses *ses = tcon->ses;
- struct kvec iov[5]; /* make sure at least one for each open context */
- struct kvec rsp_iov = {NULL, 0};
- int resp_buftype;
- int uni_path_len;
- __le16 *copy_path = NULL;
- int copy_size;
- int rc = 0;
unsigned int n_iov = 2;
__u32 file_attributes = 0;
- char *dhc_buf = NULL, *lc_buf = NULL, *pc_buf = NULL;
- int flags = 0;
+ int copy_size;
+ int uni_path_len;
unsigned int total_len;
-
- cifs_dbg(FYI, "create/open\n");
-
- if (ses && (ses->server))
- server = ses->server;
- else
- return -EIO;
+ struct kvec *iov = rqst->rq_iov;
+ __le16 *copy_path;
+ int rc;
rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
-
if (rc)
return rc;
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
+ iov[0].iov_base = (char *)req;
+ /* -1 since last byte is buf[0] which is sent below (path) */
+ iov[0].iov_len = total_len - 1;
if (oparms->create_options & CREATE_OPTION_READONLY)
file_attributes |= ATTR_READONLY;
@@ -2124,11 +2131,6 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
req->ShareAccess = FILE_SHARE_ALL_LE;
req->CreateDisposition = cpu_to_le32(oparms->disposition);
req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
-
- iov[0].iov_base = (char *)req;
- /* -1 since last byte is buf[0] which is sent below (path) */
- iov[0].iov_len = total_len - 1;
-
req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
/* [MS-SMB2] 2.2.13 NameOffset:
@@ -2146,10 +2148,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
&name_len,
tcon->treeName, path);
- if (rc) {
- cifs_small_buf_release(req);
+ if (rc)
return rc;
- }
req->NameLength = cpu_to_le16(name_len * 2);
uni_path_len = copy_size;
path = copy_path;
@@ -2157,18 +2157,16 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
/* MUST set path len (NameLength) to 0 opening root of share */
req->NameLength = cpu_to_le16(uni_path_len - 2);
- if (uni_path_len % 8 != 0) {
- copy_size = roundup(uni_path_len, 8);
- copy_path = kzalloc(copy_size, GFP_KERNEL);
- if (!copy_path) {
- cifs_small_buf_release(req);
- return -ENOMEM;
- }
- memcpy((char *)copy_path, (const char *)path,
- uni_path_len);
- uni_path_len = copy_size;
- path = copy_path;
- }
+ copy_size = uni_path_len;
+ if (copy_size % 8 != 0)
+ copy_size = roundup(copy_size, 8);
+ copy_path = kzalloc(copy_size, GFP_KERNEL);
+ if (!copy_path)
+ return -ENOMEM;
+ memcpy((char *)copy_path, (const char *)path,
+ uni_path_len);
+ uni_path_len = copy_size;
+ path = copy_path;
}
iov[1].iov_len = uni_path_len;
@@ -2183,12 +2181,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
else {
rc = add_lease_context(server, iov, &n_iov,
oparms->fid->lease_key, oplock);
- if (rc) {
- cifs_small_buf_release(req);
- kfree(copy_path);
+ if (rc)
return rc;
- }
- lc_buf = iov[n_iov-1].iov_base;
}
if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
@@ -2202,16 +2196,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
rc = add_durable_context(iov, &n_iov, oparms,
tcon->use_persistent);
- if (rc) {
- cifs_small_buf_release(req);
- kfree(copy_path);
- kfree(lc_buf);
+ if (rc)
return rc;
- }
- dhc_buf = iov[n_iov-1].iov_base;
}
-#ifdef CONFIG_CIFS_SMB311
if (tcon->posix_extensions) {
if (n_iov > 2) {
struct create_context *ccontext =
@@ -2221,24 +2209,79 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
}
rc = add_posix_context(iov, &n_iov, oparms->mode);
- if (rc) {
- cifs_small_buf_release(req);
- kfree(copy_path);
- kfree(lc_buf);
- kfree(dhc_buf);
+ if (rc)
return rc;
+ }
+
+ if (tcon->snapshot_time) {
+ cifs_dbg(FYI, "adding snapshot context\n");
+ if (n_iov > 2) {
+ struct create_context *ccontext =
+ (struct create_context *)iov[n_iov-1].iov_base;
+ ccontext->Next =
+ cpu_to_le32(iov[n_iov-1].iov_len);
}
- pc_buf = iov[n_iov-1].iov_base;
+
+ rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
+ if (rc)
+ return rc;
}
-#endif /* SMB311 */
+
+ rqst->rq_nvec = n_iov;
+ return 0;
+}
+
+/* rq_iov[0] is the request and is released by cifs_small_buf_release().
+ * All other vectors are freed by kfree().
+ */
+void
+SMB2_open_free(struct smb_rqst *rqst)
+{
+ int i;
+
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base);
+ for (i = 1; i < rqst->rq_nvec; i++)
+ if (rqst->rq_iov[i].iov_base != smb2_padding)
+ kfree(rqst->rq_iov[i].iov_base);
+}
+
+int
+SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ __u8 *oplock, struct smb2_file_all_info *buf,
+ struct kvec *err_iov, int *buftype)
+{
+ struct smb_rqst rqst;
+ struct smb2_create_rsp *rsp = NULL;
+ struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon = oparms->tcon;
+ struct cifs_ses *ses = tcon->ses;
+ struct kvec iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec rsp_iov = {NULL, 0};
+ int resp_buftype;
+ int rc = 0;
+ int flags = 0;
+
+ cifs_dbg(FYI, "create/open\n");
+ if (ses && (ses->server))
+ server = ses->server;
+ else
+ return -EIO;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
+ memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
- rqst.rq_nvec = n_iov;
+ rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
+
+ rc = SMB2_open_init(tcon, &rqst, oplock, oparms, path);
+ if (rc)
+ goto creat_exit;
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
- cifs_small_buf_release(req);
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
if (rc != 0) {
@@ -2275,10 +2318,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
else
*oplock = rsp->OplockLevel;
creat_exit:
- kfree(copy_path);
- kfree(lc_buf);
- kfree(dhc_buf);
- kfree(pc_buf);
+ SMB2_open_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
@@ -2469,43 +2509,62 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
}
int
+SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid)
+{
+ struct smb2_close_req *req;
+ struct kvec *iov = rqst->rq_iov;
+ unsigned int total_len;
+ int rc;
+
+ rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
+ if (rc)
+ return rc;
+
+ req->PersistentFileId = persistent_fid;
+ req->VolatileFileId = volatile_fid;
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+
+ return 0;
+}
+
+void
+SMB2_close_free(struct smb_rqst *rqst)
+{
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+}
+
+int
SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int flags)
{
struct smb_rqst rqst;
- struct smb2_close_req *req;
- struct smb2_close_rsp *rsp;
+ struct smb2_close_rsp *rsp = NULL;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buftype;
int rc = 0;
- unsigned int total_len;
cifs_dbg(FYI, "Close\n");
if (!ses || !(ses->server))
return -EIO;
- rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
- if (rc)
- return rc;
-
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->PersistentFileId = persistent_fid;
- req->VolatileFileId = volatile_fid;
-
- iov[0].iov_base = (char *)req;
- iov[0].iov_len = total_len;
-
memset(&rqst, 0, sizeof(struct smb_rqst));
+ memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
+ rc = SMB2_close_init(tcon, &rqst, persistent_fid, volatile_fid);
+ if (rc)
+ goto close_exit;
+
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(req);
rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
if (rc != 0) {
@@ -2518,6 +2577,7 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
/* BB FIXME - decode close response, update inode for caching */
close_exit:
+ SMB2_close_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
@@ -2529,9 +2589,9 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
return SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
}
-static int
-validate_iov(unsigned int offset, unsigned int buffer_length,
- struct kvec *iov, unsigned int min_buf_size)
+int
+smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
+ struct kvec *iov, unsigned int min_buf_size)
{
unsigned int smb_len = iov->iov_len;
char *end_of_smb = smb_len + (char *)iov->iov_base;
@@ -2575,7 +2635,7 @@ validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
if (!data)
return -EINVAL;
- rc = validate_iov(offset, buffer_length, iov, minbufsize);
+ rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
if (rc)
return rc;
@@ -2584,36 +2644,22 @@ validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
return 0;
}
-static int
-query_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
- u32 additional_info, size_t output_len, size_t min_len, void **data,
- u32 *dlen)
+int
+SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid,
+ u8 info_class, u8 info_type, u32 additional_info,
+ size_t output_len)
{
- struct smb_rqst rqst;
struct smb2_query_info_req *req;
- struct smb2_query_info_rsp *rsp = NULL;
- struct kvec iov[2];
- struct kvec rsp_iov;
- int rc = 0;
- int resp_buftype;
- struct cifs_ses *ses = tcon->ses;
- int flags = 0;
+ struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
-
- cifs_dbg(FYI, "Query Info\n");
-
- if (!ses || !(ses->server))
- return -EIO;
+ int rc;
rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
&total_len);
if (rc)
return rc;
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
-
req->InfoType = info_type;
req->FileInfoClass = info_class;
req->PersistentFileId = persistent_fid;
@@ -2630,13 +2676,50 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
iov[0].iov_base = (char *)req;
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
+ return 0;
+}
+
+void
+SMB2_query_info_free(struct smb_rqst *rqst)
+{
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+}
+
+static int
+query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
+ u32 additional_info, size_t output_len, size_t min_len, void **data,
+ u32 *dlen)
+{
+ struct smb_rqst rqst;
+ struct smb2_query_info_rsp *rsp = NULL;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int rc = 0;
+ int resp_buftype;
+ struct cifs_ses *ses = tcon->ses;
+ int flags = 0;
+
+ cifs_dbg(FYI, "Query Info\n");
+
+ if (!ses || !(ses->server))
+ return -EIO;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
memset(&rqst, 0, sizeof(struct smb_rqst));
+ memset(&iov, 0, sizeof(iov));
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
+ rc = SMB2_query_info_init(tcon, &rqst, persistent_fid, volatile_fid,
+ info_class, info_type, additional_info,
+ output_len);
+ if (rc)
+ goto qinf_exit;
+
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
- cifs_small_buf_release(req);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
if (rc) {
@@ -2665,6 +2748,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
&rsp_iov, min_len, *data);
qinf_exit:
+ SMB2_query_info_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
@@ -3623,9 +3707,9 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
goto qdir_exit;
}
- rc = validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
- le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
- info_buf_size);
+ rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
+ info_buf_size);
if (rc)
goto qdir_exit;
@@ -3927,9 +4011,9 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
-static void
-copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
- struct kstatfs *kst)
+void
+smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
+ struct kstatfs *kst)
{
kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
@@ -3939,6 +4023,25 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
return;
}
+static void
+copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
+ struct kstatfs *kst)
+{
+ kst->f_bsize = le32_to_cpu(response_data->BlockSize);
+ kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
+ kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
+ if (response_data->UserBlocksAvail == cpu_to_le64(-1))
+ kst->f_bavail = kst->f_bfree;
+ else
+ kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
+ if (response_data->TotalFileNodes != cpu_to_le64(-1))
+ kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
+ if (response_data->FreeFileNodes != cpu_to_le64(-1))
+ kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
+
+ return;
+}
+
static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
int outbuf_len, u64 persistent_fid, u64 volatile_fid)
@@ -3976,6 +4079,54 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
}
int
+SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
+{
+ struct smb_rqst rqst;
+ struct smb2_query_info_rsp *rsp = NULL;
+ struct kvec iov;
+ struct kvec rsp_iov;
+ int rc = 0;
+ int resp_buftype;
+ struct cifs_ses *ses = tcon->ses;
+ FILE_SYSTEM_POSIX_INFO *info = NULL;
+ int flags = 0;
+
+ rc = build_qfs_info_req(&iov, tcon, FS_POSIX_INFORMATION,
+ sizeof(FILE_SYSTEM_POSIX_INFO),
+ persistent_fid, volatile_fid);
+ if (rc)
+ return rc;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = &iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ cifs_small_buf_release(iov.iov_base);
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ goto posix_qfsinf_exit;
+ }
+ rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
+
+ info = (FILE_SYSTEM_POSIX_INFO *)(
+ le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
+ rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
+ sizeof(FILE_SYSTEM_POSIX_INFO));
+ if (!rc)
+ copy_posix_fs_info_to_kstatfs(info, fsdata);
+
+posix_qfsinf_exit:
+ free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+ return rc;
+}
+
+int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
@@ -4012,11 +4163,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
info = (struct smb2_fs_full_size_info *)(
le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
- rc = validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
- le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
- sizeof(struct smb2_fs_full_size_info));
+ rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
+ sizeof(struct smb2_fs_full_size_info));
if (!rc)
- copy_fs_info_to_kstatfs(info, fsdata);
+ smb2_copy_fs_info_to_kstatfs(info, fsdata);
qfsinf_exit:
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
@@ -4046,6 +4197,9 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
} else if (level == FS_SECTOR_SIZE_INFORMATION) {
max_len = sizeof(struct smb3_fs_ss_info);
min_len = sizeof(struct smb3_fs_ss_info);
+ } else if (level == FS_VOLUME_INFORMATION) {
+ max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
+ min_len = sizeof(struct smb3_fs_vol_info);
} else {
cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
return -EINVAL;
@@ -4073,7 +4227,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
rsp_len = le32_to_cpu(rsp->OutputBufferLength);
offset = le16_to_cpu(rsp->OutputBufferOffset);
- rc = validate_iov(offset, rsp_len, &rsp_iov, min_len);
+ rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
if (rc)
goto qfsattr_exit;
@@ -4090,6 +4244,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
tcon->ss_flags = le32_to_cpu(ss_info->Flags);
tcon->perf_sector_size =
le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
+ } else if (level == FS_VOLUME_INFORMATION) {
+ struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
+ (offset + (char *)rsp);
+ tcon->vol_serial_number = vol_info->VolumeSerialNumber;
+ tcon->vol_create_time = vol_info->VolumeCreationTime;
}
qfsattr_exit:
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index a671adcc44a6..8fb7887f2b3d 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -153,6 +153,8 @@ struct smb2_transform_hdr {
*
*/
+#define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL
+
#define SMB2_ERROR_STRUCTURE_SIZE2 cpu_to_le16(9)
struct smb2_err_rsp {
@@ -612,6 +614,18 @@ struct smb2_tree_disconnect_rsp {
#define SMB2_CREATE_TAG_POSIX 0x93AD25509CB411E7B42383DE968BCD7C
+/*
+ * Maximum number of iovs we need for an open/create request.
+ * [0] : struct smb2_create_req
+ * [1] : path
+ * [2] : lease context
+ * [3] : durable context
+ * [4] : posix context
+ * [5] : time warp context
+ * [6] : compound padding
+ */
+#define SMB2_CREATE_IOV_SIZE 7
+
struct smb2_create_req {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 57 */
@@ -765,6 +779,14 @@ struct create_durable_handle_reconnect_v2 {
struct durable_reconnect_context_v2 dcontext;
} __packed;
+/* See MS-SMB2 2.2.13.2.7 */
+struct crt_twarp_ctxt {
+ struct create_context ccontext;
+ __u8 Name[8];
+ __le64 Timestamp;
+} __packed;
+
#define COPY_CHUNK_RES_KEY_SIZE 24
struct resume_key_req {
char ResumeKey[COPY_CHUNK_RES_KEY_SIZE];
@@ -1223,6 +1245,7 @@ struct smb2_lease_ack {
#define FS_DRIVER_PATH_INFORMATION 9 /* Local only */
#define FS_VOLUME_FLAGS_INFORMATION 10 /* Local only */
#define FS_SECTOR_SIZE_INFORMATION 11 /* SMB3 or later. Query */
+#define FS_POSIX_INFORMATION 100 /* SMB3.1.1 POSIX. Query */
struct smb2_fs_full_size_info {
__le64 TotalAllocationUnits;
@@ -1248,6 +1271,17 @@ struct smb3_fs_ss_info {
__le32 ByteOffsetForPartitionAlignment;
} __packed;
+/* volume info struct - see MS-FSCC 2.5.9 */
+#define MAX_VOL_LABEL_LEN 32
+struct smb3_fs_vol_info {
+ __le64 VolumeCreationTime;
+ __u32 VolumeSerialNumber;
+ __le32 VolumeLabelLength; /* includes trailing null */
+	__u8 SupportsObjects; /* True if the file system supports objects (e.g. NTFS) */
+ __u8 Reserved;
+ __u8 VolumeLabel[0]; /* variable len */
+} __packed;
+
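Per MS-FSCC 2.5.9 the label is VolumeLabelLength bytes of UTF-16LE starting at VolumeLabel[]. A hedged sketch of how a consumer might extract it, assuming the existing cifs_strndup_from_utf16() helper (the function below is hypothetical, not part of this patch):

/* Illustrative only, not part of this patch */
static char *smb3_get_vol_label(struct smb3_fs_vol_info *vol,
				const struct nls_table *nls)
{
	int len = min_t(u32, le32_to_cpu(vol->VolumeLabelLength),
			MAX_VOL_LABEL_LEN);

	/* the label is UTF-16LE; convert at most len bytes */
	return cifs_strndup_from_utf16((char *)vol->VolumeLabel, len,
				       true, nls);
}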
/* partial list of QUERY INFO levels */
#define FILE_DIRECTORY_INFORMATION 1
#define FILE_FULL_DIRECTORY_INFORMATION 2
@@ -1361,4 +1395,6 @@ struct smb2_file_eof_info { /* encoding of request for level 10 */
__le64 EndOfFile; /* new end of file value */
} __packed; /* level 20 Set */
+extern char smb2_padding[7];
+
#endif /* _SMB2PDU_H */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 6e6a4f2ec890..b4076577eeb7 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -68,6 +68,7 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
extern int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *pfid);
+extern void close_shroot(struct cached_fid *cfid);
extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst,
struct smb2_file_all_info *src);
extern int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
@@ -132,6 +133,10 @@ extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms,
__le16 *path, __u8 *oplock,
struct smb2_file_all_info *buf,
struct kvec *err_iov, int *resp_buftype);
+extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ __u8 *oplock, struct cifs_open_parms *oparms,
+ __le16 *path);
+extern void SMB2_open_free(struct smb_rqst *rqst);
extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen,
@@ -140,6 +145,9 @@ extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int flags);
+extern int SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_file_id, u64 volatile_file_id);
+extern void SMB2_close_free(struct smb_rqst *rqst);
extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
@@ -149,6 +157,11 @@ extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct smb2_file_all_info *data);
+extern int SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid,
+ u8 info_class, u8 info_type,
+ u32 additional_info, size_t output_len);
+extern void SMB2_query_info_free(struct smb_rqst *rqst);
extern int SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
void **data, unsigned int *plen);
@@ -197,6 +210,9 @@ void smb2_cancelled_close_fid(struct work_struct *work);
extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct kstatfs *FSData);
+extern int SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_file_id, u64 volatile_file_id,
+ struct kstatfs *FSData);
extern int SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id, int lvl);
extern int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
@@ -213,9 +229,13 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
enum securityEnum);
-#ifdef CONFIG_CIFS_SMB311
+extern int smb3_encryption_required(const struct cifs_tcon *tcon);
+extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
+ struct kvec *iov, unsigned int min_buf_size);
+extern void smb2_copy_fs_info_to_kstatfs(
+ struct smb2_fs_full_size_info *pfs_inf,
+ struct kstatfs *kst);
extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
extern int smb311_update_preauth_hash(struct cifs_ses *ses,
struct kvec *iov, int nvec);
-#endif
#endif /* _SMB2PROTO_H */
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 719d55e63d88..7b351c65ee46 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -70,7 +70,6 @@ err:
return rc;
}
-#ifdef CONFIG_CIFS_SMB311
int
smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
{
@@ -98,7 +97,6 @@ err:
cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
return rc;
}
-#endif
static struct cifs_ses *
smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
@@ -173,7 +171,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
struct kvec *iov = rqst->rq_iov;
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
- struct shash_desc *shash = &server->secmech.sdeschmacsha256->shash;
+ struct shash_desc *shash;
struct smb_rqst drqst;
ses = smb2_find_smb_ses(server, shdr->SessionId);
@@ -187,7 +185,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
rc = smb2_crypto_shash_allocate(server);
if (rc) {
- cifs_dbg(VFS, "%s: shah256 alloc failed\n", __func__);
+ cifs_dbg(VFS, "%s: sha256 alloc failed\n", __func__);
return rc;
}
@@ -198,6 +196,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
return rc;
}
+ shash = &server->secmech.sdeschmacsha256->shash;
rc = crypto_shash_init(shash);
if (rc) {
cifs_dbg(VFS, "%s: Could not init sha256", __func__);
@@ -395,7 +394,6 @@ generate_smb30signingkey(struct cifs_ses *ses)
return generate_smb3signingkey(ses, &triplet);
}
-#ifdef CONFIG_CIFS_SMB311
int
generate_smb311signingkey(struct cifs_ses *ses)
@@ -423,7 +421,6 @@ generate_smb311signingkey(struct cifs_ses *ses)
return generate_smb3signingkey(ses, &triplet);
}
-#endif /* 311 */
int
smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index c55ea4e6201b..5fdb9a509a97 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -802,7 +802,7 @@ out1:
*/
static int smbd_post_send_negotiate_req(struct smbd_connection *info)
{
- struct ib_send_wr send_wr, *send_wr_fail;
+ struct ib_send_wr send_wr;
int rc = -ENOMEM;
struct smbd_request *request;
struct smbd_negotiate_req *packet;
@@ -854,7 +854,7 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
request->has_payload = false;
atomic_inc(&info->send_pending);
- rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+ rc = ib_post_send(info->id->qp, &send_wr, NULL);
if (!rc)
return 0;
@@ -1024,7 +1024,7 @@ static void smbd_destroy_header(struct smbd_connection *info,
static int smbd_post_send(struct smbd_connection *info,
struct smbd_request *request, bool has_payload)
{
- struct ib_send_wr send_wr, *send_wr_fail;
+ struct ib_send_wr send_wr;
int rc, i;
for (i = 0; i < request->num_sge; i++) {
@@ -1055,7 +1055,7 @@ static int smbd_post_send(struct smbd_connection *info,
atomic_inc(&info->send_pending);
}
- rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+ rc = ib_post_send(info->id->qp, &send_wr, NULL);
if (rc) {
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
if (has_payload) {
@@ -1184,7 +1184,7 @@ static int smbd_post_send_data(
static int smbd_post_recv(
struct smbd_connection *info, struct smbd_response *response)
{
- struct ib_recv_wr recv_wr, *recv_wr_fail = NULL;
+ struct ib_recv_wr recv_wr;
int rc = -EIO;
response->sge.addr = ib_dma_map_single(
@@ -1203,7 +1203,7 @@ static int smbd_post_recv(
recv_wr.sg_list = &response->sge;
recv_wr.num_sge = 1;
- rc = ib_post_recv(info->id->qp, &recv_wr, &recv_wr_fail);
+ rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
if (rc) {
ib_dma_unmap_single(info->id->device, response->sge.addr,
response->sge.length, DMA_FROM_DEVICE);
@@ -1662,9 +1662,16 @@ static struct smbd_connection *_smbd_get_connection(
info->max_receive_size = smbd_max_receive_size;
info->keep_alive_interval = smbd_keep_alive_interval;
- if (info->id->device->attrs.max_sge < SMBDIRECT_MAX_SGE) {
- log_rdma_event(ERR, "warning: device max_sge = %d too small\n",
- info->id->device->attrs.max_sge);
+ if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) {
+ log_rdma_event(ERR,
+ "warning: device max_send_sge = %d too small\n",
+ info->id->device->attrs.max_send_sge);
+ log_rdma_event(ERR, "Queue Pair creation may fail\n");
+ }
+ if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) {
+ log_rdma_event(ERR,
+ "warning: device max_recv_sge = %d too small\n",
+ info->id->device->attrs.max_recv_sge);
log_rdma_event(ERR, "Queue Pair creation may fail\n");
}
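Since the RDMA core now reports send and receive SGE limits separately (attrs.max_send_sge and attrs.max_recv_sge), a caller could also clamp its QP request rather than only warn; a hedged sketch of that alternative (not what this patch does):

	/* Alternative sketch only -- the patch itself just warns */
	qp_attr.cap.max_send_sge = min_t(u32, SMBDIRECT_MAX_SGE,
					 info->id->device->attrs.max_send_sge);
	qp_attr.cap.max_recv_sge = min_t(u32, SMBDIRECT_MAX_SGE,
					 info->id->device->attrs.max_recv_sge);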
@@ -2473,7 +2480,6 @@ struct smbd_mr *smbd_register_mr(
int rc, i;
enum dma_data_direction dir;
struct ib_reg_wr *reg_wr;
- struct ib_send_wr *bad_wr;
if (num_pages > info->max_frmr_depth) {
log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
@@ -2547,7 +2553,7 @@ skip_multiple_pages:
* on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
* on the next ib_post_send when we actaully send I/O to remote peer
*/
- rc = ib_post_send(info->id->qp, &reg_wr->wr, &bad_wr);
+ rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
if (!rc)
return smbdirect_mr;
@@ -2592,7 +2598,7 @@ static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
*/
int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
{
- struct ib_send_wr *wr, *bad_wr;
+ struct ib_send_wr *wr;
struct smbd_connection *info = smbdirect_mr->conn;
int rc = 0;
@@ -2607,7 +2613,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
wr->send_flags = IB_SEND_SIGNALED;
init_completion(&smbdirect_mr->invalidate_done);
- rc = ib_post_send(info->id->qp, wr, &bad_wr);
+ rc = ib_post_send(info->id->qp, wr, NULL);
if (rc) {
log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
smbd_disconnect_rdma_connection(info);
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index 67e413f6ee4d..d4aed5217a56 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -281,6 +281,44 @@ DEFINE_EVENT(smb3_cmd_done_class, smb3_##name, \
TP_ARGS(tid, sesid, cmd, mid))
DEFINE_SMB3_CMD_DONE_EVENT(cmd_done);
+DEFINE_SMB3_CMD_DONE_EVENT(ses_expired);
+
+DECLARE_EVENT_CLASS(smb3_mid_class,
+ TP_PROTO(__u16 cmd,
+ __u64 mid,
+ __u32 pid,
+ unsigned long when_sent,
+ unsigned long when_received),
+ TP_ARGS(cmd, mid, pid, when_sent, when_received),
+ TP_STRUCT__entry(
+ __field(__u16, cmd)
+ __field(__u64, mid)
+ __field(__u32, pid)
+ __field(unsigned long, when_sent)
+ __field(unsigned long, when_received)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->mid = mid;
+ __entry->pid = pid;
+ __entry->when_sent = when_sent;
+ __entry->when_received = when_received;
+ ),
+ TP_printk("\tcmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu",
+ __entry->cmd, __entry->mid, __entry->pid, __entry->when_sent,
+ __entry->when_received)
+)
+
+#define DEFINE_SMB3_MID_EVENT(name) \
+DEFINE_EVENT(smb3_mid_class, smb3_##name, \
+ TP_PROTO(__u16 cmd, \
+ __u64 mid, \
+ __u32 pid, \
+ unsigned long when_sent, \
+ unsigned long when_received), \
+ TP_ARGS(cmd, mid, pid, when_sent, when_received))
+
+DEFINE_SMB3_MID_EVENT(slow_rsp);
DECLARE_EVENT_CLASS(smb3_exit_err_class,
TP_PROTO(unsigned int xid,
@@ -422,6 +460,32 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name, \
DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
+DECLARE_EVENT_CLASS(smb3_reconnect_class,
+ TP_PROTO(__u64 currmid,
+ char *hostname),
+ TP_ARGS(currmid, hostname),
+ TP_STRUCT__entry(
+ __field(__u64, currmid)
+ __field(char *, hostname)
+ ),
+ TP_fast_assign(
+ __entry->currmid = currmid;
+ __entry->hostname = hostname;
+ ),
+ TP_printk("server=%s current_mid=0x%llx",
+ __entry->hostname,
+ __entry->currmid)
+)
+
+#define DEFINE_SMB3_RECONNECT_EVENT(name) \
+DEFINE_EVENT(smb3_reconnect_class, smb3_##name, \
+ TP_PROTO(__u64 currmid, \
+ char *hostname), \
+ TP_ARGS(currmid, hostname))
+
+DEFINE_SMB3_RECONNECT_EVENT(reconnect);
+DEFINE_SMB3_RECONNECT_EVENT(partial_send_reconnect);
+
#endif /* _CIFS_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index a341ec839c83..78f96fa3d7d9 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -115,8 +115,17 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
now = jiffies;
/* commands taking longer than one second are indications that
something is wrong, unless it is quite a slow link or server */
- if (time_after(now, midEntry->when_alloc + HZ)) {
- if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
+ if (time_after(now, midEntry->when_alloc + HZ) &&
+ (midEntry->command != command)) {
+ /* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command */
+ if ((le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS) &&
+ (le16_to_cpu(midEntry->command) >= 0))
+ cifs_stats_inc(&midEntry->server->smb2slowcmd[le16_to_cpu(midEntry->command)]);
+
+ trace_smb3_slow_rsp(le16_to_cpu(midEntry->command),
+ midEntry->mid, midEntry->pid,
+ midEntry->when_sent, midEntry->when_received);
+ if (cifsFYI & CIFS_TIMER) {
pr_debug(" CIFS slow rsp: cmd %d mid %llu",
midEntry->command, midEntry->mid);
pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
@@ -361,6 +370,8 @@ uncork:
* socket so the server throws away the partial SMB
*/
server->tcpStatus = CifsNeedReconnect;
+ trace_smb3_partial_send_reconnect(server->CurrentMid,
+ server->hostname);
}
smbd_done:
if (rc < 0 && rc != -EINTR)
@@ -373,26 +384,42 @@ smbd_done:
}
static int
-smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
+smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *rqst, int flags)
{
- struct smb_rqst cur_rqst;
+ struct kvec iov;
+ struct smb2_transform_hdr tr_hdr;
+ struct smb_rqst cur_rqst[MAX_COMPOUND];
int rc;
if (!(flags & CIFS_TRANSFORM_REQ))
- return __smb_send_rqst(server, 1, rqst);
+ return __smb_send_rqst(server, num_rqst, rqst);
- if (!server->ops->init_transform_rq ||
- !server->ops->free_transform_rq) {
- cifs_dbg(VFS, "Encryption requested but transform callbacks are missed\n");
+ if (num_rqst > MAX_COMPOUND - 1)
+ return -ENOMEM;
+
+ memset(&cur_rqst[0], 0, sizeof(cur_rqst));
+ memset(&iov, 0, sizeof(iov));
+ memset(&tr_hdr, 0, sizeof(tr_hdr));
+
+ iov.iov_base = &tr_hdr;
+ iov.iov_len = sizeof(tr_hdr);
+ cur_rqst[0].rq_iov = &iov;
+ cur_rqst[0].rq_nvec = 1;
+
+ if (!server->ops->init_transform_rq) {
+ cifs_dbg(VFS, "Encryption requested but transform callback "
+ "is missing\n");
return -EIO;
}
- rc = server->ops->init_transform_rq(server, &cur_rqst, rqst);
+ rc = server->ops->init_transform_rq(server, num_rqst + 1,
+ &cur_rqst[0], rqst);
if (rc)
return rc;
- rc = __smb_send_rqst(server, 1, &cur_rqst);
- server->ops->free_transform_rq(&cur_rqst);
+ rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
+ smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
return rc;
}
@@ -606,7 +633,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
*/
cifs_save_when_sent(mid);
cifs_in_send_inc(server);
- rc = smb_send_rqst(server, rqst, flags);
+ rc = smb_send_rqst(server, 1, rqst, flags);
cifs_in_send_dec(server);
if (rc < 0) {
@@ -746,20 +773,21 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
}
int
-cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
- struct smb_rqst *rqst, int *resp_buf_type, const int flags,
- struct kvec *resp_iov)
+compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ const int flags, const int num_rqst, struct smb_rqst *rqst,
+ int *resp_buf_type, struct kvec *resp_iov)
{
- int rc = 0;
+ int i, j, rc = 0;
int timeout, optype;
- struct mid_q_entry *midQ;
+ struct mid_q_entry *midQ[MAX_COMPOUND];
unsigned int credits = 1;
char *buf;
timeout = flags & CIFS_TIMEOUT_MASK;
optype = flags & CIFS_OP_MASK;
- *resp_buf_type = CIFS_NO_BUFFER; /* no response buf yet */
+ for (i = 0; i < num_rqst; i++)
+ resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
if ((ses == NULL) || (ses->server == NULL)) {
cifs_dbg(VFS, "Null session\n");
@@ -786,98 +814,117 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
mutex_lock(&ses->server->srv_mutex);
- midQ = ses->server->ops->setup_request(ses, rqst);
- if (IS_ERR(midQ)) {
- mutex_unlock(&ses->server->srv_mutex);
- /* Update # of requests on wire to server */
- add_credits(ses->server, 1, optype);
- return PTR_ERR(midQ);
+ for (i = 0; i < num_rqst; i++) {
+ midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
+ if (IS_ERR(midQ[i])) {
+ for (j = 0; j < i; j++)
+ cifs_delete_mid(midQ[j]);
+ mutex_unlock(&ses->server->srv_mutex);
+ /* Update # of requests on wire to server */
+ add_credits(ses->server, 1, optype);
+ return PTR_ERR(midQ[i]);
+ }
+
+ midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
}
- midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(ses->server);
- rc = smb_send_rqst(ses->server, rqst, flags);
+ rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
cifs_in_send_dec(ses->server);
- cifs_save_when_sent(midQ);
+
+ for (i = 0; i < num_rqst; i++)
+ cifs_save_when_sent(midQ[i]);
if (rc < 0)
ses->server->sequence_number -= 2;
+
mutex_unlock(&ses->server->srv_mutex);
- if (rc < 0)
- goto out;
+ for (i = 0; i < num_rqst; i++) {
+ if (rc < 0)
+ goto out;
-#ifdef CONFIG_CIFS_SMB311
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
- smb311_update_preauth_hash(ses, rqst->rq_iov,
- rqst->rq_nvec);
-#endif
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
+ smb311_update_preauth_hash(ses, rqst[i].rq_iov,
+ rqst[i].rq_nvec);
- if (timeout == CIFS_ASYNC_OP)
- goto out;
+ if (timeout == CIFS_ASYNC_OP)
+ goto out;
- rc = wait_for_response(ses->server, midQ);
- if (rc != 0) {
- cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
- send_cancel(ses->server, rqst, midQ);
- spin_lock(&GlobalMid_Lock);
- if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
- midQ->mid_flags |= MID_WAIT_CANCELLED;
- midQ->callback = DeleteMidQEntry;
+ rc = wait_for_response(ses->server, midQ[i]);
+ if (rc != 0) {
+ cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
+ midQ[i]->mid);
+ send_cancel(ses->server, &rqst[i], midQ[i]);
+ spin_lock(&GlobalMid_Lock);
+ if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
+ midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
+ midQ[i]->callback = DeleteMidQEntry;
+ spin_unlock(&GlobalMid_Lock);
+ add_credits(ses->server, 1, optype);
+ return rc;
+ }
spin_unlock(&GlobalMid_Lock);
+ }
+
+ rc = cifs_sync_mid_result(midQ[i], ses->server);
+ if (rc != 0) {
add_credits(ses->server, 1, optype);
return rc;
}
- spin_unlock(&GlobalMid_Lock);
- }
-
- rc = cifs_sync_mid_result(midQ, ses->server);
- if (rc != 0) {
- add_credits(ses->server, 1, optype);
- return rc;
- }
-
- if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
- rc = -EIO;
- cifs_dbg(FYI, "Bad MID state?\n");
- goto out;
- }
- buf = (char *)midQ->resp_buf;
- resp_iov->iov_base = buf;
- resp_iov->iov_len = midQ->resp_buf_size +
- ses->server->vals->header_preamble_size;
- if (midQ->large_buf)
- *resp_buf_type = CIFS_LARGE_BUFFER;
- else
- *resp_buf_type = CIFS_SMALL_BUFFER;
+ if (!midQ[i]->resp_buf ||
+ midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
+ rc = -EIO;
+ cifs_dbg(FYI, "Bad MID state?\n");
+ goto out;
+ }
-#ifdef CONFIG_CIFS_SMB311
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
- struct kvec iov = {
- .iov_base = resp_iov->iov_base,
- .iov_len = resp_iov->iov_len
- };
- smb311_update_preauth_hash(ses, &iov, 1);
- }
-#endif
+ buf = (char *)midQ[i]->resp_buf;
+ resp_iov[i].iov_base = buf;
+ resp_iov[i].iov_len = midQ[i]->resp_buf_size +
+ ses->server->vals->header_preamble_size;
+
+ if (midQ[i]->large_buf)
+ resp_buf_type[i] = CIFS_LARGE_BUFFER;
+ else
+ resp_buf_type[i] = CIFS_SMALL_BUFFER;
+
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
+ struct kvec iov = {
+ .iov_base = resp_iov[i].iov_base,
+ .iov_len = resp_iov[i].iov_len
+ };
+ smb311_update_preauth_hash(ses, &iov, 1);
+ }
- credits = ses->server->ops->get_credits(midQ);
+ credits = ses->server->ops->get_credits(midQ[i]);
- rc = ses->server->ops->check_receive(midQ, ses->server,
- flags & CIFS_LOG_ERROR);
+ rc = ses->server->ops->check_receive(midQ[i], ses->server,
+ flags & CIFS_LOG_ERROR);
- /* mark it so buf will not be freed by cifs_delete_mid */
- if ((flags & CIFS_NO_RESP) == 0)
- midQ->resp_buf = NULL;
+ /* mark it so buf will not be freed by cifs_delete_mid */
+ if ((flags & CIFS_NO_RESP) == 0)
+ midQ[i]->resp_buf = NULL;
+ }
out:
- cifs_delete_mid(midQ);
+ for (i = 0; i < num_rqst; i++)
+ cifs_delete_mid(midQ[i]);
add_credits(ses->server, credits, optype);
return rc;
}
int
+cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct smb_rqst *rqst, int *resp_buf_type, const int flags,
+ struct kvec *resp_iov)
+{
+ return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
+ resp_iov);
+}
+
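Together with the SMB2_open_init()/SMB2_open_free() and SMB2_close_init()/SMB2_close_free() helpers added above, compound_send_recv() lets a caller chain several PDUs into a single round trip. A hedged sketch of an open plus close pair (rc checks elided; tcon, ses, oparms, path, xid and flags are assumed from context, and the NextCommand chaining that makes the server treat these as one related compound lands in later patches):

	/* Illustrative sketch only -- not code from this patch */
	struct smb_rqst rqst[2] = {};
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE] = {};
	struct kvec close_iov[1] = {};
	struct kvec rsp_iov[2];
	int resp_buftype[2];
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	int rc;

	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
	rc = SMB2_open_init(tcon, &rqst[0], &oplock, oparms, path);

	rqst[1].rq_iov = close_iov;
	rqst[1].rq_nvec = 1;
	rc = SMB2_close_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID);

	rc = compound_send_recv(xid, ses, flags, 2, rqst,
				resp_buftype, rsp_iov);

	SMB2_open_free(&rqst[0]);
	SMB2_close_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);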
+int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
const int flags, struct kvec *resp_iov)
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 316af84674f1..50ddb795aaeb 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -35,6 +35,14 @@
#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
#define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
#define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
+/*
+ * Although these three are just aliases for the above, we want to move away
+ * from the confusing 20+ year old 'cifs' name: that dialect is no longer
+ * secure, superseded years ago by SMB2 and the even more secure SMB3.
+ */
+#define SMB3_XATTR_CIFS_ACL "system.smb3_acl"
+#define SMB3_XATTR_ATTRIB "smb3.dosattrib" /* full name: user.smb3.dosattrib */
+#define SMB3_XATTR_CREATETIME "smb3.creationtime" /* user.smb3.creationtime */
/* BB need to add server (Samba e.g) support for security and trusted prefix */
enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT };
@@ -220,10 +228,12 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
switch (handler->flags) {
case XATTR_USER:
cifs_dbg(FYI, "%s:querying user xattr %s\n", __func__, name);
- if (strcmp(name, CIFS_XATTR_ATTRIB) == 0) {
+ if ((strcmp(name, CIFS_XATTR_ATTRIB) == 0) ||
+ (strcmp(name, SMB3_XATTR_ATTRIB) == 0)) {
rc = cifs_attrib_get(dentry, inode, value, size);
break;
- } else if (strcmp(name, CIFS_XATTR_CREATETIME) == 0) {
+ } else if ((strcmp(name, CIFS_XATTR_CREATETIME) == 0) ||
+ (strcmp(name, SMB3_XATTR_CREATETIME) == 0)) {
rc = cifs_creation_time_get(dentry, inode, value, size);
break;
}
@@ -363,6 +373,19 @@ static const struct xattr_handler cifs_cifs_acl_xattr_handler = {
.set = cifs_xattr_set,
};
+/*
+ * Although this is just an alias for the above, we want to move away
+ * from the confusing 20+ year old 'cifs' name: that dialect is no
+ * longer secure, and it was superseded long ago by SMB2 and the
+ * highly secure SMB3 and later dialects.
+ */
+static const struct xattr_handler smb3_acl_xattr_handler = {
+ .name = SMB3_XATTR_CIFS_ACL,
+ .flags = XATTR_CIFS_ACL,
+ .get = cifs_xattr_get,
+ .set = cifs_xattr_set,
+};
+
static const struct xattr_handler cifs_posix_acl_access_xattr_handler = {
.name = XATTR_NAME_POSIX_ACL_ACCESS,
.flags = XATTR_ACL_ACCESS,
@@ -381,6 +404,7 @@ const struct xattr_handler *cifs_xattr_handlers[] = {
&cifs_user_xattr_handler,
&cifs_os2_xattr_handler,
&cifs_cifs_acl_xattr_handler,
+	&smb3_acl_xattr_handler, /* alias for the above, avoiding the "cifs" name */
&cifs_posix_acl_access_xattr_handler,
&cifs_posix_acl_default_xattr_handler,
NULL
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 9907475b4226..a9b00942e87d 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -198,34 +198,6 @@ static int do_video_stillpicture(struct file *file,
return err;
}
-struct compat_video_spu_palette {
- int length;
- compat_uptr_t palette;
-};
-
-static int do_video_set_spu_palette(struct file *file,
- unsigned int cmd, struct compat_video_spu_palette __user *up)
-{
- struct video_spu_palette __user *up_native;
- compat_uptr_t palp;
- int length, err;
-
- err = get_user(palp, &up->palette);
- err |= get_user(length, &up->length);
- if (err)
- return -EFAULT;
-
- up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
- err = put_user(compat_ptr(palp), &up_native->palette);
- err |= put_user(length, &up_native->length);
- if (err)
- return -EFAULT;
-
- err = do_ioctl(file, cmd, (unsigned long) up_native);
-
- return err;
-}
-
#ifdef CONFIG_BLOCK
typedef struct sg_io_hdr32 {
compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
@@ -1206,9 +1178,6 @@ COMPATIBLE_IOCTL(AUDIO_CLEAR_BUFFER)
COMPATIBLE_IOCTL(AUDIO_SET_ID)
COMPATIBLE_IOCTL(AUDIO_SET_MIXER)
COMPATIBLE_IOCTL(AUDIO_SET_STREAMTYPE)
-COMPATIBLE_IOCTL(AUDIO_SET_EXT_ID)
-COMPATIBLE_IOCTL(AUDIO_SET_ATTRIBUTES)
-COMPATIBLE_IOCTL(AUDIO_SET_KARAOKE)
COMPATIBLE_IOCTL(DMX_START)
COMPATIBLE_IOCTL(DMX_STOP)
COMPATIBLE_IOCTL(DMX_SET_FILTER)
@@ -1233,16 +1202,9 @@ COMPATIBLE_IOCTL(VIDEO_FAST_FORWARD)
COMPATIBLE_IOCTL(VIDEO_SLOWMOTION)
COMPATIBLE_IOCTL(VIDEO_GET_CAPABILITIES)
COMPATIBLE_IOCTL(VIDEO_CLEAR_BUFFER)
-COMPATIBLE_IOCTL(VIDEO_SET_ID)
COMPATIBLE_IOCTL(VIDEO_SET_STREAMTYPE)
COMPATIBLE_IOCTL(VIDEO_SET_FORMAT)
-COMPATIBLE_IOCTL(VIDEO_SET_SYSTEM)
-COMPATIBLE_IOCTL(VIDEO_SET_HIGHLIGHT)
-COMPATIBLE_IOCTL(VIDEO_SET_SPU)
-COMPATIBLE_IOCTL(VIDEO_GET_NAVI)
-COMPATIBLE_IOCTL(VIDEO_SET_ATTRIBUTES)
COMPATIBLE_IOCTL(VIDEO_GET_SIZE)
-COMPATIBLE_IOCTL(VIDEO_GET_FRAME_RATE)
/* cec */
COMPATIBLE_IOCTL(CEC_ADAP_G_CAPS)
COMPATIBLE_IOCTL(CEC_ADAP_G_LOG_ADDRS)
@@ -1347,8 +1309,6 @@ static long do_ioctl_trans(unsigned int cmd,
return do_video_get_event(file, cmd, argp);
case VIDEO_STILLPICTURE:
return do_video_stillpicture(file, cmd, argp);
- case VIDEO_SET_SPU_PALETTE:
- return do_video_set_spu_palette(file, cmd, argp);
}
/*
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 577cff24707b..39843fa7e11b 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1777,6 +1777,16 @@ void configfs_unregister_group(struct config_group *group)
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+ mutex_lock(&subsys->su_mutex);
+ if (!group->cg_item.ci_parent->ci_group) {
+ /*
+ * The parent has already been unlinked and detached
+ * due to a rmdir.
+		 * due to an rmdir.
+ goto unlink_group;
+ }
+ mutex_unlock(&subsys->su_mutex);
+
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
spin_lock(&configfs_dirent_lock);
configfs_detach_prep(dentry, NULL);
@@ -1791,6 +1801,7 @@ void configfs_unregister_group(struct config_group *group)
dput(dentry);
mutex_lock(&subsys->su_mutex);
+unlink_group:
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
}
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 88f266efc09b..99d491cd01f9 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -64,7 +64,6 @@ static void config_item_init(struct config_item *item)
*/
int config_item_set_name(struct config_item *item, const char *fmt, ...)
{
- int error = 0;
int limit = CONFIGFS_ITEM_NAME_LEN;
int need;
va_list args;
@@ -79,25 +78,11 @@ int config_item_set_name(struct config_item *item, const char *fmt, ...)
if (need < limit)
name = item->ci_namebuf;
else {
- /*
- * Need more space? Allocate it and try again
- */
- limit = need + 1;
- name = kmalloc(limit, GFP_KERNEL);
- if (!name) {
- error = -ENOMEM;
- goto Done;
- }
va_start(args, fmt);
- need = vsnprintf(name, limit, fmt, args);
+ name = kvasprintf(GFP_KERNEL, fmt, args);
va_end(args);
-
- /* Still? Give up. */
- if (need >= limit) {
- kfree(name);
- error = -EFAULT;
- goto Done;
- }
+ if (!name)
+ return -EFAULT;
}
/* Free the old name, if necessary. */
@@ -106,8 +91,7 @@ int config_item_set_name(struct config_item *item, const char *fmt, ...)
/* Now, set the new name */
item->ci_name = name;
- Done:
- return error;
+ return 0;
}
EXPORT_SYMBOL(config_item_set_name);
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index 78ffc2699993..a5c54af861f7 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -64,7 +64,7 @@ static void fill_item_path(struct config_item * item, char * buffer, int length)
/* back up enough to print this bus id with '/' */
length -= cur;
- strncpy(buffer + length,config_item_name(p),cur);
+ memcpy(buffer + length, config_item_name(p), cur);
*(buffer + --length) = '/';
}
}
diff --git a/fs/dax.c b/fs/dax.c
index 641192808bb6..f32d7125ad0f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -226,8 +226,8 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
*
* Must be called with the i_pages lock held.
*/
-static void *get_unlocked_mapping_entry(struct address_space *mapping,
- pgoff_t index, void ***slotp)
+static void *__get_unlocked_mapping_entry(struct address_space *mapping,
+ pgoff_t index, void ***slotp, bool (*wait_fn)(void))
{
void *entry, **slot;
struct wait_exceptional_entry_queue ewait;
@@ -237,6 +237,8 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
ewait.wait.func = wake_exceptional_entry_func;
for (;;) {
+ bool revalidate;
+
entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
&slot);
if (!entry ||
@@ -251,14 +253,31 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
prepare_to_wait_exclusive(wq, &ewait.wait,
TASK_UNINTERRUPTIBLE);
xa_unlock_irq(&mapping->i_pages);
- schedule();
+ revalidate = wait_fn();
finish_wait(wq, &ewait.wait);
xa_lock_irq(&mapping->i_pages);
+ if (revalidate)
+ return ERR_PTR(-EAGAIN);
}
}
-static void dax_unlock_mapping_entry(struct address_space *mapping,
- pgoff_t index)
+static bool entry_wait(void)
+{
+ schedule();
+ /*
+ * Never return an ERR_PTR() from
+ * __get_unlocked_mapping_entry(), just keep looping.
+ */
+ return false;
+}
+
+static void *get_unlocked_mapping_entry(struct address_space *mapping,
+ pgoff_t index, void ***slotp)
+{
+ return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
+}
+
+static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
void *entry, **slot;
@@ -277,7 +296,7 @@ static void dax_unlock_mapping_entry(struct address_space *mapping,
static void put_locked_mapping_entry(struct address_space *mapping,
pgoff_t index)
{
- dax_unlock_mapping_entry(mapping, index);
+ unlock_mapping_entry(mapping, index);
}
/*
@@ -319,18 +338,27 @@ static unsigned long dax_radix_end_pfn(void *entry)
for (pfn = dax_radix_pfn(entry); \
pfn < dax_radix_end_pfn(entry); pfn++)
-static void dax_associate_entry(void *entry, struct address_space *mapping)
+/*
+ * TODO: for reflink+dax we need a way to associate a single page with
+ * multiple address_space instances at different linear_page_index()
+ * offsets.
+ */
+static void dax_associate_entry(void *entry, struct address_space *mapping,
+ struct vm_area_struct *vma, unsigned long address)
{
- unsigned long pfn;
+ unsigned long size = dax_entry_size(entry), pfn, index;
+ int i = 0;
if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
return;
+ index = linear_page_index(vma, address & ~(size - 1));
for_each_mapped_pfn(entry, pfn) {
struct page *page = pfn_to_page(pfn);
WARN_ON_ONCE(page->mapping);
page->mapping = mapping;
+ page->index = index + i++;
}
}
@@ -348,6 +376,7 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
WARN_ON_ONCE(page->mapping && page->mapping != mapping);
page->mapping = NULL;
+ page->index = 0;
}
}
@@ -364,6 +393,84 @@ static struct page *dax_busy_page(void *entry)
return NULL;
}
+static bool entry_wait_revalidate(void)
+{
+ rcu_read_unlock();
+ schedule();
+ rcu_read_lock();
+
+ /*
+	 * Tell __get_unlocked_mapping_entry() to take a break; we need
+	 * to revalidate page->mapping after dropping the locks.
+ */
+ return true;
+}
+
+bool dax_lock_mapping_entry(struct page *page)
+{
+ pgoff_t index;
+ struct inode *inode;
+ bool did_lock = false;
+ void *entry = NULL, **slot;
+ struct address_space *mapping;
+
+ rcu_read_lock();
+ for (;;) {
+ mapping = READ_ONCE(page->mapping);
+
+ if (!dax_mapping(mapping))
+ break;
+
+ /*
+ * In the device-dax case there's no need to lock, a
+ * struct dev_pagemap pin is sufficient to keep the
+ * inode alive, and we assume we have dev_pagemap pin
+ * otherwise we would not have a valid pfn_to_page()
+ * translation.
+ */
+ inode = mapping->host;
+ if (S_ISCHR(inode->i_mode)) {
+ did_lock = true;
+ break;
+ }
+
+ xa_lock_irq(&mapping->i_pages);
+ if (mapping != page->mapping) {
+ xa_unlock_irq(&mapping->i_pages);
+ continue;
+ }
+ index = page->index;
+
+ entry = __get_unlocked_mapping_entry(mapping, index, &slot,
+ entry_wait_revalidate);
+ if (!entry) {
+ xa_unlock_irq(&mapping->i_pages);
+ break;
+ } else if (IS_ERR(entry)) {
+ WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
+ continue;
+ }
+ lock_slot(mapping, slot);
+ did_lock = true;
+ xa_unlock_irq(&mapping->i_pages);
+ break;
+ }
+ rcu_read_unlock();
+
+ return did_lock;
+}
+
+void dax_unlock_mapping_entry(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = mapping->host;
+
+ if (S_ISCHR(inode->i_mode))
+ return;
+
+ unlock_mapping_entry(mapping, page->index);
+}
+
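dax_lock_mapping_entry()/dax_unlock_mapping_entry() give a caller that holds only a struct page (for example, a memory-failure handler) a way to pin page->mapping before using it. A hedged usage sketch (the consumer below is hypothetical):

	/*
	 * Illustrative only, not part of this patch: stabilize
	 * page->mapping and page->index before walking them.
	 */
	if (dax_lock_mapping_entry(page)) {
		/* page->mapping and page->index cannot change here */
		handle_poisoned_page(page);	/* hypothetical consumer */
		dax_unlock_mapping_entry(page);
	}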
/*
* Find radix tree entry at given index. If it points to an exceptional entry,
* return it with the radix tree entry locked. If the radix tree doesn't
@@ -566,7 +673,8 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
if (index >= end)
break;
- if (!radix_tree_exceptional_entry(pvec_ent))
+ if (WARN_ON_ONCE(
+ !radix_tree_exceptional_entry(pvec_ent)))
continue;
xa_lock_irq(&mapping->i_pages);
@@ -578,6 +686,13 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
if (page)
break;
}
+
+ /*
+ * We don't expect normal struct page entries to exist in our
+ * tree, but we keep these pagevec calls so that this code is
+ * consistent with the common pattern for handling pagevecs
+ * throughout the kernel.
+ */
pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
index++;
@@ -647,7 +762,6 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
{
void *vto, *kaddr;
pgoff_t pgoff;
- pfn_t pfn;
long rc;
int id;
@@ -656,7 +770,7 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
return rc;
id = dax_read_lock();
- rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
+ rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
if (rc < 0) {
dax_read_unlock(id);
return rc;
@@ -701,7 +815,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
new_entry = dax_radix_locked_entry(pfn, flags);
if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
dax_disassociate_entry(entry, mapping, false);
- dax_associate_entry(new_entry, mapping);
+ dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
}
if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
@@ -967,7 +1081,6 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
{
const sector_t sector = dax_iomap_sector(iomap, pos);
pgoff_t pgoff;
- void *kaddr;
int id, rc;
long length;
@@ -976,7 +1089,7 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
return rc;
id = dax_read_lock();
length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
- &kaddr, pfnp);
+ NULL, pfnp);
if (length < 0) {
rc = length;
goto out;
@@ -1052,15 +1165,13 @@ int __dax_zero_page_range(struct block_device *bdev,
pgoff_t pgoff;
long rc, id;
void *kaddr;
- pfn_t pfn;
rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
if (rc)
return rc;
id = dax_read_lock();
- rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
- &pfn);
+ rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
if (rc < 0) {
dax_read_unlock(id);
return rc;
@@ -1116,7 +1227,6 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
ssize_t map_len;
pgoff_t pgoff;
void *kaddr;
- pfn_t pfn;
if (fatal_signal_pending(current)) {
ret = -EINTR;
@@ -1128,7 +1238,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
break;
map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
- &kaddr, &pfn);
+ &kaddr, NULL);
if (map_len < 0) {
ret = map_len;
break;
diff --git a/fs/dcache.c b/fs/dcache.c
index ceb7b491d1b9..2e7e8d85e9b4 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -292,7 +292,8 @@ void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry
spin_unlock(&dentry->d_lock);
name->name = p->name;
} else {
- memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
+ memcpy(name->inline_name, dentry->d_iname,
+ dentry->d_name.len + 1);
spin_unlock(&dentry->d_lock);
name->name = name->inline_name;
}
@@ -729,16 +730,16 @@ static inline bool fast_dput(struct dentry *dentry)
if (dentry->d_lockref.count > 1) {
dentry->d_lockref.count--;
spin_unlock(&dentry->d_lock);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
* If we weren't the last ref, we're done.
*/
if (ret)
- return 1;
+ return true;
/*
* Careful, careful. The reference count went down
@@ -767,7 +768,7 @@ static inline bool fast_dput(struct dentry *dentry)
/* Nothing to do? Dropping the reference was all we needed? */
if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
- return 1;
+ return true;
/*
* Not the fast normal case? Get the lock. We've already decremented
@@ -784,7 +785,7 @@ static inline bool fast_dput(struct dentry *dentry)
*/
if (dentry->d_lockref.count) {
spin_unlock(&dentry->d_lock);
- return 1;
+ return true;
}
/*
@@ -793,7 +794,7 @@ static inline bool fast_dput(struct dentry *dentry)
* set it to 1.
*/
dentry->d_lockref.count = 1;
- return 0;
+ return false;
}
@@ -1889,40 +1890,13 @@ void d_instantiate_new(struct dentry *entry, struct inode *inode)
spin_lock(&inode->i_lock);
__d_instantiate(entry, inode);
WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW;
+ inode->i_state &= ~I_NEW & ~I_CREATING;
smp_mb();
wake_up_bit(&inode->i_state, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
-/**
- * d_instantiate_no_diralias - instantiate a non-aliased dentry
- * @entry: dentry to complete
- * @inode: inode to attach to this dentry
- *
- * Fill in inode information in the entry. If a directory alias is found, then
- * return an error (and drop inode). Together with d_materialise_unique() this
- * guarantees that a directory inode may never have more than one alias.
- */
-int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
-{
- BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
-
- security_d_instantiate(entry, inode);
- spin_lock(&inode->i_lock);
- if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
- spin_unlock(&inode->i_lock);
- iput(inode);
- return -EBUSY;
- }
- __d_instantiate(entry, inode);
- spin_unlock(&inode->i_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(d_instantiate_no_diralias);
-
struct dentry *d_make_root(struct inode *root_inode)
{
struct dentry *res = NULL;
@@ -2675,33 +2649,6 @@ struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
}
EXPORT_SYMBOL(d_exact_alias);
-/**
- * dentry_update_name_case - update case insensitive dentry with a new name
- * @dentry: dentry to be updated
- * @name: new name
- *
- * Update a case insensitive dentry with new case of name.
- *
- * dentry must have been returned by d_lookup with name @name. Old and new
- * name lengths must match (ie. no d_compare which allows mismatched name
- * lengths).
- *
- * Parent inode i_mutex must be held over d_lookup and into this call (to
- * keep renames and concurrent inserts, and readdir(2) away).
- */
-void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
-{
- BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
- BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
-
- spin_lock(&dentry->d_lock);
- write_seqcount_begin(&dentry->d_seq);
- memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
- write_seqcount_end(&dentry->d_seq);
- spin_unlock(&dentry->d_lock);
-}
-EXPORT_SYMBOL(dentry_update_name_case);
-
static void swap_names(struct dentry *dentry, struct dentry *target)
{
if (unlikely(dname_external(target))) {
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 71fccccf317e..8c6ab6c95727 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -86,7 +86,9 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
/* length of the variable name itself: remove GUID and separator */
namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
- uuid_le_to_bin(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+ err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+ if (err)
+ goto out;
if (efivar_variable_is_removable(var->var.VendorGuid,
dentry->d_name.name, namelen))
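/*
 * Sketch of the behavioural difference (example input assumed): unlike
 * the old uuid_le_to_bin(), guid_parse() validates its input and
 * returns -EINVAL on a malformed GUID, which efivarfs_create() now
 * propagates instead of silently accepting garbage.
 */
guid_t guid;
int err = guid_parse("12345678-1234-1234-1234-123456789abc", &guid);
if (err)
	return err;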
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 67db22fe99c5..42bbe6824b4b 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -50,10 +50,10 @@
*
* 1) epmutex (mutex)
* 2) ep->mtx (mutex)
- * 3) ep->lock (spinlock)
+ * 3) ep->wq.lock (spinlock)
*
* The acquire order is the one listed above, from 1 to 3.
- * We need a spinlock (ep->lock) because we manipulate objects
+ * We need a spinlock (ep->wq.lock) because we manipulate objects
* from inside the poll callback, that might be triggered from
* a wake_up() that in turn might be called from IRQ context.
* So we can't sleep inside the poll callback and hence we need
@@ -85,7 +85,7 @@
* of epoll file descriptors, we use the current recursion depth as
* the lockdep subkey.
* It is possible to drop the "ep->mtx" and to use the global
- * mutex "epmutex" (together with "ep->lock") to have it working,
+ * mutex "epmutex" (together with "ep->wq.lock") to have it working,
* but having "ep->mtx" will make the interface more scalable.
* Events that require holding "epmutex" are very rare, while for
* normal operations the epoll private "ep->mtx" will guarantee
@@ -182,11 +182,10 @@ struct epitem {
* This structure is stored inside the "private_data" member of the file
* structure and represents the main data structure for the eventpoll
* interface.
+ *
+ * Access to it is protected by the lock inside wq.
*/
struct eventpoll {
- /* Protect the access to this structure */
- spinlock_t lock;
-
/*
* This mutex is used to ensure that files are not removed
* while epoll is using them. This is held during the event
@@ -210,7 +209,7 @@ struct eventpoll {
/*
* This is a single linked list that chains all the "struct epitem" that
* happened while transferring ready events to userspace w/out
- * holding ->lock.
+ * holding ->wq.lock.
*/
struct epitem *ovflist;
@@ -337,9 +336,9 @@ static inline int ep_cmp_ffd(struct epoll_filefd *p1,
}
/* Tells us if the item is currently linked */
-static inline int ep_is_linked(struct list_head *p)
+static inline int ep_is_linked(struct epitem *epi)
{
- return !list_empty(p);
+ return !list_empty(&epi->rdllink);
}
static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
@@ -392,7 +391,6 @@ static bool ep_busy_loop_end(void *p, unsigned long start_time)
return ep_events_available(ep) || busy_loop_timeout(start_time);
}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
/*
* Busy poll if globally on and supporting sockets found && no events,
@@ -402,20 +400,16 @@ static bool ep_busy_loop_end(void *p, unsigned long start_time)
*/
static void ep_busy_loop(struct eventpoll *ep, int nonblock)
{
-#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int napi_id = READ_ONCE(ep->napi_id);
if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on())
napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep);
-#endif
}
static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
{
-#ifdef CONFIG_NET_RX_BUSY_POLL
if (ep->napi_id)
ep->napi_id = 0;
-#endif
}
/*
@@ -423,7 +417,6 @@ static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
*/
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
-#ifdef CONFIG_NET_RX_BUSY_POLL
struct eventpoll *ep;
unsigned int napi_id;
struct socket *sock;
@@ -453,9 +446,24 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
/* record NAPI ID for use in next busy poll */
ep->napi_id = napi_id;
-#endif
}
+#else
+
+static inline void ep_busy_loop(struct eventpoll *ep, int nonblock)
+{
+}
+
+static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
+{
+}
+
+static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
+{
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
/**
* ep_call_nested - Perform a bound (possibly) nested call, by checking
* that the recursion limit is not exceeded, and that
@@ -668,10 +676,11 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
{
__poll_t res;
int pwake = 0;
- unsigned long flags;
struct epitem *epi, *nepi;
LIST_HEAD(txlist);
+ lockdep_assert_irqs_enabled();
+
/*
* We need to lock this because we could be hit by
* eventpoll_release_file() and epoll_ctl().
@@ -688,17 +697,17 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
* because we want the "sproc" callback to be able to do it
* in a lockless way.
*/
- spin_lock_irqsave(&ep->lock, flags);
+ spin_lock_irq(&ep->wq.lock);
list_splice_init(&ep->rdllist, &txlist);
ep->ovflist = NULL;
- spin_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irq(&ep->wq.lock);
/*
* Now call the callback function.
*/
res = (*sproc)(ep, &txlist, priv);
- spin_lock_irqsave(&ep->lock, flags);
+ spin_lock_irq(&ep->wq.lock);
/*
* During the time we spent inside the "sproc" callback, some
* other events might have been queued by the poll callback.
@@ -712,7 +721,7 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
* queued into ->ovflist but the "txlist" might already
* contain them, and the list_splice() below takes care of them.
*/
- if (!ep_is_linked(&epi->rdllink)) {
+ if (!ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
}
@@ -740,7 +749,7 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
- spin_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irq(&ep->wq.lock);
if (!ep_locked)
mutex_unlock(&ep->mtx);
@@ -764,16 +773,12 @@ static void epi_rcu_free(struct rcu_head *head)
*/
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
- unsigned long flags;
struct file *file = epi->ffd.file;
+ lockdep_assert_irqs_enabled();
+
/*
- * Removes poll wait queue hooks. We _have_ to do this without holding
- * the "ep->lock" otherwise a deadlock might occur. This because of the
- * sequence of the lock acquisition. Here we do "ep->lock" then the wait
- * queue head lock when unregistering the wait queue. The wakeup callback
- * will run by holding the wait queue head lock and will call our callback
- * that will try to get "ep->lock".
+ * Removes poll wait queue hooks.
*/
ep_unregister_pollwait(ep, epi);
@@ -784,10 +789,10 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
rb_erase_cached(&epi->rbn, &ep->rbr);
- spin_lock_irqsave(&ep->lock, flags);
- if (ep_is_linked(&epi->rdllink))
+ spin_lock_irq(&ep->wq.lock);
+ if (ep_is_linked(epi))
list_del_init(&epi->rdllink);
- spin_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irq(&ep->wq.lock);
wakeup_source_unregister(ep_wakeup_source(epi));
/*
@@ -837,7 +842,7 @@ static void ep_free(struct eventpoll *ep)
* Walks through the whole tree by freeing each "struct epitem". At this
* point we are sure no poll callbacks will be lingering around, and also by
* holding "epmutex" we can be sure that no file cleanup code will hit
- * us during this operation. So we can avoid the lock on "ep->lock".
+ * us during this operation. So we can avoid the lock on "ep->wq.lock".
* We do not need to lock ep->mtx, either, we only do it to prevent
* a lockdep warning.
*/
@@ -1017,7 +1022,6 @@ static int ep_alloc(struct eventpoll **pep)
if (unlikely(!ep))
goto free_uid;
- spin_lock_init(&ep->lock);
mutex_init(&ep->mtx);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
@@ -1122,7 +1126,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
__poll_t pollflags = key_to_poll(key);
int ewake = 0;
- spin_lock_irqsave(&ep->lock, flags);
+ spin_lock_irqsave(&ep->wq.lock, flags);
ep_set_busy_poll_napi_id(epi);
@@ -1167,7 +1171,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
}
/* If this file is already in the ready list we exit soon */
- if (!ep_is_linked(&epi->rdllink)) {
+ if (!ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake_rcu(epi);
}
@@ -1199,7 +1203,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
pwake++;
out_unlock:
- spin_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irqrestore(&ep->wq.lock, flags);
/* We have to call this outside the lock */
if (pwake)
@@ -1417,11 +1421,12 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
{
int error, pwake = 0;
__poll_t revents;
- unsigned long flags;
long user_watches;
struct epitem *epi;
struct ep_pqueue epq;
+ lockdep_assert_irqs_enabled();
+
user_watches = atomic_long_read(&ep->user->epoll_watches);
if (unlikely(user_watches >= max_user_watches))
return -ENOSPC;
@@ -1484,13 +1489,13 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
goto error_remove_epi;
/* We have to drop the new item inside our item list to keep track of it */
- spin_lock_irqsave(&ep->lock, flags);
+ spin_lock_irq(&ep->wq.lock);
/* record NAPI ID of new item if present */
ep_set_busy_poll_napi_id(epi);
/* If the file is already "ready" we drop it inside the ready list */
- if (revents && !ep_is_linked(&epi->rdllink)) {
+ if (revents && !ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
@@ -1501,7 +1506,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
pwake++;
}
- spin_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irq(&ep->wq.lock);
atomic_long_inc(&ep->user->epoll_watches);
@@ -1527,10 +1532,10 @@ error_unregister:
* list, since that is used/cleaned only inside a section bound by "mtx".
* And ep_insert() is called with "mtx" held.
*/
- spin_lock_irqsave(&ep->lock, flags);
- if (ep_is_linked(&epi->rdllink))
+ spin_lock_irq(&ep->wq.lock);
+ if (ep_is_linked(epi))
list_del_init(&epi->rdllink);
- spin_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irq(&ep->wq.lock);
wakeup_source_unregister(ep_wakeup_source(epi));
@@ -1550,6 +1555,8 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
int pwake = 0;
poll_table pt;
+ lockdep_assert_irqs_enabled();
+
init_poll_funcptr(&pt, NULL);
/*
@@ -1572,9 +1579,9 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
* 1) Flush epi changes above to other CPUs. This ensures
* we do not miss events from ep_poll_callback if an
* event occurs immediately after we call f_op->poll().
- * We need this because we did not take ep->lock while
+ * We need this because we did not take ep->wq.lock while
* changing epi above (but ep_poll_callback does take
- * ep->lock).
+ * ep->wq.lock).
*
* 2) We also need to ensure we do not miss _past_ events
* when calling f_op->poll(). This barrier also
@@ -1593,8 +1600,8 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
* list, push it inside.
*/
if (ep_item_poll(epi, &pt, 1)) {
- spin_lock_irq(&ep->lock);
- if (!ep_is_linked(&epi->rdllink)) {
+ spin_lock_irq(&ep->wq.lock);
+ if (!ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
@@ -1604,7 +1611,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
- spin_unlock_irq(&ep->lock);
+ spin_unlock_irq(&ep->wq.lock);
}
/* We have to call this outside the lock */
@@ -1739,11 +1746,12 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
int res = 0, eavail, timed_out = 0;
- unsigned long flags;
u64 slack = 0;
wait_queue_entry_t wait;
ktime_t expires, *to = NULL;
+ lockdep_assert_irqs_enabled();
+
if (timeout > 0) {
struct timespec64 end_time = ep_set_mstimeout(timeout);
@@ -1756,7 +1764,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
* caller specified a non blocking operation.
*/
timed_out = 1;
- spin_lock_irqsave(&ep->lock, flags);
+ spin_lock_irq(&ep->wq.lock);
goto check_events;
}
@@ -1765,7 +1773,7 @@ fetch_events:
if (!ep_events_available(ep))
ep_busy_loop(ep, timed_out);
- spin_lock_irqsave(&ep->lock, flags);
+ spin_lock_irq(&ep->wq.lock);
if (!ep_events_available(ep)) {
/*
@@ -1807,11 +1815,11 @@ fetch_events:
break;
}
- spin_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irq(&ep->wq.lock);
if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
timed_out = 1;
- spin_lock_irqsave(&ep->lock, flags);
+ spin_lock_irq(&ep->wq.lock);
}
__remove_wait_queue(&ep->wq, &wait);
@@ -1821,7 +1829,7 @@ check_events:
/* Is it worth to try to dig for events ? */
eavail = ep_events_available(ep);
- spin_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irq(&ep->wq.lock);
/*
* Try to transfer events to user space. In case we get 0 events and
diff --git a/fs/exec.c b/fs/exec.c
index bdd0eacefdf5..1ebf6e5a521d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1145,6 +1145,7 @@ static int de_thread(struct task_struct *tsk)
*/
tsk->pid = leader->pid;
change_pid(tsk, PIDTYPE_PID, task_pid(leader));
+ transfer_pid(leader, tsk, PIDTYPE_TGID);
transfer_pid(leader, tsk, PIDTYPE_PGID);
transfer_pid(leader, tsk, PIDTYPE_SID);
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 1b8b44637e70..5331a15a61f1 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -873,8 +873,8 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
struct bio *bio;
if (per_dev != master_dev) {
- bio = bio_clone_kmalloc(master_dev->bio,
- GFP_KERNEL);
+ bio = bio_clone_fast(master_dev->bio,
+ GFP_KERNEL, NULL);
if (unlikely(!bio)) {
ORE_DBGMSG(
"Failed to allocate BIO size=%u\n",
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 047c327a6b23..28b2609f25c1 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -126,7 +126,6 @@ static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
vma->vm_ops = &ext2_dax_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP;
return 0;
}
#else
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 6484199b35d1..5c3d7b7e4975 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -611,8 +611,7 @@ fail_drop:
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
clear_nlink(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return ERR_PTR(err);
fail:
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 71635909df3b..7f7ee18fe179 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -86,7 +86,7 @@ void ext2_evict_inode(struct inode * inode)
if (want_delete) {
sb_start_intwrite(inode->i_sb);
/* set dtime */
- EXT2_I(inode)->i_dtime = get_seconds();
+ EXT2_I(inode)->i_dtime = ktime_get_real_seconds();
mark_inode_dirty(inode);
__ext2_write_inode(inode, inode_needs_sync(inode));
/* truncate to 0 */
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 152453a91877..0c26dcc5d850 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -45,8 +45,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
return 0;
}
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
@@ -192,8 +191,7 @@ out:
out_fail:
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput (inode);
+ discard_new_inode(inode);
goto out;
}
@@ -261,8 +259,7 @@ out:
out_fail:
inode_dec_link_count(inode);
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
out_dir:
inode_dec_link_count(dir);
goto out;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 8ff53f8da3bc..73bd58fa13de 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -682,7 +682,8 @@ static int ext2_setup_super (struct super_block * sb,
"running e2fsck is recommended");
else if (le32_to_cpu(es->s_checkinterval) &&
(le32_to_cpu(es->s_lastcheck) +
- le32_to_cpu(es->s_checkinterval) <= get_seconds()))
+ le32_to_cpu(es->s_checkinterval) <=
+ ktime_get_real_seconds()))
ext2_msg(sb, KERN_WARNING,
"warning: checktime reached, "
"running e2fsck is recommended");
@@ -1248,7 +1249,7 @@ void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
spin_lock(&EXT2_SB(sb)->s_lock);
es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
- es->s_wtime = cpu_to_le32(get_seconds());
+ es->s_wtime = cpu_to_le32(ktime_get_real_seconds());
/* unlock before we do IO */
spin_unlock(&EXT2_SB(sb)->s_lock);
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
@@ -1360,7 +1361,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
* the rdonly flag and then mark the partition as valid again.
*/
es->s_state = cpu_to_le16(sbi->s_mount_state);
- es->s_mtime = cpu_to_le32(get_seconds());
+ es->s_mtime = cpu_to_le32(ktime_get_real_seconds());
spin_unlock(&sbi->s_lock);
err = dquot_suspend(sb, -1);
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index aa52d87985aa..e5d6ee61ff48 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -426,9 +426,9 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
}
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
- ext4_error(sb, "Cannot get buffer for block bitmap - "
- "block_group = %u, block_bitmap = %llu",
- block_group, bitmap_blk);
+ ext4_warning(sb, "Cannot get buffer for block bitmap - "
+ "block_group = %u, block_bitmap = %llu",
+ block_group, bitmap_blk);
return ERR_PTR(-ENOMEM);
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 7c7123f265c2..0f0edd1cd0cd 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -789,17 +789,16 @@ struct move_extent {
* affected filesystem before 2242.
*/
-static inline __le32 ext4_encode_extra_time(struct timespec *time)
+static inline __le32 ext4_encode_extra_time(struct timespec64 *time)
{
- u32 extra = sizeof(time->tv_sec) > 4 ?
- ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
+ u32 extra = ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK;
return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
}
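/*
 * Worked sketch (values assumed): for ts.tv_sec = 0x100000000 (year
 * 2106), (tv_sec - (s32)tv_sec) >> 32 == 1, so epoch bits 01 land in
 * the low EXT4_EPOCH_BITS of the extra word, with tv_nsec stored
 * above them.
 */
u32 epoch = ((ts.tv_sec - (s32)ts.tv_sec) >> 32) & EXT4_EPOCH_MASK; /* 1 */
__le32 extra = cpu_to_le32(epoch | (ts.tv_nsec << EXT4_EPOCH_BITS));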
-static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
+static inline void ext4_decode_extra_time(struct timespec64 *time,
+ __le32 extra)
{
- if (unlikely(sizeof(time->tv_sec) > 4 &&
- (extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
+ if (unlikely(extra & cpu_to_le32(EXT4_EPOCH_MASK))) {
#if 1
/* Handle legacy encoding of pre-1970 dates with epoch
@@ -821,9 +820,8 @@ static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
do { \
(raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \
if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) {\
- struct timespec ts = timespec64_to_timespec((inode)->xtime); \
(raw_inode)->xtime ## _extra = \
- ext4_encode_extra_time(&ts); \
+ ext4_encode_extra_time(&(inode)->xtime); \
} \
} while (0)
@@ -840,10 +838,8 @@ do { \
do { \
(inode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime); \
if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) { \
- struct timespec ts = timespec64_to_timespec((inode)->xtime); \
- ext4_decode_extra_time(&ts, \
+ ext4_decode_extra_time(&(inode)->xtime, \
raw_inode->xtime ## _extra); \
- (inode)->xtime = timespec_to_timespec64(ts); \
} \
else \
(inode)->xtime.tv_nsec = 0; \
@@ -993,9 +989,9 @@ struct ext4_inode_info {
/*
* File creation time. Its function is same as that of
- * struct timespec i_{a,c,m}time in the generic inode.
+ * struct timespec64 i_{a,c,m}time in the generic inode.
*/
- struct timespec i_crtime;
+ struct timespec64 i_crtime;
/* mballoc */
struct list_head i_prealloc_list;
@@ -1299,7 +1295,14 @@ struct ext4_super_block {
__le32 s_lpf_ino; /* Location of the lost+found inode */
__le32 s_prj_quota_inum; /* inode for tracking project quota */
__le32 s_checksum_seed; /* crc32c(uuid) if csum_seed set */
- __le32 s_reserved[98]; /* Padding to the end of the block */
+ __u8 s_wtime_hi;
+ __u8 s_mtime_hi;
+ __u8 s_mkfs_time_hi;
+ __u8 s_lastcheck_hi;
+ __u8 s_first_error_time_hi;
+ __u8 s_last_error_time_hi;
+ __u8 s_pad[2];
+ __le32 s_reserved[96]; /* Padding to the end of the block */
__le32 s_checksum; /* crc32c(superblock) */
};
@@ -2456,6 +2459,7 @@ extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
extern int ext4_truncate(struct inode *);
+extern int ext4_break_layouts(struct inode *);
extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
extern void ext4_set_inode_flags(struct inode *);
@@ -3058,7 +3062,7 @@ static inline void ext4_set_de_type(struct super_block *sb,
/* readpages.c */
extern int ext4_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
- unsigned nr_pages);
+ unsigned nr_pages, bool is_readahead);
/* symlink.c */
extern const struct inode_operations ext4_encrypted_symlink_inode_operations;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 8ce6fd5b10dd..72a361d5ef74 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4826,6 +4826,13 @@ static long ext4_zero_range(struct file *file, loff_t offset,
* released from page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ ret = ext4_break_layouts(inode);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_mmap_sem);
+ goto out_mutex;
+ }
+
ret = ext4_update_disksize_before_punch(inode, offset, len);
if (ret) {
up_write(&EXT4_I(inode)->i_mmap_sem);
@@ -5499,6 +5506,11 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
* page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+ goto out_mmap;
+
/*
* Need to round down offset to be aligned with page size boundary
* for page size > block size.
@@ -5647,6 +5659,11 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
* page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+ goto out_mmap;
+
/*
* Need to round down to align start offset to page size boundary
* for page size > block size.
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 7f8023340eb8..69d65d49837b 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -374,7 +374,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
if (IS_DAX(file_inode(file))) {
vma->vm_ops = &ext4_dax_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ vma->vm_flags |= VM_HUGEPAGE;
} else {
vma->vm_ops = &ext4_file_vm_ops;
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f336cbc6e932..2addcb8730e1 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -138,9 +138,9 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
}
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
- ext4_error(sb, "Cannot read inode bitmap - "
- "block_group = %u, inode_bitmap = %llu",
- block_group, bitmap_blk);
+ ext4_warning(sb, "Cannot read inode bitmap - "
+ "block_group = %u, inode_bitmap = %llu",
+ block_group, bitmap_blk);
return ERR_PTR(-ENOMEM);
}
if (bitmap_uptodate(bh))
@@ -1086,7 +1086,7 @@ got:
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
- ei->i_crtime = timespec64_to_timespec(inode->i_mtime);
+ ei->i_crtime = inode->i_mtime;
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_dir_start_lookup = 0;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4efe77286ecd..d0dd585add6a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -317,7 +317,7 @@ stop_handle:
* (Well, we could do this if we need to, but heck - it works)
*/
ext4_orphan_del(handle, inode);
- EXT4_I(inode)->i_dtime = get_seconds();
+ EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();
/*
* One subtle ordering requirement: if anything has gone wrong
@@ -3325,7 +3325,8 @@ static int ext4_readpage(struct file *file, struct page *page)
ret = ext4_readpage_inline(inode, page);
if (ret == -EAGAIN)
- return ext4_mpage_readpages(page->mapping, NULL, page, 1);
+ return ext4_mpage_readpages(page->mapping, NULL, page, 1,
+ false);
return ret;
}
@@ -3340,7 +3341,7 @@ ext4_readpages(struct file *file, struct address_space *mapping,
if (ext4_has_inline_data(inode))
return 0;
- return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
+ return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
@@ -4191,6 +4192,39 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
return 0;
}
+static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock)
+{
+ *did_unlock = true;
+ up_write(&ei->i_mmap_sem);
+ schedule();
+ down_write(&ei->i_mmap_sem);
+}
+
+int ext4_break_layouts(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct page *page;
+ bool retry;
+ int error;
+
+ if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
+ return -EINVAL;
+
+ do {
+ retry = false;
+ page = dax_layout_busy_page(inode->i_mapping);
+ if (!page)
+ return 0;
+
+ error = ___wait_var_event(&page->_refcount,
+ atomic_read(&page->_refcount) == 1,
+ TASK_INTERRUPTIBLE, 0, 0,
+ ext4_wait_dax_page(ei, &retry));
+ } while (error == 0 && retry);
+
+ return error;
+}
+
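/*
 * Hedged caller sketch (simplified version of the hunks below): every
 * truncate-like path now drains DAX page pins under i_mmap_sem before
 * unmapping the page cache.
 */
down_write(&EXT4_I(inode)->i_mmap_sem);
ret = ext4_break_layouts(inode);
if (ret) {
	up_write(&EXT4_I(inode)->i_mmap_sem);
	goto out;
}
/* ... truncate_pagecache_range(), block removal, etc. ... */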
/*
* ext4_punch_hole: punches a hole in a file by releasing the blocks
* associated with the given offset and length
@@ -4264,6 +4298,11 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
* page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+ goto out_dio;
+
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
@@ -4944,17 +4983,14 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ret = -EFSCORRUPTED;
goto bad_inode;
} else if (!ext4_has_inline_data(inode)) {
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- (S_ISLNK(inode->i_mode) &&
- !ext4_inode_is_fast_symlink(inode))))
- /* Validate extent which is part of inode */
+ /* validate the block references in the inode */
+ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ (S_ISLNK(inode->i_mode) &&
+ !ext4_inode_is_fast_symlink(inode))) {
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_check_inode(inode);
- } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- (S_ISLNK(inode->i_mode) &&
- !ext4_inode_is_fast_symlink(inode))) {
- /* Validate block references which are part of inode */
- ret = ext4_ind_check_inode(inode);
+ else
+ ret = ext4_ind_check_inode(inode);
}
}
if (ret)
@@ -5553,6 +5589,14 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
ext4_wait_for_tail_page_commit(inode);
}
down_write(&EXT4_I(inode)->i_mmap_sem);
+
+ rc = ext4_break_layouts(inode);
+ if (rc) {
+ up_write(&EXT4_I(inode)->i_mmap_sem);
+ error = rc;
+ goto err_out;
+ }
+
/*
* Truncate pagecache after we've waited for commit
* in data=journal mode to make pages freeable.
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f7ab34088162..e29fce2fbf25 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -14,6 +14,7 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>
@@ -2140,7 +2141,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
* This should tell if fe_len is exactly a power of 2
*/
if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
- ac->ac_2order = i - 1;
+ ac->ac_2order = array_index_nospec(i - 1,
+ sb->s_blocksize_bits + 2);
}
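/*
 * Background sketch (generic pattern, not from this patch):
 * array_index_nospec(idx, bound) forces idx to 0 when idx >= bound,
 * even under speculative execution, so a dependent table lookup
 * cannot be used as a Spectre-v1 out-of-bounds read gadget.
 */
val = table[array_index_nospec(idx, ARRAY_SIZE(table))];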
/* if stream allocation is enabled, use global goal */
@@ -3799,7 +3801,6 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
ext4_group_t group;
ext4_grpblk_t bit;
unsigned long long grp_blk_start;
- int err = 0;
int free = 0;
BUG_ON(pa->pa_deleted == 0);
@@ -3840,7 +3841,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
}
atomic_add(free, &sbi->s_mb_discarded);
- return err;
+ return 0;
}
static noinline_for_stack int
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 638ad4743477..39b07c2d3384 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -147,7 +147,7 @@ static int kmmpd(void *data)
mmp_block = le64_to_cpu(es->s_mmp_block);
mmp = (struct mmp_struct *)(bh->b_data);
- mmp->mmp_time = cpu_to_le64(get_seconds());
+ mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
/*
* Start with the higher mmp_check_interval and reduce it if
* the MMP block is being updated on time.
@@ -165,7 +165,7 @@ static int kmmpd(void *data)
seq = 1;
mmp->mmp_seq = cpu_to_le32(seq);
- mmp->mmp_time = cpu_to_le64(get_seconds());
+ mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
last_update_time = jiffies;
retval = write_mmp_block(sb, bh);
@@ -241,7 +241,7 @@ static int kmmpd(void *data)
* Unmount seems to be clean.
*/
mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
- mmp->mmp_time = cpu_to_le64(get_seconds());
+ mmp->mmp_time = cpu_to_le64(ktime_get_real_seconds());
retval = write_mmp_block(sb, bh);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 8e17efdcbf11..a409ff70d67b 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -134,9 +134,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
mapping[0] = inode1->i_mapping;
mapping[1] = inode2->i_mapping;
} else {
- pgoff_t tmp = index1;
- index1 = index2;
- index2 = tmp;
+ swap(index1, index2);
mapping[0] = inode2->i_mapping;
mapping[1] = inode1->i_mapping;
}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 2a4c25c4681d..116ff68c5bd4 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1398,6 +1398,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
goto cleanup_and_exit;
dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
"falling back\n"));
+ ret = NULL;
}
nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
if (!nblocks) {
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 19b87a8de6ff..f461d75ac049 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -98,7 +98,7 @@ static void mpage_end_io(struct bio *bio)
int ext4_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
- unsigned nr_pages)
+ unsigned nr_pages, bool is_readahead)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
@@ -259,7 +259,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
bio->bi_private = ctx;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio_set_op_attrs(bio, REQ_OP_READ,
+ is_readahead ? REQ_RAHEAD : 0);
}
length = first_hole << blkbits;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b7f7922061be..5863fd22e90b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -312,6 +312,24 @@ void ext4_itable_unused_set(struct super_block *sb,
bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
}
+static void __ext4_update_tstamp(__le32 *lo, __u8 *hi)
+{
+ time64_t now = ktime_get_real_seconds();
+
+ now = clamp_val(now, 0, (1ull << 40) - 1);
+
+ *lo = cpu_to_le32(lower_32_bits(now));
+ *hi = upper_32_bits(now);
+}
+
+static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
+{
+ return ((time64_t)(*hi) << 32) + le32_to_cpu(*lo);
+}
+#define ext4_update_tstamp(es, tstamp) \
+ __ext4_update_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
+#define ext4_get_tstamp(es, tstamp) \
+ __ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
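/*
 * Worked sketch (value assumed): splitting now = 0x123456789 gives
 * lo = cpu_to_le32(0x23456789) and hi = 0x01; ext4_get_tstamp()
 * recombines them as ((time64_t)hi << 32) + lo, a 40-bit seconds
 * counter (hence the clamp to (1ull << 40) - 1 above).
 */
time64_t now = 0x123456789LL;
__le32 lo = cpu_to_le32(lower_32_bits(now));
__u8 hi = upper_32_bits(now);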
static void __save_error_info(struct super_block *sb, const char *func,
unsigned int line)
@@ -322,11 +340,12 @@ static void __save_error_info(struct super_block *sb, const char *func,
if (bdev_read_only(sb->s_bdev))
return;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
- es->s_last_error_time = cpu_to_le32(get_seconds());
+ ext4_update_tstamp(es, s_last_error_time);
strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
es->s_last_error_line = cpu_to_le32(line);
if (!es->s_first_error_time) {
es->s_first_error_time = es->s_last_error_time;
+ es->s_first_error_time_hi = es->s_last_error_time_hi;
strncpy(es->s_first_error_func, func,
sizeof(es->s_first_error_func));
es->s_first_error_line = cpu_to_le32(line);
@@ -776,26 +795,26 @@ void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
+ int ret;
- if ((flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) &&
- !EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) {
- percpu_counter_sub(&sbi->s_freeclusters_counter,
- grp->bb_free);
- set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
- &grp->bb_state);
+ if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
+ ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
+ &grp->bb_state);
+ if (!ret)
+ percpu_counter_sub(&sbi->s_freeclusters_counter,
+ grp->bb_free);
}
- if ((flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) &&
- !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
- if (gdp) {
+ if (flags & EXT4_GROUP_INFO_IBITMAP_CORRUPT) {
+ ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
+ &grp->bb_state);
+ if (!ret && gdp) {
int count;
count = ext4_free_inodes_count(sb, gdp);
percpu_counter_sub(&sbi->s_freeinodes_counter,
count);
}
- set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT,
- &grp->bb_state);
}
}
@@ -2174,8 +2193,8 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
"warning: maximal mount count reached, "
"running e2fsck is recommended");
else if (le32_to_cpu(es->s_checkinterval) &&
- (le32_to_cpu(es->s_lastcheck) +
- le32_to_cpu(es->s_checkinterval) <= get_seconds()))
+ (ext4_get_tstamp(es, s_lastcheck) +
+ le32_to_cpu(es->s_checkinterval) <= ktime_get_real_seconds()))
ext4_msg(sb, KERN_WARNING,
"warning: checktime reached, "
"running e2fsck is recommended");
@@ -2184,7 +2203,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
- es->s_mtime = cpu_to_le32(get_seconds());
+ ext4_update_tstamp(es, s_mtime);
ext4_update_dynamic_rev(sb);
if (sbi->s_journal)
ext4_set_feature_journal_needs_recovery(sb);
@@ -2875,8 +2894,9 @@ static void print_daily_error_info(struct timer_list *t)
ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
le32_to_cpu(es->s_error_count));
if (es->s_first_error_time) {
- printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
- sb->s_id, le32_to_cpu(es->s_first_error_time),
+ printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %llu: %.*s:%d",
+ sb->s_id,
+ ext4_get_tstamp(es, s_first_error_time),
(int) sizeof(es->s_first_error_func),
es->s_first_error_func,
le32_to_cpu(es->s_first_error_line));
@@ -2889,8 +2909,9 @@ static void print_daily_error_info(struct timer_list *t)
printk(KERN_CONT "\n");
}
if (es->s_last_error_time) {
- printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
- sb->s_id, le32_to_cpu(es->s_last_error_time),
+ printk(KERN_NOTICE "EXT4-fs (%s): last error at time %llu: %.*s:%d",
+ sb->s_id,
+ ext4_get_tstamp(es, s_last_error_time),
(int) sizeof(es->s_last_error_func),
es->s_last_error_func,
le32_to_cpu(es->s_last_error_line));
@@ -3508,7 +3529,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_sb_block = sb_block;
if (sb->s_bdev->bd_part)
sbi->s_sectors_written_start =
- part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+ part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);
/* Cleanup superblock name */
strreplace(sb->s_id, '/', '!');
@@ -4813,11 +4834,12 @@ static int ext4_commit_super(struct super_block *sb, int sync)
* to complain and force a full file system check.
*/
if (!(sb->s_flags & SB_RDONLY))
- es->s_wtime = cpu_to_le32(get_seconds());
+ ext4_update_tstamp(es, s_wtime);
if (sb->s_bdev->bd_part)
es->s_kbytes_written =
cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
- ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
+ ((part_stat_read(sb->s_bdev->bd_part,
+ sectors[STAT_WRITE]) -
EXT4_SB(sb)->s_sectors_written_start) >> 1));
else
es->s_kbytes_written =
@@ -5080,6 +5102,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
#endif
char *orig_data = kstrdup(data, GFP_KERNEL);
+ if (data && !orig_data)
+ return -ENOMEM;
+
/* Store the original options */
old_sb_flags = sb->s_flags;
old_opts.s_mount_opt = sbi->s_mount_opt;
@@ -5665,13 +5690,13 @@ static int ext4_enable_quotas(struct super_block *sb)
DQUOT_USAGE_ENABLED |
(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
if (err) {
- for (type--; type >= 0; type--)
- dquot_quota_off(sb, type);
-
ext4_warning(sb,
"Failed to enable quota tracking "
"(type=%d, err=%d). Please run "
"e2fsck to fix.", type, err);
+ for (type--; type >= 0; type--)
+ dquot_quota_off(sb, type);
+
return err;
}
}
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index f34da0bb8f17..9212a026a1f1 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -25,6 +25,8 @@ typedef enum {
attr_reserved_clusters,
attr_inode_readahead,
attr_trigger_test_error,
+ attr_first_error_time,
+ attr_last_error_time,
attr_feature,
attr_pointer_ui,
attr_pointer_atomic,
@@ -56,7 +58,8 @@ static ssize_t session_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
if (!sb->s_bdev->bd_part)
return snprintf(buf, PAGE_SIZE, "0\n");
return snprintf(buf, PAGE_SIZE, "%lu\n",
- (part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
+ (part_stat_read(sb->s_bdev->bd_part,
+ sectors[STAT_WRITE]) -
sbi->s_sectors_written_start) >> 1);
}
@@ -68,7 +71,8 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
return snprintf(buf, PAGE_SIZE, "0\n");
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)(sbi->s_kbytes_written +
- ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
+ ((part_stat_read(sb->s_bdev->bd_part,
+ sectors[STAT_WRITE]) -
EXT4_SB(sb)->s_sectors_written_start) >> 1)));
}
@@ -182,8 +186,8 @@ EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
-EXT4_RO_ATTR_ES_UI(first_error_time, s_first_error_time);
-EXT4_RO_ATTR_ES_UI(last_error_time, s_last_error_time);
+EXT4_ATTR(first_error_time, 0444, first_error_time);
+EXT4_ATTR(last_error_time, 0444, last_error_time);
static unsigned int old_bump_val = 128;
EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val);
@@ -249,6 +253,15 @@ static void *calc_ptr(struct ext4_attr *a, struct ext4_sb_info *sbi)
return NULL;
}
+static ssize_t __print_tstamp(char *buf, __le32 lo, __u8 hi)
+{
+ return snprintf(buf, PAGE_SIZE, "%lld",
+ ((time64_t)hi << 32) + le32_to_cpu(lo));
+}
+
+#define print_tstamp(buf, es, tstamp) \
+ __print_tstamp(buf, (es)->tstamp, (es)->tstamp ## _hi)
+
static ssize_t ext4_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -274,8 +287,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
case attr_pointer_ui:
if (!ptr)
return 0;
- return snprintf(buf, PAGE_SIZE, "%u\n",
- *((unsigned int *) ptr));
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ le32_to_cpup(ptr));
+ else
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ *((unsigned int *) ptr));
case attr_pointer_atomic:
if (!ptr)
return 0;
@@ -283,6 +300,10 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
atomic_read((atomic_t *) ptr));
case attr_feature:
return snprintf(buf, PAGE_SIZE, "supported\n");
+ case attr_first_error_time:
+ return print_tstamp(buf, sbi->s_es, s_first_error_time);
+ case attr_last_error_time:
+ return print_tstamp(buf, sbi->s_es, s_last_error_time);
}
return 0;
@@ -308,7 +329,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
ret = kstrtoul(skip_spaces(buf), 0, &t);
if (ret)
return ret;
- *((unsigned int *) ptr) = t;
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ *((__le32 *) ptr) = cpu_to_le32(t);
+ else
+ *((unsigned int *) ptr) = t;
return len;
case attr_inode_readahead:
return inode_readahead_blks_store(sbi, buf, len);
diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
index 0cb13badf473..bcbe3668c1d4 100644
--- a/fs/ext4/truncate.h
+++ b/fs/ext4/truncate.h
@@ -11,6 +11,10 @@
*/
static inline void ext4_truncate_failed_write(struct inode *inode)
{
+ /*
+ * We don't need to call ext4_break_layouts() because the blocks we
+ * are truncating were never visible to userspace.
+ */
down_write(&EXT4_I(inode)->i_mmap_sem);
truncate_inode_pages(inode->i_mapping, inode->i_size);
ext4_truncate(inode);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 723df14f4084..f36fc5d5b257 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -190,6 +190,8 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
if ((void *)next >= end)
return -EFSCORRUPTED;
+ if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
+ return -EFSCORRUPTED;
e = next;
}
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 9f1c96caebda..e8b6b89bddb8 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -28,6 +28,7 @@ struct kmem_cache *f2fs_inode_entry_slab;
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
+ f2fs_build_fault_attr(sbi, 0, 0);
set_ckpt_flags(sbi, CP_ERROR_FLAG);
if (!end_io)
f2fs_flush_merged_writes(sbi);
@@ -70,6 +71,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
.encrypted_page = NULL,
.is_meta = is_meta,
};
+ int err;
if (unlikely(!is_meta))
fio.op_flags &= ~REQ_META;
@@ -84,9 +86,10 @@ repeat:
fio.page = page;
- if (f2fs_submit_page_bio(&fio)) {
+ err = f2fs_submit_page_bio(&fio);
+ if (err) {
f2fs_put_page(page, 1);
- goto repeat;
+ return ERR_PTR(err);
}
lock_page(page);
@@ -95,14 +98,9 @@ repeat:
goto repeat;
}
- /*
- * if there is any IO error when accessing device, make our filesystem
- * readonly and make sure do not write checkpoint with non-uptodate
- * meta page.
- */
if (unlikely(!PageUptodate(page))) {
- memset(page_address(page), 0, PAGE_SIZE);
- f2fs_stop_checkpoint(sbi, false);
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
}
out:
return page;
@@ -113,13 +111,32 @@ struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
return __get_meta_page(sbi, index, true);
}
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+ struct page *page;
+ int count = 0;
+
+retry:
+ page = __get_meta_page(sbi, index, true);
+ if (IS_ERR(page)) {
+ if (PTR_ERR(page) == -EIO &&
+ ++count <= DEFAULT_RETRY_IO_COUNT)
+ goto retry;
+
+ f2fs_stop_checkpoint(sbi, false);
+ f2fs_bug_on(sbi, 1);
+ }
+
+ return page;
+}
+
/* for POR only */
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
return __get_meta_page(sbi, index, false);
}
-bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
{
switch (type) {
@@ -140,8 +157,20 @@ bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
return false;
break;
case META_POR:
+ case DATA_GENERIC:
if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
- blkaddr < MAIN_BLKADDR(sbi)))
+ blkaddr < MAIN_BLKADDR(sbi))) {
+ if (type == DATA_GENERIC) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "access invalid blkaddr:%u", blkaddr);
+ WARN_ON(1);
+ }
+ return false;
+ }
+ break;
+ case META_GENERIC:
+ if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
+ blkaddr >= MAIN_BLKADDR(sbi)))
return false;
break;
default:
@@ -176,7 +205,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
blk_start_plug(&plug);
for (; nrpages-- > 0; blkno++) {
- if (!f2fs_is_valid_meta_blkaddr(sbi, blkno, type))
+ if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
goto out;
switch (type) {
@@ -242,11 +271,8 @@ static int __f2fs_write_meta_page(struct page *page,
trace_f2fs_writepage(page, META);
- if (unlikely(f2fs_cp_error(sbi))) {
- dec_page_count(sbi, F2FS_DIRTY_META);
- unlock_page(page);
- return 0;
- }
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto redirty_out;
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
@@ -529,13 +555,12 @@ int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
spin_lock(&im->ino_lock);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_ORPHAN)) {
spin_unlock(&im->ino_lock);
f2fs_show_injection_info(FAULT_ORPHAN);
return -ENOSPC;
}
-#endif
+
if (unlikely(im->ino_num >= sbi->max_orphans))
err = -ENOSPC;
else
@@ -572,12 +597,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode;
struct node_info ni;
- int err = f2fs_acquire_orphan_inode(sbi);
-
- if (err)
- goto err_out;
-
- __add_ino_entry(sbi, ino, 0, ORPHAN_INO);
+ int err;
inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) {
@@ -600,14 +620,15 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
/* truncate all the data during iput */
iput(inode);
- f2fs_get_node_info(sbi, ino, &ni);
+ err = f2fs_get_node_info(sbi, ino, &ni);
+ if (err)
+ goto err_out;
/* ENOMEM was fully retried in f2fs_evict_inode. */
if (ni.blk_addr != NULL_ADDR) {
err = -EIO;
goto err_out;
}
- __remove_ino_entry(sbi, ino, ORPHAN_INO);
return 0;
err_out:
@@ -639,7 +660,10 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
/* Needed for iput() to work correctly and not trash data */
sbi->sb->s_flags |= SB_ACTIVE;
- /* Turn on quotas so that they are updated correctly */
+ /*
+ * Turn on quotas that were not enabled for read-only mounts if the
+ * filesystem has the quota feature, so that they are updated correctly.
+ */
quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif
@@ -649,9 +673,15 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
for (i = 0; i < orphan_blocks; i++) {
- struct page *page = f2fs_get_meta_page(sbi, start_blk + i);
+ struct page *page;
struct f2fs_orphan_block *orphan_blk;
+ page = f2fs_get_meta_page(sbi, start_blk + i);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out;
+ }
+
orphan_blk = (struct f2fs_orphan_block *)page_address(page);
for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
@@ -742,10 +772,14 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
__u32 crc = 0;
*cp_page = f2fs_get_meta_page(sbi, cp_addr);
+ if (IS_ERR(*cp_page))
+ return PTR_ERR(*cp_page);
+
*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
if (crc_offset > (blk_size - sizeof(__le32))) {
+ f2fs_put_page(*cp_page, 1);
f2fs_msg(sbi->sb, KERN_WARNING,
"invalid crc_offset: %zu", crc_offset);
return -EINVAL;
@@ -753,6 +787,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
crc = cur_cp_crc(*cp_block);
if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+ f2fs_put_page(*cp_page, 1);
f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
return -EINVAL;
}
@@ -772,14 +807,22 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
&cp_page_1, version);
if (err)
- goto invalid_cp1;
+ return NULL;
+
+ if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
+ sbi->blocks_per_seg) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "invalid cp_pack_total_block_count:%u",
+ le32_to_cpu(cp_block->cp_pack_total_block_count));
+ goto invalid_cp;
+ }
pre_version = *version;
cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
&cp_page_2, version);
if (err)
- goto invalid_cp2;
+ goto invalid_cp;
cur_version = *version;
if (cur_version == pre_version) {
@@ -787,9 +830,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
f2fs_put_page(cp_page_2, 1);
return cp_page_1;
}
-invalid_cp2:
f2fs_put_page(cp_page_2, 1);
-invalid_cp1:
+invalid_cp:
f2fs_put_page(cp_page_1, 1);
return NULL;
}
@@ -838,15 +880,15 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
memcpy(sbi->ckpt, cp_block, blk_size);
- /* Sanity checking of checkpoint */
- if (f2fs_sanity_check_ckpt(sbi))
- goto free_fail_no_cp;
-
if (cur_page == cp1)
sbi->cur_cp_pack = 1;
else
sbi->cur_cp_pack = 2;
+ /* Sanity checking of checkpoint */
+ if (f2fs_sanity_check_ckpt(sbi))
+ goto free_fail_no_cp;
+
if (cp_blks <= 1)
goto done;
@@ -859,6 +901,8 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
unsigned char *ckpt = (unsigned char *)sbi->ckpt;
cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
+ if (IS_ERR(cur_page))
+ goto free_fail_no_cp;
sit_bitmap_ptr = page_address(cur_page);
memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
f2fs_put_page(cur_page, 1);
@@ -980,12 +1024,10 @@ retry:
iput(inode);
/* We need to give cpu to other writers. */
- if (ino == cur_ino) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ if (ino == cur_ino)
cond_resched();
- } else {
+ else
ino = cur_ino;
- }
} else {
/*
* We should submit bio, since it exists several
@@ -1119,7 +1161,7 @@ static void unblock_operations(struct f2fs_sb_info *sbi)
f2fs_unlock_all(sbi);
}
-static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
DEFINE_WAIT(wait);
@@ -1129,6 +1171,9 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
if (!get_pages(sbi, F2FS_WB_CP_DATA))
break;
+ if (unlikely(f2fs_cp_error(sbi)))
+ break;
+
io_schedule_timeout(5*HZ);
}
finish_wait(&sbi->cp_wait, &wait);
@@ -1202,8 +1247,12 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,
/* writeout cp pack 2 page */
err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
- f2fs_bug_on(sbi, err);
+ if (unlikely(err && f2fs_cp_error(sbi))) {
+ f2fs_put_page(page, 1);
+ return;
+ }
+ f2fs_bug_on(sbi, err);
f2fs_put_page(page, 0);
/* submit checkpoint (with barrier if NOBARRIER is not set) */
@@ -1229,7 +1278,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
while (get_pages(sbi, F2FS_DIRTY_META)) {
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ break;
}
/*
@@ -1309,7 +1358,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_sync_meta_pages(sbi, META, LONG_MAX,
FS_CP_META_IO);
if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ break;
}
}
@@ -1348,10 +1397,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
/* wait for previously submitted meta pages to finish writeback */
- wait_on_all_pages_writeback(sbi);
-
- if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ f2fs_wait_on_all_pages_writeback(sbi);
/* flush all device cache */
err = f2fs_flush_device_cache(sbi);
@@ -1360,12 +1406,19 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* barrier and flush checkpoint cp pack 2 page if possible */
commit_checkpoint(sbi, ckpt, start_blk);
- wait_on_all_pages_writeback(sbi);
+ f2fs_wait_on_all_pages_writeback(sbi);
+
+ /*
+	 * invalidate intermediate page cache pages borrowed from the meta
+	 * inode, which are used for migrating an encrypted inode's blocks.
+ */
+ if (f2fs_sb_has_encrypt(sbi->sb))
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
f2fs_release_ino_entry(sbi, false);
- if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ f2fs_reset_fsync_node_info(sbi);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
clear_sbi_flag(sbi, SBI_NEED_CP);
@@ -1381,7 +1434,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
- return 0;
+ return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
}
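
The hunks above convert do_checkpoint() from returning -EIO at every f2fs_cp_error() site into breaking out of the flush loops and reporting the error once at the end, so the release and clear-flag steps always run. A simplified model of that deferred-error shape; cp_error_flag and dirty_meta are stand-ins, not the kernel API.

#include <errno.h>
#include <stdbool.h>

static bool cp_error_flag;		/* stand-in for f2fs_cp_error() */
static int dirty_meta = 3;		/* stand-in for dirty meta pages */

static int do_checkpoint_model(void)
{
	while (dirty_meta > 0) {
		dirty_meta--;		/* "flush" one batch */
		if (cp_error_flag)
			break;		/* stop flushing, but keep going */
	}
	/* cleanup always runs here, even on error */
	return cp_error_flag ? -EIO : 0;	/* report the failure once */
}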
/*
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 8f931d699287..382c1ef9a9e4 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -126,12 +126,10 @@ static bool f2fs_bio_post_read_required(struct bio *bio)
static void f2fs_read_end_io(struct bio *bio)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), FAULT_IO)) {
f2fs_show_injection_info(FAULT_IO);
bio->bi_status = BLK_STS_IOERR;
}
-#endif
if (f2fs_bio_post_read_required(bio)) {
struct bio_post_read_ctx *ctx = bio->bi_private;
@@ -177,6 +175,8 @@ static void f2fs_write_end_io(struct bio *bio)
page->index != nid_of_node(page));
dec_page_count(sbi, type);
+ if (f2fs_in_warm_node_list(sbi, page))
+ f2fs_del_fsync_node_entry(sbi, page);
clear_cold_data(page);
end_page_writeback(page);
}
@@ -264,7 +264,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
if (type != DATA && type != NODE)
goto submit_io;
- if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
+ if (test_opt(sbi, LFS) && current->plug)
blk_finish_plug(current->plug);
start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
@@ -441,7 +441,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
- verify_block_addr(fio, fio->new_blkaddr);
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
+ __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+ return -EFAULT;
+
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
@@ -485,7 +488,7 @@ next:
spin_unlock(&io->io_lock);
}
- if (is_valid_blkaddr(fio->old_blkaddr))
+ if (__is_valid_data_blkaddr(fio->old_blkaddr))
verify_block_addr(fio, fio->old_blkaddr);
verify_block_addr(fio, fio->new_blkaddr);
@@ -534,19 +537,22 @@ out:
}
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
- unsigned nr_pages)
+ unsigned nr_pages, unsigned op_flag)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
struct bio_post_read_ctx *ctx;
unsigned int post_read_steps = 0;
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+ return ERR_PTR(-EFAULT);
+
bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
if (!bio)
return ERR_PTR(-ENOMEM);
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
if (f2fs_encrypted_file(inode))
post_read_steps |= 1 << STEP_DECRYPT;
@@ -571,7 +577,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
block_t blkaddr)
{
- struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);
+ struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
if (IS_ERR(bio))
return PTR_ERR(bio);
@@ -869,6 +875,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_summary sum;
struct node_info ni;
+ block_t old_blkaddr;
pgoff_t fofs;
blkcnt_t count = 1;
int err;
@@ -876,6 +883,10 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
+ err = f2fs_get_node_info(sbi, dn->nid, &ni);
+ if (err)
+ return err;
+
dn->data_blkaddr = datablock_addr(dn->inode,
dn->node_page, dn->ofs_in_node);
if (dn->data_blkaddr == NEW_ADDR)
@@ -885,11 +896,13 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
return err;
alloc:
- f2fs_get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
-
- f2fs_allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
+ old_blkaddr = dn->data_blkaddr;
+ f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
&sum, seg_type, NULL, false);
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ old_blkaddr, old_blkaddr);
f2fs_set_data_blkaddr(dn);
/* update i_size */
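
Above, __allocate_data_block() now records the old block address and drops any copy of it cached in META_MAPPING(), since this series uses that mapping as a bounce cache for encrypted blocks and the old address can be reused once it is reallocated. A toy model of why the invalidation matters, with a one-slot cache standing in for the meta inode's page cache:

#include <stdint.h>
#include <string.h>

struct meta_cache {
	uint64_t blkaddr;	/* key; 0 means the slot is empty */
	char data[4096];
};

/* analogue of invalidate_mapping_pages(META_MAPPING(sbi), old, old):
 * once old_blkaddr is reallocated, any copy cached under it is stale */
static void drop_stale_copy(struct meta_cache *c, uint64_t old_blkaddr)
{
	if (c->blkaddr == old_blkaddr) {
		c->blkaddr = 0;
		memset(c->data, 0, sizeof(c->data));
	}
}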
@@ -1045,7 +1058,13 @@ next_dnode:
next_block:
blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
- if (!is_valid_blkaddr(blkaddr)) {
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+ err = -EFAULT;
+ goto sync_out;
+ }
+
+ if (!is_valid_data_blkaddr(sbi, blkaddr)) {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
@@ -1282,7 +1301,11 @@ static int f2fs_xattr_fiemap(struct inode *inode,
if (!page)
return -ENOMEM;
- f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return err;
+ }
phys = (__u64)blk_to_logical(inode, ni.blk_addr);
offset = offsetof(struct f2fs_inode, i_addr) +
@@ -1309,7 +1332,11 @@ static int f2fs_xattr_fiemap(struct inode *inode,
if (!page)
return -ENOMEM;
- f2fs_get_node_info(sbi, xnid, &ni);
+ err = f2fs_get_node_info(sbi, xnid, &ni);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return err;
+ }
phys = (__u64)blk_to_logical(inode, ni.blk_addr);
len = inode->i_sb->s_blocksize;
@@ -1421,10 +1448,15 @@ out:
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* The major change is that block_size == page_size in f2fs by default.
+ *
+ * Note that the aops->readpages() function is ONLY used for read-ahead. If
+ * this function ever deviates from doing just read-ahead, it should either
+ * use ->readpage() or do the necessary surgery to decouple ->readpages()
+ * from read-ahead.
*/
static int f2fs_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
- unsigned nr_pages)
+ unsigned nr_pages, bool is_readahead)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
@@ -1495,6 +1527,10 @@ got_it:
SetPageUptodate(page);
goto confused;
}
+
+ if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+ DATA_GENERIC))
+ goto set_error_page;
} else {
zero_user_segment(page, 0, PAGE_SIZE);
if (!PageUptodate(page))
@@ -1514,7 +1550,8 @@ submit_and_realloc:
bio = NULL;
}
if (bio == NULL) {
- bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
+ bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
+ is_readahead ? REQ_RAHEAD : 0);
if (IS_ERR(bio)) {
bio = NULL;
goto set_error_page;
@@ -1558,7 +1595,7 @@ static int f2fs_read_data_page(struct file *file, struct page *page)
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
if (ret == -EAGAIN)
- ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
+ ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
return ret;
}
@@ -1575,12 +1612,13 @@ static int f2fs_read_data_pages(struct file *file,
if (f2fs_has_inline_data(inode))
return 0;
- return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
+ return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}
static int encrypt_one_page(struct f2fs_io_info *fio)
{
struct inode *inode = fio->page->mapping->host;
+ struct page *mpage;
gfp_t gfp_flags = GFP_NOFS;
if (!f2fs_encrypted_file(inode))
@@ -1592,17 +1630,25 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0, fio->page->index, gfp_flags);
- if (!IS_ERR(fio->encrypted_page))
- return 0;
+ if (IS_ERR(fio->encrypted_page)) {
+ /* flush pending IOs and wait for a while in the ENOMEM case */
+ if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
+ f2fs_flush_merged_writes(fio->sbi);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ gfp_flags |= __GFP_NOFAIL;
+ goto retry_encrypt;
+ }
+ return PTR_ERR(fio->encrypted_page);
+ }
- /* flush pending IOs and wait for a while in the ENOMEM case */
- if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
- f2fs_flush_merged_writes(fio->sbi);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- gfp_flags |= __GFP_NOFAIL;
- goto retry_encrypt;
+ mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
+ if (mpage) {
+ if (PageUptodate(mpage))
+ memcpy(page_address(mpage),
+ page_address(fio->encrypted_page), PAGE_SIZE);
+ f2fs_put_page(mpage, 1);
}
- return PTR_ERR(fio->encrypted_page);
+ return 0;
}
static inline bool check_inplace_update_policy(struct inode *inode,
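
encrypt_one_page() above now also copies the freshly encrypted data over any up-to-date META_MAPPING page for old_blkaddr, keeping that bounce cache coherent with in-place updates; on -ENOMEM it still flushes merged writes, waits out congestion, and retries with __GFP_NOFAIL. A small sketch of the relieve-pressure-then-retry loop; flush_pending() is a hypothetical stand-in for the flush-and-wait step.

#include <stdlib.h>

static void flush_pending(void) { /* hypothetical pressure relief */ }

static void *alloc_with_retry(size_t size, int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		void *p = malloc(size);

		if (p)
			return p;
		flush_pending();	/* free memory before retrying */
	}
	return NULL;
}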
@@ -1686,6 +1732,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
struct inode *inode = page->mapping->host;
struct dnode_of_data dn;
struct extent_info ei = {0,0,0};
+ struct node_info ni;
bool ipu_force = false;
int err = 0;
@@ -1694,11 +1741,13 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
f2fs_lookup_extent_cache(inode, page->index, &ei)) {
fio->old_blkaddr = ei.blk + page->index - ei.fofs;
- if (is_valid_blkaddr(fio->old_blkaddr)) {
- ipu_force = true;
- fio->need_lock = LOCK_DONE;
- goto got_it;
- }
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+ DATA_GENERIC))
+ return -EFAULT;
+
+ ipu_force = true;
+ fio->need_lock = LOCK_DONE;
+ goto got_it;
}
/* Deadlock due to between page->lock and f2fs_lock_op */
@@ -1717,11 +1766,17 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
goto out_writepage;
}
got_it:
+ if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
+ !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+ DATA_GENERIC)) {
+ err = -EFAULT;
+ goto out_writepage;
+ }
/*
* If the current allocation needs SSR, it is better to do
* in-place writes for the updated data.
*/
- if (ipu_force || (is_valid_blkaddr(fio->old_blkaddr) &&
+ if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
need_inplace_update(fio))) {
err = encrypt_one_page(fio);
if (err)
@@ -1746,6 +1801,12 @@ got_it:
fio->need_lock = LOCK_REQ;
}
+ err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
+ if (err)
+ goto out_writepage;
+
+ fio->version = ni.version;
+
err = encrypt_one_page(fio);
if (err)
goto out_writepage;
@@ -2074,6 +2135,18 @@ continue_unlock:
return ret;
}
+static inline bool __should_serialize_io(struct inode *inode,
+ struct writeback_control *wbc)
+{
+ if (!S_ISREG(inode->i_mode))
+ return false;
+ if (wbc->sync_mode != WB_SYNC_ALL)
+ return true;
+ if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
+ return true;
+ return false;
+}
+
static int __f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc,
enum iostat_type io_type)
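
The new __should_serialize_io() helper, together with the sbi->writepages mutex taken in the next hunk, serializes writeback of regular files when it is async or when the file carries at least min_seq_blocks dirty pages, presumably so their blocks are allocated sequentially. A userspace sketch of the policy plus the lock it guards; the helper names and threshold are illustrative.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t writepages_lock = PTHREAD_MUTEX_INITIALIZER;

static bool should_serialize(bool regular_file, bool sync_writeback,
			     unsigned int dirty_pages,
			     unsigned int min_seq_blocks)
{
	if (!regular_file)
		return false;
	if (!sync_writeback)
		return true;	/* background flushers always serialize */
	return dirty_pages >= min_seq_blocks;
}

static void write_pages(bool regular_file, bool sync_writeback,
			unsigned int dirty_pages, unsigned int threshold)
{
	bool locked = should_serialize(regular_file, sync_writeback,
				       dirty_pages, threshold);

	if (locked)
		pthread_mutex_lock(&writepages_lock);
	/* ... write out the cache pages ... */
	if (locked)
		pthread_mutex_unlock(&writepages_lock);
}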
@@ -2082,6 +2155,7 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct blk_plug plug;
int ret;
+ bool locked = false;
/* deal with chardevs and other special file */
if (!mapping->a_ops->writepage)
@@ -2112,10 +2186,18 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
else if (atomic_read(&sbi->wb_sync_req[DATA]))
goto skip_write;
+ if (__should_serialize_io(inode, wbc)) {
+ mutex_lock(&sbi->writepages);
+ locked = true;
+ }
+
blk_start_plug(&plug);
ret = f2fs_write_cache_pages(mapping, wbc, io_type);
blk_finish_plug(&plug);
+ if (locked)
+ mutex_unlock(&sbi->writepages);
+
if (wbc->sync_mode == WB_SYNC_ALL)
atomic_dec(&sbi->wb_sync_req[DATA]);
/*
@@ -2148,10 +2230,14 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
loff_t i_size = i_size_read(inode);
if (to > i_size) {
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
+
truncate_pagecache(inode, i_size);
f2fs_truncate_blocks(inode, i_size, true);
+
up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
}
}
@@ -2246,8 +2332,9 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
trace_f2fs_write_begin(inode, pos, len, flags);
- if (f2fs_is_atomic_file(inode) &&
- !f2fs_available_free_memory(sbi, INMEM_PAGES)) {
+ if ((f2fs_is_atomic_file(inode) &&
+ !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
+ is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
err = -ENOMEM;
drop_atomic = true;
goto fail;
@@ -2371,14 +2458,20 @@ unlock_out:
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
loff_t offset)
{
- unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
-
- if (offset & blocksize_mask)
- return -EINVAL;
-
- if (iov_iter_alignment(iter) & blocksize_mask)
- return -EINVAL;
-
+ unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
+ unsigned blkbits = i_blkbits;
+ unsigned blocksize_mask = (1 << blkbits) - 1;
+ unsigned long align = offset | iov_iter_alignment(iter);
+ struct block_device *bdev = inode->i_sb->s_bdev;
+
+ if (align & blocksize_mask) {
+ if (bdev)
+ blkbits = blksize_bits(bdev_logical_block_size(bdev));
+ blocksize_mask = (1 << blkbits) - 1;
+ if (align & blocksize_mask)
+ return -EINVAL;
+ return 1;
+ }
return 0;
}
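
check_direct_IO() above becomes a tri-state test: fully fs-block-aligned IO goes direct, IO that only meets the device's logical block size returns 1 so the caller falls back to buffered (see the `err < 0 ? err : 0` in f2fs_direct_IO below), and anything less aligned is rejected. A compact model, assuming both sizes are powers of two:

#include <errno.h>
#include <stdint.h>

/* 0 = direct IO ok, 1 = fall back to buffered, -EINVAL = reject */
static int dio_alignment(uint64_t offset, uint64_t buf_align,
			 unsigned int fs_block, unsigned int logical_block)
{
	uint64_t align = offset | buf_align;

	if (!(align & (fs_block - 1)))
		return 0;
	if (!(align & (logical_block - 1)))
		return 1;
	return -EINVAL;
}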
@@ -2396,7 +2489,7 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
err = check_direct_IO(inode, iter, offset);
if (err)
- return err;
+ return err < 0 ? err : 0;
if (f2fs_force_buffered_io(inode, rw))
return 0;
@@ -2490,6 +2583,10 @@ static int f2fs_set_data_page_dirty(struct page *page)
if (!PageUptodate(page))
SetPageUptodate(page);
+	/* don't retain the PG_checked flag which was set during GC */
+ if (is_cold_data(page))
+ clear_cold_data(page);
+
if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
f2fs_register_inmem_page(inode, page);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 2d65e77ae5cf..214a968962a1 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -215,7 +215,8 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
- si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
+ si->base_mem += NM_I(sbi)->nat_blocks *
+ f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
si->base_mem += NM_I(sbi)->nat_blocks / 8;
si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 7f955c4e86a4..ecc3a4e2be96 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -517,12 +517,11 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
}
start:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
f2fs_show_injection_info(FAULT_DIR_DEPTH);
return -ENOSPC;
}
-#endif
+
if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
return -ENOSPC;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 4d8b1de83143..abf925664d9c 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -41,7 +41,6 @@
} while (0)
#endif
-#ifdef CONFIG_F2FS_FAULT_INJECTION
enum {
FAULT_KMALLOC,
FAULT_KVMALLOC,
@@ -56,16 +55,20 @@ enum {
FAULT_TRUNCATE,
FAULT_IO,
FAULT_CHECKPOINT,
+ FAULT_DISCARD,
FAULT_MAX,
};
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+#define F2FS_ALL_FAULT_TYPE ((1 << FAULT_MAX) - 1)
+
struct f2fs_fault_info {
atomic_t inject_ops;
unsigned int inject_rate;
unsigned int inject_type;
};
-extern char *fault_name[FAULT_MAX];
+extern char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif
@@ -178,7 +181,6 @@ enum {
#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
-#define DEF_MAX_DISCARD_LEN 512 /* Max. 2MB per discard */
#define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */
@@ -194,7 +196,7 @@ struct cp_control {
};
/*
- * For CP/NAT/SIT/SSA readahead
+ * indicate meta/data type
*/
enum {
META_CP,
@@ -202,6 +204,8 @@ enum {
META_SIT,
META_SSA,
META_POR,
+ DATA_GENERIC,
+ META_GENERIC,
};
/* for the list of ino */
@@ -226,6 +230,12 @@ struct inode_entry {
struct inode *inode; /* vfs inode pointer */
};
+struct fsync_node_entry {
+ struct list_head list; /* list head */
+ struct page *page; /* warm node page pointer */
+ unsigned int seq_id; /* sequence id */
+};
+
/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
struct list_head list; /* list head */
@@ -242,9 +252,10 @@ struct discard_entry {
(MAX_PLIST_NUM - 1) : (blk_num - 1))
enum {
- D_PREP,
- D_SUBMIT,
- D_DONE,
+ D_PREP, /* initial */
+ D_PARTIAL, /* partially submitted */
+ D_SUBMIT, /* all submitted */
+ D_DONE, /* finished */
};
struct discard_info {
@@ -269,7 +280,10 @@ struct discard_cmd {
struct block_device *bdev; /* bdev */
unsigned short ref; /* reference count */
unsigned char state; /* state */
+ unsigned char issuing; /* issuing discard */
int error; /* bio error */
+ spinlock_t lock; /* for state/bio_ref updating */
+ unsigned short bio_ref; /* bio reference count */
};
enum {
@@ -289,6 +303,7 @@ struct discard_policy {
unsigned int io_aware_gran; /* min. granularity for discards not to be I/O-aware */
bool io_aware; /* issue discard in idle time */
bool sync; /* submit discard with REQ_SYNC flag */
+ bool ordered; /* issue discards in LBA order */
unsigned int granularity; /* discard granularity */
};
@@ -305,10 +320,12 @@ struct discard_cmd_control {
unsigned int max_discards; /* max. discards to be issued */
unsigned int discard_granularity; /* discard granularity */
unsigned int undiscard_blks; /* # of undiscard blocks */
+ unsigned int next_pos; /* next discard position */
atomic_t issued_discard; /* # of issued discard */
atomic_t issing_discard; /* # of issuing discards */
atomic_t discard_cmd_cnt; /* # of cached cmd count */
struct rb_root root; /* root of discard rb-tree */
+ bool rbtree_check; /* config for consistency check */
};
/* for the list of fsync inodes, used only during recovery */
@@ -508,13 +525,12 @@ enum {
*/
};
+#define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO count */
+
#define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */
#define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
-/* vector size for gang look-up from extent cache that consists of radix tree */
-#define EXT_TREE_VEC_SIZE 64
-
/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
@@ -600,6 +616,8 @@ enum {
#define FADVISE_HOT_BIT 0x20
#define FADVISE_VERITY_BIT 0x40 /* reserved */
+#define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT)
+
#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT)
@@ -669,8 +687,8 @@ struct f2fs_inode_info {
int i_extra_isize; /* size of extra space located in i_addr */
kprojid_t i_projid; /* id for project quota */
int i_inline_xattr_size; /* inline xattr size */
- struct timespec i_crtime; /* inode creation time */
- struct timespec i_disk_time[4]; /* inode disk times */
+ struct timespec64 i_crtime; /* inode creation time */
+ struct timespec64 i_disk_time[4];/* inode disk times */
};
static inline void get_extent_info(struct extent_info *ext,
@@ -698,22 +716,22 @@ static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
}
static inline bool __is_discard_mergeable(struct discard_info *back,
- struct discard_info *front)
+ struct discard_info *front, unsigned int max_len)
{
return (back->lstart + back->len == front->lstart) &&
- (back->len + front->len < DEF_MAX_DISCARD_LEN);
+ (back->len + front->len <= max_len);
}
static inline bool __is_discard_back_mergeable(struct discard_info *cur,
- struct discard_info *back)
+ struct discard_info *back, unsigned int max_len)
{
- return __is_discard_mergeable(back, cur);
+ return __is_discard_mergeable(back, cur, max_len);
}
static inline bool __is_discard_front_mergeable(struct discard_info *cur,
- struct discard_info *front)
+ struct discard_info *front, unsigned int max_len)
{
- return __is_discard_mergeable(cur, front);
+ return __is_discard_mergeable(cur, front, max_len);
}
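
The merge helpers above now take max_len from the caller (per discard policy) instead of the removed DEF_MAX_DISCARD_LEN constant, so different policies can cap merged discards differently. A standalone version of the adjacency-plus-cap test:

#include <stdbool.h>
#include <stdint.h>

struct drange {
	uint64_t lstart;	/* logical start block */
	uint64_t len;		/* length in blocks */
};

static bool discard_mergeable(const struct drange *back,
			      const struct drange *front, uint64_t max_len)
{
	return back->lstart + back->len == front->lstart &&
	       back->len + front->len <= max_len;
}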
static inline bool __is_extent_mergeable(struct extent_info *back,
@@ -768,6 +786,7 @@ struct f2fs_nm_info {
struct radix_tree_root nat_set_root;/* root of the nat set cache */
struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */
struct list_head nat_entries; /* cached nat entry list (clean) */
+ spinlock_t nat_list_lock; /* protect clean nat entry list */
unsigned int nat_cnt; /* the # of cached nat entries */
unsigned int dirty_nat_cnt; /* total num of nat entries in set */
unsigned int nat_blocks; /* # of nat blocks */
@@ -894,6 +913,7 @@ struct f2fs_sm_info {
unsigned int ipu_policy; /* in-place-update policy */
unsigned int min_ipu_util; /* in-place-update threshold */
unsigned int min_fsync_blocks; /* threshold for fsync */
+ unsigned int min_seq_blocks; /* threshold for sequential blocks */
unsigned int min_hot_blocks; /* threshold for hot block allocation */
unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */
@@ -1015,6 +1035,7 @@ struct f2fs_io_info {
bool retry; /* need to reallocate block address */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
+ unsigned char version; /* version of the node */
};
#define is_read_io(rw) ((rw) == READ)
@@ -1066,6 +1087,7 @@ enum {
SBI_POR_DOING, /* recovery is doing or not */
SBI_NEED_SB_WRITE, /* need to recover superblock */
SBI_NEED_CP, /* need to checkpoint */
+ SBI_IS_SHUTDOWN, /* shutdown by ioctl */
};
enum {
@@ -1112,6 +1134,7 @@ struct f2fs_sb_info {
struct rw_semaphore sb_lock; /* lock for raw super block */
int valid_super_block; /* valid super block no */
unsigned long s_flag; /* flags for sbi */
+ struct mutex writepages; /* mutex for writepages() */
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int blocks_per_blkz; /* F2FS blocks per zone */
@@ -1148,6 +1171,11 @@ struct f2fs_sb_info {
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
+ spinlock_t fsync_node_lock; /* for node entry lock */
+ struct list_head fsync_node_list; /* node list head */
+ unsigned int fsync_seg_id; /* sequence id */
+ unsigned int fsync_node_num; /* number of node entries */
+
/* for orphan inode, use 0'th array */
unsigned int max_orphans; /* max orphan inodes */
@@ -1215,6 +1243,7 @@ struct f2fs_sb_info {
unsigned int gc_mode; /* current GC state */
/* for skip statistic */
unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
+ unsigned long long skipped_gc_rwsem; /* FG_GC only */
/* threshold for gc trials on pinned files */
u64 gc_pin_file_threshold;
@@ -1279,7 +1308,7 @@ struct f2fs_sb_info {
#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(type) \
printk("%sF2FS-fs : inject %s in %s of %pF\n", \
- KERN_INFO, fault_name[type], \
+ KERN_INFO, f2fs_fault_name[type], \
__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
@@ -1298,13 +1327,19 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
}
return false;
}
+#else
+#define f2fs_show_injection_info(type) do { } while (0)
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+ return false;
+}
#endif
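
With the fault enum hoisted out of CONFIG_F2FS_FAULT_INJECTION and the no-op fallbacks added above, the per-call-site `#ifdef` blocks dropped throughout this series become unnecessary. The general shape of the pattern, sketched with a hypothetical config knob:

#include <stdbool.h>

#ifdef CONFIG_EXAMPLE_FAULT
bool example_time_to_inject(int type);	/* real implementation elsewhere */
#else
static inline bool example_time_to_inject(int type) { return false; }
#endif

/*
 * Call sites stay unconditional; when the config is off the inline
 * stub returns a constant and the compiler deletes the whole branch:
 *
 *	if (example_time_to_inject(FAULT_KMALLOC))
 *		return NULL;
 */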
/* For write statistics. Suppose sector size is 512 bytes,
* and the return value is in kbytes. s is a struct f2fs_sb_info pointer.
*/
#define BD_PART_WRITTEN(s) \
-(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[1]) - \
+(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) - \
(s)->sectors_written_start) >> 1)
static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
@@ -1326,7 +1361,7 @@ static inline bool is_idle(struct f2fs_sb_info *sbi)
struct request_list *rl = &q->root_rl;
if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
- return 0;
+ return false;
return f2fs_time_over(sbi, REQ_TIME);
}
@@ -1650,13 +1685,12 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
if (ret)
return ret;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_BLOCK)) {
f2fs_show_injection_info(FAULT_BLOCK);
release = *count;
goto enospc;
}
-#endif
+
/*
* let's increase this prior to the actual block count change in order
* for f2fs_sync_file to avoid data races when deciding checkpoint.
@@ -1680,18 +1714,20 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
sbi->total_valid_block_count -= diff;
if (!*count) {
spin_unlock(&sbi->stat_lock);
- percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
goto enospc;
}
}
spin_unlock(&sbi->stat_lock);
- if (unlikely(release))
+ if (unlikely(release)) {
+ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
dquot_release_reservation_block(inode, release);
+ }
f2fs_i_blocks_write(inode, *count, true, true);
return 0;
enospc:
+ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
dquot_release_reservation_block(inode, release);
return -ENOSPC;
}
@@ -1863,12 +1899,10 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
return ret;
}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_BLOCK)) {
f2fs_show_injection_info(FAULT_BLOCK);
goto enospc;
}
-#endif
spin_lock(&sbi->stat_lock);
@@ -1953,17 +1987,23 @@ static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
pgoff_t index, bool for_write)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- struct page *page = find_lock_page(mapping, index);
+ struct page *page;
- if (page)
- return page;
+ if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
+ if (!for_write)
+ page = find_get_page_flags(mapping, index,
+ FGP_LOCK | FGP_ACCESSED);
+ else
+ page = find_lock_page(mapping, index);
+ if (page)
+ return page;
- if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
- f2fs_show_injection_info(FAULT_PAGE_ALLOC);
- return NULL;
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
+ f2fs_show_injection_info(FAULT_PAGE_ALLOC);
+ return NULL;
+ }
}
-#endif
+
if (!for_write)
return grab_cache_page(mapping, index);
return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
@@ -1973,12 +2013,11 @@ static inline struct page *f2fs_pagecache_get_page(
struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp_mask)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
f2fs_show_injection_info(FAULT_PAGE_GET);
return NULL;
}
-#endif
+
return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}
@@ -2043,12 +2082,11 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
return bio;
}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
f2fs_show_injection_info(FAULT_ALLOC_BIO);
return NULL;
}
-#endif
+
return bio_alloc(GFP_KERNEL, npages);
}
@@ -2518,7 +2556,6 @@ static inline void clear_file(struct inode *inode, int type)
static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
- struct timespec ts;
bool ret;
if (dsync) {
@@ -2534,16 +2571,13 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
i_size_read(inode) & ~PAGE_MASK)
return false;
- ts = timespec64_to_timespec(inode->i_atime);
- if (!timespec_equal(F2FS_I(inode)->i_disk_time, &ts))
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
return false;
- ts = timespec64_to_timespec(inode->i_ctime);
- if (!timespec_equal(F2FS_I(inode)->i_disk_time + 1, &ts))
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
return false;
- ts = timespec64_to_timespec(inode->i_mtime);
- if (!timespec_equal(F2FS_I(inode)->i_disk_time + 2, &ts))
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
return false;
- if (!timespec_equal(F2FS_I(inode)->i_disk_time + 3,
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
&F2FS_I(inode)->i_crtime))
return false;
@@ -2587,12 +2621,11 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_KMALLOC)) {
f2fs_show_injection_info(FAULT_KMALLOC);
return NULL;
}
-#endif
+
return kmalloc(size, flags);
}
@@ -2605,12 +2638,11 @@ static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_KVMALLOC)) {
f2fs_show_injection_info(FAULT_KVMALLOC);
return NULL;
}
-#endif
+
return kvmalloc(size, flags);
}
@@ -2669,13 +2701,39 @@ static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
spin_unlock(&sbi->iostat_lock);
}
-static inline bool is_valid_blkaddr(block_t blkaddr)
+#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META && \
+ (!is_read_io(fio->op) || fio->is_meta))
+
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type);
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type)
+{
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "invalid blkaddr: %u, type: %d, run fsck to fix.",
+ blkaddr, type);
+ f2fs_bug_on(sbi, 1);
+ }
+}
+
+static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
return false;
return true;
}
+static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
+{
+ if (!__is_valid_data_blkaddr(blkaddr))
+ return false;
+ verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
+ return true;
+}
+
/*
* file.c
*/
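
The helpers above split validation into two layers: __is_valid_data_blkaddr() filters out the NEW_ADDR/NULL_ADDR sentinels, and f2fs_is_valid_blkaddr() range-checks the address against the area implied by its type (DATA_GENERIC, META_GENERIC, ...). A userspace model of the layered check; the sentinel values and bounds here are illustrative stand-ins for the kernel's NULL_ADDR/NEW_ADDR and MAIN_BLKADDR/MAX_BLKADDR.

#include <stdbool.h>
#include <stdint.h>

#define MODEL_NULL_ADDR	0u
#define MODEL_NEW_ADDR	UINT32_MAX

static bool data_blkaddr_ok(uint32_t blkaddr,
			    uint32_t main_start, uint32_t main_end)
{
	/* layer 1: sentinels are not real on-disk blocks */
	if (blkaddr == MODEL_NULL_ADDR || blkaddr == MODEL_NEW_ADDR)
		return false;
	/* layer 2: a real address must fall inside the main area */
	return blkaddr >= main_start && blkaddr < main_end;
}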
@@ -2790,16 +2848,21 @@ struct node_info;
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
-void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
struct node_info *ni);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
-int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino);
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+ unsigned int seq_id);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
@@ -2808,11 +2871,12 @@ struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
void f2fs_move_node_page(struct page *node_page, int gc_type);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
- struct writeback_control *wbc, bool atomic);
+ struct writeback_control *wbc, bool atomic,
+ unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
struct writeback_control *wbc,
bool do_balance, enum iostat_type io_type);
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
@@ -2820,7 +2884,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
-void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum);
void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
@@ -2898,9 +2962,10 @@ enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
-bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
- block_t blkaddr, int type);
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
@@ -2924,6 +2989,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
@@ -3362,7 +3428,7 @@ static inline bool f2fs_may_encrypt(struct inode *inode)
return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
#else
- return 0;
+ return false;
#endif
}
@@ -3373,4 +3439,11 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, int rw)
F2FS_I_SB(inode)->s_ndevs);
}
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+ unsigned int type);
+#else
+#define f2fs_build_fault_attr(sbi, rate, type) do { } while (0)
+#endif
+
#endif
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 6880c6f78d58..5474aaa274b9 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -213,6 +213,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
.nr_to_write = LONG_MAX,
.for_reclaim = 0,
};
+ unsigned int seq_id = 0;
if (unlikely(f2fs_readonly(inode->i_sb)))
return 0;
@@ -275,7 +276,7 @@ go_write:
}
sync_nodes:
atomic_inc(&sbi->wb_sync_req[NODE]);
- ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic);
+ ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
atomic_dec(&sbi->wb_sync_req[NODE]);
if (ret)
goto out;
@@ -301,7 +302,7 @@ sync_nodes:
* given fsync mark.
*/
if (!atomic) {
- ret = f2fs_wait_on_node_pages_writeback(sbi, ino);
+ ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
if (ret)
goto out;
}
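
f2fs_do_sync_file() above now threads a seq_id through f2fs_fsync_node_pages() and waits on it instead of on the inode number, evidently so the non-atomic path waits only for the node pages this fsync actually submitted. A minimal model of the sequence handshake; the names are illustrative.

#include <stdbool.h>

struct fsync_seq {
	unsigned int submitted;	/* stamped on each written node page */
	unsigned int completed;	/* advanced by the write-end path */
};

static unsigned int submit_node_page(struct fsync_seq *s)
{
	return ++s->submitted;		/* caller keeps the last id */
}

static bool fsync_wait_done(const struct fsync_seq *s, unsigned int seq_id)
{
	return s->completed >= seq_id;	/* wait only for our own pages */
}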
@@ -350,13 +351,13 @@ static pgoff_t __get_first_dirty_index(struct address_space *mapping,
return pgofs;
}
-static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
- int whence)
+static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
+ pgoff_t dirty, pgoff_t pgofs, int whence)
{
switch (whence) {
case SEEK_DATA:
if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
- is_valid_blkaddr(blkaddr))
+ is_valid_data_blkaddr(sbi, blkaddr))
return true;
break;
case SEEK_HOLE:
@@ -420,7 +421,15 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
blkaddr = datablock_addr(dn.inode,
dn.node_page, dn.ofs_in_node);
- if (__found_offset(blkaddr, dirty, pgofs, whence)) {
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
+ blkaddr, DATA_GENERIC)) {
+ f2fs_put_dnode(&dn);
+ goto fail;
+ }
+
+ if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
+ pgofs, whence)) {
f2fs_put_dnode(&dn);
goto found;
}
@@ -513,6 +522,11 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
dn->data_blkaddr = NULL_ADDR;
f2fs_set_data_blkaddr(dn);
+
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+ continue;
+
f2fs_invalidate_blocks(sbi, blkaddr);
if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
@@ -654,12 +668,11 @@ int f2fs_truncate(struct inode *inode)
trace_f2fs_truncate(inode);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
f2fs_show_injection_info(FAULT_TRUNCATE);
return -EIO;
}
-#endif
+
/* we should check inline_data size */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
@@ -782,22 +795,26 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_SIZE) {
- if (attr->ia_size <= i_size_read(inode)) {
- down_write(&F2FS_I(inode)->i_mmap_sem);
- truncate_setsize(inode, attr->ia_size);
+ bool to_smaller = (attr->ia_size <= i_size_read(inode));
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ truncate_setsize(inode, attr->ia_size);
+
+ if (to_smaller)
err = f2fs_truncate(inode);
- up_write(&F2FS_I(inode)->i_mmap_sem);
- if (err)
- return err;
- } else {
- /*
- * do not trim all blocks after i_size if target size is
- * larger than i_size.
- */
- down_write(&F2FS_I(inode)->i_mmap_sem);
- truncate_setsize(inode, attr->ia_size);
- up_write(&F2FS_I(inode)->i_mmap_sem);
+ /*
+ * do not trim all blocks after i_size if target size is
+ * larger than i_size.
+ */
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
+ if (err)
+ return err;
+ if (!to_smaller) {
/* should convert inline inode here */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
@@ -944,14 +961,19 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
blk_start = (loff_t)pg_start << PAGE_SHIFT;
blk_end = (loff_t)pg_end << PAGE_SHIFT;
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
+
truncate_inode_pages_range(mapping, blk_start,
blk_end - 1);
f2fs_lock_op(sbi);
ret = f2fs_truncate_hole(inode, pg_start, pg_end);
f2fs_unlock_op(sbi);
+
up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
}
}
@@ -1054,7 +1076,12 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
if (ret)
return ret;
- f2fs_get_node_info(sbi, dn.nid, &ni);
+ ret = f2fs_get_node_info(sbi, dn.nid, &ni);
+ if (ret) {
+ f2fs_put_dnode(&dn);
+ return ret;
+ }
+
ilen = min((pgoff_t)
ADDRS_PER_PAGE(dn.node_page, dst_inode) -
dn.ofs_in_node, len - i);
@@ -1161,25 +1188,33 @@ roll_back:
return ret;
}
-static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
+static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ pgoff_t start = offset >> PAGE_SHIFT;
+ pgoff_t end = (offset + len) >> PAGE_SHIFT;
int ret;
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
- f2fs_drop_extent_tree(inode);
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+ f2fs_lock_op(sbi);
+ f2fs_drop_extent_tree(inode);
+ truncate_pagecache(inode, offset);
ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
f2fs_unlock_op(sbi);
+
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
- pgoff_t pg_start, pg_end;
loff_t new_size;
int ret;
@@ -1194,25 +1229,17 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (ret)
return ret;
- pg_start = offset >> PAGE_SHIFT;
- pg_end = (offset + len) >> PAGE_SHIFT;
-
- /* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
- down_write(&F2FS_I(inode)->i_mmap_sem);
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
if (ret)
- goto out_unlock;
-
- truncate_pagecache(inode, offset);
+ return ret;
- ret = f2fs_do_collapse(inode, pg_start, pg_end);
+ ret = f2fs_do_collapse(inode, offset, len);
if (ret)
- goto out_unlock;
+ return ret;
/* write out all moved pages, if possible */
+ down_write(&F2FS_I(inode)->i_mmap_sem);
filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
truncate_pagecache(inode, offset);
@@ -1220,11 +1247,9 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
truncate_pagecache(inode, new_size);
ret = f2fs_truncate_blocks(inode, new_size, true);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
-out_unlock:
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
@@ -1290,12 +1315,9 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
if (ret)
return ret;
- down_write(&F2FS_I(inode)->i_mmap_sem);
ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
if (ret)
- goto out_sem;
-
- truncate_pagecache_range(inode, offset, offset + len - 1);
+ return ret;
pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1307,7 +1329,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
ret = fill_zero(inode, pg_start, off_start,
off_end - off_start);
if (ret)
- goto out_sem;
+ return ret;
new_size = max_t(loff_t, new_size, offset + len);
} else {
@@ -1315,7 +1337,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
ret = fill_zero(inode, pg_start++, off_start,
PAGE_SIZE - off_start);
if (ret)
- goto out_sem;
+ return ret;
new_size = max_t(loff_t, new_size,
(loff_t)pg_start << PAGE_SHIFT);
@@ -1326,12 +1348,21 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
unsigned int end_offset;
pgoff_t end;
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ truncate_pagecache_range(inode,
+ (loff_t)index << PAGE_SHIFT,
+ ((loff_t)pg_end << PAGE_SHIFT) - 1);
+
f2fs_lock_op(sbi);
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
if (ret) {
f2fs_unlock_op(sbi);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
}
@@ -1340,7 +1371,10 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
ret = f2fs_do_zero_range(&dn, index, end);
f2fs_put_dnode(&dn);
+
f2fs_unlock_op(sbi);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_balance_fs(sbi, dn.node_changed);
@@ -1368,9 +1402,6 @@ out:
else
f2fs_i_size_write(inode, new_size);
}
-out_sem:
- up_write(&F2FS_I(inode)->i_mmap_sem);
-
return ret;
}
@@ -1399,26 +1430,27 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
f2fs_balance_fs(sbi, true);
- /* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
down_write(&F2FS_I(inode)->i_mmap_sem);
ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
if (ret)
- goto out;
+ return ret;
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
if (ret)
- goto out;
-
- truncate_pagecache(inode, offset);
+ return ret;
pg_start = offset >> PAGE_SHIFT;
pg_end = (offset + len) >> PAGE_SHIFT;
delta = pg_end - pg_start;
idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+ truncate_pagecache(inode, offset);
+
while (!ret && idx > pg_start) {
nr = idx - pg_start;
if (nr > delta)
@@ -1432,16 +1464,17 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
idx + delta, nr, false);
f2fs_unlock_op(sbi);
}
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
/* write out all moved pages, if possible */
+ down_write(&F2FS_I(inode)->i_mmap_sem);
filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
truncate_pagecache(inode, offset);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
-out:
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
@@ -1597,7 +1630,7 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int flags = fi->i_flags;
- if (file_is_encrypt(inode))
+ if (f2fs_encrypted_inode(inode))
flags |= F2FS_ENCRYPT_FL;
if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
flags |= F2FS_INLINE_DATA_FL;
@@ -1688,15 +1721,18 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
inode_lock(inode);
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
- if (f2fs_is_atomic_file(inode))
+ if (f2fs_is_atomic_file(inode)) {
+ if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
+ ret = -EINVAL;
goto out;
+ }
ret = f2fs_convert_inline_inode(inode);
if (ret)
goto out;
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
if (!get_dirty_pages(inode))
goto skip_flush;
@@ -1704,18 +1740,20 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
"Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
- if (ret)
+ if (ret) {
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
+ }
skip_flush:
set_inode_flag(inode, FI_ATOMIC_FILE);
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
F2FS_I(inode)->inmem_task = current;
stat_inc_atomic_write(inode);
stat_update_max_atomic_write(inode);
out:
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
@@ -1733,9 +1771,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
if (ret)
return ret;
- inode_lock(inode);
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ inode_lock(inode);
if (f2fs_is_volatile_file(inode)) {
ret = -EINVAL;
@@ -1761,7 +1799,6 @@ err_out:
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
ret = -EINVAL;
}
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
@@ -1853,6 +1890,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
}
+ clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+
inode_unlock(inode);
mnt_drop_write_file(filp);
@@ -1866,7 +1905,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
__u32 in;
- int ret;
+ int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1889,6 +1928,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
}
if (sb) {
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
thaw_bdev(sb->s_bdev, sb);
}
break;
@@ -1898,13 +1938,16 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
if (ret)
goto out;
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NOSYNC:
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_METAFLUSH:
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
default:
ret = -EINVAL;
@@ -2107,7 +2150,7 @@ out:
return ret;
}
-static int f2fs_ioc_f2fs_write_checkpoint(struct file *filp, unsigned long arg)
+static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -2351,15 +2394,10 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
}
inode_lock(src);
- down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
if (src != dst) {
ret = -EBUSY;
if (!inode_trylock(dst))
goto out;
- if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) {
- inode_unlock(dst);
- goto out;
- }
}
ret = -EINVAL;
@@ -2404,6 +2442,14 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
goto out_unlock;
f2fs_balance_fs(sbi, true);
+
+ down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+ if (src != dst) {
+ ret = -EBUSY;
+ if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+ goto out_src;
+ }
+
f2fs_lock_op(sbi);
ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
pos_out >> F2FS_BLKSIZE_BITS,
@@ -2416,13 +2462,15 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
f2fs_i_size_write(dst, dst_osize);
}
f2fs_unlock_op(sbi);
-out_unlock:
- if (src != dst) {
+
+ if (src != dst)
up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
+out_src:
+ up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+out_unlock:
+ if (src != dst)
inode_unlock(dst);
- }
out:
- up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
inode_unlock(src);
return ret;
}
@@ -2782,7 +2830,7 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
if (!pin) {
clear_inode_flag(inode, FI_PIN_FILE);
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = 1;
+ f2fs_i_gc_failures_write(inode, 0);
goto done;
}
@@ -2888,7 +2936,7 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case F2FS_IOC_GARBAGE_COLLECT_RANGE:
return f2fs_ioc_gc_range(filp, arg);
case F2FS_IOC_WRITE_CHECKPOINT:
- return f2fs_ioc_f2fs_write_checkpoint(filp, arg);
+ return f2fs_ioc_write_checkpoint(filp, arg);
case F2FS_IOC_DEFRAGMENT:
return f2fs_ioc_defragment(filp, arg);
case F2FS_IOC_MOVE_RANGE:
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 9093be6e7a7d..5c8d00422237 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -53,12 +53,10 @@ static int gc_thread_func(void *data)
continue;
}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
f2fs_show_injection_info(FAULT_CHECKPOINT);
f2fs_stop_checkpoint(sbi, false);
}
-#endif
if (!sb_start_write_trylock(sbi->sb))
continue;
@@ -517,7 +515,11 @@ next_step:
continue;
}
- f2fs_get_node_info(sbi, nid, &ni);
+ if (f2fs_get_node_info(sbi, nid, &ni)) {
+ f2fs_put_page(node_page, 1);
+ continue;
+ }
+
if (ni.blk_addr != start_addr + off) {
f2fs_put_page(node_page, 1);
continue;
@@ -576,7 +578,10 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (IS_ERR(node_page))
return false;
- f2fs_get_node_info(sbi, nid, dni);
+ if (f2fs_get_node_info(sbi, nid, dni)) {
+ f2fs_put_page(node_page, 1);
+ return false;
+ }
if (sum->version != dni->version) {
f2fs_msg(sbi->sb, KERN_WARNING,
@@ -594,6 +599,72 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
return true;
}
+static int ra_data_block(struct inode *inode, pgoff_t index)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct address_space *mapping = inode->i_mapping;
+ struct dnode_of_data dn;
+ struct page *page;
+ struct extent_info ei = {0, 0, 0};
+ struct f2fs_io_info fio = {
+ .sbi = sbi,
+ .ino = inode->i_ino,
+ .type = DATA,
+ .temp = COLD,
+ .op = REQ_OP_READ,
+ .op_flags = 0,
+ .encrypted_page = NULL,
+ .in_list = false,
+ .retry = false,
+ };
+ int err;
+
+ page = f2fs_grab_cache_page(mapping, index, true);
+ if (!page)
+ return -ENOMEM;
+
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ goto got_it;
+ }
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (err)
+ goto put_page;
+ f2fs_put_dnode(&dn);
+
+ if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+ DATA_GENERIC))) {
+ err = -EFAULT;
+ goto put_page;
+ }
+got_it:
+ /* read page */
+ fio.page = page;
+ fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
+
+ fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
+ dn.data_blkaddr,
+ FGP_LOCK | FGP_CREAT, GFP_NOFS);
+ if (!fio.encrypted_page) {
+ err = -ENOMEM;
+ goto put_page;
+ }
+
+ err = f2fs_submit_page_bio(&fio);
+ if (err)
+ goto put_encrypted_page;
+ f2fs_put_page(fio.encrypted_page, 0);
+ f2fs_put_page(page, 1);
+ return 0;
+put_encrypted_page:
+ f2fs_put_page(fio.encrypted_page, 1);
+put_page:
+ f2fs_put_page(page, 1);
+ return err;
+}
+
/*
* Move data block via META_MAPPING while keeping locked data page.
* This can be used to move blocks, aka LBAs, directly on disk.
@@ -615,7 +686,7 @@ static void move_data_block(struct inode *inode, block_t bidx,
struct dnode_of_data dn;
struct f2fs_summary sum;
struct node_info ni;
- struct page *page;
+ struct page *page, *mpage;
block_t newaddr;
int err;
bool lfs_mode = test_opt(fio.sbi, LFS);
@@ -655,7 +726,10 @@ static void move_data_block(struct inode *inode, block_t bidx,
*/
f2fs_wait_on_page_writeback(page, DATA, true);
- f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ if (err)
+ goto put_out;
+
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
/* read page */
@@ -675,6 +749,23 @@ static void move_data_block(struct inode *inode, block_t bidx,
goto recover_block;
}
+ mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
+ if (mpage) {
+ bool updated = false;
+
+ if (PageUptodate(mpage)) {
+ memcpy(page_address(fio.encrypted_page),
+ page_address(mpage), PAGE_SIZE);
+ updated = true;
+ }
+ f2fs_put_page(mpage, 1);
+ invalidate_mapping_pages(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, fio.old_blkaddr);
+ if (updated)
+ goto write_page;
+ }
+
err = f2fs_submit_page_bio(&fio);
if (err)
goto put_page_out;
@@ -691,6 +782,7 @@ static void move_data_block(struct inode *inode, block_t bidx,
goto put_page_out;
}
+write_page:
set_page_dirty(fio.encrypted_page);
f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
if (clear_page_dirty_for_io(fio.encrypted_page))
@@ -865,22 +957,30 @@ next_step:
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
- /* if inode uses special I/O path, let's go phase 3 */
- if (f2fs_post_read_required(inode)) {
- add_gc_inode(gc_list, inode);
- continue;
- }
-
if (!down_write_trylock(
&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
iput(inode);
+ sbi->skipped_gc_rwsem++;
+ continue;
+ }
+
+ start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
+ ofs_in_node;
+
+ if (f2fs_post_read_required(inode)) {
+ int err = ra_data_block(inode, start_bidx);
+
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ if (err) {
+ iput(inode);
+ continue;
+ }
+ add_gc_inode(gc_list, inode);
continue;
}
- start_bidx = f2fs_start_bidx_of_node(nofs, inode);
data_page = f2fs_get_read_data_page(inode,
- start_bidx + ofs_in_node, REQ_RAHEAD,
- true);
+ start_bidx, REQ_RAHEAD, true);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
if (IS_ERR(data_page)) {
iput(inode);
@@ -903,6 +1003,7 @@ next_step:
continue;
if (!down_write_trylock(
&fi->i_gc_rwsem[WRITE])) {
+ sbi->skipped_gc_rwsem++;
up_write(&fi->i_gc_rwsem[READ]);
continue;
}
@@ -986,7 +1087,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
goto next;
sum = page_address(sum_page);
- f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+ if (type != GET_SUM_TYPE((&sum->footer))) {
+ f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
+ "type [%d, %d] in SSA and SIT",
+ segno, type, GET_SUM_TYPE((&sum->footer)));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ goto next;
+ }
/*
* this is to avoid deadlock:
@@ -1034,6 +1141,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
+ unsigned long long first_skipped;
unsigned int skipped_round = 0, round = 0;
trace_f2fs_gc_begin(sbi->sb, sync, background,
@@ -1046,6 +1154,8 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
prefree_segments(sbi));
cpc.reason = __get_cp_reason(sbi);
+ sbi->skipped_gc_rwsem = 0;
+ first_skipped = last_skipped;
gc_more:
if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
ret = -EINVAL;
@@ -1087,7 +1197,8 @@ gc_more:
total_freed += seg_freed;
if (gc_type == FG_GC) {
- if (sbi->skipped_atomic_files[FG_GC] > last_skipped)
+ if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
+ sbi->skipped_gc_rwsem)
skipped_round++;
last_skipped = sbi->skipped_atomic_files[FG_GC];
round++;
@@ -1096,15 +1207,23 @@ gc_more:
if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO;
- if (!sync) {
- if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
- if (skipped_round > MAX_SKIP_ATOMIC_COUNT &&
- skipped_round * 2 >= round)
- f2fs_drop_inmem_pages_all(sbi, true);
+ if (sync)
+ goto stop;
+
+ if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
+ if (skipped_round <= MAX_SKIP_GC_COUNT ||
+ skipped_round * 2 < round) {
segno = NULL_SEGNO;
goto gc_more;
}
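+		/*
+		 * skips were dominated by atomic files rather than lock
+		 * contention: drop in-memory pages so FG_GC can make progress
+		 */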
+ if (first_skipped < last_skipped &&
+ (last_skipped - first_skipped) >
+ sbi->skipped_gc_rwsem) {
+ f2fs_drop_inmem_pages_all(sbi, true);
+ segno = NULL_SEGNO;
+ goto gc_more;
+ }
if (gc_type == FG_GC)
ret = f2fs_write_checkpoint(sbi, &cpc);
}
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 043830be5662..115dc219344b 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -121,6 +121,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
.encrypted_page = NULL,
.io_type = FS_DATA_IO,
};
+ struct node_info ni;
int dirty, err;
if (!f2fs_exist_data(dn->inode))
@@ -130,6 +131,24 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
if (err)
return err;
+ err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
+ if (err) {
+ f2fs_put_dnode(dn);
+ return err;
+ }
+
+ fio.version = ni.version;
+
+ if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
+ f2fs_put_dnode(dn);
+ set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
+ f2fs_msg(fio.sbi->sb, KERN_WARNING,
+ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+ "run fsck to fix.",
+ __func__, dn->inode->i_ino, dn->data_blkaddr);
+ return -EINVAL;
+ }
+
f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
f2fs_do_read_inline_data(page, dn->inode_page);
@@ -363,6 +382,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
if (err)
goto out;
+ if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
+ f2fs_put_dnode(&dn);
+ set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+ f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
+ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+ "run fsck to fix.",
+ __func__, dir->i_ino, dn.data_blkaddr);
+ err = -EINVAL;
+ goto out;
+ }
+
f2fs_wait_on_page_writeback(page, DATA, true);
dentry_blk = page_address(page);
@@ -477,6 +507,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
return 0;
recover:
lock_page(ipage);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
f2fs_i_depth_write(dir, 0);
f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
@@ -668,7 +699,10 @@ int f2fs_inline_data_fiemap(struct inode *inode,
ilen = start + len;
ilen -= start;
- f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+ err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+ if (err)
+ goto out;
+
byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
byteaddr += (char *)inline_data_addr(inode, ipage) -
(char *)F2FS_INODE(ipage);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index f121c864f4c0..959df2249875 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -68,13 +68,16 @@ static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
}
}
-static bool __written_first_block(struct f2fs_inode *ri)
+static int __written_first_block(struct f2fs_sb_info *sbi,
+ struct f2fs_inode *ri)
{
block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
- if (is_valid_blkaddr(addr))
- return true;
- return false;
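+	/*
+	 * 0: first block written to a valid address, 1: not written yet,
+	 * -EFAULT: on-disk address out of range
+	 */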
+ if (!__is_valid_data_blkaddr(addr))
+ return 1;
+ if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
+ return -EFAULT;
+ return 0;
}
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -121,7 +124,7 @@ static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page
if (!f2fs_sb_has_inode_chksum(sbi->sb))
return false;
- if (!RAW_IS_INODE(F2FS_NODE(page)) || !(ri->i_inline & F2FS_EXTRA_ATTR))
+ if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
return false;
if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
@@ -159,8 +162,15 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
struct f2fs_inode *ri;
__u32 provided, calculated;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
+ return true;
+
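+	/*
+	 * with CONFIG_F2FS_CHECK_FS the checksum is refreshed on every
+	 * dirtying, so even dirty or writeback pages can be verified
+	 */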
+#ifdef CONFIG_F2FS_CHECK_FS
+ if (!f2fs_enable_inode_chksum(sbi, page))
+#else
if (!f2fs_enable_inode_chksum(sbi, page) ||
PageDirty(page) || PageWriteback(page))
+#endif
return true;
ri = &F2FS_NODE(page)->i;
@@ -185,9 +195,31 @@ void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}
-static bool sanity_check_inode(struct inode *inode)
+static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ unsigned long long iblocks;
+
+ iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
+ if (!iblocks) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
+ "run fsck to fix.",
+ __func__, inode->i_ino, iblocks);
+ return false;
+ }
+
+ if (ino_of_node(node_page) != nid_of_node(node_page)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: corrupted inode footer i_ino=%lx, ino,nid: "
+ "[%u, %u], run fsck to fix.",
+ __func__, inode->i_ino,
+ ino_of_node(node_page), nid_of_node(node_page));
+ return false;
+ }
if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
&& !f2fs_has_extra_attr(inode)) {
@@ -197,6 +229,64 @@ static bool sanity_check_inode(struct inode *inode)
__func__, inode->i_ino);
return false;
}
+
+ if (f2fs_has_extra_attr(inode) &&
+ !f2fs_sb_has_extra_attr(sbi->sb)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) is with extra_attr, "
+ "but extra_attr feature is off",
+ __func__, inode->i_ino);
+ return false;
+ }
+
+ if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
+ fi->i_extra_isize % sizeof(__le32)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
+ "max: %zu",
+ __func__, inode->i_ino, fi->i_extra_isize,
+ F2FS_TOTAL_EXTRA_ATTR_SIZE);
+ return false;
+ }
+
+ if (F2FS_I(inode)->extent_tree) {
+ struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
+
+ if (ei->len &&
+ (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
+ !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
+ DATA_GENERIC))) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) extent info [%u, %u, %u] "
+ "is incorrect, run fsck to fix",
+ __func__, inode->i_ino,
+ ei->blk, ei->fofs, ei->len);
+ return false;
+ }
+ }
+
+ if (f2fs_has_inline_data(inode) &&
+ (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx, mode=%u) should not have "
+ "inline_data, run fsck to fix",
+ __func__, inode->i_ino, inode->i_mode);
+ return false;
+ }
+
+ if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx, mode=%u) should not have "
+ "inline_dentry, run fsck to fix",
+ __func__, inode->i_ino, inode->i_mode);
+ return false;
+ }
+
return true;
}
@@ -207,6 +297,7 @@ static int do_read_inode(struct inode *inode)
struct page *node_page;
struct f2fs_inode *ri;
projid_t i_projid;
+ int err;
/* Check if ino is within scope */
if (f2fs_check_nid_range(sbi, inode->i_ino))
@@ -268,6 +359,11 @@ static int do_read_inode(struct inode *inode)
fi->i_inline_xattr_size = 0;
}
+ if (!sanity_check_inode(inode, node_page)) {
+ f2fs_put_page(node_page, 1);
+ return -EINVAL;
+ }
+
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
__recover_inline_status(inode, node_page);
@@ -275,8 +371,15 @@ static int do_read_inode(struct inode *inode)
/* get rdev by using inline_info */
__get_inode_rdev(inode, ri);
- if (__written_first_block(ri))
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ if (S_ISREG(inode->i_mode)) {
+ err = __written_first_block(sbi, ri);
+ if (err < 0) {
+ f2fs_put_page(node_page, 1);
+ return err;
+ }
+ if (!err)
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ }
if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
fi->last_disk_size = inode->i_size;
@@ -297,9 +400,9 @@ static int do_read_inode(struct inode *inode)
fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
}
- F2FS_I(inode)->i_disk_time[0] = timespec64_to_timespec(inode->i_atime);
- F2FS_I(inode)->i_disk_time[1] = timespec64_to_timespec(inode->i_ctime);
- F2FS_I(inode)->i_disk_time[2] = timespec64_to_timespec(inode->i_mtime);
+ F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
+ F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
+ F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
f2fs_put_page(node_page, 1);
@@ -330,10 +433,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
ret = do_read_inode(inode);
if (ret)
goto bad_inode;
- if (!sanity_check_inode(inode)) {
- ret = -EINVAL;
- goto bad_inode;
- }
make_now:
if (ino == F2FS_NODE_INO(sbi)) {
inode->i_mapping->a_ops = &f2fs_node_aops;
@@ -470,10 +569,14 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
if (inode->i_nlink == 0)
clear_inline_node(node_page);
- F2FS_I(inode)->i_disk_time[0] = timespec64_to_timespec(inode->i_atime);
- F2FS_I(inode)->i_disk_time[1] = timespec64_to_timespec(inode->i_ctime);
- F2FS_I(inode)->i_disk_time[2] = timespec64_to_timespec(inode->i_mtime);
+ F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
+ F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
+ F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
+#endif
}
void f2fs_update_inode_page(struct inode *inode)
@@ -558,12 +661,11 @@ retry:
if (F2FS_HAS_BLOCKS(inode))
err = f2fs_truncate(inode);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
f2fs_show_injection_info(FAULT_EVICT_INODE);
err = -EIO;
}
-#endif
+
if (!err) {
f2fs_lock_op(sbi);
err = f2fs_remove_inode_page(inode);
@@ -626,6 +728,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct node_info ni;
+ int err;
/*
* clear nlink of inode in order to release resource of inode
@@ -648,10 +751,16 @@ void f2fs_handle_failed_inode(struct inode *inode)
* so we can prevent losing this orphan when encountering checkpoint
* and a subsequent sudden power-off.
*/
- f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "May lose orphan inode, run fsck to fix.");
+ goto out;
+ }
if (ni.blk_addr != NULL_ADDR) {
- int err = f2fs_acquire_orphan_inode(sbi);
+ err = f2fs_acquire_orphan_inode(sbi);
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
@@ -664,6 +773,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
set_inode_flag(inode, FI_FREE_NID);
}
+out:
f2fs_unlock_op(sbi);
/* iput will drop the inode object */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 231b7f3ea7d3..1f67e389169f 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -51,7 +51,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
inode->i_ino = ino;
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
- F2FS_I(inode)->i_crtime = timespec64_to_timespec(inode->i_mtime);
+ F2FS_I(inode)->i_crtime = inode->i_mtime;
inode->i_generation = sbi->s_next_generation++;
if (S_ISDIR(inode->i_mode))
@@ -246,7 +246,7 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
return -EINVAL;
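+	/*
+	 * copy exactly strlen(name) bytes into the fixed-width slot;
+	 * no NUL terminator is stored, matching what strncpy did here
+	 */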
if (hot) {
- strncpy(extlist[count], name, strlen(name));
+ memcpy(extlist[count], name, strlen(name));
sbi->raw_super->hot_ext_count = hot_count + 1;
} else {
char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
@@ -254,7 +254,7 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
memcpy(buf, &extlist[cold_count],
F2FS_EXTENSION_LEN * hot_count);
memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
- strncpy(extlist[cold_count], name, strlen(name));
+ memcpy(extlist[cold_count], name, strlen(name));
memcpy(&extlist[cold_count + 1], buf,
F2FS_EXTENSION_LEN * hot_count);
sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 10643b11bd59..dd2e45a661aa 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -28,6 +28,7 @@
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
+static struct kmem_cache *fsync_node_entry_slab;
/*
* Check whether the given nid is within node id range.
@@ -112,25 +113,22 @@ static void clear_node_page_dirty(struct page *page)
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
- pgoff_t index = current_nat_addr(sbi, nid);
- return f2fs_get_meta_page(sbi, index);
+ return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
struct page *src_page;
struct page *dst_page;
- pgoff_t src_off;
pgoff_t dst_off;
void *src_addr;
void *dst_addr;
struct f2fs_nm_info *nm_i = NM_I(sbi);
- src_off = current_nat_addr(sbi, nid);
- dst_off = next_nat_addr(sbi, src_off);
+ dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
/* get current nat block page with lock */
- src_page = f2fs_get_meta_page(sbi, src_off);
+ src_page = get_current_nat_page(sbi, nid);
dst_page = f2fs_grab_meta_page(sbi, dst_off);
f2fs_bug_on(sbi, PageDirty(src_page));
@@ -176,14 +174,30 @@ static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
if (raw_ne)
node_info_from_raw_nat(&ne->ni, raw_ne);
+
+ spin_lock(&nm_i->nat_list_lock);
list_add_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+
nm_i->nat_cnt++;
return ne;
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
- return radix_tree_lookup(&nm_i->nat_root, n);
+ struct nat_entry *ne;
+
+ ne = radix_tree_lookup(&nm_i->nat_root, n);
+
+ /* for a recently accessed nat entry, move it to the tail of the LRU list */
+ if (ne && !get_nat_flag(ne, IS_DIRTY)) {
+ spin_lock(&nm_i->nat_list_lock);
+ if (!list_empty(&ne->list))
+ list_move_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+ }
+
+ return ne;
}
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
@@ -194,7 +208,6 @@ static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
- list_del(&e->list);
radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
nm_i->nat_cnt--;
__free_nat_entry(e);
@@ -245,16 +258,21 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
nm_i->dirty_nat_cnt++;
set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
+ spin_lock(&nm_i->nat_list_lock);
if (new_ne)
list_del_init(&ne->list);
else
list_move_tail(&ne->list, &head->entry_list);
+ spin_unlock(&nm_i->nat_list_lock);
}
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
struct nat_entry_set *set, struct nat_entry *ne)
{
+ spin_lock(&nm_i->nat_list_lock);
list_move_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+
set_nat_flag(ne, IS_DIRTY, false);
set->entry_cnt--;
nm_i->dirty_nat_cnt--;
@@ -267,6 +285,72 @@ static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
start, nr);
}
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
+{
+ return NODE_MAPPING(sbi) == page->mapping &&
+ IS_DNODE(page) && is_cold_node(page);
+}
+
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+ spin_lock_init(&sbi->fsync_node_lock);
+ INIT_LIST_HEAD(&sbi->fsync_node_list);
+ sbi->fsync_seg_id = 0;
+ sbi->fsync_node_num = 0;
+}
+
+static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
+ struct page *page)
+{
+ struct fsync_node_entry *fn;
+ unsigned long flags;
+ unsigned int seq_id;
+
+ fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);
+
+ get_page(page);
+ fn->page = page;
+ INIT_LIST_HEAD(&fn->list);
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ list_add_tail(&fn->list, &sbi->fsync_node_list);
+ fn->seq_id = sbi->fsync_seg_id++;
+ seq_id = fn->seq_id;
+ sbi->fsync_node_num++;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+
+ return seq_id;
+}
+
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
+{
+ struct fsync_node_entry *fn;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ list_for_each_entry(fn, &sbi->fsync_node_list, list) {
+ if (fn->page == page) {
+ list_del(&fn->list);
+ sbi->fsync_node_num--;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ kmem_cache_free(fsync_node_entry_slab, fn);
+ put_page(page);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ f2fs_bug_on(sbi, 1);
+}
+
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ sbi->fsync_seg_id = 0;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+}
+
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -371,7 +455,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
new_blkaddr == NULL_ADDR);
f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
new_blkaddr == NEW_ADDR);
- f2fs_bug_on(sbi, is_valid_blkaddr(nat_get_blkaddr(e)) &&
+ f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
new_blkaddr == NEW_ADDR);
/* increment version no as node is removed */
@@ -382,7 +466,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
/* change address */
nat_set_blkaddr(e, new_blkaddr);
- if (!is_valid_blkaddr(new_blkaddr))
+ if (!is_valid_data_blkaddr(sbi, new_blkaddr))
set_nat_flag(e, IS_CHECKPOINTED, false);
__set_nat_cache_dirty(nm_i, e);
@@ -405,13 +489,25 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
if (!down_write_trylock(&nm_i->nat_tree_lock))
return 0;
- while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
+ spin_lock(&nm_i->nat_list_lock);
+ while (nr_shrink) {
struct nat_entry *ne;
+
+ if (list_empty(&nm_i->nat_entries))
+ break;
+
ne = list_first_entry(&nm_i->nat_entries,
struct nat_entry, list);
+ list_del(&ne->list);
+ spin_unlock(&nm_i->nat_list_lock);
+
__del_from_nat_cache(nm_i, ne);
nr_shrink--;
+
+ spin_lock(&nm_i->nat_list_lock);
}
+ spin_unlock(&nm_i->nat_list_lock);
+
up_write(&nm_i->nat_tree_lock);
return nr - nr_shrink;
}
@@ -419,7 +515,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
/*
* This function returns 0 on success, or a negative errno on failure
*/
-void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
struct node_info *ni)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -443,7 +539,7 @@ void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
up_read(&nm_i->nat_tree_lock);
- return;
+ return 0;
}
memset(&ne, 0, sizeof(struct f2fs_nat_entry));
@@ -466,6 +562,9 @@ void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
up_read(&nm_i->nat_tree_lock);
page = f2fs_get_meta_page(sbi, index);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
nat_blk = (struct f2fs_nat_block *)page_address(page);
ne = nat_blk->entries[nid - start_nid];
node_info_from_raw_nat(ni, &ne);
@@ -473,6 +572,7 @@ void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
cache:
/* cache nat entry */
cache_nat_entry(sbi, nid, &ne);
+ return 0;
}
/*
@@ -722,12 +822,15 @@ release_out:
return err;
}
-static void truncate_node(struct dnode_of_data *dn)
+static int truncate_node(struct dnode_of_data *dn)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info ni;
+ int err;
- f2fs_get_node_info(sbi, dn->nid, &ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &ni);
+ if (err)
+ return err;
/* Deallocate node address */
f2fs_invalidate_blocks(sbi, ni.blk_addr);
@@ -750,11 +853,14 @@ static void truncate_node(struct dnode_of_data *dn)
dn->node_page = NULL;
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
+
+ return 0;
}
static int truncate_dnode(struct dnode_of_data *dn)
{
struct page *page;
+ int err;
if (dn->nid == 0)
return 1;
@@ -770,7 +876,10 @@ static int truncate_dnode(struct dnode_of_data *dn)
dn->node_page = page;
dn->ofs_in_node = 0;
f2fs_truncate_data_blocks(dn);
- truncate_node(dn);
+ err = truncate_node(dn);
+ if (err)
+ return err;
+
return 1;
}
@@ -835,7 +944,9 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
if (!ofs) {
/* remove current indirect node */
dn->node_page = page;
- truncate_node(dn);
+ ret = truncate_node(dn);
+ if (ret)
+ goto out_err;
freed++;
} else {
f2fs_put_page(page, 1);
@@ -893,7 +1004,9 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
if (offset[idx + 1] == 0) {
dn->node_page = pages[idx];
dn->nid = nid[idx];
- truncate_node(dn);
+ err = truncate_node(dn);
+ if (err)
+ goto fail;
} else {
f2fs_put_page(pages[idx], 1);
}
@@ -1014,6 +1127,7 @@ int f2fs_truncate_xattr_node(struct inode *inode)
nid_t nid = F2FS_I(inode)->i_xattr_nid;
struct dnode_of_data dn;
struct page *npage;
+ int err;
if (!nid)
return 0;
@@ -1022,10 +1136,15 @@ int f2fs_truncate_xattr_node(struct inode *inode)
if (IS_ERR(npage))
return PTR_ERR(npage);
+ set_new_dnode(&dn, inode, NULL, npage, nid);
+ err = truncate_node(&dn);
+ if (err) {
+ f2fs_put_page(npage, 1);
+ return err;
+ }
+
f2fs_i_xnid_write(inode, 0);
- set_new_dnode(&dn, inode, NULL, npage, nid);
- truncate_node(&dn);
return 0;
}
@@ -1055,11 +1174,19 @@ int f2fs_remove_inode_page(struct inode *inode)
f2fs_truncate_data_blocks_range(&dn, 1);
/* 0 is possible, after f2fs_new_inode() has failed */
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+ f2fs_put_dnode(&dn);
+ return -EIO;
+ }
f2fs_bug_on(F2FS_I_SB(inode),
inode->i_blocks != 0 && inode->i_blocks != 8);
/* will put inode & node pages */
- truncate_node(&dn);
+ err = truncate_node(&dn);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
return 0;
}
@@ -1092,7 +1219,11 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
goto fail;
#ifdef CONFIG_F2FS_CHECK_FS
- f2fs_get_node_info(sbi, dn->nid, &new_ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
+ if (err) {
+ dec_valid_node_count(sbi, dn->inode, !ofs);
+ goto fail;
+ }
f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
new_ni.nid = dn->nid;
@@ -1140,13 +1271,21 @@ static int read_node_page(struct page *page, int op_flags)
.page = page,
.encrypted_page = NULL,
};
+ int err;
- if (PageUptodate(page))
+ if (PageUptodate(page)) {
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
+#endif
return LOCKED_PAGE;
+ }
- f2fs_get_node_info(sbi, page->index, &ni);
+ err = f2fs_get_node_info(sbi, page->index, &ni);
+ if (err)
+ return err;
- if (unlikely(ni.blk_addr == NULL_ADDR)) {
+ if (unlikely(ni.blk_addr == NULL_ADDR) ||
+ is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
ClearPageUptodate(page);
return -ENOENT;
}
@@ -1348,7 +1487,7 @@ continue_unlock:
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
struct writeback_control *wbc, bool do_balance,
- enum iostat_type io_type)
+ enum iostat_type io_type, unsigned int *seq_id)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
nid_t nid;
@@ -1365,6 +1504,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
.io_type = io_type,
.io_wbc = wbc,
};
+ unsigned int seq;
trace_f2fs_writepage(page, NODE);
@@ -1374,10 +1514,17 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
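+	/*
+	 * cold-flagged dnodes form the fsync chain on disk; leave them to
+	 * sync writeback so background flushes don't break the chain
+	 */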
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+ IS_DNODE(page) && is_cold_node(page))
+ goto redirty_out;
+
/* get old block addr of this node page */
nid = nid_of_node(page);
f2fs_bug_on(sbi, page->index != nid);
+ if (f2fs_get_node_info(sbi, nid, &ni))
+ goto redirty_out;
+
if (wbc->for_reclaim) {
if (!down_read_trylock(&sbi->node_write))
goto redirty_out;
@@ -1385,8 +1532,6 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
down_read(&sbi->node_write);
}
- f2fs_get_node_info(sbi, nid, &ni);
-
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
ClearPageUptodate(page);
@@ -1396,11 +1541,22 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
return 0;
}
+ if (__is_valid_data_blkaddr(ni.blk_addr) &&
+ !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
+ goto redirty_out;
+
if (atomic && !test_opt(sbi, NOBARRIER))
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
set_page_writeback(page);
ClearPageError(page);
+
+ if (f2fs_in_warm_node_list(sbi, page)) {
+ seq = f2fs_add_fsync_node_entry(sbi, page);
+ if (seq_id)
+ *seq_id = seq;
+ }
+
fio.old_blkaddr = ni.blk_addr;
f2fs_do_write_node_page(nid, &fio);
set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
@@ -1448,7 +1604,7 @@ void f2fs_move_node_page(struct page *node_page, int gc_type)
goto out_page;
if (__write_node_page(node_page, false, NULL,
- &wbc, false, FS_GC_NODE_IO))
+ &wbc, false, FS_GC_NODE_IO, NULL))
unlock_page(node_page);
goto release_page;
} else {
@@ -1465,11 +1621,13 @@ release_page:
static int f2fs_write_node_page(struct page *page,
struct writeback_control *wbc)
{
- return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
+ return __write_node_page(page, false, NULL, wbc, false,
+ FS_NODE_IO, NULL);
}
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
- struct writeback_control *wbc, bool atomic)
+ struct writeback_control *wbc, bool atomic,
+ unsigned int *seq_id)
{
pgoff_t index;
pgoff_t last_idx = ULONG_MAX;
@@ -1550,7 +1708,7 @@ continue_unlock:
ret = __write_node_page(page, atomic &&
page == last_page,
&submitted, wbc, true,
- FS_NODE_IO);
+ FS_NODE_IO, seq_id);
if (ret) {
unlock_page(page);
f2fs_put_page(last_page, 0);
@@ -1633,7 +1791,9 @@ next_step:
!is_cold_node(page)))
continue;
lock_node:
- if (!trylock_page(page))
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ lock_page(page);
+ else if (!trylock_page(page))
continue;
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
@@ -1665,7 +1825,7 @@ continue_unlock:
set_dentry_mark(page, 0);
ret = __write_node_page(page, false, &submitted,
- wbc, do_balance, io_type);
+ wbc, do_balance, io_type, NULL);
if (ret)
unlock_page(page);
else if (submitted)
@@ -1684,10 +1844,12 @@ continue_unlock:
}
if (step < 2) {
+ if (wbc->sync_mode == WB_SYNC_NONE && step == 1)
+ goto out;
step++;
goto next_step;
}
-
+out:
if (nwritten)
f2fs_submit_merged_write(sbi, NODE);
@@ -1696,35 +1858,46 @@ continue_unlock:
return ret;
}
-int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+ unsigned int seq_id)
{
- pgoff_t index = 0;
- struct pagevec pvec;
+ struct fsync_node_entry *fn;
+ struct page *page;
+ struct list_head *head = &sbi->fsync_node_list;
+ unsigned long flags;
+ unsigned int cur_seq_id = 0;
int ret2, ret = 0;
- int nr_pages;
- pagevec_init(&pvec);
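+	/*
+	 * walk the fsync node list in submission order, waiting on every
+	 * page whose seq_id does not exceed the caller's
+	 */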
+ while (seq_id && cur_seq_id < seq_id) {
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ break;
+ }
+ fn = list_first_entry(head, struct fsync_node_entry, list);
+ if (fn->seq_id > seq_id) {
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ break;
+ }
+ cur_seq_id = fn->seq_id;
+ page = fn->page;
+ get_page(page);
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
- while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_WRITEBACK))) {
- int i;
+ f2fs_wait_on_page_writeback(page, NODE, true);
+ if (TestClearPageError(page))
+ ret = -EIO;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
+ put_page(page);
- if (ino && ino_of_node(page) == ino) {
- f2fs_wait_on_page_writeback(page, NODE, true);
- if (TestClearPageError(page))
- ret = -EIO;
- }
- }
- pagevec_release(&pvec);
- cond_resched();
+ if (ret)
+ break;
}
ret2 = filemap_check_errors(NODE_MAPPING(sbi));
if (!ret)
ret = ret2;
+
return ret;
}
@@ -1774,6 +1947,10 @@ static int f2fs_set_node_page_dirty(struct page *page)
if (!PageUptodate(page))
SetPageUptodate(page);
+#ifdef CONFIG_F2FS_CHECK_FS
+ if (IS_INODE(page))
+ f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
@@ -1968,7 +2145,7 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
kmem_cache_free(free_nid_slab, i);
}
-static void scan_nat_page(struct f2fs_sb_info *sbi,
+static int scan_nat_page(struct f2fs_sb_info *sbi,
struct page *nat_page, nid_t start_nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1986,7 +2163,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
break;
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
- f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
+
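+		/*
+		 * a NAT entry must hold NULL_ADDR or a real block address;
+		 * NEW_ADDR here indicates an inconsistent (corrupted) NAT
+		 */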
+ if (blk_addr == NEW_ADDR)
+ return -EINVAL;
+
if (blk_addr == NULL_ADDR) {
add_free_nid(sbi, start_nid, true, true);
} else {
@@ -1995,6 +2175,8 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
spin_unlock(&NM_I(sbi)->nid_list_lock);
}
}
+
+ return 0;
}
static void scan_curseg_cache(struct f2fs_sb_info *sbi)
@@ -2050,11 +2232,11 @@ out:
up_read(&nm_i->nat_tree_lock);
}
-static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
bool sync, bool mount)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- int i = 0;
+ int i = 0, ret;
nid_t nid = nm_i->next_scan_nid;
if (unlikely(nid >= nm_i->max_nid))
@@ -2062,17 +2244,17 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
/* Enough entries */
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
- return;
+ return 0;
if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
- return;
+ return 0;
if (!mount) {
/* try to find free nids in free_nid_bitmap */
scan_free_nid_bits(sbi);
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
- return;
+ return 0;
}
/* readahead nat pages to be scanned */
@@ -2086,8 +2268,16 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
nm_i->nat_block_bitmap)) {
struct page *page = get_current_nat_page(sbi, nid);
- scan_nat_page(sbi, page, nid);
+ ret = scan_nat_page(sbi, page, nid);
f2fs_put_page(page, 1);
+
+ if (ret) {
+ up_read(&nm_i->nat_tree_lock);
+ f2fs_bug_on(sbi, !mount);
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "NAT is corrupt, run fsck to fix it");
+ return -EINVAL;
+ }
}
nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
@@ -2108,13 +2298,19 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
+
+ return 0;
}
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
+ int ret;
+
mutex_lock(&NM_I(sbi)->build_lock);
- __f2fs_build_free_nids(sbi, sync, mount);
+ ret = __f2fs_build_free_nids(sbi, sync, mount);
mutex_unlock(&NM_I(sbi)->build_lock);
+
+ return ret;
}
/*
@@ -2127,12 +2323,11 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL;
retry:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
f2fs_show_injection_info(FAULT_ALLOC_NID);
return false;
}
-#endif
+
spin_lock(&nm_i->nid_list_lock);
if (unlikely(nm_i->available_nids == 0)) {
@@ -2277,12 +2472,16 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
struct dnode_of_data dn;
struct node_info ni;
struct page *xpage;
+ int err;
if (!prev_xnid)
goto recover_xnid;
/* 1: invalidate the previous xattr nid */
- f2fs_get_node_info(sbi, prev_xnid, &ni);
+ err = f2fs_get_node_info(sbi, prev_xnid, &ni);
+ if (err)
+ return err;
+
f2fs_invalidate_blocks(sbi, ni.blk_addr);
dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -2317,8 +2516,11 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
nid_t ino = ino_of_node(page);
struct node_info old_ni, new_ni;
struct page *ipage;
+ int err;
- f2fs_get_node_info(sbi, ino, &old_ni);
+ err = f2fs_get_node_info(sbi, ino, &old_ni);
+ if (err)
+ return err;
if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL;
@@ -2372,7 +2574,7 @@ retry:
return 0;
}
-void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum)
{
struct f2fs_node *rn;
@@ -2394,6 +2596,9 @@ void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
for (idx = addr; idx < addr + nrpages; idx++) {
struct page *page = f2fs_get_tmp_page(sbi, idx);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
rn = F2FS_NODE(page);
sum_entry->nid = rn->footer.nid;
sum_entry->version = 0;
@@ -2405,6 +2610,7 @@ void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
invalidate_mapping_pages(META_MAPPING(sbi), addr,
addr + nrpages);
}
+ return 0;
}
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
@@ -2582,6 +2788,13 @@ void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
nid_t set_idx = 0;
LIST_HEAD(sets);
+ /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
+ if (enabled_nat_bits(sbi, cpc)) {
+ down_write(&nm_i->nat_tree_lock);
+ remove_nats_in_journal(sbi);
+ up_write(&nm_i->nat_tree_lock);
+ }
+
if (!nm_i->dirty_nat_cnt)
return;
@@ -2634,7 +2847,13 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++) {
- struct page *page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+ struct page *page;
+
+ page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+ if (IS_ERR(page)) {
+ disable_nat_bits(sbi, true);
+ return PTR_ERR(page);
+ }
memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
page_address(page), F2FS_BLKSIZE);
@@ -2718,6 +2937,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
INIT_LIST_HEAD(&nm_i->nat_entries);
+ spin_lock_init(&nm_i->nat_list_lock);
mutex_init(&nm_i->build_lock);
spin_lock_init(&nm_i->nid_list_lock);
@@ -2762,8 +2982,8 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
for (i = 0; i < nm_i->nat_blocks; i++) {
nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
- NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
- if (!nm_i->free_nid_bitmap)
+ f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
+ if (!nm_i->free_nid_bitmap[i])
return -ENOMEM;
}
@@ -2801,8 +3021,7 @@ int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
/* load free nid status from nat_bits table */
load_free_nid_bitmap(sbi);
- f2fs_build_free_nids(sbi, true, true);
- return 0;
+ return f2fs_build_free_nids(sbi, true, true);
}
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
@@ -2837,8 +3056,13 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
unsigned idx;
nid = nat_get_nid(natvec[found - 1]) + 1;
- for (idx = 0; idx < found; idx++)
+ for (idx = 0; idx < found; idx++) {
+ spin_lock(&nm_i->nat_list_lock);
+ list_del(&natvec[idx]->list);
+ spin_unlock(&nm_i->nat_list_lock);
+
__del_from_nat_cache(nm_i, natvec[idx]);
+ }
}
f2fs_bug_on(sbi, nm_i->nat_cnt);
@@ -2893,8 +3117,15 @@ int __init f2fs_create_node_manager_caches(void)
sizeof(struct nat_entry_set));
if (!nat_entry_set_slab)
goto destroy_free_nid;
+
+ fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
+ sizeof(struct fsync_node_entry));
+ if (!fsync_node_entry_slab)
+ goto destroy_nat_entry_set;
return 0;
+destroy_nat_entry_set:
+ kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
@@ -2905,6 +3136,7 @@ fail:
void f2fs_destroy_node_manager_caches(void)
{
+ kmem_cache_destroy(fsync_node_entry_slab);
kmem_cache_destroy(nat_entry_set_slab);
kmem_cache_destroy(free_nid_slab);
kmem_cache_destroy(nat_entry_slab);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index b95e49e4a928..0f4db7a61254 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -135,6 +135,11 @@ static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
}
+static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
+{
+ return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
+}
+
enum mem_type {
FREE_NIDS, /* indicates the free nid list */
NAT_ENTRIES, /* indicates the cached nat entry */
@@ -444,6 +449,10 @@ static inline void set_mark(struct page *page, int mark, int type)
else
flag &= ~(0x1 << type);
rn->footer.flag = cpu_to_le32(flag);
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
}
#define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 38f25f0b193a..95511ed11a22 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -241,8 +241,8 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
struct page *page = NULL;
block_t blkaddr;
unsigned int loop_cnt = 0;
- unsigned int free_blocks = sbi->user_block_count -
- valid_user_blocks(sbi);
+ unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
+ valid_user_blocks(sbi);
int err = 0;
/* get node pages in the current segment */
@@ -252,10 +252,14 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
while (1) {
struct fsync_inode_entry *entry;
- if (!f2fs_is_valid_meta_blkaddr(sbi, blkaddr, META_POR))
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
return 0;
page = f2fs_get_tmp_page(sbi, blkaddr);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ break;
+ }
if (!is_recoverable_dnode(page))
break;
@@ -471,7 +475,10 @@ retry_dn:
f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
- f2fs_get_node_info(sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(sbi, dn.nid, &ni);
+ if (err)
+ goto err;
+
f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
@@ -507,14 +514,13 @@ retry_dn:
}
/* dest is valid block, try to recover from src to dest */
- if (f2fs_is_valid_meta_blkaddr(sbi, dest, META_POR)) {
+ if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
if (src == NULL_ADDR) {
err = f2fs_reserve_new_block(&dn);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- while (err)
+ while (err &&
+ IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
err = f2fs_reserve_new_block(&dn);
-#endif
/* We should not get -ENOSPC */
f2fs_bug_on(sbi, err);
if (err)
@@ -568,12 +574,16 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
while (1) {
struct fsync_inode_entry *entry;
- if (!f2fs_is_valid_meta_blkaddr(sbi, blkaddr, META_POR))
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
break;
f2fs_ra_meta_pages_cond(sbi, blkaddr);
page = f2fs_get_tmp_page(sbi, blkaddr);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ break;
+ }
if (!is_recoverable_dnode(page)) {
f2fs_put_page(page, 1);
@@ -628,7 +638,8 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
#endif
if (s_flags & SB_RDONLY) {
- f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "recover fsync data on readonly fs");
sbi->sb->s_flags &= ~SB_RDONLY;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 9efce174c51a..30779aaa9dba 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -250,7 +250,13 @@ retry:
err = -EAGAIN;
goto next;
}
- f2fs_get_node_info(sbi, dn.nid, &ni);
+
+ err = f2fs_get_node_info(sbi, dn.nid, &ni);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
+
if (cur->old_addr == NEW_ADDR) {
f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
@@ -439,8 +445,10 @@ int f2fs_commit_inmem_pages(struct inode *inode)
int err;
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
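+	/*
+	 * exclude GC from moving this inode's blocks while the atomic
+	 * commit is in flight; take it ahead of f2fs_lock_op
+	 */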
+ down_write(&fi->i_gc_rwsem[WRITE]);
+
+ f2fs_lock_op(sbi);
set_inode_flag(inode, FI_ATOMIC_COMMIT);
mutex_lock(&fi->inmem_lock);
@@ -455,6 +463,8 @@ int f2fs_commit_inmem_pages(struct inode *inode)
clear_inode_flag(inode, FI_ATOMIC_COMMIT);
f2fs_unlock_op(sbi);
+ up_write(&fi->i_gc_rwsem[WRITE]);
+
return err;
}
@@ -464,12 +474,10 @@ int f2fs_commit_inmem_pages(struct inode *inode)
*/
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
f2fs_show_injection_info(FAULT_CHECKPOINT);
f2fs_stop_checkpoint(sbi, false);
}
-#endif
/* balance_fs_bg is able to be pending */
if (need && excess_cached_nats(sbi))
@@ -503,7 +511,8 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
else
f2fs_build_free_nids(sbi, false, false);
- if (!is_idle(sbi) && !excess_dirty_nats(sbi))
+ if (!is_idle(sbi) &&
+ (!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
return;
/* checkpoint is the only way to shrink partial cached entries */
@@ -511,6 +520,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
!f2fs_available_free_memory(sbi, INO_ENTRIES) ||
excess_prefree_segs(sbi) ||
excess_dirty_nats(sbi) ||
+ excess_dirty_nodes(sbi) ||
f2fs_time_over(sbi, CP_TIME)) {
if (test_opt(sbi, DATA_FLUSH)) {
struct blk_plug plug;
@@ -831,9 +841,12 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
dc->len = len;
dc->ref = 0;
dc->state = D_PREP;
+ dc->issuing = 0;
dc->error = 0;
init_completion(&dc->wait);
list_add_tail(&dc->list, pend_list);
+ spin_lock_init(&dc->lock);
+ dc->bio_ref = 0;
atomic_inc(&dcc->discard_cmd_cnt);
dcc->undiscard_blks += len;
@@ -860,7 +873,7 @@ static void __detach_discard_cmd(struct discard_cmd_control *dcc,
struct discard_cmd *dc)
{
if (dc->state == D_DONE)
- atomic_dec(&dcc->issing_discard);
+ atomic_sub(dc->issuing, &dcc->issing_discard);
list_del(&dc->list);
rb_erase(&dc->rb_node, &dcc->root);
@@ -875,9 +888,17 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_cmd *dc)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ unsigned long flags;
trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
+ spin_lock_irqsave(&dc->lock, flags);
+ if (dc->bio_ref) {
+ spin_unlock_irqrestore(&dc->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dc->lock, flags);
+
f2fs_bug_on(sbi, dc->ref);
if (dc->error == -EOPNOTSUPP)
@@ -893,10 +914,17 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
static void f2fs_submit_discard_endio(struct bio *bio)
{
struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
+ unsigned long flags;
dc->error = blk_status_to_errno(bio->bi_status);
- dc->state = D_DONE;
- complete_all(&dc->wait);
+
+ spin_lock_irqsave(&dc->lock, flags);
+ dc->bio_ref--;
+ if (!dc->bio_ref && dc->state == D_SUBMIT) {
+ dc->state = D_DONE;
+ complete_all(&dc->wait);
+ }
+ spin_unlock_irqrestore(&dc->lock, flags);
bio_put(bio);
}
@@ -934,6 +962,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
/* common policy */
dpolicy->type = discard_type;
dpolicy->sync = true;
+ dpolicy->ordered = false;
dpolicy->granularity = granularity;
dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
@@ -945,6 +974,7 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
dpolicy->io_aware = true;
dpolicy->sync = false;
+ dpolicy->ordered = true;
if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
dpolicy->granularity = 1;
dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
@@ -962,48 +992,115 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
}
}
-
+static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len);
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
-static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
+static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
- struct discard_cmd *dc)
+ struct discard_cmd *dc,
+ unsigned int *issued)
{
+ struct block_device *bdev = dc->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ unsigned int max_discard_blocks =
+ SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
&(dcc->fstrim_list) : &(dcc->wait_list);
- struct bio *bio = NULL;
int flag = dpolicy->sync ? REQ_SYNC : 0;
+ block_t lstart, start, len, total_len;
+ int err = 0;
if (dc->state != D_PREP)
- return;
+ return 0;
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
- return;
+ return 0;
- trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
+ trace_f2fs_issue_discard(bdev, dc->start, dc->len);
- dc->error = __blkdev_issue_discard(dc->bdev,
- SECTOR_FROM_BLOCK(dc->start),
- SECTOR_FROM_BLOCK(dc->len),
- GFP_NOFS, 0, &bio);
- if (!dc->error) {
- /* should keep before submission to avoid D_DONE right away */
- dc->state = D_SUBMIT;
- atomic_inc(&dcc->issued_discard);
- atomic_inc(&dcc->issing_discard);
- if (bio) {
- bio->bi_private = dc;
- bio->bi_end_io = f2fs_submit_discard_endio;
- bio->bi_opf |= flag;
- submit_bio(bio);
- list_move_tail(&dc->list, wait_list);
- __check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
-
- f2fs_update_iostat(sbi, FS_DISCARD, 1);
+ lstart = dc->lstart;
+ start = dc->start;
+ len = dc->len;
+ total_len = len;
+
+ dc->len = 0;
+
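+	/*
+	 * split the range into max_discard_blocks-sized bios; each bio
+	 * bumps bio_ref, and only the last chunk moves the command to
+	 * D_SUBMIT, so the endio completes it exactly once
+	 */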
+ while (total_len && *issued < dpolicy->max_requests && !err) {
+ struct bio *bio = NULL;
+ unsigned long flags;
+ bool last = true;
+
+ if (len > max_discard_blocks) {
+ len = max_discard_blocks;
+ last = false;
}
- } else {
- __remove_discard_cmd(sbi, dc);
+
+ (*issued)++;
+ if (*issued == dpolicy->max_requests)
+ last = true;
+
+ dc->len += len;
+
+ if (time_to_inject(sbi, FAULT_DISCARD)) {
+ f2fs_show_injection_info(FAULT_DISCARD);
+ err = -EIO;
+ goto submit;
+ }
+ err = __blkdev_issue_discard(bdev,
+ SECTOR_FROM_BLOCK(start),
+ SECTOR_FROM_BLOCK(len),
+ GFP_NOFS, 0, &bio);
+submit:
+ if (err) {
+ spin_lock_irqsave(&dc->lock, flags);
+ if (dc->state == D_PARTIAL)
+ dc->state = D_SUBMIT;
+ spin_unlock_irqrestore(&dc->lock, flags);
+
+ break;
+ }
+
+ f2fs_bug_on(sbi, !bio);
+
+ /*
+ * state and bio_ref must be updated before submission, so that
+ * a fast endio cannot complete the command prematurely
+ */
+ spin_lock_irqsave(&dc->lock, flags);
+ if (last)
+ dc->state = D_SUBMIT;
+ else
+ dc->state = D_PARTIAL;
+ dc->bio_ref++;
+ spin_unlock_irqrestore(&dc->lock, flags);
+
+ atomic_inc(&dcc->issing_discard);
+ dc->issuing++;
+ list_move_tail(&dc->list, wait_list);
+
+ /* sanity check on discard range */
+ __check_sit_bitmap(sbi, start, start + len);
+
+ bio->bi_private = dc;
+ bio->bi_end_io = f2fs_submit_discard_endio;
+ bio->bi_opf |= flag;
+ submit_bio(bio);
+
+ atomic_inc(&dcc->issued_discard);
+
+ f2fs_update_iostat(sbi, FS_DISCARD, 1);
+
+ lstart += len;
+ start += len;
+ total_len -= len;
+ len = total_len;
}
+
+ if (!err && len)
+ __update_discard_tree_range(sbi, bdev, lstart, start, len);
+ return err;
}
static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
@@ -1084,10 +1181,11 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
struct discard_cmd *dc;
struct discard_info di = {0};
struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ struct request_queue *q = bdev_get_queue(bdev);
+ unsigned int max_discard_blocks =
+ SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
block_t end = lstart + len;
- mutex_lock(&dcc->cmd_lock);
-
dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
NULL, lstart,
(struct rb_entry **)&prev_dc,
@@ -1127,7 +1225,8 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
if (prev_dc && prev_dc->state == D_PREP &&
prev_dc->bdev == bdev &&
- __is_discard_back_mergeable(&di, &prev_dc->di)) {
+ __is_discard_back_mergeable(&di, &prev_dc->di,
+ max_discard_blocks)) {
prev_dc->di.len += di.len;
dcc->undiscard_blks += di.len;
__relocate_discard_cmd(dcc, prev_dc);
@@ -1138,7 +1237,8 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
if (next_dc && next_dc->state == D_PREP &&
next_dc->bdev == bdev &&
- __is_discard_front_mergeable(&di, &next_dc->di)) {
+ __is_discard_front_mergeable(&di, &next_dc->di,
+ max_discard_blocks)) {
next_dc->di.lstart = di.lstart;
next_dc->di.len += di.len;
next_dc->di.start = di.start;
@@ -1161,8 +1261,6 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
node = rb_next(&prev_dc->rb_node);
next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
}
-
- mutex_unlock(&dcc->cmd_lock);
}
static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
@@ -1177,10 +1275,72 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
blkstart -= FDEV(devi).start_blk;
}
+ mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
+ mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
return 0;
}
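+
+/*
+ * issue small pending discards in LBA order starting from dcc->next_pos,
+ * so that background discard sweeps the device sequentially instead of
+ * hopping across the size-ordered pend_list
+ */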
+static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
+ struct discard_policy *dpolicy)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+ struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ struct discard_cmd *dc;
+ struct blk_plug plug;
+ unsigned int pos = dcc->next_pos;
+ unsigned int issued = 0;
+ bool io_interrupted = false;
+
+ mutex_lock(&dcc->cmd_lock);
+ dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
+ NULL, pos,
+ (struct rb_entry **)&prev_dc,
+ (struct rb_entry **)&next_dc,
+ &insert_p, &insert_parent, true);
+ if (!dc)
+ dc = next_dc;
+
+ blk_start_plug(&plug);
+
+ while (dc) {
+ struct rb_node *node;
+ int err = 0;
+
+ if (dc->state != D_PREP)
+ goto next;
+
+ if (dpolicy->io_aware && !is_idle(sbi)) {
+ io_interrupted = true;
+ break;
+ }
+
+ dcc->next_pos = dc->lstart + dc->len;
+ err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+ if (issued >= dpolicy->max_requests)
+ break;
+next:
+ node = rb_next(&dc->rb_node);
+ if (err)
+ __remove_discard_cmd(sbi, dc);
+ dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+ }
+
+ blk_finish_plug(&plug);
+
+ if (!dc)
+ dcc->next_pos = 0;
+
+ mutex_unlock(&dcc->cmd_lock);
+
+ if (!issued && io_interrupted)
+ issued = -1;
+
+ return issued;
+}
+
static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy)
{
@@ -1188,19 +1348,24 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
struct list_head *pend_list;
struct discard_cmd *dc, *tmp;
struct blk_plug plug;
- int i, iter = 0, issued = 0;
+ int i, issued = 0;
bool io_interrupted = false;
for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
if (i + 1 < dpolicy->granularity)
break;
+
+ if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+ return __issue_discard_cmd_orderly(sbi, dpolicy);
+
pend_list = &dcc->pend_list[i];
mutex_lock(&dcc->cmd_lock);
if (list_empty(pend_list))
goto next;
- f2fs_bug_on(sbi,
- !f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+ if (unlikely(dcc->rbtree_check))
+ f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+ &dcc->root));
blk_start_plug(&plug);
list_for_each_entry_safe(dc, tmp, pend_list, list) {
f2fs_bug_on(sbi, dc->state != D_PREP);
@@ -1208,20 +1373,19 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
!is_idle(sbi)) {
io_interrupted = true;
- goto skip;
+ break;
}
- __submit_discard_cmd(sbi, dpolicy, dc);
- issued++;
-skip:
- if (++iter >= dpolicy->max_requests)
+ __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+ if (issued >= dpolicy->max_requests)
break;
}
blk_finish_plug(&plug);
next:
mutex_unlock(&dcc->cmd_lock);
- if (iter >= dpolicy->max_requests)
+ if (issued >= dpolicy->max_requests || io_interrupted)
break;
}
@@ -1319,21 +1483,22 @@ next:
return trimmed;
}
-static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
+static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy)
{
struct discard_policy dp;
+ unsigned int discard_blks;
- if (dpolicy) {
- __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
- return;
- }
+ if (dpolicy)
+ return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
/* wait all */
__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
- __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+ discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
- __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+ discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+
+ return discard_blks;
}
/* This should be covered by global mutex, &sit_i->sentry_lock */
@@ -1386,6 +1551,8 @@ bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
/* just to make sure there are no pending discard commands */
__wait_all_discard_cmd(sbi, NULL);
+
+ f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
return dropped;
}
@@ -1643,21 +1810,30 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
unsigned int start = 0, end = -1;
unsigned int secno, start_segno;
bool force = (cpc->reason & CP_DISCARD);
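+	/*
+	 * in LFS mode with multi-segment sections, prefree ranges are
+	 * discarded per section; round start/end to section boundaries
+	 */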
+ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
mutex_lock(&dirty_i->seglist_lock);
while (1) {
int i;
+
+ if (need_align && end != -1)
+ end--;
start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
if (start >= MAIN_SEGS(sbi))
break;
end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
start + 1);
- for (i = start; i < end; i++)
- clear_bit(i, prefree_map);
+ if (need_align) {
+ start = rounddown(start, sbi->segs_per_sec);
+ end = roundup(end, sbi->segs_per_sec);
+ }
- dirty_i->nr_dirty[PRE] -= end - start;
+ for (i = start; i < end; i++) {
+ if (test_and_clear_bit(i, prefree_map))
+ dirty_i->nr_dirty[PRE]--;
+ }
if (!test_opt(sbi, DISCARD))
continue;
@@ -1751,7 +1927,9 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
dcc->nr_discards = 0;
dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
dcc->undiscard_blks = 0;
+ dcc->next_pos = 0;
dcc->root = RB_ROOT;
+ dcc->rbtree_check = false;
init_waitqueue_head(&dcc->discard_wait_queue);
SM_I(sbi)->dcc_info = dcc;
@@ -1901,6 +2079,8 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
if (addr == NEW_ADDR)
return;
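+	/*
+	 * a stale encrypted page for this address may linger in the meta
+	 * mapping after a GC move; drop it before the block is reused
+	 */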
+ invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
+
/* add it into sit main buffer */
down_write(&sit_i->sentry_lock);
@@ -1919,7 +2099,7 @@ bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
struct seg_entry *se;
bool is_cp = false;
- if (!is_valid_blkaddr(blkaddr))
+ if (!is_valid_data_blkaddr(sbi, blkaddr))
return true;
down_read(&sit_i->sentry_lock);
@@ -1983,7 +2163,7 @@ int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
*/
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
- return f2fs_get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
+ return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno));
}
void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
@@ -2366,7 +2546,7 @@ bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
return has_candidate;
}
-static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
unsigned int start, unsigned int end)
{
@@ -2376,12 +2556,15 @@ static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
struct discard_cmd *dc;
struct blk_plug plug;
int issued;
+ unsigned int trimmed = 0;
next:
issued = 0;
mutex_lock(&dcc->cmd_lock);
- f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+ if (unlikely(dcc->rbtree_check))
+ f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+ &dcc->root));
dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
NULL, start,
@@ -2395,6 +2578,7 @@ next:
while (dc && dc->lstart <= end) {
struct rb_node *node;
+ int err = 0;
if (dc->len < dpolicy->granularity)
goto skip;
@@ -2404,19 +2588,24 @@ next:
goto skip;
}
- __submit_discard_cmd(sbi, dpolicy, dc);
+ err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
- if (++issued >= dpolicy->max_requests) {
+ if (issued >= dpolicy->max_requests) {
start = dc->lstart + dc->len;
+ if (err)
+ __remove_discard_cmd(sbi, dc);
+
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
- __wait_all_discard_cmd(sbi, NULL);
+ trimmed += __wait_all_discard_cmd(sbi, NULL);
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto next;
}
skip:
node = rb_next(&dc->rb_node);
+ if (err)
+ __remove_discard_cmd(sbi, dc);
dc = rb_entry_safe(node, struct discard_cmd, rb_node);
if (fatal_signal_pending(current))
@@ -2425,6 +2614,8 @@ skip:
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
+
+ return trimmed;
}
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
@@ -2437,12 +2628,13 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
struct discard_policy dpolicy;
unsigned long long trimmed = 0;
int err = 0;
+ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
- if (end <= MAIN_BLKADDR(sbi))
- return -EINVAL;
+ if (end < MAIN_BLKADDR(sbi))
+ goto out;
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
f2fs_msg(sbi->sb, KERN_WARNING,
@@ -2454,6 +2646,10 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
GET_SEGNO(sbi, end);
+ if (need_align) {
+ start_segno = rounddown(start_segno, sbi->segs_per_sec);
+ end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
+ }
cpc.reason = CP_DISCARD;
cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
@@ -2469,24 +2665,27 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
if (err)
goto out;
- start_block = START_BLOCK(sbi, start_segno);
- end_block = START_BLOCK(sbi, end_segno + 1);
-
- __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
- __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
-
/*
 * We filed discard candidates, but we don't actually need to wait for
 * all of them, since they'll be issued in idle time along with the runtime
 * discard option. Users are expected to rely on either runtime discard
 * or periodic fstrim, not on both at once.
*/
- if (!test_opt(sbi, DISCARD)) {
- trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
+ if (test_opt(sbi, DISCARD))
+ goto out;
+
+ start_block = START_BLOCK(sbi, start_segno);
+ end_block = START_BLOCK(sbi, end_segno + 1);
+
+ __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
+ trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
+ start_block, end_block);
+
+ trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
start_block, end_block);
- range->len = F2FS_BLK_TO_BYTES(trimmed);
- }
out:
+ if (!err)
+ range->len = F2FS_BLK_TO_BYTES(trimmed);
return err;
}
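With LFS mode and multi-segment sections, f2fs_trim_fs() above widens the requested range to whole sections before issuing discards. A minimal standalone sketch of that arithmetic, with invented geometry (segs_per_sec = 4 and a request covering segments 6..9 are made-up values):

#include <stdio.h>

#define rounddown(x, y) (((x) / (y)) * (y))
#define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int segs_per_sec = 4;			/* hypothetical section size */
	unsigned int start_segno = 6, end_segno = 9;	/* hypothetical request */

	/* the same adjustment as the need_align branch above */
	start_segno = rounddown(start_segno, segs_per_sec);	/* 6 -> 4  */
	end_segno = roundup(end_segno + 1, segs_per_sec) - 1;	/* 9 -> 11 */

	printf("aligned segment range: [%u, %u]\n", start_segno, end_segno);
	return 0;
}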
@@ -2639,8 +2838,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
return CURSEG_COLD_DATA;
if (file_is_hot(inode) ||
is_inode_flag_set(inode, FI_HOT_DATA) ||
- is_inode_flag_set(inode, FI_ATOMIC_FILE) ||
- is_inode_flag_set(inode, FI_VOLATILE_FILE))
+ f2fs_is_atomic_file(inode) ||
+ f2fs_is_volatile_file(inode))
return CURSEG_HOT_DATA;
return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
} else {
@@ -2781,6 +2980,9 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
reallocate:
f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
&fio->new_blkaddr, sum, type, fio, true);
+ if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
+ invalidate_mapping_pages(META_MAPPING(fio->sbi),
+ fio->old_blkaddr, fio->old_blkaddr);
/* writeout dirty page into bdev */
f2fs_submit_page_write(fio);
@@ -2836,11 +3038,9 @@ void f2fs_outplace_write_data(struct dnode_of_data *dn,
{
struct f2fs_sb_info *sbi = fio->sbi;
struct f2fs_summary sum;
- struct node_info ni;
f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
- f2fs_get_node_info(sbi, dn->nid, &ni);
- set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
+ set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
do_write_page(&sum, fio);
f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
@@ -2937,8 +3137,11 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (!recover_curseg || recover_newaddr)
update_sit_entry(sbi, new_blkaddr, 1);
- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ old_blkaddr, old_blkaddr);
update_sit_entry(sbi, old_blkaddr, -1);
+ }
locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
@@ -2992,7 +3195,7 @@ void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
{
struct page *cpage;
- if (!is_valid_blkaddr(blkaddr))
+ if (!is_valid_data_blkaddr(sbi, blkaddr))
return;
cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
@@ -3002,7 +3205,7 @@ void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
}
}
-static void read_compacted_summaries(struct f2fs_sb_info *sbi)
+static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct curseg_info *seg_i;
@@ -3014,6 +3217,8 @@ static void read_compacted_summaries(struct f2fs_sb_info *sbi)
start = start_sum_block(sbi);
page = f2fs_get_meta_page(sbi, start++);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
kaddr = (unsigned char *)page_address(page);
/* Step 1: restore nat cache */
@@ -3054,11 +3259,14 @@ static void read_compacted_summaries(struct f2fs_sb_info *sbi)
page = NULL;
page = f2fs_get_meta_page(sbi, start++);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
kaddr = (unsigned char *)page_address(page);
offset = 0;
}
}
f2fs_put_page(page, 1);
+ return 0;
}
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
@@ -3070,6 +3278,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
unsigned short blk_off;
unsigned int segno = 0;
block_t blk_addr = 0;
+ int err = 0;
/* get segment number and block addr */
if (IS_DATASEG(type)) {
@@ -3093,6 +3302,8 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
}
new = f2fs_get_meta_page(sbi, blk_addr);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
sum = (struct f2fs_summary_block *)page_address(new);
if (IS_NODESEG(type)) {
@@ -3104,7 +3315,9 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
ns->ofs_in_node = 0;
}
} else {
- f2fs_restore_node_summary(sbi, segno, sum);
+ err = f2fs_restore_node_summary(sbi, segno, sum);
+ if (err)
+ goto out;
}
}
@@ -3124,8 +3337,9 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
curseg->alloc_type = ckpt->alloc_type[type];
curseg->next_blkoff = blk_off;
mutex_unlock(&curseg->curseg_mutex);
+out:
f2fs_put_page(new, 1);
- return 0;
+ return err;
}
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
@@ -3143,7 +3357,9 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
META_CP, true);
/* restore for compacted data summary */
- read_compacted_summaries(sbi);
+ err = read_compacted_summaries(sbi);
+ if (err)
+ return err;
type = CURSEG_HOT_NODE;
}
@@ -3274,7 +3490,7 @@ int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
unsigned int segno)
{
- return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
+ return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno));
}
static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
@@ -3923,6 +4139,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
+ sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec;
sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
sm_info->min_ssr_sections = reserved_sections(sbi);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index f18fc82fbe99..b3d9e317ff0c 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -85,7 +85,7 @@
(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
#define GET_SEGNO(sbi, blk_addr) \
- ((!is_valid_blkaddr(blk_addr)) ? \
+ ((!is_valid_data_blkaddr(sbi, blk_addr)) ? \
NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi) \
@@ -215,7 +215,7 @@ struct segment_allocation {
#define IS_DUMMY_WRITTEN_PAGE(page) \
(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
-#define MAX_SKIP_ATOMIC_COUNT 16
+#define MAX_SKIP_GC_COUNT 16
struct inmem_pages {
struct list_head list;
@@ -448,6 +448,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
if (test_and_clear_bit(segno, free_i->free_segmap)) {
free_i->free_segments++;
+ if (IS_CURSEC(sbi, secno))
+ goto skip_free;
next = find_next_bit(free_i->free_segmap,
start_segno + sbi->segs_per_sec, start_segno);
if (next >= start_segno + sbi->segs_per_sec) {
@@ -455,6 +457,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
free_i->free_sections++;
}
}
+skip_free:
spin_unlock(&free_i->segmap_lock);
}
@@ -645,13 +648,10 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
{
struct f2fs_sb_info *sbi = fio->sbi;
- if (PAGE_TYPE_OF_BIO(fio->type) == META &&
- (!is_read_io(fio->op) || fio->is_meta))
- BUG_ON(blk_addr < SEG0_BLKADDR(sbi) ||
- blk_addr >= MAIN_BLKADDR(sbi));
+ if (__is_meta_io(fio))
+ verify_blkaddr(sbi, blk_addr, META_GENERIC);
else
- BUG_ON(blk_addr < MAIN_BLKADDR(sbi) ||
- blk_addr >= MAX_BLKADDR(sbi));
+ verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
}
/*
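The new verify_blkaddr() call with META_GENERIC/DATA_GENERIC keeps the same bounds the deleted BUG_ON lines enforced. Restated schematically from those deleted checks (a sketch only, not the verify_blkaddr() implementation):

/* meta area:  SEG0_BLKADDR(sbi) <= blk_addr < MAIN_BLKADDR(sbi)
 * data area:  MAIN_BLKADDR(sbi) <= blk_addr < MAX_BLKADDR(sbi)
 */
static inline bool blkaddr_in_data_area(struct f2fs_sb_info *sbi,
					block_t blk_addr)
{
	return blk_addr >= MAIN_BLKADDR(sbi) && blk_addr < MAX_BLKADDR(sbi);
}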
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 3995e926ba3a..896b885f504e 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -41,7 +41,7 @@ static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION
-char *fault_name[FAULT_MAX] = {
+char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
[FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
@@ -55,20 +55,24 @@ char *fault_name[FAULT_MAX] = {
[FAULT_TRUNCATE] = "truncate fail",
[FAULT_IO] = "IO error",
[FAULT_CHECKPOINT] = "checkpoint error",
+ [FAULT_DISCARD] = "discard error",
};
-static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
- unsigned int rate)
+void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+ unsigned int type)
{
struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
if (rate) {
atomic_set(&ffi->inject_ops, 0);
ffi->inject_rate = rate;
- ffi->inject_type = (1 << FAULT_MAX) - 1;
- } else {
- memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
+
+ if (type)
+ ffi->inject_type = type;
+
+ if (!rate && !type)
+ memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif
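The rate/type split means fault_type= now takes a bitmask of fault classes instead of the old behavior of implicitly enabling every class ((1 << FAULT_MAX) - 1). A sketch of composing such a mask from indices in the table above (the actual numeric values are whatever f2fs.h assigns):

/* inject only kmalloc and discard failures */
unsigned int mask = (1u << FAULT_KMALLOC) | (1u << FAULT_DISCARD);
/* used as: mount -o fault_injection=<rate>,fault_type=<mask> */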
@@ -113,6 +117,7 @@ enum {
Opt_mode,
Opt_io_size_bits,
Opt_fault_injection,
+ Opt_fault_type,
Opt_lazytime,
Opt_nolazytime,
Opt_quota,
@@ -170,6 +175,7 @@ static match_table_t f2fs_tokens = {
{Opt_mode, "mode=%s"},
{Opt_io_size_bits, "io_bits=%u"},
{Opt_fault_injection, "fault_injection=%u"},
+ {Opt_fault_type, "fault_type=%u"},
{Opt_lazytime, "lazytime"},
{Opt_nolazytime, "nolazytime"},
{Opt_quota, "quota"},
@@ -347,12 +353,6 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
"QUOTA feature is enabled, so ignore jquota_fmt");
F2FS_OPTION(sbi).s_jquota_fmt = 0;
}
- if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
- f2fs_msg(sbi->sb, KERN_INFO,
- "Filesystem with quota feature cannot be mounted RDWR "
- "without CONFIG_QUOTA");
- return -1;
- }
return 0;
}
#endif
@@ -606,7 +606,18 @@ static int parse_options(struct super_block *sb, char *options)
if (args->from && match_int(args, &arg))
return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- f2fs_build_fault_attr(sbi, arg);
+ f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
+ set_opt(sbi, FAULT_INJECTION);
+#else
+ f2fs_msg(sb, KERN_INFO,
+ "FAULT_INJECTION was not selected");
+#endif
+ break;
+ case Opt_fault_type:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ f2fs_build_fault_attr(sbi, 0, arg);
set_opt(sbi, FAULT_INJECTION);
#else
f2fs_msg(sb, KERN_INFO,
@@ -775,6 +786,19 @@ static int parse_options(struct super_block *sb, char *options)
#ifdef CONFIG_QUOTA
if (f2fs_check_quota_options(sbi))
return -EINVAL;
+#else
+ if (f2fs_sb_has_quota_ino(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Filesystem with quota feature cannot be mounted RDWR "
+ "without CONFIG_QUOTA");
+ return -EINVAL;
+ }
+ if (f2fs_sb_has_project_quota(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Filesystem with project quota feature cannot be "
+ "mounted RDWR without CONFIG_QUOTA");
+ return -EINVAL;
+ }
#endif
if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
@@ -1030,6 +1054,10 @@ static void f2fs_put_super(struct super_block *sb)
/* our cp_error case, we can wait for any writeback page */
f2fs_flush_merged_writes(sbi);
+ f2fs_wait_on_all_pages_writeback(sbi);
+
+ f2fs_bug_on(sbi, sbi->fsync_node_num);
+
iput(sbi->node_inode);
iput(sbi->meta_inode);
@@ -1118,7 +1146,7 @@ static int f2fs_statfs_project(struct super_block *sb,
dquot = dqget(sb, qid);
if (IS_ERR(dquot))
return PTR_ERR(dquot);
- spin_lock(&dq_data_lock);
+ spin_lock(&dquot->dq_dqb_lock);
limit = (dquot->dq_dqb.dqb_bsoftlimit ?
dquot->dq_dqb.dqb_bsoftlimit :
@@ -1141,7 +1169,7 @@ static int f2fs_statfs_project(struct super_block *sb,
(buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
}
- spin_unlock(&dq_data_lock);
+ spin_unlock(&dquot->dq_dqb_lock);
dqput(dquot);
return 0;
}
@@ -1310,9 +1338,12 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
if (F2FS_IO_SIZE_BITS(sbi))
seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (test_opt(sbi, FAULT_INJECTION))
+ if (test_opt(sbi, FAULT_INJECTION)) {
seq_printf(seq, ",fault_injection=%u",
F2FS_OPTION(sbi).fault_info.inject_rate);
+ seq_printf(seq, ",fault_type=%u",
+ F2FS_OPTION(sbi).fault_info.inject_type);
+ }
#endif
#ifdef CONFIG_QUOTA
if (test_opt(sbi, QUOTA))
@@ -1343,6 +1374,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_printf(seq, ",fsync_mode=%s", "posix");
else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
seq_printf(seq, ",fsync_mode=%s", "strict");
+ else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
+ seq_printf(seq, ",fsync_mode=%s", "nobarrier");
return 0;
}
@@ -1355,7 +1388,8 @@ static void default_options(struct f2fs_sb_info *sbi)
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
F2FS_OPTION(sbi).test_dummy_encryption = false;
- sbi->readdir_ra = 1;
+ F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+ F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_XATTR);
@@ -1365,12 +1399,12 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, NOHEAP);
sbi->sb->s_flags |= SB_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
- if (f2fs_sb_has_blkzoned(sbi->sb)) {
- set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
set_opt(sbi, DISCARD);
- } else {
+ if (f2fs_sb_has_blkzoned(sbi->sb))
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ else
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
- }
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
@@ -1379,9 +1413,7 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, POSIX_ACL);
#endif
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- f2fs_build_fault_attr(sbi, 0);
-#endif
+ f2fs_build_fault_attr(sbi, 0, 0);
}
#ifdef CONFIG_QUOTA
@@ -2229,9 +2261,9 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return 1;
}
- if (secs_per_zone > total_sections) {
+ if (secs_per_zone > total_sections || !secs_per_zone) {
f2fs_msg(sb, KERN_INFO,
- "Wrong secs_per_zone (%u > %u)",
+ "Wrong secs_per_zone / total_sections (%u, %u)",
secs_per_zone, total_sections);
return 1;
}
@@ -2282,12 +2314,20 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned int ovp_segments, reserved_segments;
unsigned int main_segs, blocks_per_seg;
+ unsigned int sit_segs, nat_segs;
+ unsigned int sit_bitmap_size, nat_bitmap_size;
+ unsigned int log_blocks_per_seg;
+ unsigned int segment_count_main;
+ unsigned int cp_pack_start_sum, cp_payload;
+ block_t user_block_count;
int i;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
- fsmeta += le32_to_cpu(raw_super->segment_count_sit);
- fsmeta += le32_to_cpu(raw_super->segment_count_nat);
+ sit_segs = le32_to_cpu(raw_super->segment_count_sit);
+ fsmeta += sit_segs;
+ nat_segs = le32_to_cpu(raw_super->segment_count_nat);
+ fsmeta += nat_segs;
fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
@@ -2304,6 +2344,16 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
return 1;
}
+ user_block_count = le64_to_cpu(ckpt->user_block_count);
+ segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+ log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ if (!user_block_count || user_block_count >=
+ segment_count_main << log_blocks_per_seg) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong user_block_count: %u", user_block_count);
+ return 1;
+ }
+
main_segs = le32_to_cpu(raw_super->segment_count_main);
blocks_per_seg = sbi->blocks_per_seg;
@@ -2318,6 +2368,28 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
return 1;
}
+ sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
+ nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
+
+ if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
+ nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong bitmap size: sit: %u, nat:%u",
+ sit_bitmap_size, nat_bitmap_size);
+ return 1;
+ }
+
+ cp_pack_start_sum = __start_sum_addr(sbi);
+ cp_payload = __cp_payload(sbi);
+ if (cp_pack_start_sum < cp_payload + 1 ||
+ cp_pack_start_sum > blocks_per_seg - 1 -
+ NR_CURSEG_TYPE) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong cp_pack_start_sum: %u",
+ cp_pack_start_sum);
+ return 1;
+ }
+
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
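The sit/nat bitmap check above appears to encode: each table is stored in two copies, so half of its segments hold one copy; each segment holds 1 << log_blocks_per_seg blocks; one bit per block, divided by 8 for bytes. A worked instance with invented geometry:

#include <stdio.h>

int main(void)
{
	/* hypothetical: 2 SIT segments, 512 blocks per segment */
	unsigned int sit_segs = 2, log_blocks_per_seg = 9;
	unsigned int expected = ((sit_segs / 2) << log_blocks_per_seg) / 8;

	printf("expected sit_ver_bitmap_bytesize: %u\n", expected);	/* 64 */
	return 0;
}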
@@ -2651,6 +2723,8 @@ static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
sm_i->dcc_info->discard_granularity = 1;
sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
}
+
+ sbi->readdir_ra = 1;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -2700,9 +2774,6 @@ try_onemore:
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
- F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
- F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
-
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sb))
sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
@@ -2771,6 +2842,7 @@ try_onemore:
/* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block;
mutex_init(&sbi->gc_mutex);
+ mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
init_rwsem(&sbi->node_change);
@@ -2865,6 +2937,8 @@ try_onemore:
f2fs_init_ino_entry_info(sbi);
+ f2fs_init_fsync_node_info(sbi);
+
/* setup f2fs internal modules */
err = f2fs_build_segment_manager(sbi);
if (err) {
@@ -2882,7 +2956,8 @@ try_onemore:
/* For write statistics */
if (sb->s_bdev->bd_part)
sbi->sectors_written_start =
- (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+ (u64)part_stat_read(sb->s_bdev->bd_part,
+ sectors[STAT_WRITE]);
/* Read accumulated write IO statistics if exists */
seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
@@ -2911,10 +2986,11 @@ try_onemore:
err = PTR_ERR(root);
goto free_stats;
}
- if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+ if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
+ !root->i_size || !root->i_nlink) {
iput(root);
err = -EINVAL;
- goto free_node_inode;
+ goto free_stats;
}
sb->s_root = d_make_root(root); /* allocate root dentry */
@@ -2928,10 +3004,7 @@ try_onemore:
goto free_root_inode;
#ifdef CONFIG_QUOTA
- /*
- * Turn on quotas which were not enabled for read-only mounts if
- * filesystem has quota feature, so that they are updated correctly.
- */
+ /* Enable quota usage during mount */
if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
err = f2fs_enable_quotas(sb);
if (err) {
@@ -3089,9 +3162,19 @@ static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
static void kill_f2fs_super(struct super_block *sb)
{
if (sb->s_root) {
- set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
- f2fs_stop_gc_thread(F2FS_SB(sb));
- f2fs_stop_discard_thread(F2FS_SB(sb));
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ set_sbi_flag(sbi, SBI_IS_CLOSE);
+ f2fs_stop_gc_thread(sbi);
+ f2fs_stop_discard_thread(sbi);
+
+ if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+ struct cp_control cpc = {
+ .reason = CP_UMOUNT,
+ };
+ f2fs_write_checkpoint(sbi, &cpc);
+ }
}
kill_block_super(sb);
}
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 2e7e611deaef..81c0e5337443 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -9,6 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/compiler.h>
#include <linux/proc_fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>
@@ -252,6 +253,7 @@ out:
if (t >= 1) {
sbi->gc_mode = GC_URGENT;
if (sbi->gc_thread) {
+ sbi->gc_thread->gc_wake = 1;
wake_up_interruptible_all(
&sbi->gc_thread->gc_wait_queue_head);
wake_up_discard_thread(sbi, true);
@@ -286,8 +288,10 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
bool gc_entry = (!strcmp(a->attr.name, "gc_urgent") ||
a->struct_type == GC_THREAD);
- if (gc_entry)
- down_read(&sbi->sb->s_umount);
+ if (gc_entry) {
+ if (!down_read_trylock(&sbi->sb->s_umount))
+ return -EAGAIN;
+ }
ret = __sbi_store(a, sbi, buf, count);
if (gc_entry)
up_read(&sbi->sb->s_umount);
@@ -393,6 +397,7 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_seq_blocks, min_seq_blocks);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
@@ -445,6 +450,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(ipu_policy),
ATTR_LIST(min_ipu_util),
ATTR_LIST(min_fsync_blocks),
+ ATTR_LIST(min_seq_blocks),
ATTR_LIST(min_hot_blocks),
ATTR_LIST(min_ssr_sections),
ATTR_LIST(max_victim_search),
@@ -516,7 +522,8 @@ static struct kobject f2fs_feat = {
.kset = &f2fs_kset,
};
-static int segment_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -543,7 +550,8 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
return 0;
}
-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -567,7 +575,8 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
return 0;
}
-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -609,6 +618,28 @@ static int iostat_info_seq_show(struct seq_file *seq, void *offset)
return 0;
}
+static int __maybe_unused victim_bits_seq_show(struct seq_file *seq,
+ void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ int i;
+
+ seq_puts(seq, "format: victim_secmap bitmaps\n");
+
+ for (i = 0; i < MAIN_SECS(sbi); i++) {
+ if ((i % 10) == 0)
+ seq_printf(seq, "%-10d", i);
+ seq_printf(seq, "%d", test_bit(i, dirty_i->victim_secmap) ? 1 : 0);
+ if ((i % 10) == 9 || i == (MAIN_SECS(sbi) - 1))
+ seq_putc(seq, '\n');
+ else
+ seq_putc(seq, ' ');
+ }
+ return 0;
+}
+
int __init f2fs_init_sysfs(void)
{
int ret;
@@ -658,6 +689,8 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
segment_bits_seq_show, sb);
proc_create_single_data("iostat_info", S_IRUGO, sbi->s_proc,
iostat_info_seq_show, sb);
+ proc_create_single_data("victim_bits", S_IRUGO, sbi->s_proc,
+ victim_bits_seq_show, sb);
}
return 0;
}
@@ -668,6 +701,7 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
remove_proc_entry("iostat_info", sbi->s_proc);
remove_proc_entry("segment_info", sbi->s_proc);
remove_proc_entry("segment_bits", sbi->s_proc);
+ remove_proc_entry("victim_bits", sbi->s_proc);
remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
}
kobject_del(&sbi->s_kobj);
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 708271871f94..77a010e625f5 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -37,9 +37,6 @@ static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
return -EOPNOTSUPP;
break;
case F2FS_XATTR_INDEX_TRUSTED:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- break;
case F2FS_XATTR_INDEX_SECURITY:
break;
default:
@@ -62,9 +59,6 @@ static int f2fs_xattr_generic_set(const struct xattr_handler *handler,
return -EOPNOTSUPP;
break;
case F2FS_XATTR_INDEX_TRUSTED:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- break;
case F2FS_XATTR_INDEX_SECURITY:
break;
default:
@@ -100,12 +94,22 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
const char *name, const void *value,
size_t size, int flags)
{
+ unsigned char old_advise = F2FS_I(inode)->i_advise;
+ unsigned char new_advise;
+
if (!inode_owner_or_capable(inode))
return -EPERM;
if (value == NULL)
return -EINVAL;
- F2FS_I(inode)->i_advise |= *(char *)value;
+ new_advise = *(char *)value;
+ if (new_advise & ~FADVISE_MODIFIABLE_BITS)
+ return -EINVAL;
+
+ new_advise = new_advise & FADVISE_MODIFIABLE_BITS;
+ new_advise |= old_advise & ~FADVISE_MODIFIABLE_BITS;
+
+ F2FS_I(inode)->i_advise = new_advise;
f2fs_mark_inode_dirty_sync(inode, true);
return 0;
}
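The setter above now takes only the modifiable bits from userspace and carries the remaining bits over from the old value. The masking in isolation, with invented bit patterns (0x60 is a stand-in, not the real FADVISE_MODIFIABLE_BITS):

#include <stdio.h>

int main(void)
{
	const unsigned char modifiable = 0x60;	/* stand-in mask */
	unsigned char old_advise = 0x91;	/* bits outside the mask already set */
	unsigned char value = 0x20;		/* what userspace wrote */

	unsigned char new_advise = value & modifiable;
	new_advise |= old_advise & ~modifiable;

	printf("0x%02x -> 0x%02x\n", old_advise, new_advise);	/* 0x91 -> 0xb1 */
	return 0;
}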
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index e9bed49df6b7..78d501c1fb65 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -225,7 +225,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
struct super_block *sb = inode->i_sb;
- const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ const int limit = sb->s_maxbytes >> sbi->cluster_bits;
struct fat_entry fatent;
struct fat_cache_id cid;
int nr;
@@ -234,6 +235,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
*fclus = 0;
*dclus = MSDOS_I(inode)->i_start;
+ if (!fat_valid_entry(sbi, *dclus)) {
+ fat_fs_error_ratelimit(sb,
+ "%s: invalid start cluster (i_pos %lld, start %08x)",
+ __func__, MSDOS_I(inode)->i_pos, *dclus);
+ return -EIO;
+ }
if (cluster == 0)
return 0;
@@ -250,9 +257,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
/* prevent the infinite loop of cluster chain */
if (*fclus > limit) {
fat_fs_error_ratelimit(sb,
- "%s: detected the cluster chain loop"
- " (i_pos %lld)", __func__,
- MSDOS_I(inode)->i_pos);
+ "%s: detected the cluster chain loop (i_pos %lld)",
+ __func__, MSDOS_I(inode)->i_pos);
nr = -EIO;
goto out;
}
@@ -262,9 +268,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
goto out;
else if (nr == FAT_ENT_FREE) {
fat_fs_error_ratelimit(sb,
- "%s: invalid cluster chain (i_pos %lld)",
- __func__,
- MSDOS_I(inode)->i_pos);
+ "%s: invalid cluster chain (i_pos %lld)",
+ __func__, MSDOS_I(inode)->i_pos);
nr = -EIO;
goto out;
} else if (nr == FAT_ENT_EOF) {
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 8e100c3bf72c..7f5f3699fc6c 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -1130,7 +1130,7 @@ error:
return err;
}
-int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
+int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
{
struct super_block *sb = dir->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 8fc1093da47d..9d7d2d5da28b 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -304,7 +304,7 @@ extern int fat_scan_logstart(struct inode *dir, int i_logstart,
struct fat_slot_info *sinfo);
extern int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh,
struct msdos_dir_entry **de);
-extern int fat_alloc_new_dir(struct inode *dir, struct timespec *ts);
+extern int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts);
extern int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
struct fat_slot_info *sinfo);
extern int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo);
@@ -348,6 +348,11 @@ static inline void fatent_brelse(struct fat_entry *fatent)
fatent->fat_inode = NULL;
}
+static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry)
+{
+ return FAT_START_ENT <= entry && entry < sbi->max_cluster;
+}
+
extern void fat_ent_access_init(struct super_block *sb);
extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
int entry);
@@ -357,6 +362,7 @@ extern int fat_alloc_clusters(struct inode *inode, int *cluster,
int nr_cluster);
extern int fat_free_clusters(struct inode *inode, int cluster);
extern int fat_count_free_clusters(struct super_block *sb);
+extern int fat_trim_fs(struct inode *inode, struct fstrim_range *range);
/* fat/file.c */
extern long fat_generic_ioctl(struct file *filp, unsigned int cmd,
@@ -406,9 +412,9 @@ void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
} while (0)
extern int fat_clusters_flush(struct super_block *sb);
extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
-extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
+extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs);
-extern void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts,
+extern void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs);
extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index bac10de678cc..defc2168de91 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -4,6 +4,7 @@
*/
#include <linux/blkdev.h>
+#include <linux/sched/signal.h>
#include "fat.h"
struct fatent_operations {
@@ -23,7 +24,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry,
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int bytes = entry + (entry >> 1);
- WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
+ WARN_ON(!fat_valid_entry(sbi, entry));
*offset = bytes & (sb->s_blocksize - 1);
*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
@@ -33,7 +34,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry,
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int bytes = (entry << sbi->fatent_shift);
- WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
+ WARN_ON(!fat_valid_entry(sbi, entry));
*offset = bytes & (sb->s_blocksize - 1);
*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
@@ -353,7 +354,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
int err, offset;
sector_t blocknr;
- if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
+ if (!fat_valid_entry(sbi, entry)) {
fatent_brelse(fatent);
fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
return -EIO;
@@ -690,3 +691,104 @@ out:
unlock_fat(sbi);
return err;
}
+
+static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
+ nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
+}
+
+int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
+{
+ struct super_block *sb = inode->i_sb;
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ const struct fatent_operations *ops = sbi->fatent_ops;
+ struct fat_entry fatent;
+ u64 ent_start, ent_end, minlen, trimmed = 0;
+ u32 free = 0;
+ unsigned long reada_blocks, reada_mask, cur_block = 0;
+ int err = 0;
+
+ /*
+	 * FAT data is organized as clusters, trim at the granularity of a cluster.
+	 *
+	 * fstrim_range is in bytes, convert the values to cluster indexes.
+	 * Treat sectors before the data region as all used, and do not trim them.
+ */
+ ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
+ ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
+ minlen = range->minlen >> sbi->cluster_bits;
+
+ if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
+ return -EINVAL;
+ if (ent_end >= sbi->max_cluster)
+ ent_end = sbi->max_cluster - 1;
+
+ reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
+ reada_mask = reada_blocks - 1;
+
+ fatent_init(&fatent);
+ lock_fat(sbi);
+ fatent_set_entry(&fatent, ent_start);
+ while (fatent.entry <= ent_end) {
+ /* readahead of fat blocks */
+ if ((cur_block & reada_mask) == 0) {
+ unsigned long rest = sbi->fat_length - cur_block;
+ fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
+ }
+ cur_block++;
+
+ err = fat_ent_read_block(sb, &fatent);
+ if (err)
+ goto error;
+ do {
+ if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
+ free++;
+ } else if (free) {
+ if (free >= minlen) {
+ u32 clus = fatent.entry - free;
+
+ err = fat_trim_clusters(sb, clus, free);
+ if (err && err != -EOPNOTSUPP)
+ goto error;
+ if (!err)
+ trimmed += free;
+ err = 0;
+ }
+ free = 0;
+ }
+ } while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);
+
+ if (fatal_signal_pending(current)) {
+ err = -ERESTARTSYS;
+ goto error;
+ }
+
+ if (need_resched()) {
+ fatent_brelse(&fatent);
+ unlock_fat(sbi);
+ cond_resched();
+ lock_fat(sbi);
+ }
+ }
+	/* handle the case where the tail entries are all free */
+ if (free && free >= minlen) {
+ u32 clus = fatent.entry - free;
+
+ err = fat_trim_clusters(sb, clus, free);
+ if (err && err != -EOPNOTSUPP)
+ goto error;
+ if (!err)
+ trimmed += free;
+ err = 0;
+ }
+
+error:
+ fatent_brelse(&fatent);
+ unlock_fat(sbi);
+
+ range->len = trimmed << sbi->cluster_bits;
+
+ return err;
+}
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 4724cc9ad650..4f3d72fb1e60 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -121,6 +121,37 @@ static int fat_ioctl_get_volume_id(struct inode *inode, u32 __user *user_attr)
return put_user(sbi->vol_id, user_attr);
}
+static int fat_ioctl_fitrim(struct inode *inode, unsigned long arg)
+{
+ struct super_block *sb = inode->i_sb;
+ struct fstrim_range __user *user_range;
+ struct fstrim_range range;
+ struct request_queue *q = bdev_get_queue(sb->s_bdev);
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+
+ user_range = (struct fstrim_range __user *)arg;
+ if (copy_from_user(&range, user_range, sizeof(range)))
+ return -EFAULT;
+
+ range.minlen = max_t(unsigned int, range.minlen,
+ q->limits.discard_granularity);
+
+ err = fat_trim_fs(inode, &range);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user(user_range, &range, sizeof(range)))
+ return -EFAULT;
+
+ return 0;
+}
+
long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -133,6 +164,8 @@ long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return fat_ioctl_set_attributes(filp, user_attr);
case FAT_IOCTL_GET_VOLUME_ID:
return fat_ioctl_get_volume_id(inode, user_attr);
+ case FITRIM:
+ return fat_ioctl_fitrim(inode, arg);
default:
return -ENOTTY; /* Inappropriate ioctl for device */
}
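With the FITRIM case wired into fat_generic_ioctl(), a mounted FAT volume can be trimmed the same way ext4 or f2fs can. A minimal caller-side sketch (the mount point is a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(void)
{
	struct fstrim_range range;
	int fd = open("/mnt/fat", O_RDONLY);	/* placeholder mount point */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&range, 0, sizeof(range));
	range.len = ~0ULL;	/* trim the whole filesystem */
	range.minlen = 0;	/* raised to discard_granularity by the kernel */

	if (ioctl(fd, FITRIM, &range) < 0)
		perror("FITRIM");
	else
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}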
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index bfd589ea74c0..d6b81e31f9f5 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -508,7 +508,6 @@ static int fat_validate_dir(struct inode *dir)
/* doesn't deal with root inode */
int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
{
- struct timespec ts;
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
int error;
@@ -559,14 +558,11 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
- fat_time_fat2unix(sbi, &ts, de->time, de->date, 0);
- inode->i_mtime = timespec_to_timespec64(ts);
+ fat_time_fat2unix(sbi, &inode->i_mtime, de->time, de->date, 0);
if (sbi->options.isvfat) {
- fat_time_fat2unix(sbi, &ts, de->ctime,
+ fat_time_fat2unix(sbi, &inode->i_ctime, de->ctime,
de->cdate, de->ctime_cs);
- inode->i_ctime = timespec_to_timespec64(ts);
- fat_time_fat2unix(sbi, &ts, 0, de->adate, 0);
- inode->i_atime = timespec_to_timespec64(ts);
+ fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
} else
inode->i_ctime = inode->i_atime = inode->i_mtime;
@@ -843,7 +839,6 @@ static int fat_statfs(struct dentry *dentry, struct kstatfs *buf)
static int __fat_write_inode(struct inode *inode, int wait)
{
- struct timespec ts;
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh;
@@ -881,16 +876,13 @@ retry:
raw_entry->size = cpu_to_le32(inode->i_size);
raw_entry->attr = fat_make_attrs(inode);
fat_set_start(raw_entry, MSDOS_I(inode)->i_logstart);
- ts = timespec64_to_timespec(inode->i_mtime);
- fat_time_unix2fat(sbi, &ts, &raw_entry->time,
+ fat_time_unix2fat(sbi, &inode->i_mtime, &raw_entry->time,
&raw_entry->date, NULL);
if (sbi->options.isvfat) {
__le16 atime;
- ts = timespec64_to_timespec(inode->i_ctime);
- fat_time_unix2fat(sbi, &ts, &raw_entry->ctime,
+ fat_time_unix2fat(sbi, &inode->i_ctime, &raw_entry->ctime,
&raw_entry->cdate, &raw_entry->ctime_cs);
- ts = timespec64_to_timespec(inode->i_atime);
- fat_time_unix2fat(sbi, &ts, &atime,
+ fat_time_unix2fat(sbi, &inode->i_atime, &atime,
&raw_entry->adate, NULL);
}
spin_unlock(&sbi->inode_hash_lock);
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index f9bdc1e01c98..573836dcaefc 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -180,17 +180,18 @@ int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
#define IS_LEAP_YEAR(y) (!((y) & 3) && (y) != YEAR_2100)
/* Linear day numbers of the respective 1sts in non-leap years. */
-static time_t days_in_year[] = {
+static long days_in_year[] = {
/* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */
0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
};
/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
-void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
+void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs)
{
u16 time = le16_to_cpu(__time), date = le16_to_cpu(__date);
- time_t second, day, leap_day, month, year;
+ time64_t second;
+ long day, leap_day, month, year;
year = date >> 9;
month = max(1, (date >> 5) & 0xf);
@@ -205,7 +206,7 @@ void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
second = (time & 0x1f) << 1;
second += ((time >> 5) & 0x3f) * SECS_PER_MIN;
second += (time >> 11) * SECS_PER_HOUR;
- second += (year * 365 + leap_day
+ second += (time64_t)(year * 365 + leap_day
+ days_in_year[month] + day
+ DAYS_DELTA) * SECS_PER_DAY;
@@ -224,11 +225,11 @@ void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
}
/* Convert linear UNIX date to a FAT time/date pair. */
-void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts,
+void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs)
{
struct tm tm;
- time_to_tm(ts->tv_sec,
+ time64_to_tm(ts->tv_sec,
(sbi->options.tz_set ? sbi->options.time_offset :
-sys_tz.tz_minuteswest) * SECS_PER_MIN, &tm);
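The helpers now fill struct timespec64 directly, avoiding the 32-bit time_t round trip. For reference, the FAT field layout that fat_time_fat2unix() above decodes, as a standalone sketch (the sample on-disk words are invented):

#include <stdio.h>

int main(void)
{
	unsigned int date = 0x4c21, time = 0x7c20;	/* hypothetical values */

	unsigned int year  = (date >> 9) + 1980;	/* FAT counts years from 1980 */
	unsigned int month = (date >> 5) & 0xf;
	unsigned int day   = date & 0x1f;
	unsigned int hour  = time >> 11;
	unsigned int min   = (time >> 5) & 0x3f;
	unsigned int sec   = (time & 0x1f) << 1;	/* 2-second resolution */

	printf("%04u-%02u-%02u %02u:%02u:%02u\n",
	       year, month, day, hour, min, sec);	/* 2018-01-01 15:33:00 */
	return 0;
}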
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 16a832c37d66..efb8c40c9d27 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -225,7 +225,7 @@ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry,
/***** Creates a directory entry (name is already formatted). */
static int msdos_add_entry(struct inode *dir, const unsigned char *name,
int is_dir, int is_hid, int cluster,
- struct timespec *ts, struct fat_slot_info *sinfo)
+ struct timespec64 *ts, struct fat_slot_info *sinfo)
{
struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
struct msdos_dir_entry de;
@@ -250,7 +250,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
if (err)
return err;
- dir->i_ctime = dir->i_mtime = timespec_to_timespec64(*ts);
+ dir->i_ctime = dir->i_mtime = *ts;
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
@@ -267,7 +267,6 @@ static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct inode *inode = NULL;
struct fat_slot_info sinfo;
struct timespec64 ts;
- struct timespec t;
unsigned char msdos_name[MSDOS_NAME];
int err, is_hid;
@@ -286,8 +285,7 @@ static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
}
ts = current_time(dir);
- t = timespec64_to_timespec(ts);
- err = msdos_add_entry(dir, msdos_name, 0, is_hid, 0, &t, &sinfo);
+ err = msdos_add_entry(dir, msdos_name, 0, is_hid, 0, &ts, &sinfo);
if (err)
goto out;
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
@@ -347,7 +345,6 @@ static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct inode *inode;
unsigned char msdos_name[MSDOS_NAME];
struct timespec64 ts;
- struct timespec t;
int err, is_hid, cluster;
mutex_lock(&MSDOS_SB(sb)->s_lock);
@@ -365,13 +362,12 @@ static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
}
ts = current_time(dir);
- t = timespec64_to_timespec(ts);
- cluster = fat_alloc_new_dir(dir, &t);
+ cluster = fat_alloc_new_dir(dir, &ts);
if (cluster < 0) {
err = cluster;
goto out;
}
- err = msdos_add_entry(dir, msdos_name, 1, is_hid, cluster, &t, &sinfo);
+ err = msdos_add_entry(dir, msdos_name, 1, is_hid, cluster, &ts, &sinfo);
if (err)
goto out_free;
inc_nlink(dir);
@@ -503,9 +499,8 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
new_i_pos = MSDOS_I(new_inode)->i_pos;
fat_detach(new_inode);
} else {
- struct timespec t = timespec64_to_timespec(ts);
err = msdos_add_entry(new_dir, new_name, is_dir, is_hid, 0,
- &t, &sinfo);
+ &ts, &sinfo);
if (err)
goto out;
new_i_pos = sinfo.i_pos;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 9a5469120caa..82cd1e69cbdf 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -577,7 +577,7 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
static int vfat_build_slots(struct inode *dir, const unsigned char *name,
int len, int is_dir, int cluster,
- struct timespec *ts,
+ struct timespec64 *ts,
struct msdos_dir_slot *slots, int *nr_slots)
{
struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
@@ -653,7 +653,7 @@ out_free:
}
static int vfat_add_entry(struct inode *dir, const struct qstr *qname,
- int is_dir, int cluster, struct timespec *ts,
+ int is_dir, int cluster, struct timespec64 *ts,
struct fat_slot_info *sinfo)
{
struct msdos_dir_slot *slots;
@@ -678,7 +678,7 @@ static int vfat_add_entry(struct inode *dir, const struct qstr *qname,
goto cleanup;
/* update timestamp */
- dir->i_ctime = dir->i_mtime = dir->i_atime = timespec_to_timespec64(*ts);
+ dir->i_ctime = dir->i_mtime = dir->i_atime = *ts;
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
@@ -762,14 +762,12 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct inode *inode;
struct fat_slot_info sinfo;
struct timespec64 ts;
- struct timespec t;
int err;
mutex_lock(&MSDOS_SB(sb)->s_lock);
ts = current_time(dir);
- t = timespec64_to_timespec(ts);
- err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &t, &sinfo);
+ err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &ts, &sinfo);
if (err)
goto out;
inode_inc_iversion(dir);
@@ -853,19 +851,17 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct inode *inode;
struct fat_slot_info sinfo;
struct timespec64 ts;
- struct timespec t;
int err, cluster;
mutex_lock(&MSDOS_SB(sb)->s_lock);
ts = current_time(dir);
- t = timespec64_to_timespec(ts);
- cluster = fat_alloc_new_dir(dir, &t);
+ cluster = fat_alloc_new_dir(dir, &ts);
if (cluster < 0) {
err = cluster;
goto out;
}
- err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &t, &sinfo);
+ err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo);
if (err)
goto out_free;
inode_inc_iversion(dir);
@@ -904,7 +900,6 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *old_inode, *new_inode;
struct fat_slot_info old_sinfo, sinfo;
struct timespec64 ts;
- struct timespec t;
loff_t new_i_pos;
int err, is_dir, update_dotdot, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
@@ -939,9 +934,8 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
new_i_pos = MSDOS_I(new_inode)->i_pos;
fat_detach(new_inode);
} else {
- t = timespec64_to_timespec(ts);
err = vfat_add_entry(new_dir, &new_dentry->d_name, is_dir, 0,
- &t, &sinfo);
+ &ts, &sinfo);
if (err)
goto out;
new_i_pos = sinfo.i_pos;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 12273b6ea56d..4137d96534a6 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -116,7 +116,7 @@ int f_setown(struct file *filp, unsigned long arg, int force)
struct pid *pid = NULL;
int who = arg, ret = 0;
- type = PIDTYPE_PID;
+ type = PIDTYPE_TGID;
if (who < 0) {
/* avoid overflow below */
if (who == INT_MIN)
@@ -143,7 +143,7 @@ EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp)
{
- f_modown(filp, NULL, PIDTYPE_PID, 1);
+ f_modown(filp, NULL, PIDTYPE_TGID, 1);
}
pid_t f_getown(struct file *filp)
@@ -171,11 +171,11 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
switch (owner.type) {
case F_OWNER_TID:
- type = PIDTYPE_MAX;
+ type = PIDTYPE_PID;
break;
case F_OWNER_PID:
- type = PIDTYPE_PID;
+ type = PIDTYPE_TGID;
break;
case F_OWNER_PGRP:
@@ -206,11 +206,11 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
read_lock(&filp->f_owner.lock);
owner.pid = pid_vnr(filp->f_owner.pid);
switch (filp->f_owner.pid_type) {
- case PIDTYPE_MAX:
+ case PIDTYPE_PID:
owner.type = F_OWNER_TID;
break;
- case PIDTYPE_PID:
+ case PIDTYPE_TGID:
owner.type = F_OWNER_PID;
break;
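The F_OWNER_* to PIDTYPE_* remapping is internal; the fcntl() ABI is unchanged (F_OWNER_TID still targets a single thread, F_OWNER_PID a whole thread group). The userspace side, for reference (any descriptor that supports O_ASYNC will do):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct f_owner_ex owner = {
		.type = F_OWNER_PID,	/* now maps to PIDTYPE_TGID in-kernel */
		.pid  = getpid(),
	};
	int fd = STDIN_FILENO;	/* placeholder descriptor */

	if (fcntl(fd, F_SETOWN_EX, &owner) < 0)
		perror("F_SETOWN_EX");
	return 0;
}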
@@ -723,7 +723,7 @@ static inline int sigio_perm(struct task_struct *p,
static void send_sigio_to_task(struct task_struct *p,
struct fown_struct *fown,
- int fd, int reason, int group)
+ int fd, int reason, enum pid_type type)
{
/*
* F_SETSIG can change ->signum lockless in parallel, make
@@ -767,11 +767,11 @@ static void send_sigio_to_task(struct task_struct *p,
else
si.si_band = mangle_poll(band_table[reason - POLL_IN]);
si.si_fd = fd;
- if (!do_send_sig_info(signum, &si, p, group))
+ if (!do_send_sig_info(signum, &si, p, type))
break;
/* fall-through: fall back on the old plain SIGIO signal */
case 0:
- do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
+ do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
}
}
@@ -780,34 +780,36 @@ void send_sigio(struct fown_struct *fown, int fd, int band)
struct task_struct *p;
enum pid_type type;
struct pid *pid;
- int group = 1;
read_lock(&fown->lock);
type = fown->pid_type;
- if (type == PIDTYPE_MAX) {
- group = 0;
- type = PIDTYPE_PID;
- }
-
pid = fown->pid;
if (!pid)
goto out_unlock_fown;
-
- read_lock(&tasklist_lock);
- do_each_pid_task(pid, type, p) {
- send_sigio_to_task(p, fown, fd, band, group);
- } while_each_pid_task(pid, type, p);
- read_unlock(&tasklist_lock);
+
+ if (type <= PIDTYPE_TGID) {
+ rcu_read_lock();
+ p = pid_task(pid, PIDTYPE_PID);
+ if (p)
+ send_sigio_to_task(p, fown, fd, band, type);
+ rcu_read_unlock();
+ } else {
+ read_lock(&tasklist_lock);
+ do_each_pid_task(pid, type, p) {
+ send_sigio_to_task(p, fown, fd, band, type);
+ } while_each_pid_task(pid, type, p);
+ read_unlock(&tasklist_lock);
+ }
out_unlock_fown:
read_unlock(&fown->lock);
}
static void send_sigurg_to_task(struct task_struct *p,
- struct fown_struct *fown, int group)
+ struct fown_struct *fown, enum pid_type type)
{
if (sigio_perm(p, fown, SIGURG))
- do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
+ do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
}
int send_sigurg(struct fown_struct *fown)
@@ -815,28 +817,30 @@ int send_sigurg(struct fown_struct *fown)
struct task_struct *p;
enum pid_type type;
struct pid *pid;
- int group = 1;
int ret = 0;
read_lock(&fown->lock);
type = fown->pid_type;
- if (type == PIDTYPE_MAX) {
- group = 0;
- type = PIDTYPE_PID;
- }
-
pid = fown->pid;
if (!pid)
goto out_unlock_fown;
ret = 1;
-
- read_lock(&tasklist_lock);
- do_each_pid_task(pid, type, p) {
- send_sigurg_to_task(p, fown, group);
- } while_each_pid_task(pid, type, p);
- read_unlock(&tasklist_lock);
+
+ if (type <= PIDTYPE_TGID) {
+ rcu_read_lock();
+ p = pid_task(pid, PIDTYPE_PID);
+ if (p)
+ send_sigurg_to_task(p, fown, type);
+ rcu_read_unlock();
+ } else {
+ read_lock(&tasklist_lock);
+ do_each_pid_task(pid, type, p) {
+ send_sigurg_to_task(p, fown, type);
+ } while_each_pid_task(pid, type, p);
+ read_unlock(&tasklist_lock);
+ }
out_unlock_fown:
read_unlock(&fown->lock);
return ret;
diff --git a/fs/file_table.c b/fs/file_table.c
index 7ec0b3e5f05d..e49af4caf15d 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -51,7 +51,9 @@ static void file_free_rcu(struct rcu_head *head)
static inline void file_free(struct file *f)
{
- percpu_counter_dec(&nr_files);
+ security_file_free(f);
+ if (!(f->f_mode & FMODE_NOACCOUNT))
+ percpu_counter_dec(&nr_files);
call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
@@ -90,6 +92,34 @@ int proc_nr_files(struct ctl_table *table, int write,
}
#endif
+static struct file *__alloc_file(int flags, const struct cred *cred)
+{
+ struct file *f;
+ int error;
+
+ f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
+ if (unlikely(!f))
+ return ERR_PTR(-ENOMEM);
+
+ f->f_cred = get_cred(cred);
+ error = security_file_alloc(f);
+ if (unlikely(error)) {
+ file_free_rcu(&f->f_u.fu_rcuhead);
+ return ERR_PTR(error);
+ }
+
+ atomic_long_set(&f->f_count, 1);
+ rwlock_init(&f->f_owner.lock);
+ spin_lock_init(&f->f_lock);
+ mutex_init(&f->f_pos_lock);
+ eventpoll_init_file(f);
+ f->f_flags = flags;
+ f->f_mode = OPEN_FMODE(flags);
+ /* f->f_version: 0 */
+
+ return f;
+}
+
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, run out of memory or the operation is not permitted.
@@ -100,12 +130,10 @@ int proc_nr_files(struct ctl_table *table, int write,
 * done, you will imbalance in the mount's writer count
* and a warning at __fput() time.
*/
-struct file *get_empty_filp(void)
+struct file *alloc_empty_file(int flags, const struct cred *cred)
{
- const struct cred *cred = current_cred();
static long old_max;
struct file *f;
- int error;
/*
* Privileged users can go above max_files
@@ -119,24 +147,10 @@ struct file *get_empty_filp(void)
goto over;
}
- f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
- if (unlikely(!f))
- return ERR_PTR(-ENOMEM);
-
- percpu_counter_inc(&nr_files);
- f->f_cred = get_cred(cred);
- error = security_file_alloc(f);
- if (unlikely(error)) {
- file_free(f);
- return ERR_PTR(error);
- }
+ f = __alloc_file(flags, cred);
+ if (!IS_ERR(f))
+ percpu_counter_inc(&nr_files);
- atomic_long_set(&f->f_count, 1);
- rwlock_init(&f->f_owner.lock);
- spin_lock_init(&f->f_lock);
- mutex_init(&f->f_pos_lock);
- eventpoll_init_file(f);
- /* f->f_version: 0 */
return f;
over:
@@ -148,19 +162,34 @@ over:
return ERR_PTR(-ENFILE);
}
+/*
+ * Variant of alloc_empty_file() that doesn't check and modify nr_files.
+ *
+ * Should not be used unless there's a very good reason to do so.
+ */
+struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
+{
+ struct file *f = __alloc_file(flags, cred);
+
+ if (!IS_ERR(f))
+ f->f_mode |= FMODE_NOACCOUNT;
+
+ return f;
+}
+
/**
* alloc_file - allocate and initialize a 'struct file'
*
* @path: the (dentry, vfsmount) pair for the new file
- * @mode: the mode with which the new file will be opened
+ * @flags: O_... flags with which the new file will be opened
* @fop: the 'struct file_operations' for the new file
*/
-struct file *alloc_file(const struct path *path, fmode_t mode,
+static struct file *alloc_file(const struct path *path, int flags,
const struct file_operations *fop)
{
struct file *file;
- file = get_empty_filp();
+ file = alloc_empty_file(flags, current_cred());
if (IS_ERR(file))
return file;
@@ -168,19 +197,56 @@ struct file *alloc_file(const struct path *path, fmode_t mode,
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
- if ((mode & FMODE_READ) &&
+ if ((file->f_mode & FMODE_READ) &&
likely(fop->read || fop->read_iter))
- mode |= FMODE_CAN_READ;
- if ((mode & FMODE_WRITE) &&
+ file->f_mode |= FMODE_CAN_READ;
+ if ((file->f_mode & FMODE_WRITE) &&
likely(fop->write || fop->write_iter))
- mode |= FMODE_CAN_WRITE;
- file->f_mode = mode;
+ file->f_mode |= FMODE_CAN_WRITE;
+ file->f_mode |= FMODE_OPENED;
file->f_op = fop;
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(path->dentry->d_inode);
return file;
}
-EXPORT_SYMBOL(alloc_file);
+
+struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
+ const char *name, int flags,
+ const struct file_operations *fops)
+{
+ static const struct dentry_operations anon_ops = {
+ .d_dname = simple_dname
+ };
+ struct qstr this = QSTR_INIT(name, strlen(name));
+ struct path path;
+ struct file *file;
+
+ path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
+ if (!path.dentry)
+ return ERR_PTR(-ENOMEM);
+ if (!mnt->mnt_sb->s_d_op)
+ d_set_d_op(path.dentry, &anon_ops);
+ path.mnt = mntget(mnt);
+ d_instantiate(path.dentry, inode);
+ file = alloc_file(&path, flags, fops);
+ if (IS_ERR(file)) {
+ ihold(inode);
+ path_put(&path);
+ }
+ return file;
+}
+EXPORT_SYMBOL(alloc_file_pseudo);
+
+struct file *alloc_file_clone(struct file *base, int flags,
+ const struct file_operations *fops)
+{
+ struct file *f = alloc_file(&base->f_path, flags, fops);
+ if (!IS_ERR(f)) {
+ path_get(&f->f_path);
+ f->f_mapping = base->f_mapping;
+ }
+ return f;
+}
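A caller-side sketch of the new helper, assuming the caller already holds an inode reference and a vfsmount; myfs_open_internal, my_fops and the "[myfs]" name are invented for illustration:

/* sketch: wrap an internal, path-less inode in a struct file */
static struct file *myfs_open_internal(struct inode *inode,
				       struct vfsmount *mnt,
				       const struct file_operations *my_fops)
{
	struct file *file;

	file = alloc_file_pseudo(inode, mnt, "[myfs]", O_RDWR, my_fops);
	if (IS_ERR(file))
		return file;	/* the caller's inode reference survives the error path */

	file->private_data = NULL;	/* caller-specific state would go here */
	return file;
}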
/* the real guts of fput() - releasing the last reference to file
*/
@@ -190,6 +256,9 @@ static void __fput(struct file *file)
struct vfsmount *mnt = file->f_path.mnt;
struct inode *inode = file->f_inode;
+ if (unlikely(!(file->f_mode & FMODE_OPENED)))
+ goto out;
+
might_sleep();
fsnotify_close(file);
@@ -207,7 +276,6 @@ static void __fput(struct file *file)
}
if (file->f_op->release)
file->f_op->release(inode, file);
- security_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
!(file->f_mode & FMODE_PATH))) {
cdev_put(inode->i_cdev);
@@ -220,12 +288,10 @@ static void __fput(struct file *file)
put_write_access(inode);
__mnt_drop_write(mnt);
}
- file->f_path.dentry = NULL;
- file->f_path.mnt = NULL;
- file->f_inode = NULL;
- file_free(file);
dput(dentry);
mntput(mnt);
+out:
+ file_free(file);
}
static LLIST_HEAD(delayed_fput_list);
@@ -300,14 +366,6 @@ void __fput_sync(struct file *file)
EXPORT_SYMBOL(fput);
-void put_filp(struct file *file)
-{
- if (atomic_long_dec_and_test(&file->f_count)) {
- security_file_free(file);
- file_free(file);
- }
-}
-
void __init files_init(void)
{
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c6b88fa85e2e..11ea2c4a38ab 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -127,6 +127,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
return !fc->initialized || (for_background && fc->blocked);
}
+static void fuse_drop_waiting(struct fuse_conn *fc)
+{
+ if (fc->connected) {
+ atomic_dec(&fc->num_waiting);
+ } else if (atomic_dec_and_test(&fc->num_waiting)) {
+ /* wake up aborters */
+ wake_up_all(&fc->blocked_waitq);
+ }
+}
+
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
bool for_background)
{
@@ -175,7 +185,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
return req;
out:
- atomic_dec(&fc->num_waiting);
+ fuse_drop_waiting(fc);
return ERR_PTR(err);
}
@@ -285,7 +295,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
if (test_bit(FR_WAITING, &req->flags)) {
__clear_bit(FR_WAITING, &req->flags);
- atomic_dec(&fc->num_waiting);
+ fuse_drop_waiting(fc);
}
if (req->stolen_file)
@@ -371,7 +381,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_iqueue *fiq = &fc->iq;
if (test_and_set_bit(FR_FINISHED, &req->flags))
- return;
+ goto put_request;
spin_lock(&fiq->waitq.lock);
list_del_init(&req->intr_entry);
@@ -400,6 +410,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&req->waitq);
if (req->end)
req->end(fc, req);
+put_request:
fuse_put_request(fc, req);
}
@@ -1362,8 +1373,8 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
if (!fud)
return -EPERM;
- bufs = kmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
- GFP_KERNEL);
+ bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
+ GFP_KERNEL);
if (!bufs)
return -ENOMEM;
@@ -1396,7 +1407,7 @@ out:
for (; page_nr < cs.nr_segs; page_nr++)
put_page(bufs[page_nr].page);
- kfree(bufs);
+ kvfree(bufs);
return ret;
}
@@ -1944,12 +1955,15 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
if (!fud)
return -EPERM;
- bufs = kmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
- GFP_KERNEL);
- if (!bufs)
+ pipe_lock(pipe);
+
+ bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
+ GFP_KERNEL);
+ if (!bufs) {
+ pipe_unlock(pipe);
return -ENOMEM;
+ }
- pipe_lock(pipe);
nbuf = 0;
rem = 0;
for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
@@ -2003,7 +2017,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
pipe_buf_release(pipe, &bufs[idx]);
out:
- kfree(bufs);
+ kvfree(bufs);
return ret;
}
@@ -2087,8 +2101,7 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
if (fc->connected) {
struct fuse_dev *fud;
struct fuse_req *req, *next;
- LIST_HEAD(to_end1);
- LIST_HEAD(to_end2);
+ LIST_HEAD(to_end);
fc->connected = 0;
fc->blocked = 0;
@@ -2105,11 +2118,12 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
set_bit(FR_ABORTED, &req->flags);
if (!test_bit(FR_LOCKED, &req->flags)) {
set_bit(FR_PRIVATE, &req->flags);
- list_move(&req->list, &to_end1);
+ __fuse_get_request(req);
+ list_move(&req->list, &to_end);
}
spin_unlock(&req->waitq.lock);
}
- list_splice_init(&fpq->processing, &to_end2);
+ list_splice_tail_init(&fpq->processing, &to_end);
spin_unlock(&fpq->lock);
}
fc->max_background = UINT_MAX;
@@ -2117,9 +2131,9 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
spin_lock(&fiq->waitq.lock);
fiq->connected = 0;
- list_splice_init(&fiq->pending, &to_end2);
- list_for_each_entry(req, &to_end2, list)
+ list_for_each_entry(req, &fiq->pending, list)
clear_bit(FR_PENDING, &req->flags);
+ list_splice_tail_init(&fiq->pending, &to_end);
while (forget_pending(fiq))
kfree(dequeue_forget(fiq, 1, NULL));
wake_up_all_locked(&fiq->waitq);
@@ -2129,19 +2143,18 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
- while (!list_empty(&to_end1)) {
- req = list_first_entry(&to_end1, struct fuse_req, list);
- __fuse_get_request(req);
- list_del_init(&req->list);
- request_end(fc, req);
- }
- end_requests(fc, &to_end2);
+ end_requests(fc, &to_end);
} else {
spin_unlock(&fc->lock);
}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
+void fuse_wait_aborted(struct fuse_conn *fc)
+{
+ wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
+}
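fuse_wait_aborted() pairs with fuse_drop_waiting() above: once fc->connected is cleared, the last request to drop num_waiting wakes blocked_waitq. A condensed sketch of the ordering that fuse_sb_destroy() (in fs/fuse/inode.c below) relies on:

	/* Sketch of the unmount-side ordering; not a real kernel function. */
	static void demo_teardown(struct fuse_conn *fc)
	{
		fuse_abort_conn(fc, false);	/* clear fc->connected, end requests */
		fuse_wait_aborted(fc);		/* sleep until num_waiting hits 0 */
		/* only now is it safe to tear down what requests may reference */
	}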
+
int fuse_dev_release(struct inode *inode, struct file *file)
{
struct fuse_dev *fud = fuse_get_dev(file);
@@ -2149,9 +2162,15 @@ int fuse_dev_release(struct inode *inode, struct file *file)
if (fud) {
struct fuse_conn *fc = fud->fc;
struct fuse_pqueue *fpq = &fud->pq;
+ LIST_HEAD(to_end);
+ spin_lock(&fpq->lock);
WARN_ON(!list_empty(&fpq->io));
- end_requests(fc, &fpq->processing);
+ list_splice_init(&fpq->processing, &to_end);
+ spin_unlock(&fpq->lock);
+
+ end_requests(fc, &to_end);
+
/* Are we the last open device? */
if (atomic_dec_and_test(&fc->dev_count)) {
WARN_ON(fc->iq.fasync != NULL);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 56231b31f806..0979609d6eba 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -355,11 +355,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
struct inode *inode;
struct dentry *newent;
bool outarg_valid = true;
+ bool locked;
- fuse_lock_inode(dir);
+ locked = fuse_lock_inode(dir);
err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
&outarg, &inode);
- fuse_unlock_inode(dir);
+ fuse_unlock_inode(dir, locked);
if (err == -ENOENT) {
outarg_valid = false;
err = 0;
@@ -399,7 +400,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
*/
static int fuse_create_open(struct inode *dir, struct dentry *entry,
struct file *file, unsigned flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
int err;
struct inode *inode;
@@ -469,7 +470,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
d_instantiate(entry, inode);
fuse_change_entry_timeout(entry, &outentry);
fuse_invalidate_attr(dir);
- err = finish_open(file, entry, generic_file_open, opened);
+ err = finish_open(file, entry, generic_file_open);
if (err) {
fuse_sync_release(ff, flags);
} else {
@@ -489,7 +490,7 @@ out_err:
static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);
static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
struct file *file, unsigned flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
int err;
struct fuse_conn *fc = get_fuse_conn(dir);
@@ -508,12 +509,12 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
goto no_open;
/* Only creates */
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
if (fc->no_create)
goto mknod;
- err = fuse_create_open(dir, entry, file, flags, mode, opened);
+ err = fuse_create_open(dir, entry, file, flags, mode);
if (err == -ENOSYS) {
fc->no_create = 1;
goto mknod;
@@ -539,6 +540,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
{
struct fuse_entry_out outarg;
struct inode *inode;
+ struct dentry *d;
int err;
struct fuse_forget_link *forget;
@@ -570,11 +572,17 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
}
kfree(forget);
- err = d_instantiate_no_diralias(entry, inode);
- if (err)
- return err;
+ d_drop(entry);
+ d = d_splice_alias(inode, entry);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
- fuse_change_entry_timeout(entry, &outarg);
+ if (d) {
+ fuse_change_entry_timeout(d, &outarg);
+ dput(d);
+ } else {
+ fuse_change_entry_timeout(entry, &outarg);
+ }
fuse_invalidate_attr(dir);
return 0;
@@ -1340,6 +1348,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_req *req;
u64 attr_version = 0;
+ bool locked;
if (is_bad_inode(inode))
return -EIO;
@@ -1367,9 +1376,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
FUSE_READDIR);
}
- fuse_lock_inode(inode);
+ locked = fuse_lock_inode(inode);
fuse_request_send(fc, req);
- fuse_unlock_inode(inode);
+ fuse_unlock_inode(inode, locked);
nbytes = req->out.args[0].size;
err = req->out.h.error;
fuse_put_request(fc, req);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a201fb0ac64f..32d0b883e74f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
@@ -866,6 +867,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
}
if (WARN_ON(req->num_pages >= req->max_pages)) {
+ unlock_page(page);
fuse_put_request(fc, req);
return -EIO;
}
@@ -2048,7 +2050,7 @@ static void fuse_vma_close(struct vm_area_struct *vma)
* - sync(2)
* - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
*/
-static int fuse_page_mkwrite(struct vm_fault *vmf)
+static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5256ad333b05..f78e9614bb5f 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -862,6 +862,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
+void fuse_wait_aborted(struct fuse_conn *fc);
/**
* Invalidate inode attributes
@@ -974,8 +975,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
void fuse_set_initialized(struct fuse_conn *fc);
-void fuse_unlock_inode(struct inode *inode);
-void fuse_lock_inode(struct inode *inode);
+void fuse_unlock_inode(struct inode *inode, bool locked);
+bool fuse_lock_inode(struct inode *inode);
int fuse_setxattr(struct inode *inode, const char *name, const void *value,
size_t size, int flags);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index a24df8861b40..db9e60b7eb69 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -208,7 +208,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
struct fuse_inode *fi = get_fuse_inode(inode);
bool is_wb = fc->writeback_cache;
loff_t oldsize;
- struct timespec old_mtime;
+ struct timespec64 old_mtime;
spin_lock(&fc->lock);
if ((attr_version != 0 && fi->attr_version > attr_version) ||
@@ -217,7 +217,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
return;
}
- old_mtime = timespec64_to_timespec(inode->i_mtime);
+ old_mtime = inode->i_mtime;
fuse_change_attributes_common(inode, attr, attr_valid);
oldsize = inode->i_size;
@@ -237,7 +237,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
truncate_pagecache(inode, attr->size);
inval = true;
} else if (fc->auto_inval_data) {
- struct timespec new_mtime = {
+ struct timespec64 new_mtime = {
.tv_sec = attr->mtime,
.tv_nsec = attr->mtimensec,
};
@@ -246,7 +246,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
* Auto inval mode also checks and invalidates if mtime
* has changed.
*/
- if (!timespec_equal(&old_mtime, &new_mtime))
+ if (!timespec64_equal(&old_mtime, &new_mtime))
inval = true;
}
@@ -357,15 +357,21 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
return 0;
}
-void fuse_lock_inode(struct inode *inode)
+bool fuse_lock_inode(struct inode *inode)
{
- if (!get_fuse_conn(inode)->parallel_dirops)
+ bool locked = false;
+
+ if (!get_fuse_conn(inode)->parallel_dirops) {
mutex_lock(&get_fuse_inode(inode)->mutex);
+ locked = true;
+ }
+
+ return locked;
}
-void fuse_unlock_inode(struct inode *inode)
+void fuse_unlock_inode(struct inode *inode, bool locked)
{
- if (!get_fuse_conn(inode)->parallel_dirops)
+ if (locked)
mutex_unlock(&get_fuse_inode(inode)->mutex);
}
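Callers now record whether the mutex was actually taken instead of re-deriving that at unlock time, so a change to parallel_dirops (set when the INIT reply arrives) between lock and unlock can no longer unbalance the mutex. The pattern, as used by fuse_lookup() and fuse_readdir() above:

	/* Sketch of the caller-side pattern introduced by this change. */
	static void demo_dirop(struct inode *dir)
	{
		bool locked = fuse_lock_inode(dir);

		/* ... serialized directory operation ... */

		fuse_unlock_inode(dir, locked);
	}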
@@ -391,9 +397,6 @@ static void fuse_put_super(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
- fuse_send_destroy(fc);
-
- fuse_abort_conn(fc, false);
mutex_lock(&fuse_mutex);
list_del(&fc->entry);
fuse_ctl_remove_conn(fc);
@@ -1210,16 +1213,25 @@ static struct dentry *fuse_mount(struct file_system_type *fs_type,
return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
}
-static void fuse_kill_sb_anon(struct super_block *sb)
+static void fuse_sb_destroy(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
if (fc) {
+ fuse_send_destroy(fc);
+
+ fuse_abort_conn(fc, false);
+ fuse_wait_aborted(fc);
+
down_write(&fc->killsb);
fc->sb = NULL;
up_write(&fc->killsb);
}
+}
+static void fuse_kill_sb_anon(struct super_block *sb)
+{
+ fuse_sb_destroy(sb);
kill_anon_super(sb);
}
@@ -1242,14 +1254,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
static void fuse_kill_sb_blk(struct super_block *sb)
{
- struct fuse_conn *fc = get_fuse_conn_super(sb);
-
- if (fc) {
- down_write(&fc->killsb);
- fc->sb = NULL;
- up_write(&fc->killsb);
- }
-
+ fuse_sb_destroy(sb);
kill_block_super(sb);
}
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 776717f1eeea..af5f87a493d9 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -82,14 +82,12 @@ struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
int error;
- int len;
+ size_t len;
char *data;
const char *name = gfs2_acl_name(type);
if (acl) {
- len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
- if (len == 0)
- return 0;
+ len = posix_acl_xattr_size(acl->a_count);
data = kmalloc(len, GFP_NOFS);
if (data == NULL)
return -ENOMEM;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 35f5ee23566d..31e8270d0b26 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -22,6 +22,7 @@
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
+#include <linux/sched/signal.h>
#include "gfs2.h"
#include "incore.h"
@@ -36,10 +37,11 @@
#include "super.h"
#include "util.h"
#include "glops.h"
+#include "aops.h"
-static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
- unsigned int from, unsigned int len)
+void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+ unsigned int from, unsigned int len)
{
struct buffer_head *head = page_buffers(page);
unsigned int bsize = head->b_size;
@@ -82,12 +84,6 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
return 0;
}
-static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
- struct buffer_head *bh_result, int create)
-{
- return gfs2_block_map(inode, lblock, bh_result, 0);
-}
-
/**
* gfs2_writepage_common - Common bits of writepage
* @page: The page to be written
@@ -462,7 +458,7 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
* Returns: errno
*/
-static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
+int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
struct buffer_head *dibh;
u64 dsize = i_size_read(&ip->i_inode);
@@ -512,9 +508,13 @@ static int __gfs2_readpage(void *file, struct page *page)
{
struct gfs2_inode *ip = GFS2_I(page->mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
+
int error;
- if (gfs2_is_stuffed(ip)) {
+ if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
+ !page_has_buffers(page)) {
+ error = iomap_readpage(page, &gfs2_iomap_ops);
+ } else if (gfs2_is_stuffed(ip)) {
error = stuffed_readpage(ip, page);
unlock_page(page);
} else {
@@ -644,139 +644,10 @@ out_uninit:
}
/**
- * gfs2_write_begin - Begin to write to a file
- * @file: The file to write to
- * @mapping: The mapping in which to write
- * @pos: The file offset at which to start writing
- * @len: Length of the write
- * @flags: Various flags
- * @pagep: Pointer to return the page
- * @fsdata: Pointer to return fs data (unused by GFS2)
- *
- * Returns: errno
- */
-
-static int gfs2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- struct gfs2_inode *ip = GFS2_I(mapping->host);
- struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
- struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
- unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
- unsigned requested = 0;
- int alloc_required;
- int error = 0;
- pgoff_t index = pos >> PAGE_SHIFT;
- unsigned from = pos & (PAGE_SIZE - 1);
- struct page *page;
-
- gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
- error = gfs2_glock_nq(&ip->i_gh);
- if (unlikely(error))
- goto out_uninit;
- if (&ip->i_inode == sdp->sd_rindex) {
- error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
- GL_NOCACHE, &m_ip->i_gh);
- if (unlikely(error)) {
- gfs2_glock_dq(&ip->i_gh);
- goto out_uninit;
- }
- }
-
- alloc_required = gfs2_write_alloc_required(ip, pos, len);
-
- if (alloc_required || gfs2_is_jdata(ip))
- gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
-
- if (alloc_required) {
- struct gfs2_alloc_parms ap = { .aflags = 0, };
- requested = data_blocks + ind_blocks;
- ap.target = requested;
- error = gfs2_quota_lock_check(ip, &ap);
- if (error)
- goto out_unlock;
-
- error = gfs2_inplace_reserve(ip, &ap);
- if (error)
- goto out_qunlock;
- }
-
- rblocks = RES_DINODE + ind_blocks;
- if (gfs2_is_jdata(ip))
- rblocks += data_blocks ? data_blocks : 1;
- if (ind_blocks || data_blocks)
- rblocks += RES_STATFS + RES_QUOTA;
- if (&ip->i_inode == sdp->sd_rindex)
- rblocks += 2 * RES_STATFS;
- if (alloc_required)
- rblocks += gfs2_rg_blocks(ip, requested);
-
- error = gfs2_trans_begin(sdp, rblocks,
- PAGE_SIZE/sdp->sd_sb.sb_bsize);
- if (error)
- goto out_trans_fail;
-
- error = -ENOMEM;
- flags |= AOP_FLAG_NOFS;
- page = grab_cache_page_write_begin(mapping, index, flags);
- *pagep = page;
- if (unlikely(!page))
- goto out_endtrans;
-
- if (gfs2_is_stuffed(ip)) {
- error = 0;
- if (pos + len > gfs2_max_stuffed_size(ip)) {
- error = gfs2_unstuff_dinode(ip, page);
- if (error == 0)
- goto prepare_write;
- } else if (!PageUptodate(page)) {
- error = stuffed_readpage(ip, page);
- }
- goto out;
- }
-
-prepare_write:
- error = __block_write_begin(page, from, len, gfs2_block_map);
-out:
- if (error == 0)
- return 0;
-
- unlock_page(page);
- put_page(page);
-
- gfs2_trans_end(sdp);
- if (alloc_required) {
- gfs2_inplace_release(ip);
- if (pos + len > ip->i_inode.i_size)
- gfs2_trim_blocks(&ip->i_inode);
- }
- goto out_qunlock;
-
-out_endtrans:
- gfs2_trans_end(sdp);
-out_trans_fail:
- if (alloc_required)
- gfs2_inplace_release(ip);
-out_qunlock:
- if (alloc_required)
- gfs2_quota_unlock(ip);
-out_unlock:
- if (&ip->i_inode == sdp->sd_rindex) {
- gfs2_glock_dq(&m_ip->i_gh);
- gfs2_holder_uninit(&m_ip->i_gh);
- }
- gfs2_glock_dq(&ip->i_gh);
-out_uninit:
- gfs2_holder_uninit(&ip->i_gh);
- return error;
-}
-
-/**
* adjust_fs_space - Adjusts the free space available due to gfs2_grow
* @inode: the rindex inode
*/
-static void adjust_fs_space(struct inode *inode)
+void adjust_fs_space(struct inode *inode)
{
struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
@@ -822,11 +693,11 @@ out:
* This copies the data from the page into the inode block after
* the inode data structure itself.
*
- * Returns: errno
+ * Returns: copied bytes or errno
*/
-static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
- loff_t pos, unsigned copied,
- struct page *page)
+int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
+ loff_t pos, unsigned copied,
+ struct page *page)
{
struct gfs2_inode *ip = GFS2_I(inode);
u64 to = pos + copied;
@@ -853,84 +724,6 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
}
/**
- * gfs2_write_end
- * @file: The file to write to
- * @mapping: The address space to write to
- * @pos: The file position
- * @len: The length of the data
- * @copied: How much was actually copied by the VFS
- * @page: The page that has been written
- * @fsdata: The fsdata (unused in GFS2)
- *
- * The main write_end function for GFS2. We just put our locking around the VFS
- * provided functions.
- *
- * Returns: errno
- */
-
-static int gfs2_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- struct inode *inode = page->mapping->host;
- struct gfs2_inode *ip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
- struct buffer_head *dibh;
- int ret;
- struct gfs2_trans *tr = current->journal_info;
- BUG_ON(!tr);
-
- BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);
-
- ret = gfs2_meta_inode_buffer(ip, &dibh);
- if (unlikely(ret))
- goto out;
-
- if (gfs2_is_stuffed(ip)) {
- ret = gfs2_stuffed_write_end(inode, dibh, pos, copied, page);
- page = NULL;
- goto out2;
- }
-
- if (gfs2_is_jdata(ip))
- gfs2_page_add_databufs(ip, page, pos & ~PAGE_MASK, len);
- else
- gfs2_ordered_add_inode(ip);
-
- ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
- page = NULL;
- if (tr->tr_num_buf_new)
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- else
- gfs2_trans_add_meta(ip->i_gl, dibh);
-
-out2:
- if (inode == sdp->sd_rindex) {
- adjust_fs_space(inode);
- sdp->sd_rindex_uptodate = 0;
- }
-
- brelse(dibh);
-out:
- if (page) {
- unlock_page(page);
- put_page(page);
- }
- gfs2_trans_end(sdp);
- gfs2_inplace_release(ip);
- if (ip->i_qadata && ip->i_qadata->qa_qd_num)
- gfs2_quota_unlock(ip);
- if (inode == sdp->sd_rindex) {
- gfs2_glock_dq(&m_ip->i_gh);
- gfs2_holder_uninit(&m_ip->i_gh);
- }
- gfs2_glock_dq(&ip->i_gh);
- gfs2_holder_uninit(&ip->i_gh);
- return ret;
-}
-
-/**
* jdata_set_page_dirty - Page dirtying function
* @page: The page to dirty
*
@@ -1023,96 +816,6 @@ out:
}
/**
- * gfs2_ok_for_dio - check that dio is valid on this file
- * @ip: The inode
- * @offset: The offset at which we are reading or writing
- *
- * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
- * 1 (to accept the i/o request)
- */
-static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
-{
- /*
- * Should we return an error here? I can't see that O_DIRECT for
- * a stuffed file makes any sense. For now we'll silently fall
- * back to buffered I/O
- */
- if (gfs2_is_stuffed(ip))
- return 0;
-
- if (offset >= i_size_read(&ip->i_inode))
- return 0;
- return 1;
-}
-
-
-
-static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct address_space *mapping = inode->i_mapping;
- struct gfs2_inode *ip = GFS2_I(inode);
- loff_t offset = iocb->ki_pos;
- struct gfs2_holder gh;
- int rv;
-
- /*
- * Deferred lock, even if its a write, since we do no allocation
- * on this path. All we need change is atime, and this lock mode
- * ensures that other nodes have flushed their buffered read caches
- * (i.e. their page cache entries for this inode). We do not,
- * unfortunately have the option of only flushing a range like
- * the VFS does.
- */
- gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
- rv = gfs2_glock_nq(&gh);
- if (rv)
- goto out_uninit;
- rv = gfs2_ok_for_dio(ip, offset);
- if (rv != 1)
- goto out; /* dio not valid, fall back to buffered i/o */
-
- /*
- * Now since we are holding a deferred (CW) lock at this point, you
- * might be wondering why this is ever needed. There is a case however
- * where we've granted a deferred local lock against a cached exclusive
- * glock. That is ok provided all granted local locks are deferred, but
- * it also means that it is possible to encounter pages which are
- * cached and possibly also mapped. So here we check for that and sort
- * them out ahead of the dio. The glock state machine will take care of
- * everything else.
- *
- * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
- * the first place, mapping->nr_pages will always be zero.
- */
- if (mapping->nrpages) {
- loff_t lstart = offset & ~(PAGE_SIZE - 1);
- loff_t len = iov_iter_count(iter);
- loff_t end = PAGE_ALIGN(offset + len) - 1;
-
- rv = 0;
- if (len == 0)
- goto out;
- if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
- unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
- rv = filemap_write_and_wait_range(mapping, lstart, end);
- if (rv)
- goto out;
- if (iov_iter_rw(iter) == WRITE)
- truncate_inode_pages_range(mapping, lstart, end);
- }
-
- rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- gfs2_get_block_direct, NULL, NULL, 0);
-out:
- gfs2_glock_dq(&gh);
-out_uninit:
- gfs2_holder_uninit(&gh);
- return rv;
-}
-
-/**
* gfs2_releasepage - free the metadata associated with a page
* @page: the page that's being released
* @gfp_mask: passed from Linux VFS, ignored by us
@@ -1187,12 +890,10 @@ static const struct address_space_operations gfs2_writeback_aops = {
.writepages = gfs2_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .write_begin = gfs2_write_begin,
- .write_end = gfs2_write_end,
.bmap = gfs2_bmap,
.invalidatepage = gfs2_invalidatepage,
.releasepage = gfs2_releasepage,
- .direct_IO = gfs2_direct_IO,
+ .direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
@@ -1203,13 +904,11 @@ static const struct address_space_operations gfs2_ordered_aops = {
.writepages = gfs2_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .write_begin = gfs2_write_begin,
- .write_end = gfs2_write_end,
.set_page_dirty = __set_page_dirty_buffers,
.bmap = gfs2_bmap,
.invalidatepage = gfs2_invalidatepage,
.releasepage = gfs2_releasepage,
- .direct_IO = gfs2_direct_IO,
+ .direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
@@ -1220,8 +919,6 @@ static const struct address_space_operations gfs2_jdata_aops = {
.writepages = gfs2_jdata_writepages,
.readpage = gfs2_readpage,
.readpages = gfs2_readpages,
- .write_begin = gfs2_write_begin,
- .write_end = gfs2_write_end,
.set_page_dirty = jdata_set_page_dirty,
.bmap = gfs2_bmap,
.invalidatepage = gfs2_invalidatepage,
diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h
new file mode 100644
index 000000000000..fa8e5d0144dd
--- /dev/null
+++ b/fs/gfs2/aops.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Red Hat, Inc. All rights reserved.
+ */
+
+#ifndef __AOPS_DOT_H__
+#define __AOPS_DOT_H__
+
+#include "incore.h"
+
+extern int stuffed_readpage(struct gfs2_inode *ip, struct page *page);
+extern int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
+ loff_t pos, unsigned copied,
+ struct page *page);
+extern void adjust_fs_space(struct inode *inode);
+extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+ unsigned int from, unsigned int len);
+
+#endif /* __AOPS_DOT_H__ */
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index ed6699705c13..03128ed1f34e 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -28,6 +28,7 @@
#include "trans.h"
#include "dir.h"
#include "util.h"
+#include "aops.h"
#include "trace_gfs2.h"
/* This doesn't need to be that large as max 64 bit pointers in a 4k
@@ -41,6 +42,8 @@ struct metapath {
int mp_aheight; /* actual height (lookup height) */
};
+static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
+
/**
* gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
* @ip: the inode
@@ -389,7 +392,7 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
return mp->mp_aheight - x - 1;
}
-static inline void release_metapath(struct metapath *mp)
+static void release_metapath(struct metapath *mp)
{
int i;
@@ -397,27 +400,23 @@ static inline void release_metapath(struct metapath *mp)
if (mp->mp_bh[i] == NULL)
break;
brelse(mp->mp_bh[i]);
+ mp->mp_bh[i] = NULL;
}
}
/**
* gfs2_extent_length - Returns length of an extent of blocks
- * @start: Start of the buffer
- * @len: Length of the buffer in bytes
- * @ptr: Current position in the buffer
- * @limit: Max extent length to return (0 = unlimited)
+ * @bh: The metadata block
+ * @ptr: Current position in @bh
+ * @limit: Max extent length to return
* @eob: Set to 1 if we hit "end of block"
*
- * If the first block is zero (unallocated) it will return the number of
- * unallocated blocks in the extent, otherwise it will return the number
- * of contiguous blocks in the extent.
- *
* Returns: The length of the extent (minimum of one block)
*/
-static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
+static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
{
- const __be64 *end = (start + len);
+ const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
const __be64 *first = ptr;
u64 d = be64_to_cpu(*ptr);
@@ -426,14 +425,11 @@ static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __b
ptr++;
if (ptr >= end)
break;
- if (limit && --limit == 0)
- break;
- if (d)
- d++;
+ d++;
} while(be64_to_cpu(*ptr) == d);
if (ptr >= end)
*eob = 1;
- return (ptr - first);
+ return ptr - first;
}
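With the unallocated case gone (gfs2_alloc_size() below takes over allocation sizing), the helper reduces to counting consecutive physical block numbers: pointers {100, 101, 102, 200} yield a length of 3. A standalone userspace sketch of the scan, ignoring the on-disk byte order:

	#include <stddef.h>
	#include <stdint.h>

	/* Count leading pointers that form one contiguous extent. */
	static size_t extent_length(const uint64_t *ptr, const uint64_t *end,
				    int *eob)
	{
		const uint64_t *first = ptr;
		uint64_t d = *ptr;

		do {
			ptr++;
			if (ptr >= end)
				break;
			d++;
		} while (*ptr == d);

		if (ptr >= end)
			*eob = 1;
		return ptr - first;
	}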
typedef const __be64 *(*gfs2_metadata_walker)(
@@ -609,11 +605,13 @@ enum alloc_state {
* ii) Indirect blocks to fill in lower part of the metadata tree
* iii) Data blocks
*
- * The function is in two parts. The first part works out the total
- * number of blocks which we need. The second part does the actual
- * allocation asking for an extent at a time (if enough contiguous free
- * blocks are available, there will only be one request per bmap call)
- * and uses the state machine to initialise the blocks in order.
+ * This function is called after gfs2_iomap_get, which works out the
+ * total number of blocks which we need via gfs2_alloc_size.
+ *
+ * We then do the actual allocation asking for an extent at a time (if
+ * enough contiguous free blocks are available, there will only be one
+ * allocation request per call) and uses the state machine to initialise
+ * the blocks in order.
*
* Right now, this function will allocate at most one indirect block
* worth of data -- with a default block size of 4K, that's slightly
@@ -633,39 +631,26 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
struct buffer_head *dibh = mp->mp_bh[0];
u64 bn;
unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
- unsigned dblks = 0;
- unsigned ptrs_per_blk;
+ size_t dblks = iomap->length >> inode->i_blkbits;
const unsigned end_of_metadata = mp->mp_fheight - 1;
int ret;
enum alloc_state state;
__be64 *ptr;
__be64 zero_bn = 0;
- size_t maxlen = iomap->length >> inode->i_blkbits;
BUG_ON(mp->mp_aheight < 1);
BUG_ON(dibh == NULL);
+ BUG_ON(dblks < 1);
gfs2_trans_add_meta(ip->i_gl, dibh);
down_write(&ip->i_rw_mutex);
if (mp->mp_fheight == mp->mp_aheight) {
- struct buffer_head *bh;
- int eob;
-
- /* Bottom indirect block exists, find unalloced extent size */
- ptr = metapointer(end_of_metadata, mp);
- bh = mp->mp_bh[end_of_metadata];
- dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr,
- maxlen, &eob);
- BUG_ON(dblks < 1);
+ /* Bottom indirect block exists */
state = ALLOC_DATA;
} else {
/* Need to allocate indirect blocks */
- ptrs_per_blk = mp->mp_fheight > 1 ? sdp->sd_inptrs :
- sdp->sd_diptrs;
- dblks = min(maxlen, (size_t)(ptrs_per_blk -
- mp->mp_list[end_of_metadata]));
if (mp->mp_fheight == ip->i_height) {
/* Writing into existing tree, extend tree down */
iblks = mp->mp_fheight - mp->mp_aheight;
@@ -750,6 +735,7 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
}
} while (iomap->addr == IOMAP_NULL_ADDR);
+ iomap->type = IOMAP_MAPPED;
iomap->length = (u64)dblks << inode->i_blkbits;
ip->i_height = mp->mp_fheight;
gfs2_add_inode_blocks(&ip->i_inode, alloced);
@@ -759,18 +745,51 @@ out:
return ret;
}
-static void gfs2_stuffed_iomap(struct inode *inode, struct iomap *iomap)
+#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
+
+/**
+ * gfs2_alloc_size - Compute the maximum allocation size
+ * @inode: The inode
+ * @mp: The metapath
+ * @size: Requested size in blocks
+ *
+ * Compute the maximum size of the next allocation at @mp.
+ *
+ * Returns: size in blocks
+ */
+static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
{
struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ const __be64 *first, *ptr, *end;
- iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
- sizeof(struct gfs2_dinode);
- iomap->offset = 0;
- iomap->length = i_size_read(inode);
- iomap->type = IOMAP_INLINE;
-}
+ /*
+ * For writes to stuffed files, this function is called twice via
+ * gfs2_iomap_get, before and after unstuffing. The size we return the
+ * first time needs to be large enough to get the reservation and
+ * allocation sizes right. The size we return the second time must
+ * be exact or else gfs2_iomap_alloc won't do the right thing.
+ */
-#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
+ if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
+ unsigned int maxsize = mp->mp_fheight > 1 ?
+ sdp->sd_inptrs : sdp->sd_diptrs;
+ maxsize -= mp->mp_list[mp->mp_fheight - 1];
+ if (size > maxsize)
+ size = maxsize;
+ return size;
+ }
+
+ first = metapointer(ip->i_height - 1, mp);
+ end = metaend(ip->i_height - 1, mp);
+ if (end - first > size)
+ end = first + size;
+ for (ptr = first; ptr < end; ptr++) {
+ if (*ptr)
+ break;
+ }
+ return ptr - first;
+}
/**
* gfs2_iomap_get - Map blocks from an inode to disk blocks
@@ -789,37 +808,63 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ loff_t size = i_size_read(inode);
__be64 *ptr;
sector_t lblock;
sector_t lblock_stop;
int ret;
int eob;
u64 len;
- struct buffer_head *bh;
+ struct buffer_head *dibh = NULL, *bh;
u8 height;
if (!length)
return -EINVAL;
+ down_read(&ip->i_rw_mutex);
+
+ ret = gfs2_meta_inode_buffer(ip, &dibh);
+ if (ret)
+ goto unlock;
+ iomap->private = dibh;
+
if (gfs2_is_stuffed(ip)) {
- if (flags & IOMAP_REPORT) {
- if (pos >= i_size_read(inode))
- return -ENOENT;
- gfs2_stuffed_iomap(inode, iomap);
- return 0;
+ if (flags & IOMAP_WRITE) {
+ loff_t max_size = gfs2_max_stuffed_size(ip);
+
+ if (pos + length > max_size)
+ goto unstuff;
+ iomap->length = max_size;
+ } else {
+ if (pos >= size) {
+ if (flags & IOMAP_REPORT) {
+ ret = -ENOENT;
+ goto unlock;
+ } else {
+ /* report a hole */
+ iomap->offset = pos;
+ iomap->length = length;
+ goto do_alloc;
+ }
+ }
+ iomap->length = size;
}
- BUG_ON(!(flags & IOMAP_WRITE));
+ iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
+ sizeof(struct gfs2_dinode);
+ iomap->type = IOMAP_INLINE;
+ iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
+ goto out;
}
+
+unstuff:
lblock = pos >> inode->i_blkbits;
iomap->offset = lblock << inode->i_blkbits;
lblock_stop = (pos + length - 1) >> inode->i_blkbits;
len = lblock_stop - lblock + 1;
+ iomap->length = len << inode->i_blkbits;
- down_read(&ip->i_rw_mutex);
-
- ret = gfs2_meta_inode_buffer(ip, &mp->mp_bh[0]);
- if (ret)
- goto unlock;
+ get_bh(dibh);
+ mp->mp_bh[0] = dibh;
height = ip->i_height;
while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
@@ -840,12 +885,12 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
goto do_alloc;
bh = mp->mp_bh[ip->i_height - 1];
- len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, len, &eob);
+ len = gfs2_extent_length(bh, ptr, len, &eob);
iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
iomap->length = len << inode->i_blkbits;
iomap->type = IOMAP_MAPPED;
- iomap->flags = IOMAP_F_MERGED;
+ iomap->flags |= IOMAP_F_MERGED;
if (eob)
iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
@@ -853,25 +898,185 @@ out:
iomap->bdev = inode->i_sb->s_bdev;
unlock:
up_read(&ip->i_rw_mutex);
+ if (ret && dibh)
+ brelse(dibh);
return ret;
do_alloc:
iomap->addr = IOMAP_NULL_ADDR;
- iomap->length = len << inode->i_blkbits;
iomap->type = IOMAP_HOLE;
- iomap->flags = 0;
if (flags & IOMAP_REPORT) {
- loff_t size = i_size_read(inode);
if (pos >= size)
ret = -ENOENT;
else if (height == ip->i_height)
ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
else
iomap->length = size - pos;
+ } else if (flags & IOMAP_WRITE) {
+ u64 alloc_size;
+
+ if (flags & IOMAP_DIRECT)
+ goto out; /* (see gfs2_file_direct_write) */
+
+ len = gfs2_alloc_size(inode, mp, len);
+ alloc_size = len << inode->i_blkbits;
+ if (alloc_size < iomap->length)
+ iomap->length = alloc_size;
+ } else {
+ if (pos < size && height == ip->i_height)
+ ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
}
goto out;
}
+static int gfs2_write_lock(struct inode *inode)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ int error;
+
+ gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
+ error = gfs2_glock_nq(&ip->i_gh);
+ if (error)
+ goto out_uninit;
+ if (&ip->i_inode == sdp->sd_rindex) {
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+ error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
+ GL_NOCACHE, &m_ip->i_gh);
+ if (error)
+ goto out_unlock;
+ }
+ return 0;
+
+out_unlock:
+ gfs2_glock_dq(&ip->i_gh);
+out_uninit:
+ gfs2_holder_uninit(&ip->i_gh);
+ return error;
+}
+
+static void gfs2_write_unlock(struct inode *inode)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+
+ if (&ip->i_inode == sdp->sd_rindex) {
+ struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+ gfs2_glock_dq_uninit(&m_ip->i_gh);
+ }
+ gfs2_glock_dq_uninit(&ip->i_gh);
+}
+
+static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos,
+ unsigned copied, struct page *page,
+ struct iomap *iomap)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+
+ gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+}
+
+static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
+ loff_t length, unsigned flags,
+ struct iomap *iomap)
+{
+ struct metapath mp = { .mp_aheight = 1, };
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
+ bool unstuff, alloc_required;
+ int ret;
+
+ ret = gfs2_write_lock(inode);
+ if (ret)
+ return ret;
+
+ unstuff = gfs2_is_stuffed(ip) &&
+ pos + length > gfs2_max_stuffed_size(ip);
+
+ ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
+ if (ret)
+ goto out_release;
+
+ alloc_required = unstuff || iomap->type == IOMAP_HOLE;
+
+ if (alloc_required || gfs2_is_jdata(ip))
+ gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
+ &ind_blocks);
+
+ if (alloc_required) {
+ struct gfs2_alloc_parms ap = {
+ .target = data_blocks + ind_blocks
+ };
+
+ ret = gfs2_quota_lock_check(ip, &ap);
+ if (ret)
+ goto out_release;
+
+ ret = gfs2_inplace_reserve(ip, &ap);
+ if (ret)
+ goto out_qunlock;
+ }
+
+ rblocks = RES_DINODE + ind_blocks;
+ if (gfs2_is_jdata(ip))
+ rblocks += data_blocks;
+ if (ind_blocks || data_blocks)
+ rblocks += RES_STATFS + RES_QUOTA;
+ if (inode == sdp->sd_rindex)
+ rblocks += 2 * RES_STATFS;
+ if (alloc_required)
+ rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
+
+ ret = gfs2_trans_begin(sdp, rblocks, iomap->length >> inode->i_blkbits);
+ if (ret)
+ goto out_trans_fail;
+
+ if (unstuff) {
+ ret = gfs2_unstuff_dinode(ip, NULL);
+ if (ret)
+ goto out_trans_end;
+ release_metapath(&mp);
+ brelse(iomap->private);
+ iomap->private = NULL;
+ ret = gfs2_iomap_get(inode, iomap->offset, iomap->length,
+ flags, iomap, &mp);
+ if (ret)
+ goto out_trans_end;
+ }
+
+ if (iomap->type == IOMAP_HOLE) {
+ ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
+ if (ret) {
+ gfs2_trans_end(sdp);
+ gfs2_inplace_release(ip);
+ punch_hole(ip, iomap->offset, iomap->length);
+ goto out_qunlock;
+ }
+ }
+ release_metapath(&mp);
+ if (gfs2_is_jdata(ip))
+ iomap->page_done = gfs2_iomap_journaled_page_done;
+ return 0;
+
+out_trans_end:
+ gfs2_trans_end(sdp);
+out_trans_fail:
+ if (alloc_required)
+ gfs2_inplace_release(ip);
+out_qunlock:
+ if (alloc_required)
+ gfs2_quota_unlock(ip);
+out_release:
+ if (iomap->private)
+ brelse(iomap->private);
+ release_metapath(&mp);
+ gfs2_write_unlock(inode);
+ return ret;
+}
+
static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
unsigned flags, struct iomap *iomap)
{
@@ -879,22 +1084,79 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
struct metapath mp = { .mp_aheight = 1, };
int ret;
+ iomap->flags |= IOMAP_F_BUFFER_HEAD;
+
trace_gfs2_iomap_start(ip, pos, length, flags);
- if (flags & IOMAP_WRITE) {
- ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
- if (!ret && iomap->type == IOMAP_HOLE)
- ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
- release_metapath(&mp);
+ if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
+ ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap);
} else {
ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
release_metapath(&mp);
+ /*
+ * Silently fall back to buffered I/O for stuffed files or if
+ * we've got a hole (see gfs2_file_direct_write).
+ */
+ if ((flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT) &&
+ iomap->type != IOMAP_MAPPED)
+ ret = -ENOTBLK;
}
trace_gfs2_iomap_end(ip, iomap, ret);
return ret;
}
+static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ ssize_t written, unsigned flags, struct iomap *iomap)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_trans *tr = current->journal_info;
+ struct buffer_head *dibh = iomap->private;
+
+ if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != IOMAP_WRITE)
+ goto out;
+
+ if (iomap->type != IOMAP_INLINE) {
+ gfs2_ordered_add_inode(ip);
+
+ if (tr->tr_num_buf_new)
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ else
+ gfs2_trans_add_meta(ip->i_gl, dibh);
+ }
+
+ if (inode == sdp->sd_rindex) {
+ adjust_fs_space(inode);
+ sdp->sd_rindex_uptodate = 0;
+ }
+
+ gfs2_trans_end(sdp);
+ gfs2_inplace_release(ip);
+
+ if (length != written && (iomap->flags & IOMAP_F_NEW)) {
+ /* Deallocate blocks that were just allocated. */
+ loff_t blockmask = i_blocksize(inode) - 1;
+ loff_t end = (pos + length) & ~blockmask;
+
+ pos = (pos + written + blockmask) & ~blockmask;
+ if (pos < end) {
+ truncate_pagecache_range(inode, pos, end - 1);
+ punch_hole(ip, pos, end - pos);
+ }
+ }
+
+ if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+ gfs2_quota_unlock(ip);
+ gfs2_write_unlock(inode);
+
+out:
+ if (dibh)
+ brelse(dibh);
+ return 0;
+}
+
const struct iomap_ops gfs2_iomap_ops = {
.iomap_begin = gfs2_iomap_begin,
+ .iomap_end = gfs2_iomap_end,
};
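With .iomap_end wired up, every buffered-write mapping is bracketed: gfs2_iomap_begin_write() returns with the transaction open and the glock held, the generic code copies data into the page cache, and gfs2_iomap_end() commits, releases, and punches out IOMAP_F_NEW blocks that a short write left unused. In caller terms this is what gfs2_file_write_iter() (fs/gfs2/file.c below) boils down to; a sketch:

	/* Sketch: the begin/end pair is driven per-mapping by the iomap core. */
	static ssize_t demo_buffered_write(struct kiocb *iocb, struct iov_iter *from)
	{
		return iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
	}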
/**
@@ -941,12 +1203,6 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
} else {
ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
release_metapath(&mp);
-
- /* Return unmapped buffer beyond the end of file. */
- if (ret == -ENOENT) {
- ret = 0;
- goto out;
- }
}
if (ret)
goto out;
@@ -2060,7 +2316,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
lblock = offset >> shift;
lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
- if (lblock_stop > end_of_file)
+ if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
return 1;
size = (lblock_stop - lblock) << shift;
@@ -2154,11 +2410,11 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
if (error)
goto out;
} else {
- unsigned int start_off, end_off, blocksize;
+ unsigned int start_off, end_len, blocksize;
blocksize = i_blocksize(inode);
start_off = offset & (blocksize - 1);
- end_off = (offset + length) & (blocksize - 1);
+ end_len = (offset + length) & (blocksize - 1);
if (start_off) {
unsigned int len = length;
if (length > blocksize - start_off)
@@ -2167,11 +2423,11 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
if (error)
goto out;
if (start_off + length < blocksize)
- end_off = 0;
+ end_len = 0;
}
- if (end_off) {
+ if (end_len) {
error = gfs2_block_zero_range(inode,
- offset + length - end_off, end_off);
+ offset + length - end_len, end_len);
if (error)
goto out;
}
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index d97ad89955d1..e37002560c11 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1011,7 +1011,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
u64 bn, leaf_no;
__be64 *lp;
u32 index;
- int x, moved = 0;
+ int x;
int error;
index = name->hash >> (32 - dip->i_depth);
@@ -1113,8 +1113,6 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
if (!prev)
prev = dent;
-
- moved = 1;
} else {
prev = dent;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 7137db7b0119..08369c6cd127 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -26,10 +26,12 @@
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
+#include <linux/backing-dev.h>
#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
+#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
@@ -387,7 +389,7 @@ static int gfs2_allocate_page_backing(struct page *page)
* blocks allocated on disk to back that page.
*/
-static int gfs2_page_mkwrite(struct vm_fault *vmf)
+static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
@@ -688,12 +690,83 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
return ret ? ret : ret1;
}
+static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
+ size_t count = iov_iter_count(to);
+ struct gfs2_holder gh;
+ ssize_t ret;
+
+ if (!count)
+ return 0; /* skip atime */
+
+ gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
+ ret = gfs2_glock_nq(&gh);
+ if (ret)
+ goto out_uninit;
+
+ ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL);
+
+ gfs2_glock_dq(&gh);
+out_uninit:
+ gfs2_holder_uninit(&gh);
+ return ret;
+}
+
+static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ size_t len = iov_iter_count(from);
+ loff_t offset = iocb->ki_pos;
+ struct gfs2_holder gh;
+ ssize_t ret;
+
+ /*
+ * Deferred lock, even if it's a write, since we do no allocation on
+ * this path. All we need to change is the atime, and this lock mode
+ * ensures that other nodes have flushed their buffered read caches
+ * (i.e. their page cache entries for this inode). We do not,
+ * unfortunately, have the option of only flushing a range like the
+ * VFS does.
+ */
+ gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
+ ret = gfs2_glock_nq(&gh);
+ if (ret)
+ goto out_uninit;
+
+ /* Silently fall back to buffered I/O when writing beyond EOF */
+ if (offset + len > i_size_read(&ip->i_inode))
+ goto out;
+
+ ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL);
+
+out:
+ gfs2_glock_dq(&gh);
+out_uninit:
+ gfs2_holder_uninit(&gh);
+ return ret;
+}
+
+static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ ssize_t ret;
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ret = gfs2_file_direct_read(iocb, to);
+ if (likely(ret != -ENOTBLK))
+ return ret;
+ iocb->ki_flags &= ~IOCB_DIRECT;
+ }
+ return generic_file_read_iter(iocb, to);
+}
+
/**
* gfs2_file_write_iter - Perform a write to a file
* @iocb: The io context
- * @iov: The data to write
- * @nr_segs: Number of @iov segments
- * @pos: The file position
+ * @from: The data to write
*
* We have to do a lock/unlock here to refresh the inode size for
* O_APPEND writes, otherwise we can land up writing at the wrong
@@ -705,8 +778,9 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- struct gfs2_inode *ip = GFS2_I(file_inode(file));
- int ret;
+ struct inode *inode = file_inode(file);
+ struct gfs2_inode *ip = GFS2_I(inode);
+ ssize_t written = 0, ret;
ret = gfs2_rsqa_alloc(ip);
if (ret)
@@ -723,7 +797,71 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
gfs2_glock_dq_uninit(&gh);
}
- return generic_file_write_iter(iocb, from);
+ inode_lock(inode);
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out;
+
+ /* We can write back this queue in page reclaim */
+ current->backing_dev_info = inode_to_bdi(inode);
+
+ ret = file_remove_privs(file);
+ if (ret)
+ goto out2;
+
+ ret = file_update_time(file);
+ if (ret)
+ goto out2;
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ struct address_space *mapping = file->f_mapping;
+ loff_t pos, endbyte;
+ ssize_t buffered;
+
+ written = gfs2_file_direct_write(iocb, from);
+ if (written < 0 || !iov_iter_count(from))
+ goto out2;
+
+ ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ if (unlikely(ret < 0))
+ goto out2;
+ buffered = ret;
+
+ /*
+ * We need to ensure that the page cache pages are written to
+ * disk and invalidated to preserve the expected O_DIRECT
+ * semantics.
+ */
+ pos = iocb->ki_pos;
+ endbyte = pos + buffered - 1;
+ ret = filemap_write_and_wait_range(mapping, pos, endbyte);
+ if (!ret) {
+ iocb->ki_pos += buffered;
+ written += buffered;
+ invalidate_mapping_pages(mapping,
+ pos >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
+ } else {
+ /*
+ * We don't know how much we wrote, so just return
+ * the number of bytes which were direct-written
+ */
+ }
+ } else {
+ ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
+ if (likely(ret > 0))
+ iocb->ki_pos += ret;
+ }
+
+out2:
+ current->backing_dev_info = NULL;
+out:
+ inode_unlock(inode);
+ if (likely(ret > 0)) {
+ /* Handle various SYNC-type writes */
+ ret = generic_write_sync(iocb, ret);
+ }
+ return written ? written : ret;
}
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
@@ -733,7 +871,6 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
struct gfs2_inode *ip = GFS2_I(inode);
loff_t end = offset + len;
struct buffer_head *dibh;
- struct iomap iomap = { };
int error;
error = gfs2_meta_inode_buffer(ip, &dibh);
@@ -749,12 +886,14 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
}
while (offset < end) {
+ struct iomap iomap = { };
+
error = gfs2_iomap_get_alloc(inode, offset, end - offset,
&iomap);
if (error)
goto out;
offset = iomap.offset + iomap.length;
- if (iomap.type != IOMAP_HOLE)
+ if (!(iomap.flags & IOMAP_F_NEW))
continue;
error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
iomap.length >> inode->i_blkbits,
@@ -1125,7 +1264,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
const struct file_operations gfs2_file_fops = {
.llseek = gfs2_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = gfs2_file_read_iter,
.write_iter = gfs2_file_write_iter,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
@@ -1155,7 +1294,7 @@ const struct file_operations gfs2_dir_fops = {
const struct file_operations gfs2_file_fops_nolock = {
.llseek = gfs2_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = gfs2_file_read_iter,
.write_iter = gfs2_file_write_iter,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index d2ad817e089f..b96d39c28e17 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -65,6 +65,27 @@ struct gfs2_log_operations {
#define GBF_FULL 1
+/*
+ * Clone bitmaps (bi_clone):
+ *
+ * - When a block is freed, we remember the previous state of the block in the
+ * clone bitmap, and only mark the block as free in the real bitmap.
+ *
+ * - When looking for a block to allocate, we check for a free block in the
+ * clone bitmap, and if no clone bitmap exists, in the real bitmap.
+ *
+ * - For allocating a block, we mark it as allocated in the real bitmap, and if
+ * a clone bitmap exists, also in the clone bitmap.
+ *
+ * - At the end of a log_flush, we copy the real bitmap into the clone bitmap
+ * to make the clone bitmap reflect the current allocation state.
+ * (Alternatively, we could remove the clone bitmap.)
+ *
+ * The clone bitmaps are in-core only, and are never written to disk.
+ *
+ * These steps ensure that blocks which have been freed in a transaction cannot
+ * be reallocated in that same transaction.
+ */
struct gfs2_bitmap {
struct buffer_head *bi_bh;
char *bi_clone;
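The rules above are easiest to see as state transitions on a single block; a standalone sketch (the demo_* names are hypothetical, not gfs2 code):

	#include <stdbool.h>

	struct demo_blk {
		bool real_free;		/* state in the real bitmap */
		bool clone_free;	/* remembered state in the clone bitmap */
		bool has_clone;
	};

	static void demo_free_block(struct demo_blk *b)
	{
		if (!b->has_clone) {
			b->clone_free = b->real_free;	/* remember old state */
			b->has_clone = true;
		}
		b->real_free = true;	/* mark free in the real bitmap only */
	}

	static void demo_alloc_block(struct demo_blk *b)
	{
		b->real_free = false;	/* mark allocated in the real bitmap */
		if (b->has_clone)
			b->clone_free = false;	/* ... and in the clone */
	}

	static bool demo_block_allocatable(const struct demo_blk *b)
	{
		/* allocation consults the clone bitmap when one exists */
		return b->has_clone ? b->clone_free : b->real_free;
	}

	static void demo_log_flush(struct demo_blk *b)
	{
		if (b->has_clone)
			b->clone_free = b->real_free;	/* resync the clone */
	}

A block freed inside a transaction keeps clone_free == false, so demo_block_allocatable() rejects it until the log flush resyncs the clone, which is exactly the reuse guarantee described above.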
@@ -295,7 +316,6 @@ struct gfs2_blkreserv {
struct rb_node rs_node; /* link to other block reservations */
struct gfs2_rbm rs_rbm; /* Start of reservation */
u32 rs_free; /* how many blocks are still free */
- u64 rs_inum; /* Inode number for reservation */
};
/*
@@ -398,7 +418,6 @@ struct gfs2_inode {
struct gfs2_holder i_gh; /* for prepare/commit_write only */
struct gfs2_qadata *i_qadata; /* quota allocation data */
struct gfs2_blkreserv i_res; /* rgrp multi-block reservation */
- struct gfs2_rgrpd *i_rgd;
u64 i_goal; /* goal block for allocations */
struct rw_semaphore i_rw_mutex;
struct list_head i_ordered;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index feda55f67050..648f0ca1ad57 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -580,7 +580,7 @@ static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
struct file *file,
umode_t mode, dev_t dev, const char *symname,
- unsigned int size, int excl, int *opened)
+ unsigned int size, int excl)
{
const struct qstr *name = &dentry->d_name;
struct posix_acl *default_acl, *acl;
@@ -626,7 +626,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = 0;
if (file) {
if (S_ISREG(inode->i_mode))
- error = finish_open(file, dentry, gfs2_open_common, opened);
+ error = finish_open(file, dentry, gfs2_open_common);
else
error = finish_no_open(file, NULL);
}
@@ -767,8 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
mark_inode_dirty(inode);
d_instantiate(dentry, inode);
if (file) {
- *opened |= FILE_CREATED;
- error = finish_open(file, dentry, gfs2_open_common, opened);
+ file->f_mode |= FMODE_CREATED;
+ error = finish_open(file, dentry, gfs2_open_common);
}
gfs2_glock_dq_uninit(ghs);
gfs2_glock_dq_uninit(ghs + 1);
@@ -822,7 +822,7 @@ fail:
static int gfs2_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
- return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl);
}
/**
@@ -830,14 +830,13 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry,
* @dir: The directory inode
* @dentry: The dentry of the new inode
* @file: File to be opened
- * @opened: atomic_open flags
*
*
* Returns: errno
*/
static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
- struct file *file, int *opened)
+ struct file *file)
{
struct inode *inode;
struct dentry *d;
@@ -866,7 +865,7 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
return d;
}
if (file && S_ISREG(inode->i_mode))
- error = finish_open(file, dentry, gfs2_open_common, opened);
+ error = finish_open(file, dentry, gfs2_open_common);
gfs2_glock_dq_uninit(&gh);
if (error) {
@@ -879,7 +878,7 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
unsigned flags)
{
- return __gfs2_lookup(dir, dentry, NULL, NULL);
+ return __gfs2_lookup(dir, dentry, NULL);
}
/**
@@ -1189,7 +1188,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
if (size >= gfs2_max_stuffed_size(GFS2_I(dir)))
return -ENAMETOOLONG;
- return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0);
}
/**
@@ -1204,7 +1203,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
- return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0);
}
/**
@@ -1219,7 +1218,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t dev)
{
- return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0, NULL);
+ return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0);
}
/**
@@ -1229,14 +1228,13 @@ static int gfs2_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
* @file: The proposed new struct file
* @flags: open flags
* @mode: File mode
- * @opened: Flag to say whether the file has been opened or not
*
* Returns: error code or 0 for success
*/
static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
struct dentry *d;
bool excl = !!(flags & O_EXCL);
@@ -1244,13 +1242,13 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
if (!d_in_lookup(dentry))
goto skip_lookup;
- d = __gfs2_lookup(dir, dentry, file, opened);
+ d = __gfs2_lookup(dir, dentry, file);
if (IS_ERR(d))
return PTR_ERR(d);
if (d != NULL)
dentry = d;
if (d_really_is_positive(dentry)) {
- if (!(*opened & FILE_OPENED))
+ if (!(file->f_mode & FMODE_OPENED))
return finish_no_open(file, d);
dput(d);
return 0;
@@ -1262,7 +1260,7 @@ skip_lookup:
if (!(flags & O_CREAT))
return -ENOENT;
- return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl, opened);
+ return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl);
}
/*
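For readers following the ->atomic_open() conversion above: the old convention reported creation through a separate int *opened out-parameter (FILE_CREATED/FILE_OPENED), while the new one records the same facts as bits in file->f_mode (FMODE_CREATED/FMODE_OPENED). A minimal userspace sketch of the new convention follows; the struct and flag values are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-ins; the real flags live in include/linux/fs.h. */
#define FMODE_OPENED  0x1
#define FMODE_CREATED 0x2

struct file { unsigned int f_mode; };

/* Model of finish_open(): marks the file as opened. */
static int finish_open(struct file *file)
{
	file->f_mode |= FMODE_OPENED;
	return 0;
}

/* Model of an ->atomic_open() create path under the new convention:
 * creation is recorded in f_mode rather than via an int *opened. */
static int create_and_open(struct file *file)
{
	file->f_mode |= FMODE_CREATED;
	return finish_open(file);
}

int main(void)
{
	struct file f = { 0 };

	create_and_open(&f);
	printf("opened=%d created=%d\n",
	       !!(f.f_mode & FMODE_OPENED), !!(f.f_mode & FMODE_CREATED));
	return 0;
}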
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 006c6164f759..ac7caa267ed6 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -821,6 +821,13 @@ restart:
goto fail;
}
+ /*
+ * If we're a spectator, we don't want to take the lock in EX because
+ * we cannot carry out the first-mount responsibility it implies:
+ * recovery.
+ */
+ if (sdp->sd_args.ar_spectator)
+ goto locks_done;
+
error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
if (!error) {
mounted_mode = DLM_LOCK_EX;
@@ -896,9 +903,16 @@ locks_done:
if (lvb_gen < mount_gen) {
/* wait for mounted nodes to update control_lock lvb to our
generation, which might include new recovery bits set */
- fs_info(sdp, "control_mount wait1 block %u start %u mount %u "
- "lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
- lvb_gen, ls->ls_recover_flags);
+ if (sdp->sd_args.ar_spectator) {
+ fs_info(sdp, "Recovery is required. Waiting for a "
+ "non-spectator to mount.\n");
+ msleep_interruptible(1000);
+ } else {
+ fs_info(sdp, "control_mount wait1 block %u start %u "
+ "mount %u lvb %u flags %lx\n", block_gen,
+ start_gen, mount_gen, lvb_gen,
+ ls->ls_recover_flags);
+ }
spin_unlock(&ls->ls_recover_spin);
goto restart;
}
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 0248835625f1..ee20ea42e7b5 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -92,7 +92,8 @@ static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
struct writeback_control *wbc,
- struct gfs2_trans *tr)
+ struct gfs2_trans *tr,
+ bool *withdraw)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
@@ -107,8 +108,10 @@ __acquires(&sdp->sd_ail_lock)
gfs2_assert(sdp, bd->bd_tr == tr);
if (!buffer_busy(bh)) {
- if (!buffer_uptodate(bh))
+ if (!buffer_uptodate(bh)) {
gfs2_io_error_bh(sdp, bh);
+ *withdraw = true;
+ }
list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
continue;
}
@@ -148,6 +151,7 @@ void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
struct list_head *head = &sdp->sd_ail1_list;
struct gfs2_trans *tr;
struct blk_plug plug;
+ bool withdraw = false;
trace_gfs2_ail_flush(sdp, wbc, 1);
blk_start_plug(&plug);
@@ -156,11 +160,13 @@ restart:
list_for_each_entry_reverse(tr, head, tr_list) {
if (wbc->nr_to_write <= 0)
break;
- if (gfs2_ail1_start_one(sdp, wbc, tr))
+ if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
goto restart;
}
spin_unlock(&sdp->sd_ail_lock);
blk_finish_plug(&plug);
+ if (withdraw)
+ gfs2_lm_withdraw(sdp, NULL);
trace_gfs2_ail_flush(sdp, wbc, 0);
}
@@ -188,7 +194,8 @@ static void gfs2_ail1_start(struct gfs2_sbd *sdp)
*
*/
-static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ bool *withdraw)
{
struct gfs2_bufdata *bd, *s;
struct buffer_head *bh;
@@ -199,11 +206,12 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_assert(sdp, bd->bd_tr == tr);
if (buffer_busy(bh))
continue;
- if (!buffer_uptodate(bh))
+ if (!buffer_uptodate(bh)) {
gfs2_io_error_bh(sdp, bh);
+ *withdraw = true;
+ }
list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
}
-
}
/**
@@ -218,10 +226,11 @@ static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
struct gfs2_trans *tr, *s;
int oldest_tr = 1;
int ret;
+ bool withdraw = false;
spin_lock(&sdp->sd_ail_lock);
list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
- gfs2_ail1_empty_one(sdp, tr);
+ gfs2_ail1_empty_one(sdp, tr, &withdraw);
if (list_empty(&tr->tr_ail1_list) && oldest_tr)
list_move(&tr->tr_list, &sdp->sd_ail2_list);
else
@@ -230,6 +239,9 @@ static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
ret = list_empty(&sdp->sd_ail1_list);
spin_unlock(&sdp->sd_ail_lock);
+ if (withdraw)
+ gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");
+
return ret;
}
@@ -689,7 +701,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
hash = ~crc32(~0, lh, LH_V1_SIZE);
lh->lh_hash = cpu_to_be32(hash);
- tv = current_kernel_time64();
+ ktime_get_coarse_real_ts64(&tv);
lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
lh->lh_sec = cpu_to_be64(tv.tv_sec);
addr = gfs2_log_bmap(sdp);
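The AIL changes above follow one pattern throughout: gfs2_lm_withdraw() may sleep, so it must not be called with sd_ail_lock held; the list walkers merely record the I/O error in a bool and the caller withdraws after the spinlock is dropped. A hedged userspace sketch of the same defer-the-heavy-action pattern, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int items[] = { 0, -5, 3 };   /* negative = I/O error (illustrative) */

/* Heavyweight error handling that may block; must run unlocked. */
static void withdraw(void)
{
	printf("withdrawing after I/O error\n");
}

static void scan_list(void)
{
	bool failed = false;
	size_t i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		if (items[i] < 0)
			failed = true;   /* just note it; keep walking */
	}
	pthread_mutex_unlock(&lock);

	if (failed)          /* act only after the lock is dropped */
		withdraw();
}

int main(void)
{
	scan_list();
	return 0;
}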
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 4d6567990baf..f2567f958d00 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -49,7 +49,7 @@ void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
if (test_set_buffer_pinned(bh))
gfs2_assert_withdraw(sdp, 0);
if (!buffer_uptodate(bh))
- gfs2_io_error_bh(sdp, bh);
+ gfs2_io_error_bh_wd(sdp, bh);
bd = bh->b_private;
/* If this buffer is in the AIL and it has already been written
* to in-place disk block, remove it from the AIL.
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 52de1036d9f9..be9c0bf697fe 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -293,7 +293,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
if (unlikely(!buffer_uptodate(bh))) {
struct gfs2_trans *tr = current->journal_info;
if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
- gfs2_io_error_bh(sdp, bh);
+ gfs2_io_error_bh_wd(sdp, bh);
brelse(bh);
*bhp = NULL;
return -EIO;
@@ -320,7 +320,7 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
if (!buffer_uptodate(bh)) {
struct gfs2_trans *tr = current->journal_info;
if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
- gfs2_io_error_bh(sdp, bh);
+ gfs2_io_error_bh_wd(sdp, bh);
return -EIO;
}
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index d8b622c375ab..0f501f938d1c 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -413,12 +413,13 @@ void gfs2_recover_func(struct work_struct *work)
ktime_t t_start, t_jlck, t_jhd, t_tlck, t_rep;
int ro = 0;
unsigned int pass;
- int error;
+ int error = 0;
int jlocked = 0;
t_start = ktime_get();
- if (sdp->sd_args.ar_spectator ||
- (jd->jd_jid != sdp->sd_lockstruct.ls_jid)) {
+ if (sdp->sd_args.ar_spectator)
+ goto fail;
+ if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
jd->jd_jid);
jlocked = 1;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 33abcf29bc05..1ad3256b9cbc 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -123,17 +123,26 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
/**
* gfs2_testbit - test a bit in the bitmaps
* @rbm: The bit to test
+ * @use_clone: If true, test the clone bitmap, not the official bitmap.
+ *
+ * Some callers like gfs2_unaligned_extlen need to test the clone bitmaps,
+ * not the "real" bitmaps, to avoid allocating recently freed blocks.
*
* Returns: The two-bit block state of the requested bit
*/
-static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
+static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm, bool use_clone)
{
struct gfs2_bitmap *bi = rbm_bi(rbm);
- const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
+ const u8 *buffer;
const u8 *byte;
unsigned int bit;
+ if (use_clone && bi->bi_clone)
+ buffer = bi->bi_clone;
+ else
+ buffer = bi->bi_bh->b_data;
+ buffer += bi->bi_offset;
byte = buffer + (rbm->offset / GFS2_NBBY);
bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
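To make the clone-bitmap lookup above concrete: GFS2 packs four block states per byte (GFS2_NBBY == 4) at two bits each (GFS2_BIT_SIZE == 2), and the clone bitmap is simply a second buffer consulted when use_clone is set and bi_clone exists. A standalone model of the two-bit extraction, with an illustrative harness:

#include <stdio.h>

#define GFS2_NBBY     4   /* block states per byte */
#define GFS2_BIT_SIZE 2   /* bits per block state */

/* Return the two-bit state of block 'offset' in 'buffer'. */
static unsigned char testbit(const unsigned char *buffer, unsigned int offset)
{
	const unsigned char *byte = buffer + offset / GFS2_NBBY;
	unsigned int bit = (offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (*byte >> bit) & 0x3;
}

int main(void)
{
	/* real bitmap: block 1 used, block 2 already freed;
	 * clone bitmap: block 2 still marked used (recently freed) */
	unsigned char real[1]  = { 0x04 };  /* bits: 00 00 01 00 */
	unsigned char clone[1] = { 0x14 };  /* bits: 00 01 01 00 */

	printf("block 2: real=%u clone=%u\n",
	       testbit(real, 2), testbit(clone, 2));
	return 0;
}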
@@ -322,7 +331,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
u8 res;
for (n = 0; n < n_unaligned; n++) {
- res = gfs2_testbit(rbm);
+ res = gfs2_testbit(rbm, true);
if (res != GFS2_BLKST_FREE)
return true;
(*len)--;
@@ -607,8 +616,10 @@ int gfs2_rsqa_alloc(struct gfs2_inode *ip)
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
+ struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);
+
gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
- (unsigned long long)rs->rs_inum,
+ (unsigned long long)ip->i_no_addr,
(unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
rs->rs_rbm.offset, rs->rs_free);
}
@@ -1051,6 +1062,18 @@ static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
/* rd_data0, rd_data and rd_bitbytes already set from rindex */
}
+static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
+{
+ const struct gfs2_rgrp *str = buf;
+
+ rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
+ rgl->rl_flags = str->rg_flags;
+ rgl->rl_free = str->rg_free;
+ rgl->rl_dinodes = str->rg_dinodes;
+ rgl->rl_igeneration = str->rg_igeneration;
+ rgl->__pad = 0UL;
+}
+
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
@@ -1073,6 +1096,7 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
str->rg_crc = cpu_to_be32(crc);
memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
+ gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, buf);
}
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
@@ -1087,25 +1111,6 @@ static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
return 1;
}
-static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
-{
- const struct gfs2_rgrp *str = buf;
-
- rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
- rgl->rl_flags = str->rg_flags;
- rgl->rl_free = str->rg_free;
- rgl->rl_dinodes = str->rg_dinodes;
- rgl->rl_igeneration = str->rg_igeneration;
- rgl->__pad = 0UL;
-}
-
-static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
-{
- struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
- u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
- rgl->rl_unlinked = cpu_to_be32(unlinked);
-}
-
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
struct gfs2_bitmap *bi;
@@ -1424,7 +1429,6 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
rgd->rd_flags |= GFS2_RGF_TRIMMED;
gfs2_trans_add_meta(rgd->rd_gl, bh);
gfs2_rgrp_out(rgd, bh->b_data);
- gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
gfs2_trans_end(sdp);
}
}
@@ -1488,6 +1492,34 @@ static void rs_insert(struct gfs2_inode *ip)
}
/**
+ * rgd_free - return the number of free blocks we can allocate
+ * @rgd: the resource group
+ * @rs: the block reservation we currently hold on this rgrp
+ *
+ * This function returns the number of free blocks for an rgrp.
+ * That's the clone-free blocks (blocks that are free, not including
+ * those still being used for unlinked files that haven't been deleted).
+ *
+ * It also subtracts any blocks reserved by someone else, but does not
+ * include free blocks that are still part of our current reservation,
+ * because obviously we can (and will) allocate them.
+ */
+static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
+{
+ u32 tot_reserved, tot_free;
+
+ if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
+ return 0;
+ tot_reserved = rgd->rd_reserved - rs->rs_free;
+
+ if (rgd->rd_free_clone < tot_reserved)
+ tot_reserved = 0;
+
+ tot_free = rgd->rd_free_clone - tot_reserved;
+
+ return tot_free;
+}
+
+/**
* rg_mblk_search - find a group of multiple free blocks to form a reservation
* @rgd: the resource group descriptor
* @ip: pointer to the inode for which we're reserving blocks
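A worked example of the rgd_free() computation introduced above, under assumed numbers: with rd_free_clone = 100, rd_reserved = 30 and rs_free = 10 (our own share of the reserved count), other reservations account for 30 - 10 = 20 blocks, so 100 - 20 = 80 blocks are allocatable; if the other reservations exceeded the clone-free count, tot_reserved is clamped to 0 and the full clone-free count is reported. A standalone check of the arithmetic:

#include <stdio.h>

typedef unsigned int u32;

/* Standalone copy of the rgd_free() arithmetic, with plain integers
 * in place of the rgrp/reservation structures. */
static u32 rgd_free(u32 rd_free_clone, u32 rd_reserved, u32 rs_free)
{
	u32 tot_reserved;

	if (rd_reserved < rs_free)   /* mirrors the WARN_ON_ONCE() above */
		return 0;
	tot_reserved = rd_reserved - rs_free;   /* reserved by others */

	if (rd_free_clone < tot_reserved)
		tot_reserved = 0;

	return rd_free_clone - tot_reserved;
}

int main(void)
{
	printf("%u\n", rgd_free(100, 30, 10));  /* 100 - (30 - 10) = 80 */
	return 0;
}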
@@ -1502,7 +1534,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
u64 goal;
struct gfs2_blkreserv *rs = &ip->i_res;
u32 extlen;
- u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
+ u32 free_blocks = rgd_free(rgd, rs);
int ret;
struct inode *inode = &ip->i_inode;
@@ -1528,7 +1560,6 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
if (ret == 0) {
rs->rs_rbm = rbm;
rs->rs_free = extlen;
- rs->rs_inum = ip->i_no_addr;
rs_insert(ip);
} else {
if (goal == rgd->rd_last_alloc + rgd->rd_data0)
@@ -1686,7 +1717,8 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
while(1) {
bi = rbm_bi(rbm);
- if (test_bit(GBF_FULL, &bi->bi_flags) &&
+ if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
+ test_bit(GBF_FULL, &bi->bi_flags) &&
(state == GFS2_BLKST_FREE))
goto next_bitmap;
@@ -1983,7 +2015,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
int error = 0, rg_locked, flags = 0;
u64 last_unlinked = NO_BLOCK;
int loops = 0;
- u32 skip = 0;
+ u32 free_blocks, skip = 0;
if (sdp->sd_args.ar_rgrplvb)
flags |= GL_SKIP;
@@ -1991,8 +2023,9 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
return -EINVAL;
if (gfs2_rs_active(rs)) {
begin = rs->rs_rbm.rgd;
- } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
- rs->rs_rbm.rgd = begin = ip->i_rgd;
+ } else if (rs->rs_rbm.rgd &&
+ rgrp_contains_block(rs->rs_rbm.rgd, ip->i_goal)) {
+ begin = rs->rs_rbm.rgd;
} else {
check_and_update_goal(ip);
rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
@@ -2053,11 +2086,11 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
goto check_rgrp;
/* If rgrp has enough free space, use it */
- if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
+ free_blocks = rgd_free(rs->rs_rbm.rgd, rs);
+ if (free_blocks >= ap->target ||
(loops == 2 && ap->min_target &&
- rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
- ip->i_rgd = rs->rs_rbm.rgd;
- ap->allowed = ip->i_rgd->rd_free_clone;
+ free_blocks >= ap->min_target)) {
+ ap->allowed = free_blocks;
return 0;
}
check_rgrp:
@@ -2116,26 +2149,6 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
}
/**
- * gfs2_get_block_type - Check a block in a RG is of given type
- * @rgd: the resource group holding the block
- * @block: the block number
- *
- * Returns: The block type (GFS2_BLKST_*)
- */
-
-static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
-{
- struct gfs2_rbm rbm = { .rgd = rgd, };
- int ret;
-
- ret = gfs2_rbm_from_block(&rbm, block);
- WARN_ON_ONCE(ret != 0);
-
- return gfs2_testbit(&rbm);
-}
-
-
-/**
* gfs2_alloc_extent - allocate an extent from a given bitmap
* @rbm: the resource group information
* @dinode: TRUE if the first block we allocate is for a dinode
@@ -2159,7 +2172,7 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
block++;
while (*n < elen) {
ret = gfs2_rbm_from_block(&pos, block);
- if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
+ if (ret || gfs2_testbit(&pos, true) != GFS2_BLKST_FREE)
break;
gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
gfs2_setbit(&pos, true, GFS2_BLKST_USED);
@@ -2335,7 +2348,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
- struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
+ struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, };
unsigned int ndata;
u64 block; /* block, within the file system scope */
int error;
@@ -2393,7 +2406,6 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
- gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
if (dinode)
@@ -2434,7 +2446,6 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
/* Directories keep their data in the metadata address space */
if (meta || ip->i_depth)
@@ -2471,8 +2482,7 @@ void gfs2_unlink_di(struct inode *inode)
trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
- update_rgrp_lvb_unlinked(rgd, 1);
+ be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
}
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
@@ -2492,8 +2502,7 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
- gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
- update_rgrp_lvb_unlinked(rgd, -1);
+ be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);
gfs2_statfs_change(sdp, 0, +1, -1);
trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
@@ -2516,6 +2525,7 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
struct gfs2_rgrpd *rgd;
struct gfs2_holder rgd_gh;
+ struct gfs2_rbm rbm;
int error = -EINVAL;
rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
@@ -2526,7 +2536,11 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
if (error)
goto fail;
- if (gfs2_get_block_type(rgd, no_addr) != type)
+ rbm.rgd = rgd;
+ error = gfs2_rbm_from_block(&rbm, no_addr);
+ WARN_ON_ONCE(error != 0);
+
+ if (gfs2_testbit(&rbm, false) != type)
error = -ESTALE;
gfs2_glock_dq_uninit(&rgd_gh);
@@ -2558,19 +2572,34 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
return;
- if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
- rgd = ip->i_rgd;
- else
+ /*
+ * Keep the most recently used resource group at the end of the
+ * array; it is the first one checked on the next call.
+ */
+
+ if (rlist->rl_rgrps) {
+ rgd = rlist->rl_rgd[rlist->rl_rgrps - 1];
+ if (rgrp_contains_block(rgd, block))
+ return;
rgd = gfs2_blk2rgrpd(sdp, block, 1);
+ } else {
+ rgd = ip->i_res.rs_rbm.rgd;
+ if (!rgd || !rgrp_contains_block(rgd, block))
+ rgd = gfs2_blk2rgrpd(sdp, block, 1);
+ }
+
if (!rgd) {
- fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
+ fs_err(sdp, "rlist_add: no rgrp for block %llu\n",
+ (unsigned long long)block);
return;
}
- ip->i_rgd = rgd;
- for (x = 0; x < rlist->rl_rgrps; x++)
- if (rlist->rl_rgd[x] == rgd)
+ for (x = 0; x < rlist->rl_rgrps; x++) {
+ if (rlist->rl_rgd[x] == rgd) {
+ swap(rlist->rl_rgd[x],
+ rlist->rl_rgd[rlist->rl_rgrps - 1]);
return;
+ }
+ }
if (rlist->rl_rgrps == rlist->rl_space) {
new_space = rlist->rl_space + 10;
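The rlist_add() rework above drops the per-inode i_rgd cache in favour of a small most-recently-used scheme: the last-used rgrp sits at the tail of rl_rgd, is checked first, and any hit elsewhere in the array is swapped to the tail. A minimal standalone sketch of that bookkeeping on an int array:

#include <stdio.h>

#define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)

/* Look up 'val'; on a hit, move it to the tail (MRU position),
 * otherwise append it.  Returns 1 on a hit, 0 on an insert. */
static int mru_add(int *arr, int *n, int val)
{
	int i;

	if (*n && arr[*n - 1] == val)   /* fast path: tail is MRU */
		return 1;
	for (i = 0; i < *n; i++) {
		if (arr[i] == val) {
			SWAP(arr[i], arr[*n - 1]);
			return 1;
		}
	}
	arr[(*n)++] = val;
	return 0;
}

int main(void)
{
	int arr[8], n = 0, i;

	mru_add(arr, &n, 1);
	mru_add(arr, &n, 2);
	mru_add(arr, &n, 3);
	mru_add(arr, &n, 1);   /* hit: swapped to the tail */
	for (i = 0; i < n; i++)
		printf("%d ", arr[i]);
	printf("\n");           /* prints: 3 2 1 */
	return 0;
}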
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index af0d5b01cf0b..c212893534ed 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1729,7 +1729,6 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
if (ip) {
ip->i_flags = 0;
ip->i_gl = NULL;
- ip->i_rgd = NULL;
memset(&ip->i_res, 0, sizeof(ip->i_res));
RB_CLEAR_NODE(&ip->i_res.rs_node);
ip->i_rahead = 0;
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index c191fa58a1df..1787d295834e 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -429,11 +429,18 @@ int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
spin_lock(&sdp->sd_jindex_spin);
rv = -EBUSY;
- if (sdp->sd_jdesc->jd_jid == jid)
+ /*
+ * If we're a spectator, we use journal0, but it's not really ours.
+ * So we need to wait for its recovery too. If we skip it we'd never
+ * queue work to the recovery workqueue, and so its completion would
+ * never clear the DFL_BLOCK_LOCKS flag, so all our locks would
+ * permanently stop working.
+ */
+ if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
goto out;
rv = -ENOENT;
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
- if (jd->jd_jid != jid)
+ if (jd->jd_jid != jid && !sdp->sd_args.ar_spectator)
continue;
rv = gfs2_recover_journal(jd, false);
break;
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index cb10b95efe0f..e0025258107a 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -606,7 +606,8 @@ TRACE_EVENT(gfs2_rs,
__entry->rd_addr = rs->rs_rbm.rgd->rd_addr;
__entry->rd_free_clone = rs->rs_rbm.rgd->rd_free_clone;
__entry->rd_reserved = rs->rs_rbm.rgd->rd_reserved;
- __entry->inum = rs->rs_inum;
+ __entry->inum = container_of(rs, struct gfs2_inode,
+ i_res)->i_no_addr;
__entry->start = gfs2_rbm_to_block(&rs->rs_rbm);
__entry->free = rs->rs_free;
__entry->func = func;
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index 1e6e7da25a17..ad70087d0597 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -30,9 +30,11 @@ struct gfs2_glock;
* block, or all of the blocks in the rg, whichever is smaller */
static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested)
{
- if (requested < ip->i_rgd->rd_length)
+ struct gfs2_rgrpd *rgd = ip->i_res.rs_rbm.rgd;
+
+ if (requested < rgd->rd_length)
return requested + 1;
- return ip->i_rgd->rd_length;
+ return rgd->rd_length;
}
extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 763d659db91b..59c811de0dc7 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -46,14 +46,16 @@ int gfs2_lm_withdraw(struct gfs2_sbd *sdp, const char *fmt, ...)
test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags))
return 0;
- va_start(args, fmt);
+ if (fmt) {
+ va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
+ vaf.fmt = fmt;
+ vaf.va = &args;
- fs_err(sdp, "%pV", &vaf);
+ fs_err(sdp, "%pV", &vaf);
- va_end(args);
+ va_end(args);
+ }
if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
fs_err(sdp, "about to withdraw this file system\n");
@@ -246,21 +248,21 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
}
/**
- * gfs2_io_error_bh_i - Flag a buffer I/O error and withdraw
- * Returns: -1 if this call withdrew the machine,
- * 0 if it was already withdrawn
+ * gfs2_io_error_bh_i - Flag a buffer I/O error
+ * @withdraw: withdraw the filesystem
*/
-int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *function, char *file, unsigned int line)
+void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ const char *function, char *file, unsigned int line,
+ bool withdraw)
{
- int rv;
- rv = gfs2_lm_withdraw(sdp,
- "fatal: I/O error\n"
- " block = %llu\n"
- " function = %s, file = %s, line = %u\n",
- (unsigned long long)bh->b_blocknr,
- function, file, line);
- return rv;
+ fs_err(sdp,
+ "fatal: I/O error\n"
+ " block = %llu\n"
+ " function = %s, file = %s, line = %u\n",
+ (unsigned long long)bh->b_blocknr,
+ function, file, line);
+ if (withdraw)
+ gfs2_lm_withdraw(sdp, NULL);
}
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 3926f95a6eb7..96ac4aba4738 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -136,11 +136,15 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__);
-int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *function, char *file, unsigned int line);
+void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ const char *function, char *file, unsigned int line,
+ bool withdraw);
+
+#define gfs2_io_error_bh_wd(sdp, bh) \
+gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, true);
#define gfs2_io_error_bh(sdp, bh) \
-gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__);
+gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, false);
extern struct kmem_cache *gfs2_glock_cachep;
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index f2bce1e0f6fb..38515988aaf7 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -343,60 +343,45 @@ struct ea_list {
unsigned int ei_size;
};
-static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
-{
- switch (ea->ea_type) {
- case GFS2_EATYPE_USR:
- return 5 + ea->ea_name_len + 1;
- case GFS2_EATYPE_SYS:
- return 7 + ea->ea_name_len + 1;
- case GFS2_EATYPE_SECURITY:
- return 9 + ea->ea_name_len + 1;
- default:
- return 0;
- }
-}
-
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
void *private)
{
struct ea_list *ei = private;
struct gfs2_ea_request *er = ei->ei_er;
- unsigned int ea_size = gfs2_ea_strlen(ea);
+ unsigned int ea_size;
+ char *prefix;
+ unsigned int l;
if (ea->ea_type == GFS2_EATYPE_UNUSED)
return 0;
- if (er->er_data_len) {
- char *prefix = NULL;
- unsigned int l = 0;
- char c = 0;
+ switch (ea->ea_type) {
+ case GFS2_EATYPE_USR:
+ prefix = "user.";
+ l = 5;
+ break;
+ case GFS2_EATYPE_SYS:
+ prefix = "system.";
+ l = 7;
+ break;
+ case GFS2_EATYPE_SECURITY:
+ prefix = "security.";
+ l = 9;
+ break;
+ default:
+ BUG();
+ }
+ ea_size = l + ea->ea_name_len + 1;
+ if (er->er_data_len) {
if (ei->ei_size + ea_size > er->er_data_len)
return -ERANGE;
- switch (ea->ea_type) {
- case GFS2_EATYPE_USR:
- prefix = "user.";
- l = 5;
- break;
- case GFS2_EATYPE_SYS:
- prefix = "system.";
- l = 7;
- break;
- case GFS2_EATYPE_SECURITY:
- prefix = "security.";
- l = 9;
- break;
- }
-
- BUG_ON(l == 0);
-
memcpy(er->er_data + ei->ei_size, prefix, l);
memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
ea->ea_name_len);
- memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
+ er->er_data[ei->ei_size + ea_size - 1] = 0;
}
ei->ei_size += ea_size;
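The rewritten ea_list_i() above lays each listxattr entry out as prefix + name + NUL at the current buffer offset, failing with -ERANGE if the entry would not fit. A standalone sketch of that layout (the prefixes hardcoded here match the GFS2 EA types):

#include <stdio.h>
#include <string.h>

/* Append "prefix" + "name" + NUL at offset *pos in buf;
 * returns -1 if the entry would overflow (the -ERANGE case). */
static int emit_entry(char *buf, size_t buflen, size_t *pos,
		      const char *prefix, const char *name)
{
	size_t l = strlen(prefix);
	size_t ea_size = l + strlen(name) + 1;   /* incl. trailing NUL */

	if (*pos + ea_size > buflen)
		return -1;
	memcpy(buf + *pos, prefix, l);
	memcpy(buf + *pos + l, name, strlen(name));
	buf[*pos + ea_size - 1] = 0;
	*pos += ea_size;
	return 0;
}

int main(void)
{
	char buf[64];
	size_t pos = 0, i;

	emit_entry(buf, sizeof(buf), &pos, "user.", "foo");
	emit_entry(buf, sizeof(buf), &pos, "security.", "selinux");
	/* the buffer now holds consecutive NUL-terminated names */
	for (i = 0; i < pos; i += strlen(buf + i) + 1)
		printf("%s\n", buf + i);
	return 0;
}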
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index ad04a5741016..9a8772465a90 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -75,9 +75,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
if (!fd->bnode) {
if (!tree->root)
hfs_btree_inc_height(tree);
- fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
- if (IS_ERR(fd->bnode))
- return PTR_ERR(fd->bnode);
+ node = hfs_bnode_find(tree, tree->leaf_head);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+ fd->bnode = node;
fd->record = -1;
}
new_node = NULL;
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 2a16111d312f..a2dfa1b2a89c 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -541,7 +541,7 @@ static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
HFS_I(inode)->rsrc_inode = dir;
HFS_I(dir)->rsrc_inode = inode;
igrab(dir);
- hlist_add_fake(&inode->i_hash);
+ inode_fake_hash(inode);
mark_inode_dirty(inode);
dont_mount(dentry);
out:
diff --git a/fs/hfsplus/Kconfig b/fs/hfsplus/Kconfig
index 7cc8b4acf66a..a63371815aab 100644
--- a/fs/hfsplus/Kconfig
+++ b/fs/hfsplus/Kconfig
@@ -11,18 +11,3 @@ config HFSPLUS_FS
MacOS 8. It includes all Mac specific filesystem data such as
data forks and creator codes, but it also has several UNIX
style features such as file ownership and permissions.
-
-config HFSPLUS_FS_POSIX_ACL
- bool "HFS+ POSIX Access Control Lists"
- depends on HFSPLUS_FS
- select FS_POSIX_ACL
- help
- POSIX Access Control Lists (ACLs) support permissions for users and
- groups beyond the owner/group/world scheme.
-
- It needs to understand that POSIX ACLs are treated only under
- Linux. POSIX ACLs doesn't mean something under Mac OS X.
- Mac OS X beginning with version 10.4 ("Tiger") support NFSv4 ACLs,
- which are part of the NFSv4 standard.
-
- If you don't know what Access Control Lists are, say N
diff --git a/fs/hfsplus/Makefile b/fs/hfsplus/Makefile
index f6a56542f8d7..9ed20e64b983 100644
--- a/fs/hfsplus/Makefile
+++ b/fs/hfsplus/Makefile
@@ -8,5 +8,3 @@ obj-$(CONFIG_HFSPLUS_FS) += hfsplus.o
hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
bnode.o brec.o bfind.o tables.o unicode.o wrapper.o bitmap.o part_tbl.o \
attributes.o xattr.o xattr_user.o xattr_security.o xattr_trusted.o
-
-hfsplus-$(CONFIG_HFSPLUS_FS_POSIX_ACL) += posix_acl.o
diff --git a/fs/hfsplus/acl.h b/fs/hfsplus/acl.h
deleted file mode 100644
index 488c2b75cf41..000000000000
--- a/fs/hfsplus/acl.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/fs/hfsplus/acl.h
- *
- * Vyacheslav Dubeyko <slava@dubeyko.com>
- *
- * Handler for Posix Access Control Lists (ACLs) support.
- */
-
-#include <linux/posix_acl_xattr.h>
-
-#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
-
-/* posix_acl.c */
-struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type);
-int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
- int type);
-extern int hfsplus_init_posix_acl(struct inode *, struct inode *);
-
-#else /* CONFIG_HFSPLUS_FS_POSIX_ACL */
-#define hfsplus_get_posix_acl NULL
-#define hfsplus_set_posix_acl NULL
-
-static inline int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir)
-{
- return 0;
-}
-#endif /* CONFIG_HFSPLUS_FS_POSIX_ACL */
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 808f4d8c859c..ed8eacb34452 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -73,9 +73,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
if (!fd->bnode) {
if (!tree->root)
hfs_btree_inc_height(tree);
- fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
- if (IS_ERR(fd->bnode))
- return PTR_ERR(fd->bnode);
+ node = hfs_bnode_find(tree, tree->leaf_head);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+ fd->bnode = node;
fd->record = -1;
}
new_node = NULL;
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index b5254378f011..f37662675c3a 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -18,7 +18,6 @@
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"
-#include "acl.h"
static inline void hfsplus_instantiate(struct dentry *dentry,
struct inode *inode, u32 cnid)
@@ -78,13 +77,13 @@ again:
cpu_to_be32(HFSP_HARDLINK_TYPE) &&
entry.file.user_info.fdCreator ==
cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
+ HFSPLUS_SB(sb)->hidden_dir &&
(entry.file.create_date ==
HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
create_date ||
entry.file.create_date ==
HFSPLUS_I(d_inode(sb->s_root))->
- create_date) &&
- HFSPLUS_SB(sb)->hidden_dir) {
+ create_date)) {
struct qstr str;
char name[32];
@@ -455,7 +454,7 @@ static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
if (res)
goto out_err;
- res = hfsplus_init_inode_security(inode, dir, &dentry->d_name);
+ res = hfsplus_init_security(inode, dir, &dentry->d_name);
if (res == -EOPNOTSUPP)
res = 0; /* Operation is not supported. */
else if (res) {
@@ -496,7 +495,7 @@ static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
if (res)
goto failed_mknod;
- res = hfsplus_init_inode_security(inode, dir, &dentry->d_name);
+ res = hfsplus_init_security(inode, dir, &dentry->d_name);
if (res == -EOPNOTSUPP)
res = 0; /* Operation is not supported. */
else if (res) {
@@ -567,10 +566,6 @@ const struct inode_operations hfsplus_dir_inode_operations = {
.mknod = hfsplus_mknod,
.rename = hfsplus_rename,
.listxattr = hfsplus_listxattr,
-#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
- .get_acl = hfsplus_get_posix_acl,
- .set_acl = hfsplus_set_posix_acl,
-#endif
};
const struct file_operations hfsplus_dir_operations = {
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index e8770935ce6d..8e0f59767694 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -336,6 +336,9 @@ static int hfsplus_free_extents(struct super_block *sb,
int i;
int err = 0;
+ /* Mapping the allocation file may lock the extent tree */
+ WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock));
+
hfsplus_dump_extent(extent);
for (i = 0; i < 8; extent++, i++) {
count = be32_to_cpu(extent->block_count);
@@ -415,11 +418,13 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid,
if (res)
break;
start = be32_to_cpu(fd.key->ext.start_block);
- hfsplus_free_extents(sb, ext_entry,
- total_blocks - start,
- total_blocks);
hfs_brec_remove(&fd);
+
+ mutex_unlock(&fd.tree->tree_lock);
+ hfsplus_free_extents(sb, ext_entry, total_blocks - start,
+ total_blocks);
total_blocks = start;
+ mutex_lock(&fd.tree->tree_lock);
} while (total_blocks > blocks);
hfs_find_exit(&fd);
@@ -576,15 +581,20 @@ void hfsplus_file_truncate(struct inode *inode)
}
while (1) {
if (alloc_cnt == hip->first_blocks) {
+ mutex_unlock(&fd.tree->tree_lock);
hfsplus_free_extents(sb, hip->first_extents,
alloc_cnt, alloc_cnt - blk_cnt);
hfsplus_dump_extent(hip->first_extents);
hip->first_blocks = blk_cnt;
+ mutex_lock(&fd.tree->tree_lock);
break;
}
res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
if (res)
break;
+ hfs_brec_remove(&fd);
+
+ mutex_unlock(&fd.tree->tree_lock);
start = hip->cached_start;
hfsplus_free_extents(sb, hip->cached_extents,
alloc_cnt - start, alloc_cnt - blk_cnt);
@@ -596,7 +606,7 @@ void hfsplus_file_truncate(struct inode *inode)
alloc_cnt = start;
hip->cached_start = hip->cached_blocks = 0;
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
- hfs_brec_remove(&fd);
+ mutex_lock(&fd.tree->tree_lock);
}
hfs_find_exit(&fd);
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index d9255abafb81..8e039435958a 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -31,7 +31,6 @@
#define DBG_EXTENT 0x00000020
#define DBG_BITMAP 0x00000040
#define DBG_ATTR_MOD 0x00000080
-#define DBG_ACL_MOD 0x00000100
#if 0
#define DBG_MASK (DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index c824f702feec..8e9427a42b81 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -21,7 +21,6 @@
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"
-#include "acl.h"
static int hfsplus_readpage(struct file *file, struct page *page)
{
@@ -267,12 +266,6 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
setattr_copy(inode, attr);
mark_inode_dirty(inode);
- if (attr->ia_valid & ATTR_MODE) {
- error = posix_acl_chmod(inode, inode->i_mode);
- if (unlikely(error))
- return error;
- }
-
return 0;
}
@@ -336,10 +329,6 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
static const struct inode_operations hfsplus_file_inode_operations = {
.setattr = hfsplus_setattr,
.listxattr = hfsplus_listxattr,
-#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
- .get_acl = hfsplus_get_posix_acl,
- .set_acl = hfsplus_set_posix_acl,
-#endif
};
static const struct file_operations hfsplus_file_operations = {
diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
deleted file mode 100644
index 066114dcc3a2..000000000000
--- a/fs/hfsplus/posix_acl.c
+++ /dev/null
@@ -1,144 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/hfsplus/posix_acl.c
- *
- * Vyacheslav Dubeyko <slava@dubeyko.com>
- *
- * Handler for Posix Access Control Lists (ACLs) support.
- */
-
-#include "hfsplus_fs.h"
-#include "xattr.h"
-#include "acl.h"
-
-struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type)
-{
- struct posix_acl *acl;
- char *xattr_name;
- char *value = NULL;
- ssize_t size;
-
- hfs_dbg(ACL_MOD, "[%s]: ino %lu\n", __func__, inode->i_ino);
-
- switch (type) {
- case ACL_TYPE_ACCESS:
- xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
- break;
- case ACL_TYPE_DEFAULT:
- xattr_name = XATTR_NAME_POSIX_ACL_DEFAULT;
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
- size = __hfsplus_getxattr(inode, xattr_name, NULL, 0);
-
- if (size > 0) {
- value = (char *)hfsplus_alloc_attr_entry();
- if (unlikely(!value))
- return ERR_PTR(-ENOMEM);
- size = __hfsplus_getxattr(inode, xattr_name, value, size);
- }
-
- if (size > 0)
- acl = posix_acl_from_xattr(&init_user_ns, value, size);
- else if (size == -ENODATA)
- acl = NULL;
- else
- acl = ERR_PTR(size);
-
- hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value);
-
- return acl;
-}
-
-static int __hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
- int type)
-{
- int err;
- char *xattr_name;
- size_t size = 0;
- char *value = NULL;
-
- hfs_dbg(ACL_MOD, "[%s]: ino %lu\n", __func__, inode->i_ino);
-
- switch (type) {
- case ACL_TYPE_ACCESS:
- xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
- break;
-
- case ACL_TYPE_DEFAULT:
- xattr_name = XATTR_NAME_POSIX_ACL_DEFAULT;
- if (!S_ISDIR(inode->i_mode))
- return acl ? -EACCES : 0;
- break;
-
- default:
- return -EINVAL;
- }
-
- if (acl) {
- size = posix_acl_xattr_size(acl->a_count);
- if (unlikely(size > HFSPLUS_MAX_INLINE_DATA_SIZE))
- return -ENOMEM;
- value = (char *)hfsplus_alloc_attr_entry();
- if (unlikely(!value))
- return -ENOMEM;
- err = posix_acl_to_xattr(&init_user_ns, acl, value, size);
- if (unlikely(err < 0))
- goto end_set_acl;
- }
-
- err = __hfsplus_setxattr(inode, xattr_name, value, size, 0);
-
-end_set_acl:
- hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value);
-
- if (!err)
- set_cached_acl(inode, type, acl);
-
- return err;
-}
-
-int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, int type)
-{
- int err;
-
- if (type == ACL_TYPE_ACCESS && acl) {
- err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
- if (err)
- return err;
- }
- return __hfsplus_set_posix_acl(inode, acl, type);
-}
-
-int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir)
-{
- int err = 0;
- struct posix_acl *default_acl, *acl;
-
- hfs_dbg(ACL_MOD,
- "[%s]: ino %lu, dir->ino %lu\n",
- __func__, inode->i_ino, dir->i_ino);
-
- if (S_ISLNK(inode->i_mode))
- return 0;
-
- err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
- if (err)
- return err;
-
- if (default_acl) {
- err = __hfsplus_set_posix_acl(inode, default_acl,
- ACL_TYPE_DEFAULT);
- posix_acl_release(default_acl);
- }
-
- if (acl) {
- if (!err)
- err = __hfsplus_set_posix_acl(inode, acl,
- ACL_TYPE_ACCESS);
- posix_acl_release(acl);
- }
- return err;
-}
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index a6c0f54c48c3..eb4535eba95d 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -524,8 +524,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
goto out_put_root;
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
hfs_find_exit(&fd);
- if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
+ if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
+ err = -EINVAL;
goto out_put_root;
+ }
inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
@@ -562,8 +564,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
goto out_put_hidden_dir;
}
- err = hfsplus_init_inode_security(sbi->hidden_dir,
- root, &str);
+ err = hfsplus_init_security(sbi->hidden_dir,
+ root, &str);
if (err == -EOPNOTSUPP)
err = 0; /* Operation is not supported. */
else if (err) {
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index dfa90c21948f..c8d1b2be7854 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -272,8 +272,8 @@ static inline int asc2unichar(struct super_block *sb, const char *astr, int len,
return size;
}
-/* Decomposes a single unicode character. */
-static inline u16 *decompose_unichar(wchar_t uc, int *size)
+/* Decomposes a non-Hangul unicode character. */
+static u16 *hfsplus_decompose_nonhangul(wchar_t uc, int *size)
{
int off;
@@ -296,6 +296,51 @@ static inline u16 *decompose_unichar(wchar_t uc, int *size)
return hfsplus_decompose_table + (off / 4);
}
+/*
+ * Try to decompose a unicode character as Hangul. Return 0 if @uc is not
+ * precomposed Hangul, otherwise return the length of the decomposition.
+ *
+ * This function was adapted from sample code from the Unicode Standard
+ * Annex #15: Unicode Normalization Forms, version 3.2.0.
+ *
+ * Copyright (C) 1991-2018 Unicode, Inc. All rights reserved. Distributed
+ * under the Terms of Use in http://www.unicode.org/copyright.html.
+ */
+static int hfsplus_try_decompose_hangul(wchar_t uc, u16 *result)
+{
+ int index;
+ int l, v, t;
+
+ index = uc - Hangul_SBase;
+ if (index < 0 || index >= Hangul_SCount)
+ return 0;
+
+ l = Hangul_LBase + index / Hangul_NCount;
+ v = Hangul_VBase + (index % Hangul_NCount) / Hangul_TCount;
+ t = Hangul_TBase + index % Hangul_TCount;
+
+ result[0] = l;
+ result[1] = v;
+ if (t != Hangul_TBase) {
+ result[2] = t;
+ return 3;
+ }
+ return 2;
+}
+
+/* Decomposes a single unicode character. */
+static u16 *decompose_unichar(wchar_t uc, int *size, u16 *hangul_buffer)
+{
+ u16 *result;
+
+ /* Hangul is handled separately */
+ result = hangul_buffer;
+ *size = hfsplus_try_decompose_hangul(uc, result);
+ if (*size == 0)
+ result = hfsplus_decompose_nonhangul(uc, size);
+ return result;
+}
+
int hfsplus_asc2uni(struct super_block *sb,
struct hfsplus_unistr *ustr, int max_unistr_len,
const char *astr, int len)
@@ -303,13 +348,14 @@ int hfsplus_asc2uni(struct super_block *sb,
int size, dsize, decompose;
u16 *dstr, outlen = 0;
wchar_t c;
+ u16 dhangul[3];
decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
while (outlen < max_unistr_len && len > 0) {
size = asc2unichar(sb, astr, len, &c);
if (decompose)
- dstr = decompose_unichar(c, &dsize);
+ dstr = decompose_unichar(c, &dsize, dhangul);
else
dstr = NULL;
if (dstr) {
@@ -344,6 +390,7 @@ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str)
unsigned long hash;
wchar_t c;
u16 c2;
+ u16 dhangul[3];
casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
@@ -357,7 +404,7 @@ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str)
len -= size;
if (decompose)
- dstr = decompose_unichar(c, &dsize);
+ dstr = decompose_unichar(c, &dsize, dhangul);
else
dstr = NULL;
if (dstr) {
@@ -396,6 +443,7 @@ int hfsplus_compare_dentry(const struct dentry *dentry,
const char *astr1, *astr2;
u16 c1, c2;
wchar_t c;
+ u16 dhangul_1[3], dhangul_2[3];
casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
@@ -413,7 +461,8 @@ int hfsplus_compare_dentry(const struct dentry *dentry,
len1 -= size;
if (decompose)
- dstr1 = decompose_unichar(c, &dsize1);
+ dstr1 = decompose_unichar(c, &dsize1,
+ dhangul_1);
if (!decompose || !dstr1) {
c1 = c;
dstr1 = &c1;
@@ -427,7 +476,8 @@ int hfsplus_compare_dentry(const struct dentry *dentry,
len2 -= size;
if (decompose)
- dstr2 = decompose_unichar(c, &dsize2);
+ dstr2 = decompose_unichar(c, &dsize2,
+ dhangul_2);
if (!decompose || !dstr2) {
c2 = c;
dstr2 = &c2;
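The Hangul branch added above is the standard arithmetic decomposition from UAX #15: for a precomposed syllable S, index = S - 0xAC00, and the leading, vowel and trailing jamo fall out by division and modulus. The constants below are the standard Unicode values (NCount = VCount * TCount = 21 * 28); this standalone harness should mirror what hfsplus_try_decompose_hangul() computes:

#include <stdio.h>

#define SBase  0xAC00
#define LBase  0x1100
#define VBase  0x1161
#define TBase  0x11A7
#define TCount 28
#define NCount 588          /* VCount (21) * TCount (28) */
#define SCount 11172

/* Decompose a Hangul syllable uc into result[]; returns 0, 2 or 3. */
static int decompose_hangul(unsigned int uc, unsigned short *result)
{
	int index = uc - SBase;
	int l, v, t;

	if (index < 0 || index >= SCount)
		return 0;               /* not precomposed Hangul */

	l = LBase + index / NCount;
	v = VBase + (index % NCount) / TCount;
	t = TBase + index % TCount;

	result[0] = l;
	result[1] = v;
	if (t != TBase) {               /* syllable has a trailing jamo */
		result[2] = t;
		return 3;
	}
	return 2;
}

int main(void)
{
	unsigned short out[3];
	int n = decompose_hangul(0xD4DB, out), i;

	for (i = 0; i < n; i++)
		printf("U+%04X ", out[i]);
	printf("\n");    /* prints: U+1111 U+1171 U+11B6 */
	return 0;
}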
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index e538b758c448..d5403b4004c9 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -8,10 +8,8 @@
*/
#include "hfsplus_fs.h"
-#include <linux/posix_acl_xattr.h>
#include <linux/nls.h>
#include "xattr.h"
-#include "acl.h"
static int hfsplus_removexattr(struct inode *inode, const char *name);
@@ -19,10 +17,6 @@ const struct xattr_handler *hfsplus_xattr_handlers[] = {
&hfsplus_xattr_osx_handler,
&hfsplus_xattr_user_handler,
&hfsplus_xattr_trusted_handler,
-#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
- &posix_acl_access_xattr_handler,
- &posix_acl_default_xattr_handler,
-#endif
&hfsplus_xattr_security_handler,
NULL
};
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index a4e611d69710..d14e362b3eba 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -38,7 +38,4 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
int hfsplus_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr);
-int hfsplus_init_inode_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr);
-
#endif
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index f5550b006e88..cfbe6a3bfb1e 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -12,7 +12,6 @@
#include "hfsplus_fs.h"
#include "xattr.h"
-#include "acl.h"
static int hfsplus_security_getxattr(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
@@ -72,18 +71,6 @@ int hfsplus_init_security(struct inode *inode, struct inode *dir,
&hfsplus_initxattrs, NULL);
}
-int hfsplus_init_inode_security(struct inode *inode,
- struct inode *dir,
- const struct qstr *qstr)
-{
- int err;
-
- err = hfsplus_init_posix_acl(inode, dir);
- if (!err)
- err = hfsplus_init_security(inode, dir, qstr);
- return err;
-}
-
const struct xattr_handler hfsplus_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = hfsplus_security_getxattr,
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index cb8374af08a6..33b8423ef0c9 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -19,7 +19,7 @@
#define HOSTFS_ATTR_ATIME_SET 128
#define HOSTFS_ATTR_MTIME_SET 256
-/* These two are unused by hostfs. */
+/* This one is unused by hostfs. */
#define HOSTFS_ATTR_FORCE 512 /* Not a change, but force it anyway */
#define HOSTFS_ATTR_ATTR_FLAG 1024
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2597b290c2a5..444c7b170359 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -610,33 +610,21 @@ static struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
int err;
inode = hostfs_iget(ino->i_sb);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
+ if (IS_ERR(inode))
goto out;
- }
err = -ENOMEM;
name = dentry_name(dentry);
- if (name == NULL)
- goto out_put;
-
- err = read_name(inode, name);
-
- __putname(name);
- if (err == -ENOENT) {
+ if (name) {
+ err = read_name(inode, name);
+ __putname(name);
+ }
+ if (err) {
iput(inode);
- inode = NULL;
+ inode = (err == -ENOENT) ? NULL : ERR_PTR(err);
}
- else if (err)
- goto out_put;
-
- d_add(dentry, inode);
- return NULL;
-
- out_put:
- iput(inode);
out:
- return ERR_PTR(err);
+ return d_splice_alias(inode, dentry);
}
static int hostfs_link(struct dentry *to, struct inode *ino,
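The hostfs_lookup() rewrite above lands on the now-canonical ->lookup() shape: build the inode, which may legitimately be NULL (a negative entry) or an ERR_PTR on failure, and hand it straight to d_splice_alias(), which accepts all three cases. A hedged kernel-style sketch, where foo_iget() is a hypothetical helper:

static struct dentry *foo_lookup(struct inode *dir, struct dentry *dentry,
				 unsigned int flags)
{
	struct inode *inode;

	/* May return a valid inode, NULL (negative lookup),
	 * or ERR_PTR(-E...) on failure. */
	inode = foo_iget(dir->i_sb, dentry);

	/* d_splice_alias() handles all three: it d_add()s a negative
	 * dentry for NULL and propagates error pointers. */
	return d_splice_alias(inode, dentry);
}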
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index c83ece7facc5..d85230c84ef2 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -244,6 +244,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
result = iget_locked(dir->i_sb, ino);
if (!result) {
hpfs_error(dir->i_sb, "hpfs_lookup: can't get inode");
+ result = ERR_PTR(-ENOMEM);
goto bail1;
}
if (result->i_state & I_NEW) {
@@ -266,6 +267,8 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
if (de->has_acl || de->has_xtd_perm) if (!sb_rdonly(dir->i_sb)) {
hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
+ iput(result);
+ result = ERR_PTR(-EINVAL);
goto bail1;
}
@@ -301,29 +304,17 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
}
}
+bail1:
hpfs_brelse4(&qbh);
/*
* Made it.
*/
- end:
- end_add:
+end:
+end_add:
hpfs_unlock(dir->i_sb);
- d_add(dentry, result);
- return NULL;
-
- /*
- * Didn't.
- */
- bail1:
-
- hpfs_brelse4(&qbh);
-
- /*bail:*/
-
- hpfs_unlock(dir->i_sb);
- return ERR_PTR(-ENOENT);
+ return d_splice_alias(result, dentry);
}
const struct file_operations hpfs_dir_ops =
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 2a153aed4c19..ab2e7cc2ff33 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -334,16 +334,23 @@ long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg);
* local time (HPFS) to GMT (Unix)
*/
-static inline time_t local_to_gmt(struct super_block *s, time32_t t)
+static inline time64_t local_to_gmt(struct super_block *s, time32_t t)
{
extern struct timezone sys_tz;
return t + sys_tz.tz_minuteswest * 60 + hpfs_sb(s)->sb_timeshift;
}
-static inline time32_t gmt_to_local(struct super_block *s, time_t t)
+static inline time32_t gmt_to_local(struct super_block *s, time64_t t)
{
extern struct timezone sys_tz;
- return t - sys_tz.tz_minuteswest * 60 - hpfs_sb(s)->sb_timeshift;
+ t = t - sys_tz.tz_minuteswest * 60 - hpfs_sb(s)->sb_timeshift;
+
+ return clamp_t(time64_t, t, 0, U32_MAX);
+}
+
+static inline time32_t local_get_seconds(struct super_block *s)
+{
+ return gmt_to_local(s, ktime_get_real_seconds());
}
/*
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index a3615e4c730d..1aee39160ac5 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -11,7 +11,7 @@
static void hpfs_update_directory_times(struct inode *dir)
{
- time_t t = get_seconds();
+ time64_t t = local_to_gmt(dir->i_sb, local_get_seconds(dir->i_sb));
if (t == dir->i_mtime.tv_sec &&
t == dir->i_ctime.tv_sec)
return;
@@ -50,7 +50,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
/*dee.archive = 0;*/
dee.hidden = name[0] == '.';
dee.fnode = cpu_to_le32(fno);
- dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
+ dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
if (!result)
goto bail2;
@@ -91,7 +91,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
dnode->root_dnode = 1;
dnode->up = cpu_to_le32(fno);
de = hpfs_add_de(dir->i_sb, dnode, "\001\001", 2, 0);
- de->creation_date = de->write_date = de->read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
+ de->creation_date = de->write_date = de->read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
if (!(mode & 0222)) de->read_only = 1;
de->first = de->directory = 1;
/*de->hidden = de->system = 0;*/
@@ -151,7 +151,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, b
dee.archive = 1;
dee.hidden = name[0] == '.';
dee.fnode = cpu_to_le32(fno);
- dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
+ dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
if (!result)
@@ -238,7 +238,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, de
dee.archive = 1;
dee.hidden = name[0] == '.';
dee.fnode = cpu_to_le32(fno);
- dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
+ dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
if (!result)
@@ -314,7 +314,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
dee.archive = 1;
dee.hidden = name[0] == '.';
dee.fnode = cpu_to_le32(fno);
- dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
+ dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(local_get_seconds(dir->i_sb));
result = new_inode(dir->i_sb);
if (!result)
@@ -565,7 +565,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
err = -EFSERROR;
goto end1;
}
- err = r == 2 ? -ENOSPC : r == 1 ? -EFSERROR : 0;
+ err = -ENOSPC;
goto end1;
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 40d4c66c7751..32920a10100e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -410,7 +410,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
int i, freed = 0;
bool truncate_op = (lend == LLONG_MAX);
- memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
vma_init(&pseudo_vma, current->mm);
pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
pagevec_init(&pvec);
@@ -595,7 +594,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
* allocation routines. If NUMA is configured, use page index
* as input to create an allocation policy.
*/
- memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
vma_init(&pseudo_vma, mm);
pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
pseudo_vma.vm_file = file;
@@ -1310,10 +1308,6 @@ static int get_hstate_idx(int page_size_log)
return h - hstates;
}
-static const struct dentry_operations anon_ops = {
- .d_dname = simple_dname
-};
-
/*
* Note that size should be aligned to proper hugepage size in caller side,
* otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
@@ -1322,19 +1316,18 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
vm_flags_t acctflag, struct user_struct **user,
int creat_flags, int page_size_log)
{
- struct file *file = ERR_PTR(-ENOMEM);
struct inode *inode;
- struct path path;
- struct super_block *sb;
- struct qstr quick_string;
+ struct vfsmount *mnt;
int hstate_idx;
+ struct file *file;
hstate_idx = get_hstate_idx(page_size_log);
if (hstate_idx < 0)
return ERR_PTR(-ENODEV);
*user = NULL;
- if (!hugetlbfs_vfsmount[hstate_idx])
+ mnt = hugetlbfs_vfsmount[hstate_idx];
+ if (!mnt)
return ERR_PTR(-ENOENT);
if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
@@ -1350,45 +1343,28 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
}
}
- sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
- quick_string.name = name;
- quick_string.len = strlen(quick_string.name);
- quick_string.hash = 0;
- path.dentry = d_alloc_pseudo(sb, &quick_string);
- if (!path.dentry)
- goto out_shm_unlock;
-
- d_set_d_op(path.dentry, &anon_ops);
- path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
file = ERR_PTR(-ENOSPC);
- inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
+ inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
if (!inode)
- goto out_dentry;
+ goto out;
if (creat_flags == HUGETLB_SHMFS_INODE)
inode->i_flags |= S_PRIVATE;
- file = ERR_PTR(-ENOMEM);
- if (hugetlb_reserve_pages(inode, 0,
- size >> huge_page_shift(hstate_inode(inode)), NULL,
- acctflag))
- goto out_inode;
-
- d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode);
- file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
- &hugetlbfs_file_operations);
- if (IS_ERR(file))
- goto out_dentry; /* inode is already attached */
-
- return file;
+ if (hugetlb_reserve_pages(inode, 0,
+ size >> huge_page_shift(hstate_inode(inode)), NULL,
+ acctflag))
+ file = ERR_PTR(-ENOMEM);
+ else
+ file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
+ &hugetlbfs_file_operations);
+ if (!IS_ERR(file))
+ return file;
-out_inode:
iput(inode);
-out_dentry:
- path_put(&path);
-out_shm_unlock:
+out:
if (*user) {
user_shm_unlock(size, *user);
*user = NULL;
diff --git a/fs/inode.c b/fs/inode.c
index 8c86c809ca17..42f6d25f32a5 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -804,6 +804,10 @@ repeat:
__wait_on_freeing_inode(inode);
goto repeat;
}
+ if (unlikely(inode->i_state & I_CREATING)) {
+ spin_unlock(&inode->i_lock);
+ return ERR_PTR(-ESTALE);
+ }
__iget(inode);
spin_unlock(&inode->i_lock);
return inode;
@@ -831,6 +835,10 @@ repeat:
__wait_on_freeing_inode(inode);
goto repeat;
}
+ if (unlikely(inode->i_state & I_CREATING)) {
+ spin_unlock(&inode->i_lock);
+ return ERR_PTR(-ESTALE);
+ }
__iget(inode);
spin_unlock(&inode->i_lock);
return inode;
@@ -961,13 +969,26 @@ void unlock_new_inode(struct inode *inode)
lockdep_annotate_inode_mutex_key(inode);
spin_lock(&inode->i_lock);
WARN_ON(!(inode->i_state & I_NEW));
- inode->i_state &= ~I_NEW;
+ inode->i_state &= ~(I_NEW | I_CREATING);
smp_mb();
wake_up_bit(&inode->i_state, __I_NEW);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
+void discard_new_inode(struct inode *inode)
+{
+ lockdep_annotate_inode_mutex_key(inode);
+ spin_lock(&inode->i_lock);
+ WARN_ON(!(inode->i_state & I_NEW));
+ inode->i_state &= ~I_NEW;
+ smp_mb();
+ wake_up_bit(&inode->i_state, __I_NEW);
+ spin_unlock(&inode->i_lock);
+ iput(inode);
+}
+EXPORT_SYMBOL(discard_new_inode);
+
/**
* lock_two_nondirectories - take two i_mutexes on non-directory objects
*
@@ -1029,6 +1050,7 @@ struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
{
struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
struct inode *old;
+ bool creating = inode->i_state & I_CREATING;
again:
spin_lock(&inode_hash_lock);
@@ -1039,6 +1061,8 @@ again:
* Use the old inode instead of the preallocated one.
*/
spin_unlock(&inode_hash_lock);
+ if (IS_ERR(old))
+ return NULL;
wait_on_inode(old);
if (unlikely(inode_unhashed(old))) {
iput(old);
@@ -1060,6 +1084,8 @@ again:
inode->i_state |= I_NEW;
hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
+ if (!creating)
+ inode_sb_list_add(inode);
unlock:
spin_unlock(&inode_hash_lock);
@@ -1094,12 +1120,13 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
struct inode *inode = ilookup5(sb, hashval, test, data);
if (!inode) {
- struct inode *new = new_inode(sb);
+ struct inode *new = alloc_inode(sb);
if (new) {
+ new->i_state = 0;
inode = inode_insert5(new, hashval, test, set, data);
if (unlikely(inode != new))
- iput(new);
+ destroy_inode(new);
}
}
return inode;
@@ -1128,6 +1155,8 @@ again:
inode = find_inode_fast(sb, head, ino);
spin_unlock(&inode_hash_lock);
if (inode) {
+ if (IS_ERR(inode))
+ return NULL;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
@@ -1165,6 +1194,8 @@ again:
*/
spin_unlock(&inode_hash_lock);
destroy_inode(inode);
+ if (IS_ERR(old))
+ return NULL;
inode = old;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
@@ -1282,7 +1313,7 @@ struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
inode = find_inode(sb, head, test, data);
spin_unlock(&inode_hash_lock);
- return inode;
+ return IS_ERR(inode) ? NULL : inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
@@ -1338,6 +1369,8 @@ again:
spin_unlock(&inode_hash_lock);
if (inode) {
+ if (IS_ERR(inode))
+ return NULL;
wait_on_inode(inode);
if (unlikely(inode_unhashed(inode))) {
iput(inode);
@@ -1421,12 +1454,17 @@ int insert_inode_locked(struct inode *inode)
}
if (likely(!old)) {
spin_lock(&inode->i_lock);
- inode->i_state |= I_NEW;
+ inode->i_state |= I_NEW | I_CREATING;
hlist_add_head(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
return 0;
}
+ if (unlikely(old->i_state & I_CREATING)) {
+ spin_unlock(&old->i_lock);
+ spin_unlock(&inode_hash_lock);
+ return -EBUSY;
+ }
__iget(old);
spin_unlock(&old->i_lock);
spin_unlock(&inode_hash_lock);
@@ -1443,7 +1481,10 @@ EXPORT_SYMBOL(insert_inode_locked);
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *), void *data)
{
- struct inode *old = inode_insert5(inode, hashval, test, NULL, data);
+ struct inode *old;
+
+ inode->i_state |= I_CREATING;
+ old = inode_insert5(inode, hashval, test, NULL, data);
if (old != inode) {
iput(old);
@@ -1555,49 +1596,16 @@ sector_t bmap(struct inode *inode, sector_t block)
EXPORT_SYMBOL(bmap);
/*
- * Update times in overlayed inode from underlying real inode
- */
-static void update_ovl_inode_times(struct dentry *dentry, struct inode *inode,
- bool rcu)
-{
- struct dentry *upperdentry;
-
- /*
- * Nothing to do if in rcu or if non-overlayfs
- */
- if (rcu || likely(!(dentry->d_flags & DCACHE_OP_REAL)))
- return;
-
- upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER);
-
- /*
- * If file is on lower then we can't update atime, so no worries about
- * stale mtime/ctime.
- */
- if (upperdentry) {
- struct inode *realinode = d_inode(upperdentry);
-
- if ((!timespec64_equal(&inode->i_mtime, &realinode->i_mtime) ||
- !timespec64_equal(&inode->i_ctime, &realinode->i_ctime))) {
- inode->i_mtime = realinode->i_mtime;
- inode->i_ctime = realinode->i_ctime;
- }
- }
-}
-
-/*
* With relative atime, only update atime if the previous atime is
* earlier than either the ctime or mtime or if at least a day has
* passed since the last atime update.
*/
-static int relatime_need_update(const struct path *path, struct inode *inode,
- struct timespec now, bool rcu)
+static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
+ struct timespec now)
{
- if (!(path->mnt->mnt_flags & MNT_RELATIME))
+ if (!(mnt->mnt_flags & MNT_RELATIME))
return 1;
-
- update_ovl_inode_times(path->dentry, inode, rcu);
/*
* Is mtime younger than atime? If yes, update atime:
*/
@@ -1668,8 +1676,7 @@ static int update_time(struct inode *inode, struct timespec64 *time, int flags)
* This function automatically handles read only file systems and media,
* as well as the "noatime" flag and inode specific "noatime" markers.
*/
-bool __atime_needs_update(const struct path *path, struct inode *inode,
- bool rcu)
+bool atime_needs_update(const struct path *path, struct inode *inode)
{
struct vfsmount *mnt = path->mnt;
struct timespec64 now;
@@ -1695,7 +1702,7 @@ bool __atime_needs_update(const struct path *path, struct inode *inode,
now = current_time(inode);
- if (!relatime_need_update(path, inode, timespec64_to_timespec(now), rcu))
+ if (!relatime_need_update(mnt, inode, timespec64_to_timespec(now)))
return false;
if (timespec64_equal(&inode->i_atime, &now))
@@ -1710,7 +1717,7 @@ void touch_atime(const struct path *path)
struct inode *inode = d_inode(path->dentry);
struct timespec64 now;
- if (!__atime_needs_update(path, inode, false))
+ if (!atime_needs_update(path, inode))
return;
if (!sb_start_write_trylock(inode->i_sb))
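The I_CREATING machinery above changes the teardown convention for half-constructed inodes: on a failed create, filesystems call discard_new_inode() instead of unlock_new_inode() followed by iput(), so concurrent icache lookups that hit I_CREATING get -ESTALE rather than the dying inode (see the jfs conversions later in this series). A minimal sketch of the intended calling pattern; myfs_fill() is hypothetical and stands in for filesystem-specific on-disk initialisation:

	static struct inode *myfs_create_inode(struct super_block *sb,
					       unsigned long ino)
	{
		struct inode *inode = new_inode(sb);
		int err;

		if (!inode)
			return ERR_PTR(-ENOMEM);
		inode->i_ino = ino;

		/* hashes the inode and sets I_NEW | I_CREATING */
		err = insert_inode_locked(inode);
		if (err) {
			iput(inode);
			return ERR_PTR(err);
		}

		err = myfs_fill(inode);		/* hypothetical */
		if (err) {
			/* clears I_NEW, wakes waiters, drops the inode */
			discard_new_inode(inode);
			return ERR_PTR(err);
		}

		unlock_new_inode(inode);	/* clears I_NEW and I_CREATING */
		return inode;
	}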
diff --git a/fs/internal.h b/fs/internal.h
index 5645b4ebf494..d410186bc369 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -43,6 +43,8 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
extern void guard_bio_eod(int rw, struct bio *bio);
extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block, struct iomap *iomap);
+int __generic_write_end(struct inode *inode, loff_t pos, unsigned copied,
+ struct page *page);
/*
* char_dev.c
@@ -80,10 +82,8 @@ extern void __init mnt_init(void);
extern int __mnt_want_write(struct vfsmount *);
extern int __mnt_want_write_file(struct file *);
-extern int mnt_want_write_file_path(struct file *);
extern void __mnt_drop_write(struct vfsmount *);
extern void __mnt_drop_write_file(struct file *);
-extern void mnt_drop_write_file_path(struct file *);
/*
* fs_struct.c
@@ -93,7 +93,8 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
/*
* file_table.c
*/
-extern struct file *get_empty_filp(void);
+extern struct file *alloc_empty_file(int, const struct cred *);
+extern struct file *alloc_empty_file_noaccount(int, const struct cred *);
/*
* super.c
@@ -125,8 +126,7 @@ int do_fchmodat(int dfd, const char __user *filename, umode_t mode);
int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
int flag);
-extern int open_check_o_direct(struct file *f);
-extern int vfs_open(const struct path *, struct file *, const struct cred *);
+extern int vfs_open(const struct path *, struct file *);
/*
* inode.c
@@ -135,13 +135,6 @@ extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
extern void inode_add_lru(struct inode *inode);
extern int dentry_needs_remove_privs(struct dentry *dentry);
-extern bool __atime_needs_update(const struct path *, struct inode *, bool);
-static inline bool atime_needs_update_rcu(const struct path *path,
- struct inode *inode)
-{
- return __atime_needs_update(path, inode, true);
-}
-
/*
* fs-writeback.c
*/
@@ -184,7 +177,6 @@ extern const struct dentry_operations ns_dentry_operations;
*/
extern int do_vfs_ioctl(struct file *file, unsigned int fd, unsigned int cmd,
unsigned long arg);
-extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/*
* iomap support:
diff --git a/fs/ioctl.c b/fs/ioctl.c
index b445b13fc59b..3212c29235ce 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -49,6 +49,7 @@ long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
out:
return error;
}
+EXPORT_SYMBOL(vfs_ioctl);
static int ioctl_fibmap(struct file *filp, int __user *p)
{
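With vfs_ioctl() exported, modular callers no longer need to open-code the unlocked_ioctl dispatch. A hedged sketch of a stacking filesystem forwarding an ioctl to the file it wraps; example_real_file() is hypothetical:

	static long example_fwd_ioctl(struct file *file, unsigned int cmd,
				      unsigned long arg)
	{
		struct file *real = example_real_file(file);	/* hypothetical */

		/* returns -ENOTTY if real->f_op->unlocked_ioctl is NULL */
		return vfs_ioctl(real, cmd, arg);
	}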
diff --git a/fs/iomap.c b/fs/iomap.c
index 0d0bd8845586..74762b1ec233 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016 Christoph Hellwig.
+ * Copyright (c) 2016-2018 Christoph Hellwig.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -17,7 +17,9 @@
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
+#include <linux/migrate.h>
#include <linux/mm.h>
+#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
@@ -103,6 +105,467 @@ iomap_sector(struct iomap *iomap, loff_t pos)
return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
+static struct iomap_page *
+iomap_page_create(struct inode *inode, struct page *page)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (iop || i_blocksize(inode) == PAGE_SIZE)
+ return iop;
+
+ iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
+ atomic_set(&iop->read_count, 0);
+ atomic_set(&iop->write_count, 0);
+ bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+ set_page_private(page, (unsigned long)iop);
+ SetPagePrivate(page);
+ return iop;
+}
+
+static void
+iomap_page_release(struct page *page)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (!iop)
+ return;
+ WARN_ON_ONCE(atomic_read(&iop->read_count));
+ WARN_ON_ONCE(atomic_read(&iop->write_count));
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ kfree(iop);
+}
+
+/*
+ * Calculate the range inside the page that we actually need to read.
+ */
+static void
+iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
+ loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
+{
+ unsigned block_bits = inode->i_blkbits;
+ unsigned block_size = (1 << block_bits);
+ unsigned poff = offset_in_page(*pos);
+ unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+ unsigned first = poff >> block_bits;
+ unsigned last = (poff + plen - 1) >> block_bits;
+ unsigned end = offset_in_page(i_size_read(inode)) >> block_bits;
+
+ /*
+ * If the block size is smaller than the page size we need to check the
+ * per-block uptodate status and adjust the offset and length if needed
+ * to avoid reading in already uptodate ranges.
+ */
+ if (iop) {
+ unsigned int i;
+
+ /* move forward for each leading block marked uptodate */
+ for (i = first; i <= last; i++) {
+ if (!test_bit(i, iop->uptodate))
+ break;
+ *pos += block_size;
+ poff += block_size;
+ plen -= block_size;
+ first++;
+ }
+
+ /* truncate len if we find any trailing uptodate block(s) */
+ for ( ; i <= last; i++) {
+ if (test_bit(i, iop->uptodate)) {
+ plen -= (last - i + 1) * block_size;
+ last = i - 1;
+ break;
+ }
+ }
+ }
+
+ /*
+ * If the extent spans the block that contains the i_size we need to
+ * handle both halves separately so that we properly zero data in the
+ * page cache for blocks that are entirely outside of i_size.
+ */
+ if (first <= end && last > end)
+ plen -= (last - end) * block_size;
+
+ *offp = poff;
+ *lenp = plen;
+}
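A worked example of the trimming above, under an assumed 4096-byte page with 1024-byte blocks, where blocks 0 and 3 are already uptodate and the caller asks for the whole page:

	/*
	 * iop->uptodate = { 1, 0, 0, 1 }, *pos page-aligned, length = 4096
	 *
	 *   leading loop:  skips block 0 -> *pos += 1024, poff = 1024,
	 *                  plen = 3072, first = 1
	 *   trailing loop: hits block 3 -> plen -= 1024, last = 2
	 *
	 * Result: *offp = 1024, *lenp = 2048, so only blocks 1 and 2 are read.
	 */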
+
+static void
+iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+ struct inode *inode = page->mapping->host;
+ unsigned first = off >> inode->i_blkbits;
+ unsigned last = (off + len - 1) >> inode->i_blkbits;
+ unsigned int i;
+ bool uptodate = true;
+
+ if (iop) {
+ for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
+ if (i >= first && i <= last)
+ set_bit(i, iop->uptodate);
+ else if (!test_bit(i, iop->uptodate))
+ uptodate = false;
+ }
+ }
+
+ if (uptodate && !PageError(page))
+ SetPageUptodate(page);
+}
+
+static void
+iomap_read_finish(struct iomap_page *iop, struct page *page)
+{
+ if (!iop || atomic_dec_and_test(&iop->read_count))
+ unlock_page(page);
+}
+
+static void
+iomap_read_page_end_io(struct bio_vec *bvec, int error)
+{
+ struct page *page = bvec->bv_page;
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (unlikely(error)) {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ } else {
+ iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+ }
+
+ iomap_read_finish(iop, page);
+}
+
+static void
+iomap_read_inline_data(struct inode *inode, struct page *page,
+ struct iomap *iomap)
+{
+ size_t size = i_size_read(inode);
+ void *addr;
+
+ if (PageUptodate(page))
+ return;
+
+ BUG_ON(page->index);
+ BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+ addr = kmap_atomic(page);
+ memcpy(addr, iomap->inline_data, size);
+ memset(addr + size, 0, PAGE_SIZE - size);
+ kunmap_atomic(addr);
+ SetPageUptodate(page);
+}
+
+static void
+iomap_read_end_io(struct bio *bio)
+{
+ int error = blk_status_to_errno(bio->bi_status);
+ struct bio_vec *bvec;
+ int i;
+
+ bio_for_each_segment_all(bvec, bio, i)
+ iomap_read_page_end_io(bvec, error);
+ bio_put(bio);
+}
+
+struct iomap_readpage_ctx {
+ struct page *cur_page;
+ bool cur_page_in_bio;
+ bool is_readahead;
+ struct bio *bio;
+ struct list_head *pages;
+};
+
+static loff_t
+iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap)
+{
+ struct iomap_readpage_ctx *ctx = data;
+ struct page *page = ctx->cur_page;
+ struct iomap_page *iop = iomap_page_create(inode, page);
+ bool is_contig = false;
+ loff_t orig_pos = pos;
+ unsigned poff, plen;
+ sector_t sector;
+
+ if (iomap->type == IOMAP_INLINE) {
+ WARN_ON_ONCE(pos);
+ iomap_read_inline_data(inode, page, iomap);
+ return PAGE_SIZE;
+ }
+
+ /* zero post-eof blocks as the page may be mapped */
+ iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
+ if (plen == 0)
+ goto done;
+
+ if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
+ zero_user(page, poff, plen);
+ iomap_set_range_uptodate(page, poff, plen);
+ goto done;
+ }
+
+ ctx->cur_page_in_bio = true;
+
+ /*
+ * Try to merge into a previous segment if we can.
+ */
+ sector = iomap_sector(iomap, pos);
+ if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
+ if (__bio_try_merge_page(ctx->bio, page, plen, poff))
+ goto done;
+ is_contig = true;
+ }
+
+ /*
+ * If we start a new segment we need to increase the read count, and we
+ * need to do so before submitting any previous full bio to make sure
+ * that we don't prematurely unlock the page.
+ */
+ if (iop)
+ atomic_inc(&iop->read_count);
+
+ if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
+ gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+ int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ if (ctx->bio)
+ submit_bio(ctx->bio);
+
+ if (ctx->is_readahead) /* same as readahead_gfp_mask */
+ gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
+ ctx->bio->bi_opf = REQ_OP_READ;
+ if (ctx->is_readahead)
+ ctx->bio->bi_opf |= REQ_RAHEAD;
+ ctx->bio->bi_iter.bi_sector = sector;
+ bio_set_dev(ctx->bio, iomap->bdev);
+ ctx->bio->bi_end_io = iomap_read_end_io;
+ }
+
+ __bio_add_page(ctx->bio, page, plen, poff);
+done:
+ /*
+ * Move the caller beyond our range so that it keeps making progress.
+ * For that we have to include any leading non-uptodate ranges, but
+ * we can skip trailing ones as they will be handled in the next
+ * iteration.
+ */
+ return pos - orig_pos + plen;
+}
+
+int
+iomap_readpage(struct page *page, const struct iomap_ops *ops)
+{
+ struct iomap_readpage_ctx ctx = { .cur_page = page };
+ struct inode *inode = page->mapping->host;
+ unsigned poff;
+ loff_t ret;
+
+ for (poff = 0; poff < PAGE_SIZE; poff += ret) {
+ ret = iomap_apply(inode, page_offset(page) + poff,
+ PAGE_SIZE - poff, 0, ops, &ctx,
+ iomap_readpage_actor);
+ if (ret <= 0) {
+ WARN_ON_ONCE(ret == 0);
+ SetPageError(page);
+ break;
+ }
+ }
+
+ if (ctx.bio) {
+ submit_bio(ctx.bio);
+ WARN_ON_ONCE(!ctx.cur_page_in_bio);
+ } else {
+ WARN_ON_ONCE(ctx.cur_page_in_bio);
+ unlock_page(page);
+ }
+
+ /*
+ * Just like mpage_readpages and block_read_full_page we always
+ * return 0 and just mark the page as PageError on errors. This
+ * should be cleaned up all through the stack eventually.
+ */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_readpage);
+
+static struct page *
+iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
+ loff_t length, loff_t *done)
+{
+ while (!list_empty(pages)) {
+ struct page *page = lru_to_page(pages);
+
+ if (page_offset(page) >= (u64)pos + length)
+ break;
+
+ list_del(&page->lru);
+ if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
+ GFP_NOFS))
+ return page;
+
+ /*
+		 * If we already have a page in the page cache at this index we are
+ * done. Upper layers don't care if it is uptodate after the
+ * readpages call itself as every page gets checked again once
+ * actually needed.
+ */
+ *done += PAGE_SIZE;
+ put_page(page);
+ }
+
+ return NULL;
+}
+
+static loff_t
+iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ struct iomap_readpage_ctx *ctx = data;
+ loff_t done, ret;
+
+ for (done = 0; done < length; done += ret) {
+ if (ctx->cur_page && offset_in_page(pos + done) == 0) {
+ if (!ctx->cur_page_in_bio)
+ unlock_page(ctx->cur_page);
+ put_page(ctx->cur_page);
+ ctx->cur_page = NULL;
+ }
+ if (!ctx->cur_page) {
+ ctx->cur_page = iomap_next_page(inode, ctx->pages,
+ pos, length, &done);
+ if (!ctx->cur_page)
+ break;
+ ctx->cur_page_in_bio = false;
+ }
+ ret = iomap_readpage_actor(inode, pos + done, length - done,
+ ctx, iomap);
+ }
+
+ return done;
+}
+
+int
+iomap_readpages(struct address_space *mapping, struct list_head *pages,
+ unsigned nr_pages, const struct iomap_ops *ops)
+{
+ struct iomap_readpage_ctx ctx = {
+ .pages = pages,
+ .is_readahead = true,
+ };
+ loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
+ loff_t last = page_offset(list_entry(pages->next, struct page, lru));
+ loff_t length = last - pos + PAGE_SIZE, ret = 0;
+
+ while (length > 0) {
+ ret = iomap_apply(mapping->host, pos, length, 0, ops,
+ &ctx, iomap_readpages_actor);
+ if (ret <= 0) {
+ WARN_ON_ONCE(ret == 0);
+ goto done;
+ }
+ pos += ret;
+ length -= ret;
+ }
+ ret = 0;
+done:
+ if (ctx.bio)
+ submit_bio(ctx.bio);
+ if (ctx.cur_page) {
+ if (!ctx.cur_page_in_bio)
+ unlock_page(ctx.cur_page);
+ put_page(ctx.cur_page);
+ }
+
+ /*
+	 * Check that we didn't lose a page due to the arcane calling
+	 * conventions.
+ */
+ WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iomap_readpages);
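A sketch of how a filesystem is expected to wire these helpers into its address_space_operations, modelled on the xfs conversion; myfs_iomap_ops is a stand-in for the filesystem's own iomap_ops:

	static int myfs_readpage(struct file *unused, struct page *page)
	{
		return iomap_readpage(page, &myfs_iomap_ops);
	}

	static int myfs_readpages(struct file *unused,
				  struct address_space *mapping,
				  struct list_head *pages, unsigned nr_pages)
	{
		return iomap_readpages(mapping, pages, nr_pages,
				       &myfs_iomap_ops);
	}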
+
+int
+iomap_is_partially_uptodate(struct page *page, unsigned long from,
+ unsigned long count)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+ struct inode *inode = page->mapping->host;
+ unsigned first = from >> inode->i_blkbits;
+ unsigned last = (from + count - 1) >> inode->i_blkbits;
+ unsigned i;
+
+ if (iop) {
+ for (i = first; i <= last; i++)
+ if (!test_bit(i, iop->uptodate))
+ return 0;
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
+
+int
+iomap_releasepage(struct page *page, gfp_t gfp_mask)
+{
+ /*
+ * mm accommodates an old ext3 case where clean pages might not have had
+ * the dirty bit cleared. Thus, it can send actual dirty pages to
+	 * ->releasepage() via shrink_active_list(); skip those here.
+ */
+ if (PageDirty(page) || PageWriteback(page))
+ return 0;
+ iomap_page_release(page);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(iomap_releasepage);
+
+void
+iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
+{
+ /*
+ * If we are invalidating the entire page, clear the dirty state from it
+ * and release it to avoid unnecessary buildup of the LRU.
+ */
+ if (offset == 0 && len == PAGE_SIZE) {
+ WARN_ON_ONCE(PageWriteback(page));
+ cancel_dirty_page(page);
+ iomap_page_release(page);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_invalidatepage);
+
+#ifdef CONFIG_MIGRATION
+int
+iomap_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode)
+{
+ int ret;
+
+ ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+ if (ret != MIGRATEPAGE_SUCCESS)
+ return ret;
+
+ if (page_has_private(page)) {
+ ClearPagePrivate(page);
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ SetPagePrivate(newpage);
+ }
+
+ if (mode != MIGRATE_SYNC_NO_COPY)
+ migrate_page_copy(newpage, page);
+ else
+ migrate_page_states(newpage, page);
+ return MIGRATEPAGE_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(iomap_migrate_page);
+#endif /* CONFIG_MIGRATION */
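Taken together, a buffer-head-free filesystem can point its aops at the new helpers. A hedged sketch with hypothetical myfs_ names (iomap_set_page_dirty is added further down in this patch):

	static const struct address_space_operations myfs_aops = {
		.readpage		= myfs_readpage,
		.readpages		= myfs_readpages,
		.set_page_dirty		= iomap_set_page_dirty,
		.releasepage		= iomap_releasepage,
		.invalidatepage		= iomap_invalidatepage,
		.is_partially_uptodate	= iomap_is_partially_uptodate,
	#ifdef CONFIG_MIGRATION
		.migratepage		= iomap_migrate_page,
	#endif
	};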
+
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
@@ -117,6 +580,61 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
}
static int
+iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
+ unsigned poff, unsigned plen, unsigned from, unsigned to,
+ struct iomap *iomap)
+{
+ struct bio_vec bvec;
+ struct bio bio;
+
+ if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
+ zero_user_segments(page, poff, from, to, poff + plen);
+ iomap_set_range_uptodate(page, poff, plen);
+ return 0;
+ }
+
+ bio_init(&bio, &bvec, 1);
+ bio.bi_opf = REQ_OP_READ;
+ bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
+ bio_set_dev(&bio, iomap->bdev);
+ __bio_add_page(&bio, page, plen, poff);
+ return submit_bio_wait(&bio);
+}
+
+static int
+__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
+ struct page *page, struct iomap *iomap)
+{
+ struct iomap_page *iop = iomap_page_create(inode, page);
+ loff_t block_size = i_blocksize(inode);
+ loff_t block_start = pos & ~(block_size - 1);
+ loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
+ unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+ int status = 0;
+
+ if (PageUptodate(page))
+ return 0;
+
+ do {
+ iomap_adjust_read_range(inode, iop, &block_start,
+ block_end - block_start, &poff, &plen);
+ if (plen == 0)
+ break;
+
+ if ((from > poff && from < poff + plen) ||
+ (to > poff && to < poff + plen)) {
+ status = iomap_read_page_sync(inode, block_start, page,
+ poff, plen, from, to, iomap);
+ if (status)
+ break;
+ }
+
+ } while ((block_start += plen) < block_end);
+
+ return status;
+}
+
+static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
struct page **pagep, struct iomap *iomap)
{
@@ -133,7 +651,12 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
if (!page)
return -ENOMEM;
- status = __block_write_begin_int(page, pos, len, NULL, iomap);
+ if (iomap->type == IOMAP_INLINE)
+ iomap_read_inline_data(inode, page, iomap);
+ else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
+ status = __block_write_begin_int(page, pos, len, NULL, iomap);
+ else
+ status = __iomap_write_begin(inode, pos, len, page, iomap);
if (unlikely(status)) {
unlock_page(page);
put_page(page);
@@ -146,14 +669,93 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
return status;
}
+int
+iomap_set_page_dirty(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ int newly_dirty;
+
+ if (unlikely(!mapping))
+ return !TestSetPageDirty(page);
+
+ /*
+ * Lock out page->mem_cgroup migration to keep PageDirty
+ * synchronized with per-memcg dirty page counters.
+ */
+ lock_page_memcg(page);
+ newly_dirty = !TestSetPageDirty(page);
+ if (newly_dirty)
+ __set_page_dirty(page, mapping, 0);
+ unlock_page_memcg(page);
+
+ if (newly_dirty)
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ return newly_dirty;
+}
+EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
+
+static int
+__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
+ unsigned copied, struct page *page, struct iomap *iomap)
+{
+ flush_dcache_page(page);
+
+ /*
+ * The blocks that were entirely written will now be uptodate, so we
+ * don't have to worry about a readpage reading them and overwriting a
+ * partial write. However if we have encountered a short write and only
+ * partially written into a block, it will not be marked uptodate, so a
+ * readpage might come in and destroy our partial write.
+ *
+	 * Do the simplest thing, and just treat any short write to a
+	 * non-uptodate page as a zero-length write, and force the caller to
+	 * redo the whole thing.
+ */
+ if (unlikely(copied < len && !PageUptodate(page))) {
+ copied = 0;
+ } else {
+ iomap_set_range_uptodate(page, offset_in_page(pos), len);
+ iomap_set_page_dirty(page);
+ }
+ return __generic_write_end(inode, pos, copied, page);
+}
+
+static int
+iomap_write_end_inline(struct inode *inode, struct page *page,
+ struct iomap *iomap, loff_t pos, unsigned copied)
+{
+ void *addr;
+
+ WARN_ON_ONCE(!PageUptodate(page));
+ BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+ addr = kmap_atomic(page);
+ memcpy(iomap->inline_data + pos, addr + pos, copied);
+ kunmap_atomic(addr);
+
+ mark_inode_dirty(inode);
+ __generic_write_end(inode, pos, copied, page);
+ return copied;
+}
+
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page)
+ unsigned copied, struct page *page, struct iomap *iomap)
{
int ret;
- ret = generic_write_end(NULL, inode->i_mapping, pos, len,
- copied, page, NULL);
+ if (iomap->type == IOMAP_INLINE) {
+ ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
+ } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
+ ret = generic_write_end(NULL, inode->i_mapping, pos, len,
+ copied, page, NULL);
+ } else {
+ ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
+ }
+
+ if (iomap->page_done)
+ iomap->page_done(inode, pos, copied, page, iomap);
+
if (ret < len)
iomap_write_failed(inode, pos, len);
return ret;
@@ -174,7 +776,7 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
unsigned long bytes; /* Bytes to write to page */
size_t copied; /* Bytes copied from user */
- offset = (pos & (PAGE_SIZE - 1));
+ offset = offset_in_page(pos);
bytes = min_t(unsigned long, PAGE_SIZE - offset,
iov_iter_count(i));
again:
@@ -208,7 +810,8 @@ again:
flush_dcache_page(page);
- status = iomap_write_end(inode, pos, bytes, copied, page);
+ status = iomap_write_end(inode, pos, bytes, copied, page,
+ iomap);
if (unlikely(status < 0))
break;
copied = status;
@@ -287,7 +890,7 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
unsigned long offset; /* Offset into pagecache page */
unsigned long bytes; /* Bytes to write to page */
- offset = (pos & (PAGE_SIZE - 1));
+ offset = offset_in_page(pos);
bytes = min_t(loff_t, PAGE_SIZE - offset, length);
rpage = __iomap_read_page(inode, pos);
@@ -302,7 +905,7 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
WARN_ON_ONCE(!PageUptodate(page));
- status = iomap_write_end(inode, pos, bytes, bytes, page);
+ status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
if (unlikely(status <= 0)) {
if (WARN_ON_ONCE(status == 0))
return -EIO;
@@ -354,7 +957,7 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
zero_user(page, offset, bytes);
mark_page_accessed(page);
- return iomap_write_end(inode, pos, bytes, bytes, page);
+ return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
@@ -379,7 +982,7 @@ iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
do {
unsigned offset, bytes;
- offset = pos & (PAGE_SIZE - 1); /* Within page */
+ offset = offset_in_page(pos);
bytes = min_t(loff_t, PAGE_SIZE - offset, count);
if (IS_DAX(inode))
@@ -440,11 +1043,16 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
struct page *page = data;
int ret;
- ret = __block_write_begin_int(page, pos, length, NULL, iomap);
- if (ret)
- return ret;
+ if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
+ ret = __block_write_begin_int(page, pos, length, NULL, iomap);
+ if (ret)
+ return ret;
+ block_commit_write(page, 0, length);
+ } else {
+ WARN_ON_ONCE(!PageUptodate(page));
+ iomap_page_create(inode, page);
+ }
- block_commit_write(page, 0, length);
return length;
}
@@ -467,7 +1075,7 @@ int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
/* page is wholly or partially inside EOF */
if (((page->index + 1) << PAGE_SHIFT) > size)
- length = size & ~PAGE_MASK;
+ length = offset_in_page(size);
else
length = PAGE_SIZE;
@@ -630,7 +1238,7 @@ page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
goto out_unlock_not_found;
for (off = 0; off < PAGE_SIZE; off += bsize) {
- if ((*lastoff & ~PAGE_MASK) >= off + bsize)
+ if (offset_in_page(*lastoff) >= off + bsize)
continue;
if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
unlock_page(page);
@@ -811,6 +1419,7 @@ struct iomap_dio {
atomic_t ref;
unsigned flags;
int error;
+ bool wait_for_completion;
union {
/* used during submission and for synchronous completion: */
@@ -914,9 +1523,8 @@ static void iomap_dio_bio_end_io(struct bio *bio)
iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
if (atomic_dec_and_test(&dio->ref)) {
- if (is_sync_kiocb(dio->iocb)) {
+ if (dio->wait_for_completion) {
struct task_struct *waiter = dio->submit.waiter;
-
WRITE_ONCE(dio->submit.waiter, NULL);
wake_up_process(waiter);
} else if (dio->flags & IOMAP_DIO_WRITE) {
@@ -963,10 +1571,9 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
}
static loff_t
-iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
+iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
+ struct iomap_dio *dio, struct iomap *iomap)
{
- struct iomap_dio *dio = data;
unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
unsigned int fs_block_size = i_blocksize(inode), pad;
unsigned int align = iov_iter_alignment(dio->submit.iter);
@@ -980,41 +1587,27 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
if ((pos | length | align) & ((1 << blkbits) - 1))
return -EINVAL;
- switch (iomap->type) {
- case IOMAP_HOLE:
- if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
- return -EIO;
- /*FALLTHRU*/
- case IOMAP_UNWRITTEN:
- if (!(dio->flags & IOMAP_DIO_WRITE)) {
- length = iov_iter_zero(length, dio->submit.iter);
- dio->size += length;
- return length;
- }
+ if (iomap->type == IOMAP_UNWRITTEN) {
dio->flags |= IOMAP_DIO_UNWRITTEN;
need_zeroout = true;
- break;
- case IOMAP_MAPPED:
- if (iomap->flags & IOMAP_F_SHARED)
- dio->flags |= IOMAP_DIO_COW;
- if (iomap->flags & IOMAP_F_NEW) {
- need_zeroout = true;
- } else {
- /*
- * Use a FUA write if we need datasync semantics, this
- * is a pure data IO that doesn't require any metadata
- * updates and the underlying device supports FUA. This
- * allows us to avoid cache flushes on IO completion.
- */
- if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
- (dio->flags & IOMAP_DIO_WRITE_FUA) &&
- blk_queue_fua(bdev_get_queue(iomap->bdev)))
- use_fua = true;
- }
- break;
- default:
- WARN_ON_ONCE(1);
- return -EIO;
+ }
+
+ if (iomap->flags & IOMAP_F_SHARED)
+ dio->flags |= IOMAP_DIO_COW;
+
+ if (iomap->flags & IOMAP_F_NEW) {
+ need_zeroout = true;
+ } else {
+ /*
+		 * Use a FUA write if we need datasync semantics; this
+ * is a pure data IO that doesn't require any metadata
+ * updates and the underlying device supports FUA. This
+ * allows us to avoid cache flushes on IO completion.
+ */
+ if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
+ (dio->flags & IOMAP_DIO_WRITE_FUA) &&
+ blk_queue_fua(bdev_get_queue(iomap->bdev)))
+ use_fua = true;
}
/*
@@ -1093,6 +1686,66 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
return copied;
}
+static loff_t
+iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
+{
+ length = iov_iter_zero(length, dio->submit.iter);
+ dio->size += length;
+ return length;
+}
+
+static loff_t
+iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
+ struct iomap_dio *dio, struct iomap *iomap)
+{
+ struct iov_iter *iter = dio->submit.iter;
+ size_t copied;
+
+ BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));
+
+ if (dio->flags & IOMAP_DIO_WRITE) {
+ loff_t size = inode->i_size;
+
+ if (pos > size)
+ memset(iomap->inline_data + size, 0, pos - size);
+ copied = copy_from_iter(iomap->inline_data + pos, length, iter);
+ if (copied) {
+ if (pos + copied > size)
+ i_size_write(inode, pos + copied);
+ mark_inode_dirty(inode);
+ }
+ } else {
+ copied = copy_to_iter(iomap->inline_data + pos, length, iter);
+ }
+ dio->size += copied;
+ return copied;
+}
+
+static loff_t
+iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ struct iomap_dio *dio = data;
+
+ switch (iomap->type) {
+ case IOMAP_HOLE:
+ if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
+ return -EIO;
+ return iomap_dio_hole_actor(length, dio);
+ case IOMAP_UNWRITTEN:
+ if (!(dio->flags & IOMAP_DIO_WRITE))
+ return iomap_dio_hole_actor(length, dio);
+ return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+ case IOMAP_MAPPED:
+ return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+ case IOMAP_INLINE:
+ return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+}
+
/*
* iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
* is being issued as AIO or not. This allows us to optimise pure data writes
@@ -1131,13 +1784,12 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->end_io = end_io;
dio->error = 0;
dio->flags = 0;
+ dio->wait_for_completion = is_sync_kiocb(iocb);
dio->submit.iter = iter;
- if (is_sync_kiocb(iocb)) {
- dio->submit.waiter = current;
- dio->submit.cookie = BLK_QC_T_NONE;
- dio->submit.last_queue = NULL;
- }
+ dio->submit.waiter = current;
+ dio->submit.cookie = BLK_QC_T_NONE;
+ dio->submit.last_queue = NULL;
if (iov_iter_rw(iter) == READ) {
if (pos >= dio->i_size)
@@ -1187,7 +1839,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio_warn_stale_pagecache(iocb->ki_filp);
ret = 0;
- if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
+ if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
!inode->i_sb->s_dio_done_wq) {
ret = sb_init_dio_done_wq(inode->i_sb);
if (ret < 0)
@@ -1202,8 +1854,10 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iomap_dio_actor);
if (ret <= 0) {
/* magic error code to fall back to buffered I/O */
- if (ret == -ENOTBLK)
+ if (ret == -ENOTBLK) {
+ dio->wait_for_completion = true;
ret = 0;
+ }
break;
}
pos += ret;
@@ -1224,7 +1878,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->flags &= ~IOMAP_DIO_NEED_SYNC;
if (!atomic_dec_and_test(&dio->ref)) {
- if (!is_sync_kiocb(iocb))
+ if (!dio->wait_for_completion)
return -EIOCBQUEUED;
for (;;) {
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 8de0e7723316..150cc030b4d7 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -121,7 +121,7 @@ static int journal_submit_commit_record(journal_t *journal,
struct commit_header *tmp;
struct buffer_head *bh;
int ret;
- struct timespec64 now = current_kernel_time64();
+ struct timespec64 now;
*cbh = NULL;
@@ -134,6 +134,7 @@ static int journal_submit_commit_record(journal_t *journal,
return 1;
tmp = (struct commit_header *)bh->b_data;
+ ktime_get_coarse_real_ts64(&now);
tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index b2944f9218f7..f20cff1194bb 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -201,7 +201,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
if (ret)
goto fail;
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(ri->ctime)));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
jffs2_free_raw_inode(ri);
@@ -227,14 +227,14 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(d_inode(dentry));
int ret;
- uint32_t now = get_seconds();
+ uint32_t now = JFFS2_NOW();
ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
dentry->d_name.len, dead_f, now);
if (dead_f->inocache)
set_nlink(d_inode(dentry), dead_f->inocache->pino_nlink);
if (!ret)
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(now));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
return ret;
}
/***********************************************************************/
@@ -260,7 +260,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12;
if (!type) type = DT_REG;
- now = get_seconds();
+ now = JFFS2_NOW();
ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now);
if (!ret) {
@@ -268,7 +268,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
set_nlink(d_inode(old_dentry), ++f->inocache->pino_nlink);
mutex_unlock(&f->sem);
d_instantiate(dentry, d_inode(old_dentry));
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(now));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
ihold(d_inode(old_dentry));
}
return ret;
@@ -400,7 +400,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
rd->pino = cpu_to_je32(dir_i->i_ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = cpu_to_je32(inode->i_ino);
- rd->mctime = cpu_to_je32(get_seconds());
+ rd->mctime = cpu_to_je32(JFFS2_NOW());
rd->nsize = namelen;
rd->type = DT_LNK;
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
@@ -418,7 +418,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(rd->mctime)));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
jffs2_free_raw_dirent(rd);
@@ -543,7 +543,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
rd->pino = cpu_to_je32(dir_i->i_ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = cpu_to_je32(inode->i_ino);
- rd->mctime = cpu_to_je32(get_seconds());
+ rd->mctime = cpu_to_je32(JFFS2_NOW());
rd->nsize = namelen;
rd->type = DT_DIR;
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
@@ -561,7 +561,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(rd->mctime)));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
inc_nlink(dir_i);
jffs2_free_raw_dirent(rd);
@@ -588,7 +588,7 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(dentry));
struct jffs2_full_dirent *fd;
int ret;
- uint32_t now = get_seconds();
+ uint32_t now = JFFS2_NOW();
for (fd = f->dents ; fd; fd = fd->next) {
if (fd->ino)
@@ -598,7 +598,7 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
dentry->d_name.len, f, now);
if (!ret) {
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(now));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
clear_nlink(d_inode(dentry));
drop_nlink(dir_i);
}
@@ -712,7 +712,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
rd->pino = cpu_to_je32(dir_i->i_ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = cpu_to_je32(inode->i_ino);
- rd->mctime = cpu_to_je32(get_seconds());
+ rd->mctime = cpu_to_je32(JFFS2_NOW());
rd->nsize = namelen;
/* XXX: This is ugly. */
@@ -733,7 +733,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(rd->mctime)));
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
jffs2_free_raw_dirent(rd);
@@ -797,7 +797,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
type = (d_inode(old_dentry)->i_mode & S_IFMT) >> 12;
if (!type) type = DT_REG;
- now = get_seconds();
+ now = JFFS2_NOW();
ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i),
d_inode(old_dentry)->i_ino, type,
new_dentry->d_name.name, new_dentry->d_name.len, now);
@@ -853,14 +853,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
* caller won't do it on its own since we are returning an error.
*/
d_invalidate(new_dentry);
- new_dir_i->i_mtime = new_dir_i->i_ctime = timespec_to_timespec64(ITIME(now));
+ new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
return ret;
}
if (d_is_dir(old_dentry))
drop_nlink(old_dir_i);
- new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = timespec_to_timespec64(ITIME(now));
+ new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
return 0;
}
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 481afd4c2e1a..7d8654a1472e 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -175,7 +175,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
ri.uid = cpu_to_je16(i_uid_read(inode));
ri.gid = cpu_to_je16(i_gid_read(inode));
ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
- ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
+ ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW());
ri.offset = cpu_to_je32(inode->i_size);
ri.dsize = cpu_to_je32(pageofs - inode->i_size);
ri.csize = cpu_to_je32(0);
@@ -283,7 +283,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
ri->uid = cpu_to_je16(i_uid_read(inode));
ri->gid = cpu_to_je16(i_gid_read(inode));
ri->isize = cpu_to_je32((uint32_t)inode->i_size);
- ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());
+ ri->atime = ri->ctime = ri->mtime = cpu_to_je32(JFFS2_NOW());
/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
hurt to do it again. The alternative is ifdefs, which are ugly. */
@@ -308,7 +308,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
inode->i_size = pos + writtenlen;
inode->i_blocks = (inode->i_size + 511) >> 9;
- inode->i_ctime = inode->i_mtime = timespec_to_timespec64(ITIME(je32_to_cpu(ri->ctime)));
+ inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
}
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 0ecfb8ea38cd..eab04eca95a3 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -146,9 +146,9 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
return PTR_ERR(new_metadata);
}
/* It worked. Update the inode */
- inode->i_atime = timespec_to_timespec64(ITIME(je32_to_cpu(ri->atime)));
- inode->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(ri->ctime)));
- inode->i_mtime = timespec_to_timespec64(ITIME(je32_to_cpu(ri->mtime)));
+ inode->i_atime = ITIME(je32_to_cpu(ri->atime));
+ inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+ inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
inode->i_mode = jemode_to_cpu(ri->mode);
i_uid_write(inode, je16_to_cpu(ri->uid));
i_gid_write(inode, je16_to_cpu(ri->gid));
@@ -280,9 +280,9 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
i_uid_write(inode, je16_to_cpu(latest_node.uid));
i_gid_write(inode, je16_to_cpu(latest_node.gid));
inode->i_size = je32_to_cpu(latest_node.isize);
- inode->i_atime = timespec_to_timespec64(ITIME(je32_to_cpu(latest_node.atime)));
- inode->i_mtime = timespec_to_timespec64(ITIME(je32_to_cpu(latest_node.mtime)));
- inode->i_ctime = timespec_to_timespec64(ITIME(je32_to_cpu(latest_node.ctime)));
+ inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
+ inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
+ inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
set_nlink(inode, f->inocache->pino_nlink);
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index c2fbec19c616..a2dbbb3f4c74 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -31,12 +31,13 @@ struct kvec;
#define JFFS2_F_I_GID(f) (i_gid_read(OFNI_EDONI_2SFFJ(f)))
#define JFFS2_F_I_RDEV(f) (OFNI_EDONI_2SFFJ(f)->i_rdev)
-#define ITIME(sec) ((struct timespec){sec, 0})
-#define I_SEC(tv) ((tv).tv_sec)
-#define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime.tv_sec)
-#define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime.tv_sec)
-#define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime.tv_sec)
-
+#define JFFS2_CLAMP_TIME(t) ((uint32_t)clamp_t(time64_t, (t), 0, U32_MAX))
+#define ITIME(sec) ((struct timespec64){sec, 0})
+#define JFFS2_NOW() JFFS2_CLAMP_TIME(ktime_get_real_seconds())
+#define I_SEC(tv) JFFS2_CLAMP_TIME((tv).tv_sec)
+#define JFFS2_F_I_CTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_ctime)
+#define JFFS2_F_I_MTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_mtime)
+#define JFFS2_F_I_ATIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_atime)
#define sleep_on_spinunlock(wq, s) \
do { \
DECLARE_WAITQUEUE(__wait, current); \
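The new macros clamp 64-bit kernel time into JFFS2's unsigned 32-bit on-disk window (1970..2106). A small sketch of the assumed behaviour, for illustration only:

	static void jffs2_time_example(void)
	{
		struct timespec64 now = ITIME(JFFS2_NOW());

		/* timestamps are whole seconds; ITIME() zeroes the nanoseconds */
		WARN_ON(now.tv_nsec != 0);
		/* pre-epoch values pin to 0 ... */
		WARN_ON(JFFS2_CLAMP_TIME((time64_t)-1) != 0);
		/* ... and post-2106 values pin to U32_MAX */
		WARN_ON(JFFS2_CLAMP_TIME((time64_t)0x100000000LL) != U32_MAX);
	}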
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index f36ef68905a7..93e8c590ff5c 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -491,13 +491,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
/* release the page */
release_metapage(mp);
- /*
- * __mark_inode_dirty expects inodes to be hashed. Since we don't
- * want special inodes in the fileset inode space, we make them
- * appear hashed, but do not put on any lists. hlist_del()
- * will work fine and require no locking.
- */
- hlist_add_fake(&ip->i_hash);
+ inode_fake_hash(ip);
return (ip);
}
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 9940a1e04cbf..912a3af2393e 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -43,7 +43,7 @@ struct jfs_inode_info {
pxd_t ixpxd; /* inode extent descriptor */
dxd_t acl; /* dxd describing acl */
dxd_t ea; /* dxd describing ea */
- time_t otime; /* time created */
+ time64_t otime; /* time created */
uint next_index; /* next available directory entry index */
int acltype; /* Type of ACL */
short btorder; /* access order */
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 5e9b7bb3aabf..4572b7cf183d 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -61,8 +61,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
inode = new_inode(sb);
if (!inode) {
jfs_warn("ialloc: new_inode returned NULL!");
- rc = -ENOMEM;
- goto fail;
+ return ERR_PTR(-ENOMEM);
}
jfs_inode = JFS_IP(inode);
@@ -70,8 +69,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
rc = diAlloc(parent, S_ISDIR(mode), inode);
if (rc) {
jfs_warn("ialloc: diAlloc returned %d!", rc);
- if (rc == -EIO)
- make_bad_inode(inode);
goto fail_put;
}
@@ -141,9 +138,10 @@ fail_drop:
dquot_drop(inode);
inode->i_flags |= S_NOQUOTA;
clear_nlink(inode);
- unlock_new_inode(inode);
+ discard_new_inode(inode);
+ return ERR_PTR(rc);
+
fail_put:
iput(inode);
-fail:
return ERR_PTR(rc);
}
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 56c3fcbfe80e..14528c0ffe63 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -175,8 +175,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
@@ -309,8 +308,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
@@ -1054,8 +1052,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
@@ -1441,8 +1438,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
if (rc) {
free_ea_wmap(ip);
clear_nlink(ip);
- unlock_new_inode(ip);
- iput(ip);
+ discard_new_inode(ip);
} else {
d_instantiate_new(dentry, ip);
}
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index f08571433aba..09da5cf14e27 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -581,7 +581,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
inode->i_ino = 0;
inode->i_size = i_size_read(sb->s_bdev->bd_inode);
inode->i_mapping->a_ops = &jfs_metapage_aops;
- hlist_add_fake(&inode->i_hash);
+ inode_fake_hash(inode);
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
sbi->direct_inode = inode;
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index d66cc0777303..4ca0b5c18192 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -619,6 +619,7 @@ struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
unsigned flags)
{
struct kernfs_node *kn;
@@ -661,8 +662,22 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
kn->mode = mode;
kn->flags = flags;
+ if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) {
+ struct iattr iattr = {
+ .ia_valid = ATTR_UID | ATTR_GID,
+ .ia_uid = uid,
+ .ia_gid = gid,
+ };
+
+ ret = __kernfs_setattr(kn, &iattr);
+ if (ret < 0)
+ goto err_out3;
+ }
+
return kn;
+ err_out3:
+ idr_remove(&root->ino_idr, kn->id.ino);
err_out2:
kmem_cache_free(kernfs_node_cache, kn);
err_out1:
@@ -672,11 +687,13 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
unsigned flags)
{
struct kernfs_node *kn;
- kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags);
+ kn = __kernfs_new_node(kernfs_root(parent),
+ name, mode, uid, gid, flags);
if (kn) {
kernfs_get(parent);
kn->parent = parent;
@@ -946,6 +963,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
root->next_generation = 1;
kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
KERNFS_DIR);
if (!kn) {
idr_destroy(&root->ino_idr);
@@ -984,6 +1002,8 @@ void kernfs_destroy_root(struct kernfs_root *root)
* @parent: parent in which to create a new directory
* @name: name of the new directory
* @mode: mode of the new directory
+ * @uid: uid of the new directory
+ * @gid: gid of the new directory
* @priv: opaque data associated with the new directory
* @ns: optional namespace tag of the directory
*
@@ -991,13 +1011,15 @@ void kernfs_destroy_root(struct kernfs_root *root)
*/
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
void *priv, const void *ns)
{
struct kernfs_node *kn;
int rc;
/* allocate */
- kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR);
+ kn = kernfs_new_node(parent, name, mode | S_IFDIR,
+ uid, gid, KERNFS_DIR);
if (!kn)
return ERR_PTR(-ENOMEM);
@@ -1028,7 +1050,8 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
int rc;
/* allocate */
- kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, KERNFS_DIR);
+ kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR);
if (!kn)
return ERR_PTR(-ENOMEM);
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 2015d8c45e4a..dbf5bc250bfd 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -965,6 +965,8 @@ const struct file_operations kernfs_file_fops = {
* @parent: directory to create the file in
* @name: name of the file
* @mode: mode of the file
+ * @uid: uid of the file
+ * @gid: gid of the file
* @size: size of the file
* @ops: kernfs operations for the file
* @priv: private data for the file
@@ -975,7 +977,8 @@ const struct file_operations kernfs_file_fops = {
*/
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
const char *name,
- umode_t mode, loff_t size,
+ umode_t mode, kuid_t uid, kgid_t gid,
+ loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
struct lock_class_key *key)
@@ -986,7 +989,8 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
flags = KERNFS_FILE;
- kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
+ kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG,
+ uid, gid, flags);
if (!kn)
return ERR_PTR(-ENOMEM);
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 3d73fe9d56e2..80cebcd94c90 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -63,7 +63,7 @@ out_unlock:
return ret;
}
-static int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
+int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
{
struct kernfs_iattrs *attrs;
struct iattr *iattrs;
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 0f260dcca177..3d83b114bb08 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -90,6 +90,7 @@ int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr);
int kernfs_iop_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags);
ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size);
+int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
/*
* dir.c
@@ -104,6 +105,7 @@ void kernfs_put_active(struct kernfs_node *kn);
int kernfs_add_one(struct kernfs_node *kn);
struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
unsigned flags);
struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root,
unsigned int ino);
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 08ccabd7047f..305b220af45d 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -21,6 +21,7 @@
* @target: target node for the symlink to point to
*
* Returns the created node on success, ERR_PTR() value on error.
+ * Ownership of the link matches ownership of the target.
*/
struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
@@ -28,8 +29,16 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
{
struct kernfs_node *kn;
int error;
+ kuid_t uid = GLOBAL_ROOT_UID;
+ kgid_t gid = GLOBAL_ROOT_GID;
- kn = kernfs_new_node(parent, name, S_IFLNK|S_IRWXUGO, KERNFS_LINK);
+ if (target->iattr) {
+ uid = target->iattr->ia_iattr.ia_uid;
+ gid = target->iattr->ia_iattr.ia_gid;
+ }
+
+ kn = kernfs_new_node(parent, name, S_IFLNK|S_IRWXUGO, uid, gid,
+ KERNFS_LINK);
if (!kn)
return ERR_PTR(-ENOMEM);
@@ -88,7 +97,7 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
int slen = strlen(kn->name);
len -= slen;
- strncpy(s + len, kn->name, slen);
+ memcpy(s + len, kn->name, slen);
if (len)
s[--len] = '/';
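With ownership plumbed through, kernfs callers can create nodes owned by non-root users directly. A hedged sketch; the uid/gid values and names are assumptions for illustration:

	static struct kernfs_node *example_make_dir(struct kernfs_node *parent)
	{
		kuid_t uid = make_kuid(&init_user_ns, 1000);	/* assumed */
		kgid_t gid = make_kgid(&init_user_ns, 1000);	/* assumed */

		return kernfs_create_dir_ns(parent, "example", 0755,
					    uid, gid, NULL, NULL);
	}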
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 96c1d14c18f1..c2a128678e6e 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -187,7 +187,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
continue;
if (!rpc_cmp_addr(nlm_addr(block->b_host), addr))
continue;
- if (nfs_compare_fh(NFS_FH(file_inode(fl_blocked->fl_file)) ,fh) != 0)
+ if (nfs_compare_fh(NFS_FH(locks_inode(fl_blocked->fl_file)), fh) != 0)
continue;
/* Alright, we found a lock. Set the return status
* and wake up the caller
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index a2c0dfc6fdc0..d20b92f271c2 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -128,7 +128,7 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
char *nodename = req->a_host->h_rpcclnt->cl_nodename;
nlmclnt_next_cookie(&argp->cookie);
- memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
+ memcpy(&lock->fh, NFS_FH(locks_inode(fl->fl_file)), sizeof(struct nfs_fh));
lock->caller = nodename;
lock->oh.data = req->a_owner;
lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 3701bccab478..74330daeab71 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -405,8 +405,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
__be32 ret;
dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
- file_inode(file->f_file)->i_sb->s_id,
- file_inode(file->f_file)->i_ino,
+ locks_inode(file->f_file)->i_sb->s_id,
+ locks_inode(file->f_file)->i_ino,
lock->fl.fl_type, lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end,
@@ -511,8 +511,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
__be32 ret;
dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
- file_inode(file->f_file)->i_sb->s_id,
- file_inode(file->f_file)->i_ino,
+ locks_inode(file->f_file)->i_sb->s_id,
+ locks_inode(file->f_file)->i_ino,
lock->fl.fl_type,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
@@ -566,8 +566,8 @@ nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
int error;
dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
- file_inode(file->f_file)->i_sb->s_id,
- file_inode(file->f_file)->i_ino,
+ locks_inode(file->f_file)->i_sb->s_id,
+ locks_inode(file->f_file)->i_ino,
lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
@@ -595,8 +595,8 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l
int status = 0;
dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
- file_inode(file->f_file)->i_sb->s_id,
- file_inode(file->f_file)->i_ino,
+ locks_inode(file->f_file)->i_sb->s_id,
+ locks_inode(file->f_file)->i_ino,
lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 4ec3d6e03e76..899360ba3b84 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -44,7 +44,7 @@ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
static inline void nlm_debug_print_file(char *msg, struct nlm_file *file)
{
- struct inode *inode = file_inode(file->f_file);
+ struct inode *inode = locks_inode(file->f_file);
dprintk("lockd: %s %s/%ld\n",
msg, inode->i_sb->s_id, inode->i_ino);
@@ -414,7 +414,7 @@ nlmsvc_match_sb(void *datap, struct nlm_file *file)
{
struct super_block *sb = datap;
- return sb == file_inode(file->f_file)->i_sb;
+ return sb == locks_inode(file->f_file)->i_sb;
}
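The lockd conversions above swap file_inode() for locks_inode() so the code looks at the inode the locks actually live on, which can differ from f_inode on overlayfs. The helper is assumed to be defined roughly as follows in include/linux/fs.h:

	static inline struct inode *locks_inode(const struct file *f)
	{
		return f->f_path.dentry->d_inode;
	}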
/**
diff --git a/fs/locks.c b/fs/locks.c
index db7b6917d9c5..2ecb4db8c840 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -139,11 +139,6 @@
#define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl) (fl->fl_pid <= 0)
-static inline bool is_remote_lock(struct file *filp)
-{
- return likely(!(filp->f_path.dentry->d_sb->s_flags & SB_NOREMOTELOCK));
-}
-
static bool lease_breaking(struct file_lock *fl)
{
return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
@@ -202,10 +197,6 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
* we often hold the flc_lock as well. In certain cases, when reading the fields
* protected by this lock, we can skip acquiring it iff we already hold the
* flc_lock.
- *
- * In particular, adding an entry to the fl_block list requires that you hold
- * both the flc_lock and the blocked_lock_lock (acquired in that order).
- * Deleting an entry from the list however only requires the file_lock_lock.
*/
static DEFINE_SPINLOCK(blocked_lock_lock);
@@ -546,7 +537,7 @@ lease_setup(struct file_lock *fl, void **priv)
if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
*priv = NULL;
- __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
+ __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}
static const struct lock_manager_operations lease_manager_ops = {
@@ -990,6 +981,7 @@ out:
if (new_fl)
locks_free_lock(new_fl);
locks_dispose_list(&dispose);
+ trace_flock_lock_inode(inode, request, error);
return error;
}
@@ -1654,8 +1646,7 @@ check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
if (flags & FL_LAYOUT)
return 0;
- if ((arg == F_RDLCK) &&
- (atomic_read(&d_real_inode(dentry)->i_writecount) > 0))
+ if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
return -EAGAIN;
if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
@@ -1876,7 +1867,7 @@ EXPORT_SYMBOL(generic_setlease);
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
- if (filp->f_op->setlease && is_remote_lock(filp))
+ if (filp->f_op->setlease)
return filp->f_op->setlease(filp, arg, lease, priv);
else
return generic_setlease(filp, arg, lease, priv);
@@ -2023,7 +2014,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
if (error)
goto out_free;
- if (f.file->f_op->flock && is_remote_lock(f.file))
+ if (f.file->f_op->flock)
error = f.file->f_op->flock(f.file,
(can_sleep) ? F_SETLKW : F_SETLK,
lock);
@@ -2049,7 +2040,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
*/
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
- if (filp->f_op->lock && is_remote_lock(filp))
+ if (filp->f_op->lock)
return filp->f_op->lock(filp, F_GETLK, fl);
posix_test_lock(filp, fl);
return 0;
@@ -2072,6 +2063,13 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
return -1;
if (IS_REMOTELCK(fl))
return fl->fl_pid;
+ /*
+ * If the flock owner process is dead and its pid has already been
+ * freed, the translation below won't work, but we still want to show
+ * the flock owner's pid number in the init pidns.
+ */
+ if (ns == &init_pid_ns)
+ return (pid_t)fl->fl_pid;
rcu_read_lock();
pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
@@ -2192,7 +2190,7 @@ out:
*/
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
- if (filp->f_op->lock && is_remote_lock(filp))
+ if (filp->f_op->lock)
return filp->f_op->lock(filp, cmd, fl);
else
return posix_lock_file(filp, fl, conf);
@@ -2514,7 +2512,7 @@ locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
if (list_empty(&flctx->flc_flock))
return;
- if (filp->f_op->flock && is_remote_lock(filp))
+ if (filp->f_op->flock)
filp->f_op->flock(filp, F_SETLKW, &fl);
else
flock_lock_inode(inode, &fl);
@@ -2601,7 +2599,7 @@ EXPORT_SYMBOL(posix_unblock_lock);
*/
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
- if (filp->f_op->lock && is_remote_lock(filp))
+ if (filp->f_op->lock)
return filp->f_op->lock(filp, F_CANCELLK, fl);
return 0;
}
@@ -2626,12 +2624,10 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
fl_pid = locks_translate_pid(fl, proc_pidns);
/*
- * If there isn't a fl_pid don't display who is waiting on
- * the lock if we are called from locks_show, or if we are
- * called from __show_fd_info - skip lock entirely
+ * If the lock owner is dead (and its pid has been freed) or not visible
+ * in the current pidns, zero is shown as the pid value. Check the lock
+ * info from init_pid_ns to get the saved lock pid value.
*/
- if (fl_pid == 0)
- return;
if (fl->fl_file != NULL)
inode = locks_inode(fl->fl_file);
diff --git a/fs/mpage.c b/fs/mpage.c
index b7e7f570733a..c820dc9bebab 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -51,8 +51,8 @@ static void mpage_end_io(struct bio *bio)
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- page_endio(page, op_is_write(bio_op(bio)),
- blk_status_to_errno(bio->bi_status));
+ page_endio(page, bio_op(bio),
+ blk_status_to_errno(bio->bi_status));
}
bio_put(bio);
@@ -133,6 +133,17 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
} while (page_bh != head);
}
+struct mpage_readpage_args {
+ struct bio *bio;
+ struct page *page;
+ unsigned int nr_pages;
+ bool is_readahead;
+ sector_t last_block_in_bio;
+ struct buffer_head map_bh;
+ unsigned long first_logical_block;
+ get_block_t *get_block;
+};
+
/*
* This is the worker routine which does all the work of mapping the disk
* blocks and constructs largest possible bios, submits them for IO if the
@@ -142,16 +153,14 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
* represent the validity of its disk mapping and to decide when to do the next
* get_block() call.
*/
-static struct bio *
-do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
- sector_t *last_block_in_bio, struct buffer_head *map_bh,
- unsigned long *first_logical_block, get_block_t get_block,
- gfp_t gfp)
+static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
{
+ struct page *page = args->page;
struct inode *inode = page->mapping->host;
const unsigned blkbits = inode->i_blkbits;
const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
const unsigned blocksize = 1 << blkbits;
+ struct buffer_head *map_bh = &args->map_bh;
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
@@ -161,14 +170,24 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
struct block_device *bdev = NULL;
int length;
int fully_mapped = 1;
+ int op_flags;
unsigned nblocks;
unsigned relative_block;
+ gfp_t gfp;
+
+ if (args->is_readahead) {
+ op_flags = REQ_RAHEAD;
+ gfp = readahead_gfp_mask(page->mapping);
+ } else {
+ op_flags = 0;
+ gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+ }
if (page_has_buffers(page))
goto confused;
block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
- last_block = block_in_file + nr_pages * blocks_per_page;
+ last_block = block_in_file + args->nr_pages * blocks_per_page;
last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
if (last_block > last_block_in_file)
last_block = last_block_in_file;
@@ -178,9 +197,10 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
* Map blocks using the result from the previous get_blocks call first.
*/
nblocks = map_bh->b_size >> blkbits;
- if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
- block_in_file < (*first_logical_block + nblocks)) {
- unsigned map_offset = block_in_file - *first_logical_block;
+ if (buffer_mapped(map_bh) &&
+ block_in_file > args->first_logical_block &&
+ block_in_file < (args->first_logical_block + nblocks)) {
+ unsigned map_offset = block_in_file - args->first_logical_block;
unsigned last = nblocks - map_offset;
for (relative_block = 0; ; relative_block++) {
@@ -208,9 +228,9 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
if (block_in_file < last_block) {
map_bh->b_size = (last_block-block_in_file) << blkbits;
- if (get_block(inode, block_in_file, map_bh, 0))
+ if (args->get_block(inode, block_in_file, map_bh, 0))
goto confused;
- *first_logical_block = block_in_file;
+ args->first_logical_block = block_in_file;
}
if (!buffer_mapped(map_bh)) {
@@ -273,43 +293,45 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
/*
* This page will go to BIO. Do we need to send this BIO off first?
*/
- if (bio && (*last_block_in_bio != blocks[0] - 1))
- bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
+ if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
+ args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
alloc_new:
- if (bio == NULL) {
+ if (args->bio == NULL) {
if (first_hole == blocks_per_page) {
if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
page))
goto out;
}
- bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
- if (bio == NULL)
+ args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
+ min_t(int, args->nr_pages,
+ BIO_MAX_PAGES),
+ gfp);
+ if (args->bio == NULL)
goto confused;
}
length = first_hole << blkbits;
- if (bio_add_page(bio, page, length, 0) < length) {
- bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
+ if (bio_add_page(args->bio, page, length, 0) < length) {
+ args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
goto alloc_new;
}
- relative_block = block_in_file - *first_logical_block;
+ relative_block = block_in_file - args->first_logical_block;
nblocks = map_bh->b_size >> blkbits;
if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
(first_hole != blocks_per_page))
- bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
+ args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
else
- *last_block_in_bio = blocks[blocks_per_page - 1];
+ args->last_block_in_bio = blocks[blocks_per_page - 1];
out:
- return bio;
+ return args->bio;
confused:
- if (bio)
- bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
+ if (args->bio)
+ args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
if (!PageUptodate(page))
- block_read_full_page(page, get_block);
+ block_read_full_page(page, args->get_block);
else
unlock_page(page);
goto out;
@@ -363,15 +385,12 @@ int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
unsigned nr_pages, get_block_t get_block)
{
- struct bio *bio = NULL;
+ struct mpage_readpage_args args = {
+ .get_block = get_block,
+ .is_readahead = true,
+ };
unsigned page_idx;
- sector_t last_block_in_bio = 0;
- struct buffer_head map_bh;
- unsigned long first_logical_block = 0;
- gfp_t gfp = readahead_gfp_mask(mapping);
- map_bh.b_state = 0;
- map_bh.b_size = 0;
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
struct page *page = lru_to_page(pages);
@@ -379,18 +398,16 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
list_del(&page->lru);
if (!add_to_page_cache_lru(page, mapping,
page->index,
- gfp)) {
- bio = do_mpage_readpage(bio, page,
- nr_pages - page_idx,
- &last_block_in_bio, &map_bh,
- &first_logical_block,
- get_block, gfp);
+ readahead_gfp_mask(mapping))) {
+ args.page = page;
+ args.nr_pages = nr_pages - page_idx;
+ args.bio = do_mpage_readpage(&args);
}
put_page(page);
}
BUG_ON(!list_empty(pages));
- if (bio)
- mpage_bio_submit(REQ_OP_READ, 0, bio);
+ if (args.bio)
+ mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
return 0;
}
EXPORT_SYMBOL(mpage_readpages);
@@ -400,18 +417,15 @@ EXPORT_SYMBOL(mpage_readpages);
*/
int mpage_readpage(struct page *page, get_block_t get_block)
{
- struct bio *bio = NULL;
- sector_t last_block_in_bio = 0;
- struct buffer_head map_bh;
- unsigned long first_logical_block = 0;
- gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+ struct mpage_readpage_args args = {
+ .page = page,
+ .nr_pages = 1,
+ .get_block = get_block,
+ };
- map_bh.b_state = 0;
- map_bh.b_size = 0;
- bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
- &map_bh, &first_logical_block, get_block, gfp);
- if (bio)
- mpage_bio_submit(REQ_OP_READ, 0, bio);
+ args.bio = do_mpage_readpage(&args);
+ if (args.bio)
+ mpage_bio_submit(REQ_OP_READ, 0, args.bio);
return 0;
}
EXPORT_SYMBOL(mpage_readpage);
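A minimal caller sketch (examplefs_* and example_get_block are hypothetical names): the refactor above folds the per-call state into struct mpage_readpage_args while leaving the exported entry points' signatures unchanged, so filesystem address_space hooks keep working as before.

static int examplefs_readpage(struct file *file, struct page *page)
{
	/* signature of mpage_readpage() is unchanged by the refactor */
	return mpage_readpage(page, example_get_block);
}

static int examplefs_readpages(struct file *file,
			       struct address_space *mapping,
			       struct list_head *pages, unsigned nr_pages)
{
	/* readahead path: bios are now tagged REQ_RAHEAD internally */
	return mpage_readpages(mapping, pages, nr_pages, example_get_block);
}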
diff --git a/fs/namei.c b/fs/namei.c
index 734cef54fdf8..0cab6494978c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -887,6 +887,8 @@ static inline void put_link(struct nameidata *nd)
int sysctl_protected_symlinks __read_mostly = 0;
int sysctl_protected_hardlinks __read_mostly = 0;
+int sysctl_protected_fifos __read_mostly;
+int sysctl_protected_regular __read_mostly;
/**
* may_follow_link - Check symlink following for unsafe situations
@@ -1003,6 +1005,45 @@ static int may_linkat(struct path *link)
return -EPERM;
}
+/**
+ * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
+ * should be allowed, or not, on files that already
+ * exist.
+ * @dir: the sticky parent directory
+ * @inode: the inode of the file to open
+ *
+ * Block an O_CREAT open of a FIFO (or a regular file) when:
+ * - sysctl_protected_fifos (or sysctl_protected_regular) is enabled
+ * - the file already exists
+ * - we are in a sticky directory
+ * - we don't own the file
+ * - the owner of the directory doesn't own the file
+ * - the directory is world writable
+ * If the sysctl_protected_fifos (or sysctl_protected_regular) is set to 2
+ * the directory doesn't have to be world writable: being group writable will
+ * be enough.
+ *
+ * Returns 0 if the open is allowed, -ve on error.
+ */
+static int may_create_in_sticky(struct dentry * const dir,
+ struct inode * const inode)
+{
+ if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
+ (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
+ likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
+ uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
+ uid_eq(current_fsuid(), inode->i_uid))
+ return 0;
+
+ if (likely(dir->d_inode->i_mode & 0002) ||
+ (dir->d_inode->i_mode & 0020 &&
+ ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
+ (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
+ return -EACCES;
+ }
+ return 0;
+}
+
static __always_inline
const char *get_link(struct nameidata *nd)
{
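An illustrative userspace consequence of may_create_in_sticky() (hypothetical path; assumes fs.protected_fifos=1, that /tmp is the usual sticky world-writable directory, and that the FIFO belongs to neither the caller nor root):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/* FIFO previously created by another user in the sticky /tmp */
	int fd = open("/tmp/victim-fifo", O_CREAT | O_WRONLY, 0600);

	if (fd < 0 && errno == EACCES)
		puts("open blocked by fs.protected_fifos");
	return 0;
}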
@@ -1015,7 +1056,7 @@ const char *get_link(struct nameidata *nd)
if (!(nd->flags & LOOKUP_RCU)) {
touch_atime(&last->link);
cond_resched();
- } else if (atime_needs_update_rcu(&last->link, inode)) {
+ } else if (atime_needs_update(&last->link, inode)) {
if (unlikely(unlazy_walk(nd)))
return ERR_PTR(-ECHILD);
touch_atime(&last->link);
@@ -2028,6 +2069,8 @@ static int link_path_walk(const char *name, struct nameidata *nd)
{
int err;
+ if (IS_ERR(name))
+ return PTR_ERR(name);
while (*name=='/')
name++;
if (!*name)
@@ -2125,12 +2168,15 @@ OK:
}
}
+/* must be paired with terminate_walk() */
static const char *path_init(struct nameidata *nd, unsigned flags)
{
const char *s = nd->name->name;
if (!*s)
flags &= ~LOOKUP_RCU;
+ if (flags & LOOKUP_RCU)
+ rcu_read_lock();
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
@@ -2143,7 +2189,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->path = nd->root;
nd->inode = inode;
if (flags & LOOKUP_RCU) {
- rcu_read_lock();
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
nd->root_seq = nd->seq;
nd->m_seq = read_seqbegin(&mount_lock);
@@ -2159,21 +2204,15 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->m_seq = read_seqbegin(&mount_lock);
if (*s == '/') {
- if (flags & LOOKUP_RCU)
- rcu_read_lock();
set_root(nd);
if (likely(!nd_jump_root(nd)))
return s;
- nd->root.mnt = NULL;
- rcu_read_unlock();
return ERR_PTR(-ECHILD);
} else if (nd->dfd == AT_FDCWD) {
if (flags & LOOKUP_RCU) {
struct fs_struct *fs = current->fs;
unsigned seq;
- rcu_read_lock();
-
do {
seq = read_seqcount_begin(&fs->seq);
nd->path = fs->pwd;
@@ -2195,16 +2234,13 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
dentry = f.file->f_path.dentry;
- if (*s) {
- if (!d_can_lookup(dentry)) {
- fdput(f);
- return ERR_PTR(-ENOTDIR);
- }
+ if (*s && unlikely(!d_can_lookup(dentry))) {
+ fdput(f);
+ return ERR_PTR(-ENOTDIR);
}
nd->path = f.file->f_path;
if (flags & LOOKUP_RCU) {
- rcu_read_lock();
nd->inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
} else {
@@ -2272,24 +2308,15 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
const char *s = path_init(nd, flags);
int err;
- if (IS_ERR(s))
- return PTR_ERR(s);
-
- if (unlikely(flags & LOOKUP_DOWN)) {
+ if (unlikely(flags & LOOKUP_DOWN) && !IS_ERR(s)) {
err = handle_lookup_down(nd);
- if (unlikely(err < 0)) {
- terminate_walk(nd);
- return err;
- }
+ if (unlikely(err < 0))
+ s = ERR_PTR(err);
}
while (!(err = link_path_walk(s, nd))
&& ((err = lookup_last(nd)) > 0)) {
s = trailing_symlink(nd);
- if (IS_ERR(s)) {
- err = PTR_ERR(s);
- break;
- }
}
if (!err)
err = complete_walk(nd);
@@ -2336,10 +2363,7 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
struct path *parent)
{
const char *s = path_init(nd, flags);
- int err;
- if (IS_ERR(s))
- return PTR_ERR(s);
- err = link_path_walk(s, nd);
+ int err = link_path_walk(s, nd);
if (!err)
err = complete_walk(nd);
if (!err) {
@@ -2666,15 +2690,10 @@ path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
{
const char *s = path_init(nd, flags);
int err;
- if (IS_ERR(s))
- return PTR_ERR(s);
+
while (!(err = link_path_walk(s, nd)) &&
(err = mountpoint_last(nd)) > 0) {
s = trailing_symlink(nd);
- if (IS_ERR(s)) {
- err = PTR_ERR(s);
- break;
- }
}
if (!err) {
*path = nd->path;
@@ -3027,17 +3046,16 @@ static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t m
* Returns 0 if successful. The file will have been created and attached to
* @file by the filesystem calling finish_open().
*
- * Returns 1 if the file was looked up only or didn't need creating. The
- * caller will need to perform the open themselves. @path will have been
- * updated to point to the new dentry. This may be negative.
+ * If the file was looked up only or didn't need creating, FMODE_OPENED won't
+ * be set. The caller will need to perform the open themselves. @path will
+ * have been updated to point to the new dentry. This may be negative.
*
* Returns an error code otherwise.
*/
static int atomic_open(struct nameidata *nd, struct dentry *dentry,
struct path *path, struct file *file,
const struct open_flags *op,
- int open_flag, umode_t mode,
- int *opened)
+ int open_flag, umode_t mode)
{
struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
struct inode *dir = nd->path.dentry->d_inode;
@@ -3052,39 +3070,38 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
file->f_path.dentry = DENTRY_NOT_SET;
file->f_path.mnt = nd->path.mnt;
error = dir->i_op->atomic_open(dir, dentry, file,
- open_to_namei_flags(open_flag),
- mode, opened);
+ open_to_namei_flags(open_flag), mode);
d_lookup_done(dentry);
if (!error) {
- /*
- * We didn't have the inode before the open, so check open
- * permission here.
- */
- int acc_mode = op->acc_mode;
- if (*opened & FILE_CREATED) {
- WARN_ON(!(open_flag & O_CREAT));
- fsnotify_create(dir, dentry);
- acc_mode = 0;
- }
- error = may_open(&file->f_path, acc_mode, open_flag);
- if (WARN_ON(error > 0))
- error = -EINVAL;
- } else if (error > 0) {
- if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
+ if (file->f_mode & FMODE_OPENED) {
+ /*
+ * We didn't have the inode before the open, so check open
+ * permission here.
+ */
+ int acc_mode = op->acc_mode;
+ if (file->f_mode & FMODE_CREATED) {
+ WARN_ON(!(open_flag & O_CREAT));
+ fsnotify_create(dir, dentry);
+ acc_mode = 0;
+ }
+ error = may_open(&file->f_path, acc_mode, open_flag);
+ if (WARN_ON(error > 0))
+ error = -EINVAL;
+ } else if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
error = -EIO;
} else {
if (file->f_path.dentry) {
dput(dentry);
dentry = file->f_path.dentry;
}
- if (*opened & FILE_CREATED)
+ if (file->f_mode & FMODE_CREATED)
fsnotify_create(dir, dentry);
if (unlikely(d_is_negative(dentry))) {
error = -ENOENT;
} else {
path->dentry = dentry;
path->mnt = nd->path.mnt;
- return 1;
+ return 0;
}
}
}
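Under the new convention a filesystem's ->atomic_open() reports creation through file->f_mode rather than the old *opened bitmap; a sketch with hypothetical examplefs helpers:

static int examplefs_atomic_open(struct inode *dir, struct dentry *dentry,
				 struct file *file, unsigned open_flags,
				 umode_t mode)
{
	bool created = false;
	int err;

	/* hypothetical helper: instantiates dentry, creating on O_CREAT */
	err = examplefs_lookup_or_create(dir, dentry, open_flags, mode,
					 &created);
	if (err)
		return err;
	if (created)
		file->f_mode |= FMODE_CREATED;	/* was: *opened |= FILE_CREATED */
	/* finish_open() sets FMODE_OPENED; no more FILE_OPENED bookkeeping */
	return finish_open(file, dentry, NULL);
}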
@@ -3095,25 +3112,22 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
/*
* Look up and maybe create and open the last component.
*
- * Must be called with i_mutex held on parent.
+ * Must be called with parent locked (exclusive in O_CREAT case).
*
- * Returns 0 if the file was successfully atomically created (if necessary) and
- * opened. In this case the file will be returned attached to @file.
+ * Returns 0 on success, that is, if
+ * the file was successfully atomically created (if necessary) and opened, or
+ * the file was not completely opened at this time, though lookups and
+ * creations were performed.
+ * These cases are distinguished by the presence of FMODE_OPENED on
+ * file->f_mode. In the latter case the dentry returned in @path might be
+ * negative if O_CREAT hadn't been specified.
*
- * Returns 1 if the file was not completely opened at this time, though lookups
- * and creations will have been performed and the dentry returned in @path will
- * be positive upon return if O_CREAT was specified. If O_CREAT wasn't
- * specified then a negative dentry may be returned.
- *
- * An error code is returned otherwise.
- *
- * FILE_CREATE will be set in @*opened if the dentry was created and will be
- * cleared otherwise prior to returning.
+ * An error code is returned on failure.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
struct file *file,
const struct open_flags *op,
- bool got_write, int *opened)
+ bool got_write)
{
struct dentry *dir = nd->path.dentry;
struct inode *dir_inode = dir->d_inode;
@@ -3126,7 +3140,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
if (unlikely(IS_DEADDIR(dir_inode)))
return -ENOENT;
- *opened &= ~FILE_CREATED;
+ file->f_mode &= ~FMODE_CREATED;
dentry = d_lookup(dir, &nd->last);
for (;;) {
if (!dentry) {
@@ -3188,7 +3202,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
if (dir_inode->i_op->atomic_open) {
error = atomic_open(nd, dentry, path, file, op, open_flag,
- mode, opened);
+ mode);
if (unlikely(error == -ENOENT) && create_error)
error = create_error;
return error;
@@ -3211,7 +3225,7 @@ no_open:
/* Negative dentry, just create the file */
if (!dentry->d_inode && (open_flag & O_CREAT)) {
- *opened |= FILE_CREATED;
+ file->f_mode |= FMODE_CREATED;
audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
if (!dir_inode->i_op->create) {
error = -EACCES;
@@ -3230,7 +3244,7 @@ no_open:
out_no_open:
path->dentry = dentry;
path->mnt = nd->path.mnt;
- return 1;
+ return 0;
out_dput:
dput(dentry);
@@ -3241,8 +3255,7 @@ out_dput:
* Handle the last step of open()
*/
static int do_last(struct nameidata *nd,
- struct file *file, const struct open_flags *op,
- int *opened)
+ struct file *file, const struct open_flags *op)
{
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
@@ -3308,17 +3321,17 @@ static int do_last(struct nameidata *nd,
inode_lock(dir->d_inode);
else
inode_lock_shared(dir->d_inode);
- error = lookup_open(nd, &path, file, op, got_write, opened);
+ error = lookup_open(nd, &path, file, op, got_write);
if (open_flag & O_CREAT)
inode_unlock(dir->d_inode);
else
inode_unlock_shared(dir->d_inode);
- if (error <= 0) {
- if (error)
- goto out;
+ if (error)
+ goto out;
- if ((*opened & FILE_CREATED) ||
+ if (file->f_mode & FMODE_OPENED) {
+ if ((file->f_mode & FMODE_CREATED) ||
!S_ISREG(file_inode(file)->i_mode))
will_truncate = false;
@@ -3326,7 +3339,7 @@ static int do_last(struct nameidata *nd,
goto opened;
}
- if (*opened & FILE_CREATED) {
+ if (file->f_mode & FMODE_CREATED) {
/* Don't check for write permission, don't truncate */
open_flag &= ~O_TRUNC;
will_truncate = false;
@@ -3376,9 +3389,15 @@ finish_open:
if (error)
return error;
audit_inode(nd->name, nd->path.dentry, 0);
- error = -EISDIR;
- if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
- goto out;
+ if (open_flag & O_CREAT) {
+ error = -EISDIR;
+ if (d_is_dir(nd->path.dentry))
+ goto out;
+ error = may_create_in_sticky(dir,
+ d_backing_inode(nd->path.dentry));
+ if (unlikely(error))
+ goto out;
+ }
error = -ENOTDIR;
if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
goto out;
@@ -3395,20 +3414,15 @@ finish_open_created:
error = may_open(&nd->path, acc_mode, open_flag);
if (error)
goto out;
- BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
- error = vfs_open(&nd->path, file, current_cred());
+ BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
+ error = vfs_open(&nd->path, file);
if (error)
goto out;
- *opened |= FILE_OPENED;
opened:
- error = open_check_o_direct(file);
- if (!error)
- error = ima_file_check(file, op->acc_mode, *opened);
+ error = ima_file_check(file, op->acc_mode);
if (!error && will_truncate)
error = handle_truncate(file);
out:
- if (unlikely(error) && (*opened & FILE_OPENED))
- fput(file);
if (unlikely(error > 0)) {
WARN_ON(1);
error = -EINVAL;
@@ -3458,7 +3472,7 @@ EXPORT_SYMBOL(vfs_tmpfile);
static int do_tmpfile(struct nameidata *nd, unsigned flags,
const struct open_flags *op,
- struct file *file, int *opened)
+ struct file *file)
{
struct dentry *child;
struct path path;
@@ -3480,12 +3494,7 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags,
if (error)
goto out2;
file->f_path.mnt = path.mnt;
- error = finish_open(file, child, NULL, opened);
- if (error)
- goto out2;
- error = open_check_o_direct(file);
- if (error)
- fput(file);
+ error = finish_open(file, child, NULL);
out2:
mnt_drop_write(path.mnt);
out:
@@ -3499,7 +3508,7 @@ static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
int error = path_lookupat(nd, flags, &path);
if (!error) {
audit_inode(nd->name, path.dentry, 0);
- error = vfs_open(&path, file, current_cred());
+ error = vfs_open(&path, file);
path_put(&path);
}
return error;
@@ -3508,59 +3517,40 @@ static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
static struct file *path_openat(struct nameidata *nd,
const struct open_flags *op, unsigned flags)
{
- const char *s;
struct file *file;
- int opened = 0;
int error;
- file = get_empty_filp();
+ file = alloc_empty_file(op->open_flag, current_cred());
if (IS_ERR(file))
return file;
- file->f_flags = op->open_flag;
-
if (unlikely(file->f_flags & __O_TMPFILE)) {
- error = do_tmpfile(nd, flags, op, file, &opened);
- goto out2;
- }
-
- if (unlikely(file->f_flags & O_PATH)) {
+ error = do_tmpfile(nd, flags, op, file);
+ } else if (unlikely(file->f_flags & O_PATH)) {
error = do_o_path(nd, flags, file);
- if (!error)
- opened |= FILE_OPENED;
- goto out2;
- }
-
- s = path_init(nd, flags);
- if (IS_ERR(s)) {
- put_filp(file);
- return ERR_CAST(s);
- }
- while (!(error = link_path_walk(s, nd)) &&
- (error = do_last(nd, file, op, &opened)) > 0) {
- nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
- s = trailing_symlink(nd);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- break;
+ } else {
+ const char *s = path_init(nd, flags);
+ while (!(error = link_path_walk(s, nd)) &&
+ (error = do_last(nd, file, op)) > 0) {
+ nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
+ s = trailing_symlink(nd);
}
+ terminate_walk(nd);
}
- terminate_walk(nd);
-out2:
- if (!(opened & FILE_OPENED)) {
- BUG_ON(!error);
- put_filp(file);
+ if (likely(!error)) {
+ if (likely(file->f_mode & FMODE_OPENED))
+ return file;
+ WARN_ON(1);
+ error = -EINVAL;
}
- if (unlikely(error)) {
- if (error == -EOPENSTALE) {
- if (flags & LOOKUP_RCU)
- error = -ECHILD;
- else
- error = -ESTALE;
- }
- file = ERR_PTR(error);
+ fput(file);
+ if (error == -EOPENSTALE) {
+ if (flags & LOOKUP_RCU)
+ error = -ECHILD;
+ else
+ error = -ESTALE;
}
- return file;
+ return ERR_PTR(error);
}
struct file *do_filp_open(int dfd, struct filename *pathname,
@@ -4712,29 +4702,6 @@ out:
return len;
}
-/*
- * A helper for ->readlink(). This should be used *ONLY* for symlinks that
- * have ->get_link() not calling nd_jump_link(). Using (or not using) it
- * for any given inode is up to filesystem.
- */
-static int generic_readlink(struct dentry *dentry, char __user *buffer,
- int buflen)
-{
- DEFINE_DELAYED_CALL(done);
- struct inode *inode = d_inode(dentry);
- const char *link = inode->i_link;
- int res;
-
- if (!link) {
- link = inode->i_op->get_link(dentry, inode, &done);
- if (IS_ERR(link))
- return PTR_ERR(link);
- }
- res = readlink_copy(buffer, buflen, link);
- do_delayed_call(&done);
- return res;
-}
-
/**
* vfs_readlink - copy symlink body into userspace buffer
* @dentry: dentry on which to get symbolic link
@@ -4748,6 +4715,9 @@ static int generic_readlink(struct dentry *dentry, char __user *buffer,
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct inode *inode = d_inode(dentry);
+ DEFINE_DELAYED_CALL(done);
+ const char *link;
+ int res;
if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
if (unlikely(inode->i_op->readlink))
@@ -4761,7 +4731,15 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
spin_unlock(&inode->i_lock);
}
- return generic_readlink(dentry, buffer, buflen);
+ link = inode->i_link;
+ if (!link) {
+ link = inode->i_op->get_link(dentry, inode, &done);
+ if (IS_ERR(link))
+ return PTR_ERR(link);
+ }
+ res = readlink_copy(buffer, buflen, link);
+ do_delayed_call(&done);
+ return res;
}
EXPORT_SYMBOL(vfs_readlink);
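With generic_readlink() folded into vfs_readlink(), inode->i_link is the fast path; a filesystem that keeps the symlink body in memory can publish it so ->get_link() is never called (sketch, hypothetical examplefs helper):

static void examplefs_init_symlink(struct inode *inode, char *target)
{
	/* target must remain valid for the lifetime of the inode */
	inode->i_link = target;
	inode->i_op = &simple_symlink_inode_operations;
}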
diff --git a/fs/namespace.c b/fs/namespace.c
index 8e5fa0aa2314..99186556f8d3 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -405,74 +405,20 @@ int __mnt_want_write_file(struct file *file)
}
/**
- * mnt_want_write_file_path - get write access to a file's mount
- * @file: the file who's mount on which to take a write
- *
- * This is like mnt_want_write, but it takes a file and can
- * do some optimisations if the file is open for write already
- *
- * Called by the vfs for cases when we have an open file at hand, but will do an
- * inode operation on it (important distinction for files opened on overlayfs,
- * since the file operations will come from the real underlying file, while
- * inode operations come from the overlay).
- */
-int mnt_want_write_file_path(struct file *file)
-{
- int ret;
-
- sb_start_write(file->f_path.mnt->mnt_sb);
- ret = __mnt_want_write_file(file);
- if (ret)
- sb_end_write(file->f_path.mnt->mnt_sb);
- return ret;
-}
-
-static inline int may_write_real(struct file *file)
-{
- struct dentry *dentry = file->f_path.dentry;
- struct dentry *upperdentry;
-
- /* Writable file? */
- if (file->f_mode & FMODE_WRITER)
- return 0;
-
- /* Not overlayfs? */
- if (likely(!(dentry->d_flags & DCACHE_OP_REAL)))
- return 0;
-
- /* File refers to upper, writable layer? */
- upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER);
- if (upperdentry &&
- (file_inode(file) == d_inode(upperdentry) ||
- file_inode(file) == d_inode(dentry)))
- return 0;
-
- /* Lower layer: can't write to real file, sorry... */
- return -EPERM;
-}
-
-/**
* mnt_want_write_file - get write access to a file's mount
* @file: the file whose mount to take a write on
*
* This is like mnt_want_write, but it takes a file and can
* do some optimisations if the file is open for write already
- *
- * Mostly called by filesystems from their ioctl operation before performing
- * modification. On overlayfs this needs to check if the file is on a read-only
- * lower layer and deny access in that case.
*/
int mnt_want_write_file(struct file *file)
{
int ret;
- ret = may_write_real(file);
- if (!ret) {
- sb_start_write(file_inode(file)->i_sb);
- ret = __mnt_want_write_file(file);
- if (ret)
- sb_end_write(file_inode(file)->i_sb);
- }
+ sb_start_write(file_inode(file)->i_sb);
+ ret = __mnt_want_write_file(file);
+ if (ret)
+ sb_end_write(file_inode(file)->i_sb);
return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
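mnt_want_write_file() now pairs symmetrically with mnt_drop_write_file(), both keyed off file_inode(file)->i_sb; a typical ioctl-style caller (examplefs_apply_flags is a hypothetical helper):

static long examplefs_ioc_setflags(struct file *file, unsigned int flags)
{
	int err = mnt_want_write_file(file);

	if (err)
		return err;
	err = examplefs_apply_flags(file_inode(file), flags);
	mnt_drop_write_file(file);
	return err;
}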
@@ -512,14 +458,9 @@ void __mnt_drop_write_file(struct file *file)
__mnt_drop_write(file->f_path.mnt);
}
-void mnt_drop_write_file_path(struct file *file)
-{
- mnt_drop_write(file->f_path.mnt);
-}
-
void mnt_drop_write_file(struct file *file)
{
- __mnt_drop_write(file->f_path.mnt);
+ __mnt_drop_write_file(file);
sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 7cb5c38c19e4..06cb0c1d9aee 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -753,6 +753,7 @@ out:
case -ENODEV:
/* Our extent block devices are unavailable */
set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags);
+ /* Fall through */
case 0:
return lseg;
default:
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index a7efd83779d2..dec5880ac6de 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -204,7 +204,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
chunk = div_u64(offset, dev->chunk_size);
div_u64_rem(chunk, dev->nr_children, &chunk_idx);
- if (chunk_idx > dev->nr_children) {
+ if (chunk_idx >= dev->nr_children) {
dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
__func__, chunk_idx, offset, dev->chunk_size);
/* error, should not happen */
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index a20a0bce40a4..8f34daf85f70 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -184,6 +184,18 @@ struct cb_notify_lock_args {
extern __be32 nfs4_callback_notify_lock(void *argp, void *resp,
struct cb_process_state *cps);
#endif /* CONFIG_NFS_V4_1 */
+#ifdef CONFIG_NFS_V4_2
+struct cb_offloadargs {
+ struct nfs_fh coa_fh;
+ nfs4_stateid coa_stateid;
+ uint32_t error;
+ uint64_t wr_count;
+ struct nfs_writeverf wr_writeverf;
+};
+
+extern __be32 nfs4_callback_offload(void *args, void *dummy,
+ struct cb_process_state *cps);
+#endif /* CONFIG_NFS_V4_2 */
extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *);
extern __be32 nfs4_callback_getattr(void *argp, void *resp,
struct cb_process_state *cps);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 64c214fb9da6..fa515d5ea5ba 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -215,9 +215,9 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
{
u32 oldseq, newseq;
- /* Is the stateid still not initialised? */
+ /* Is the stateid not initialised? */
if (!pnfs_layout_is_valid(lo))
- return NFS4ERR_DELAY;
+ return NFS4ERR_NOMATCHING_LAYOUT;
/* Mismatched stateid? */
if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
@@ -273,7 +273,6 @@ static u32 initiate_file_draining(struct nfs_client *clp,
rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
if (rv != NFS_OK)
goto unlock;
- pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
/*
* Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
@@ -283,19 +282,23 @@ static u32 initiate_file_draining(struct nfs_client *clp,
goto unlock;
}
- if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
+ pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
+ switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
&args->cbl_range,
be32_to_cpu(args->cbl_stateid.seqid))) {
+ case 0:
+ case -EBUSY:
+ /* There are layout segments that need to be returned */
rv = NFS4_OK;
- goto unlock;
- }
-
- /* Embrace your forgetfulness! */
- rv = NFS4ERR_NOMATCHING_LAYOUT;
+ break;
+ case -ENOENT:
+ /* Embrace your forgetfulness! */
+ rv = NFS4ERR_NOMATCHING_LAYOUT;
- if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
- NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
- &args->cbl_range);
+ if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
+ NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
+ &args->cbl_range);
+ }
}
unlock:
spin_unlock(&ino->i_lock);
@@ -328,8 +331,6 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
static u32 do_callback_layoutrecall(struct nfs_client *clp,
struct cb_layoutrecallargs *args)
{
- write_seqcount_begin(&clp->cl_callback_count);
- write_seqcount_end(&clp->cl_callback_count);
if (args->cbl_recall_type == RETURN_FILE)
return initiate_file_draining(clp, args);
return initiate_bulk_draining(clp, args);
@@ -441,11 +442,14 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
* a match. If the slot is in use and the sequence numbers match, the
* client is still waiting for a response to the original request.
*/
-static bool referring_call_exists(struct nfs_client *clp,
+static int referring_call_exists(struct nfs_client *clp,
uint32_t nrclists,
- struct referring_call_list *rclists)
+ struct referring_call_list *rclists,
+ spinlock_t *lock)
+ __releases(lock)
+ __acquires(lock)
{
- bool status = false;
+ int status = 0;
int i, j;
struct nfs4_session *session;
struct nfs4_slot_table *tbl;
@@ -468,8 +472,10 @@ static bool referring_call_exists(struct nfs_client *clp,
for (j = 0; j < rclist->rcl_nrefcalls; j++) {
ref = &rclist->rcl_refcalls[j];
+ spin_unlock(lock);
status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
ref->rc_sequenceid, HZ >> 1) < 0;
+ spin_lock(lock);
if (status)
goto out;
}
@@ -546,7 +552,8 @@ __be32 nfs4_callback_sequence(void *argp, void *resp,
* related callback was received before the response to the original
* call.
*/
- if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
+ if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
+ &tbl->slot_tbl_lock) < 0) {
status = htonl(NFS4ERR_DELAY);
goto out_unlock;
}
@@ -660,3 +667,57 @@ __be32 nfs4_callback_notify_lock(void *argp, void *resp,
return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
+#ifdef CONFIG_NFS_V4_2
+static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
+ struct cb_offloadargs *args)
+{
+ cp_state->count = args->wr_count;
+ cp_state->error = args->error;
+ if (!args->error) {
+ cp_state->verf.committed = args->wr_writeverf.committed;
+ memcpy(&cp_state->verf.verifier.data[0],
+ &args->wr_writeverf.verifier.data[0],
+ NFS4_VERIFIER_SIZE);
+ }
+}
+
+__be32 nfs4_callback_offload(void *data, void *dummy,
+ struct cb_process_state *cps)
+{
+ struct cb_offloadargs *args = data;
+ struct nfs_server *server;
+ struct nfs4_copy_state *copy;
+ bool found = false;
+
+ spin_lock(&cps->clp->cl_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
+ client_link) {
+ list_for_each_entry(copy, &server->ss_copies, copies) {
+ if (memcmp(args->coa_stateid.other,
+ copy->stateid.other,
+ sizeof(args->coa_stateid.other)))
+ continue;
+ nfs4_copy_cb_args(copy, args);
+ complete(&copy->completion);
+ found = true;
+ goto out;
+ }
+ }
+out:
+ rcu_read_unlock();
+ if (!found) {
+ copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
+ if (!copy) {
+ spin_unlock(&cps->clp->cl_lock);
+ return htonl(NFS4ERR_SERVERFAULT);
+ }
+ memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
+ nfs4_copy_cb_args(copy, args);
+ list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
+ }
+ spin_unlock(&cps->clp->cl_lock);
+
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_2 */
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index a813979b5be0..a87a56273407 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -38,6 +38,9 @@
#define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#define CB_OP_NOTIFY_LOCK_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#endif /* CONFIG_NFS_V4_1 */
+#ifdef CONFIG_NFS_V4_2
+#define CB_OP_OFFLOAD_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+#endif /* CONFIG_NFS_V4_2 */
#define NFSDBG_FACILITY NFSDBG_CALLBACK
@@ -527,7 +530,72 @@ static __be32 decode_notify_lock_args(struct svc_rqst *rqstp,
}
#endif /* CONFIG_NFS_V4_1 */
+#ifdef CONFIG_NFS_V4_2
+static __be32 decode_write_response(struct xdr_stream *xdr,
+ struct cb_offloadargs *args)
+{
+ __be32 *p;
+
+ /* skip the always zero field */
+ p = read_buf(xdr, 4);
+ if (unlikely(!p))
+ goto out;
+ p++;
+
+ /* decode count, stable_how, verifier */
+ p = xdr_inline_decode(xdr, 8 + 4);
+ if (unlikely(!p))
+ goto out;
+ p = xdr_decode_hyper(p, &args->wr_count);
+ args->wr_writeverf.committed = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, NFS4_VERIFIER_SIZE);
+ if (likely(p)) {
+ memcpy(&args->wr_writeverf.verifier.data[0], p,
+ NFS4_VERIFIER_SIZE);
+ return 0;
+ }
+out:
+ return htonl(NFS4ERR_RESOURCE);
+}
+
+static __be32 decode_offload_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct cb_offloadargs *args = data;
+ __be32 *p;
+ __be32 status;
+
+ /* decode fh */
+ status = decode_fh(xdr, &args->coa_fh);
+ if (unlikely(status != 0))
+ return status;
+ /* decode stateid */
+ status = decode_stateid(xdr, &args->coa_stateid);
+ if (unlikely(status != 0))
+ return status;
+
+ /* decode status */
+ p = read_buf(xdr, 4);
+ if (unlikely(!p))
+ goto out;
+ args->error = ntohl(*p++);
+ if (!args->error) {
+ status = decode_write_response(xdr, args);
+ if (unlikely(status != 0))
+ return status;
+ } else {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out;
+ p = xdr_decode_hyper(p, &args->wr_count);
+ }
+ return 0;
+out:
+ return htonl(NFS4ERR_RESOURCE);
+}
+#endif /* CONFIG_NFS_V4_2 */
static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
if (unlikely(xdr_stream_encode_opaque(xdr, str, len) < 0))
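For orientation, the wire layout decode_offload_args() walks, paraphrased from the CB_OFFLOAD definition in RFC 7862 (XDR sketch, not kernel types):

/*
 * CB_OFFLOAD4args:
 *	nfs_fh4		coa_fh;
 *	stateid4	coa_stateid;
 *	offload_info4:
 *		nfsstat4 coa_status;
 *		case NFS4_OK:
 *			write_response4 { wr_callback_id<1> (the "always
 *			zero" count skipped above), wr_count, wr_committed,
 *			wr_writeverf };
 *		default:
 *			length4 coa_bytes_copied;  (decoded into wr_count)
 */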
@@ -773,7 +841,10 @@ preprocess_nfs42_op(int nop, unsigned int op_nr, struct callback_op **op)
if (status != htonl(NFS4ERR_OP_ILLEGAL))
return status;
- if (op_nr == OP_CB_OFFLOAD)
+ if (op_nr == OP_CB_OFFLOAD) {
+ *op = &callback_ops[op_nr];
+ return htonl(NFS_OK);
+ } else
return htonl(NFS4ERR_NOTSUPP);
return htonl(NFS4ERR_OP_ILLEGAL);
}
@@ -883,16 +954,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
if (hdr_arg.minorversion == 0) {
cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
- if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
+ if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
+ if (cps.clp)
+ nfs_put_client(cps.clp);
goto out_invalidcred;
+ }
}
cps.minorversion = hdr_arg.minorversion;
hdr_res.taglen = hdr_arg.taglen;
hdr_res.tag = hdr_arg.tag;
- if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
+ if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
+ if (cps.clp)
+ nfs_put_client(cps.clp);
return rpc_system_err;
-
+ }
while (status == 0 && nops != hdr_arg.nops) {
status = process_op(nops, rqstp, &xdr_in,
rqstp->rq_argp, &xdr_out, rqstp->rq_resp,
@@ -969,6 +1045,13 @@ static struct callback_op callback_ops[] = {
.res_maxsize = CB_OP_NOTIFY_LOCK_RES_MAXSZ,
},
#endif /* CONFIG_NFS_V4_1 */
+#ifdef CONFIG_NFS_V4_2
+ [OP_CB_OFFLOAD] = {
+ .process_op = nfs4_callback_offload,
+ .decode_args = decode_offload_args,
+ .res_maxsize = CB_OP_OFFLOAD_RES_MAXSZ,
+ },
+#endif /* CONFIG_NFS_V4_2 */
};
/*
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 377a61654a88..96d5f8135eb9 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -886,6 +886,7 @@ struct nfs_server *nfs_alloc_server(void)
INIT_LIST_HEAD(&server->delegations);
INIT_LIST_HEAD(&server->layouts);
INIT_LIST_HEAD(&server->state_owners_lru);
+ INIT_LIST_HEAD(&server->ss_copies);
atomic_set(&server->active, 0);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7a9c14426855..8bfaa658b2c1 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -904,23 +904,29 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
filp, offset, whence);
- inode_lock(inode);
switch (whence) {
- case 1:
- offset += filp->f_pos;
- case 0:
- if (offset >= 0)
- break;
- default:
- offset = -EINVAL;
- goto out;
+ default:
+ return -EINVAL;
+ case SEEK_SET:
+ if (offset < 0)
+ return -EINVAL;
+ inode_lock(inode);
+ break;
+ case SEEK_CUR:
+ if (offset == 0)
+ return filp->f_pos;
+ inode_lock(inode);
+ offset += filp->f_pos;
+ if (offset < 0) {
+ inode_unlock(inode);
+ return -EINVAL;
+ }
}
if (offset != filp->f_pos) {
filp->f_pos = offset;
dir_ctx->dir_cookie = 0;
dir_ctx->duped = 0;
}
-out:
inode_unlock(inode);
return offset;
}
@@ -1032,7 +1038,7 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags)
if (flags & LOOKUP_REVAL)
goto out_force;
out:
- return (inode->i_nlink == 0) ? -ENOENT : 0;
+ return (inode->i_nlink == 0) ? -ESTALE : 0;
out_force:
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -1434,12 +1440,11 @@ static int do_open(struct inode *inode, struct file *filp)
static int nfs_finish_open(struct nfs_open_context *ctx,
struct dentry *dentry,
- struct file *file, unsigned open_flags,
- int *opened)
+ struct file *file, unsigned open_flags)
{
int err;
- err = finish_open(file, dentry, do_open, opened);
+ err = finish_open(file, dentry, do_open);
if (err)
goto out;
if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
@@ -1452,7 +1457,7 @@ out:
int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned open_flags,
- umode_t mode, int *opened)
+ umode_t mode)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct nfs_open_context *ctx;
@@ -1461,6 +1466,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct inode *inode;
unsigned int lookup_flags = 0;
bool switched = false;
+ int created = 0;
int err;
/* Expect a negative dentry */
@@ -1521,7 +1527,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
goto out;
trace_nfs_atomic_open_enter(dir, ctx, open_flags);
- inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened);
+ inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, &created);
+ if (created)
+ file->f_mode |= FMODE_CREATED;
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
@@ -1546,7 +1554,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
goto out;
}
- err = nfs_finish_open(ctx, ctx->dentry, file, open_flags, opened);
+ err = nfs_finish_open(ctx, ctx->dentry, file, open_flags);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
put_nfs_open_context(ctx);
out:
@@ -1641,6 +1649,7 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
struct dentry *parent = dget_parent(dentry);
struct inode *dir = d_inode(parent);
struct inode *inode;
+ struct dentry *d;
int error = -EACCES;
d_drop(dentry);
@@ -1662,10 +1671,12 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
goto out_error;
}
inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
- error = PTR_ERR(inode);
- if (IS_ERR(inode))
+ d = d_splice_alias(inode, dentry);
+ if (IS_ERR(d)) {
+ error = PTR_ERR(d);
goto out_error;
- d_add(dentry, inode);
+ }
+ dput(d);
out:
dput(parent);
return 0;
@@ -2494,7 +2505,9 @@ static int nfs_execute_ok(struct inode *inode, int mask)
struct nfs_server *server = NFS_SERVER(inode);
int ret = 0;
- if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS)) {
+ if (S_ISDIR(inode->i_mode))
+ return 0;
+ if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_OTHER)) {
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
ret = __nfs_revalidate_inode(server, inode);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 621c517b325c..aa12c3063bae 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -758,7 +758,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
- schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
+ queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}
static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 81cca49a8375..29553fdba8af 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -532,13 +532,13 @@ const struct address_space_operations nfs_file_aops = {
* writable, implying that someone is about to modify the page through a
* shared-writable mapping
*/
-static int nfs_vm_page_mkwrite(struct vm_fault *vmf)
+static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct file *filp = vmf->vma->vm_file;
struct inode *inode = file_inode(filp);
unsigned pagelen;
- int ret = VM_FAULT_NOPAGE;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
struct address_space *mapping;
dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 8f003792ccde..cae43333ef16 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -812,7 +812,6 @@ ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req,
bool strict_iomode)
{
-retry_strict:
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
@@ -825,16 +824,6 @@ retry_strict:
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
}
-
- /* If we don't have checking, do get a IOMODE_RW
- * segment, and the server wants to avoid READs
- * there, then retry!
- */
- if (pgio->pg_lseg && !strict_iomode &&
- ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
- strict_iomode = true;
- goto retry_strict;
- }
}
static void
@@ -849,14 +838,16 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
retry:
pnfs_generic_pg_check_layout(pgio);
/* Use full layout for now */
- if (!pgio->pg_lseg)
+ if (!pgio->pg_lseg) {
ff_layout_pg_get_read(pgio, req, false);
- else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg))
+ if (!pgio->pg_lseg)
+ goto out_nolseg;
+ }
+ if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
ff_layout_pg_get_read(pgio, req, true);
-
- /* If no lseg, fall back to read through mds */
- if (pgio->pg_lseg == NULL)
- goto out_mds;
+ if (!pgio->pg_lseg)
+ goto out_nolseg;
+ }
ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
if (!ds) {
@@ -878,6 +869,9 @@ retry:
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
return;
+out_nolseg:
+ if (pgio->pg_error < 0)
+ return;
out_mds:
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
@@ -1323,6 +1317,7 @@ static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
hdr->args.count,
hdr->res.count);
+ set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}
static int ff_layout_read_prepare_common(struct rpc_task *task,
@@ -1507,6 +1502,7 @@ static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
hdr->args.count, hdr->res.count,
hdr->res.verf->committed);
+ set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}
static int ff_layout_write_prepare_common(struct rpc_task *task,
@@ -1615,6 +1611,7 @@ static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
nfs4_ff_layout_stat_io_end_write(task,
FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
count, count, NFS_FILE_SYNC);
+ set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
}
static void ff_layout_commit_prepare_common(struct rpc_task *task,
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 7173a4ee862c..9fce18548f7e 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -108,6 +108,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type)
case -EPROTONOSUPPORT:
dprintk("NFS_V3_ACL extension not supported; disabling\n");
server->caps &= ~NFS_CAP_ACLS;
+ /* fall through */
case -ENOTSUPP:
status = -EOPNOTSUPP;
default:
@@ -229,6 +230,7 @@ static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
dprintk("NFS_V3_ACL SETACL RPC not supported "
"(will not retry)\n");
server->caps &= ~NFS_CAP_ACLS;
+ /* fall through */
case -ENOTSUPP:
status = -EOPNOTSUPP;
}
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 5f59b6f65a42..ac5b784a1de0 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -17,6 +17,7 @@
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_PROC
+static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
struct nfs_lock_context *lock, loff_t offset, loff_t len)
@@ -130,6 +131,91 @@ out_unlock:
return err;
}
+static int handle_async_copy(struct nfs42_copy_res *res,
+ struct nfs_server *server,
+ struct file *src,
+ struct file *dst,
+ nfs4_stateid *src_stateid)
+{
+ struct nfs4_copy_state *copy;
+ int status = NFS4_OK;
+ bool found_pending = false;
+ struct nfs_open_context *ctx = nfs_file_open_context(dst);
+
+ spin_lock(&server->nfs_client->cl_lock);
+ list_for_each_entry(copy, &server->nfs_client->pending_cb_stateids,
+ copies) {
+ if (memcmp(&res->write_res.stateid, &copy->stateid,
+ NFS4_STATEID_SIZE))
+ continue;
+ found_pending = true;
+ list_del(&copy->copies);
+ break;
+ }
+ if (found_pending) {
+ spin_unlock(&server->nfs_client->cl_lock);
+ goto out;
+ }
+
+ copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
+ if (!copy) {
+ spin_unlock(&server->nfs_client->cl_lock);
+ return -ENOMEM;
+ }
+ memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
+ init_completion(&copy->completion);
+ copy->parent_state = ctx->state;
+
+ list_add_tail(&copy->copies, &server->ss_copies);
+ spin_unlock(&server->nfs_client->cl_lock);
+
+ status = wait_for_completion_interruptible(&copy->completion);
+ spin_lock(&server->nfs_client->cl_lock);
+ list_del_init(&copy->copies);
+ spin_unlock(&server->nfs_client->cl_lock);
+ if (status == -ERESTARTSYS) {
+ goto out_cancel;
+ } else if (copy->flags) {
+ status = -EAGAIN;
+ goto out_cancel;
+ }
+out:
+ res->write_res.count = copy->count;
+ memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
+ status = -copy->error;
+
+ kfree(copy);
+ return status;
+out_cancel:
+ nfs42_do_offload_cancel_async(dst, &copy->stateid);
+ kfree(copy);
+ return status;
+}
+
+static int process_copy_commit(struct file *dst, loff_t pos_dst,
+ struct nfs42_copy_res *res)
+{
+ struct nfs_commitres cres;
+ int status = -ENOMEM;
+
+ cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
+ if (!cres.verf)
+ goto out;
+
+ status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
+ if (status)
+ goto out_free;
+ if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
+ &cres.verf->verifier)) {
+ dprintk("commit verf differs from copy verf\n");
+ status = -EAGAIN;
+ }
+out_free:
+ kfree(cres.verf);
+out:
+ return status;
+}
+
static ssize_t _nfs42_proc_copy(struct file *src,
struct nfs_lock_context *src_lock,
struct file *dst,
@@ -168,9 +254,16 @@ static ssize_t _nfs42_proc_copy(struct file *src,
if (status)
return status;
- res->commit_res.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
- if (!res->commit_res.verf)
- return -ENOMEM;
+ res->commit_res.verf = NULL;
+ if (args->sync) {
+ res->commit_res.verf =
+ kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
+ if (!res->commit_res.verf)
+ return -ENOMEM;
+ }
+ set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
+ &dst_lock->open_context->state->flags);
+
status = nfs4_call_sync(server->client, server, &msg,
&args->seq_args, &res->seq_res, 0);
if (status == -ENOTSUPP)
@@ -178,18 +271,34 @@ static ssize_t _nfs42_proc_copy(struct file *src,
if (status)
goto out;
- if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
+ if (args->sync &&
+ nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
&res->commit_res.verf->verifier)) {
status = -EAGAIN;
goto out;
}
+ if (!res->synchronous) {
+ status = handle_async_copy(res, server, src, dst,
+ &args->src_stateid);
+ if (status)
+ return status;
+ }
+
+ if ((!res->synchronous || !args->sync) &&
+ res->write_res.verifier.committed != NFS_FILE_SYNC) {
+ status = process_copy_commit(dst, pos_dst, res);
+ if (status)
+ return status;
+ }
+
truncate_pagecache_range(dst_inode, pos_dst,
pos_dst + res->write_res.count);
status = res->write_res.count;
out:
- kfree(res->commit_res.verf);
+ if (args->sync)
+ kfree(res->commit_res.verf);
return status;
}
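In brief, the asynchronous copy flow the code above implements (summary comment, derived from the patch itself):

/*
 * Async COPY flow: the first attempt is sent with sync = false; when the
 * server replies asynchronously, handle_async_copy() queues an
 * nfs4_copy_state on server->ss_copies and waits for the completion that
 * nfs4_callback_offload() fires on CB_OFFLOAD (or consumes an entry
 * already parked on pending_cb_stateids if the callback won the race).
 * Results not committed NFS_FILE_SYNC are then committed explicitly via
 * process_copy_commit().
 */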
@@ -206,6 +315,7 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
.dst_fh = NFS_FH(file_inode(dst)),
.dst_pos = pos_dst,
.count = count,
+ .sync = false,
};
struct nfs42_copy_res res;
struct nfs4_exception src_exception = {
@@ -247,7 +357,11 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
if (err == -ENOTSUPP) {
err = -EOPNOTSUPP;
break;
- } if (err == -EAGAIN) {
+ } else if (err == -EAGAIN) {
+ dst_exception.retry = 1;
+ continue;
+ } else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
+ args.sync = true;
dst_exception.retry = 1;
continue;
}
@@ -264,6 +378,89 @@ out_put_src_lock:
return err;
}
+struct nfs42_offloadcancel_data {
+ struct nfs_server *seq_server;
+ struct nfs42_offload_status_args args;
+ struct nfs42_offload_status_res res;
+};
+
+static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs42_offloadcancel_data *data = calldata;
+
+ nfs4_setup_sequence(data->seq_server->nfs_client,
+ &data->args.osa_seq_args,
+ &data->res.osr_seq_res, task);
+}
+
+static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs42_offloadcancel_data *data = calldata;
+
+ nfs41_sequence_done(task, &data->res.osr_seq_res);
+ if (task->tk_status &&
+ nfs4_async_handle_error(task, data->seq_server, NULL,
+ NULL) == -EAGAIN)
+ rpc_restart_call_prepare(task);
+}
+
+static void nfs42_free_offloadcancel_data(void *data)
+{
+ kfree(data);
+}
+
+static const struct rpc_call_ops nfs42_offload_cancel_ops = {
+ .rpc_call_prepare = nfs42_offload_cancel_prepare,
+ .rpc_call_done = nfs42_offload_cancel_done,
+ .rpc_release = nfs42_free_offloadcancel_data,
+};
+
+static int nfs42_do_offload_cancel_async(struct file *dst,
+ nfs4_stateid *stateid)
+{
+ struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
+ struct nfs42_offloadcancel_data *data = NULL;
+ struct nfs_open_context *ctx = nfs_file_open_context(dst);
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
+ .rpc_cred = ctx->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = dst_server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs42_offload_cancel_ops,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status;
+
+ if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
+ return -EOPNOTSUPP;
+
+ data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
+ if (data == NULL)
+ return -ENOMEM;
+
+ data->seq_server = dst_server;
+ data->args.osa_src_fh = NFS_FH(file_inode(dst));
+ memcpy(&data->args.osa_stateid, stateid,
+ sizeof(data->args.osa_stateid));
+ msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ task_setup_data.callback_data = data;
+ nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
+ 1, 0);
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = rpc_wait_for_completion_task(task);
+ if (status == -ENOTSUPP)
+ dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
+ rpc_put_task(task);
+ return status;
+}
+
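Two details worth noting in nfs42_do_offload_cancel_async(): despite RPC_TASK_ASYNC, the function still blocks in rpc_wait_for_completion_task(), and an -ENOTSUPP reply clears NFS_CAP_OFFLOAD_CANCEL so later copies never attempt the operation again. A loose userspace model of that capability gating (hypothetical names, not the kernel API):

#include <stdio.h>

#define CAP_OFFLOAD_CANCEL 0x1u

struct server { unsigned int caps; };

/* Returns 0 on success; -95 (EOPNOTSUPP) when the op is unavailable.
 * A server that rejects the op has its capability bit cleared so the
 * round-trip is skipped from then on. */
static int offload_cancel(struct server *srv, int server_supports_it)
{
        if (!(srv->caps & CAP_OFFLOAD_CANCEL))
                return -95;
        if (!server_supports_it) {
                srv->caps &= ~CAP_OFFLOAD_CANCEL;
                return -95;
        }
        return 0;
}

int main(void)
{
        struct server srv = { .caps = CAP_OFFLOAD_CANCEL };

        printf("%d\n", offload_cancel(&srv, 0));  /* server rejects */
        printf("%d\n", offload_cancel(&srv, 1));  /* skipped locally */
        return 0;
}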
static loff_t _nfs42_proc_llseek(struct file *filep,
struct nfs_lock_context *lock, loff_t offset, int whence)
{
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 5966e1e7b1f5..69f72ed2bf87 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -26,6 +26,9 @@
NFS42_WRITE_RES_SIZE + \
1 /* cr_consecutive */ + \
1 /* cr_synchronous */)
+#define encode_offload_cancel_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define decode_offload_cancel_maxsz (op_decode_hdr_maxsz)
#define encode_deallocate_maxsz (op_encode_hdr_maxsz + \
encode_fallocate_maxsz)
#define decode_deallocate_maxsz (op_decode_hdr_maxsz)
@@ -75,6 +78,12 @@
decode_putfh_maxsz + \
decode_copy_maxsz + \
decode_commit_maxsz)
+#define NFS4_enc_offload_cancel_sz (compound_encode_hdr_maxsz + \
+ encode_putfh_maxsz + \
+ encode_offload_cancel_maxsz)
+#define NFS4_dec_offload_cancel_sz (compound_decode_hdr_maxsz + \
+ decode_putfh_maxsz + \
+ decode_offload_cancel_maxsz)
#define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_deallocate_maxsz + \
@@ -141,10 +150,18 @@ static void encode_copy(struct xdr_stream *xdr,
encode_uint64(xdr, args->count);
encode_uint32(xdr, 1); /* consecutive = true */
- encode_uint32(xdr, 1); /* synchronous = true */
+ encode_uint32(xdr, args->sync);
encode_uint32(xdr, 0); /* src server list */
}
+static void encode_offload_cancel(struct xdr_stream *xdr,
+ const struct nfs42_offload_status_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_OFFLOAD_CANCEL, decode_offload_cancel_maxsz, hdr);
+ encode_nfs4_stateid(xdr, &args->osa_stateid);
+}
+
static void encode_deallocate(struct xdr_stream *xdr,
const struct nfs42_falloc_args *args,
struct compound_hdr *hdr)
@@ -256,7 +273,27 @@ static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
encode_savefh(xdr, &hdr);
encode_putfh(xdr, args->dst_fh, &hdr);
encode_copy(xdr, args, &hdr);
- encode_copy_commit(xdr, args, &hdr);
+ if (args->sync)
+ encode_copy_commit(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode OFFLOAD_CANCEL request
+ */
+static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_offload_status_args *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->osa_seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->osa_seq_args, &hdr);
+ encode_putfh(xdr, args->osa_src_fh, &hdr);
+ encode_offload_cancel(xdr, args, &hdr);
encode_nops(&hdr);
}
@@ -353,21 +390,23 @@ static int decode_write_response(struct xdr_stream *xdr,
struct nfs42_write_res *res)
{
__be32 *p;
+ int status, count;
- p = xdr_inline_decode(xdr, 4 + 8 + 4);
+ p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
-
- /*
- * We never use asynchronous mode, so warn if a server returns
- * a stateid.
- */
- if (unlikely(*p != 0)) {
- pr_err_once("%s: server has set unrequested "
- "asynchronous mode\n", __func__);
+ count = be32_to_cpup(p);
+ if (count > 1)
return -EREMOTEIO;
+ else if (count == 1) {
+ status = decode_opaque_fixed(xdr, &res->stateid,
+ NFS4_STATEID_SIZE);
+ if (unlikely(status))
+ goto out_overflow;
}
- p++;
+ p = xdr_inline_decode(xdr, 8 + 4);
+ if (unlikely(!p))
+ goto out_overflow;
p = xdr_decode_hyper(p, &res->count);
res->verifier.committed = be32_to_cpup(p);
return decode_verifier(xdr, &res->verifier.verifier);
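The rework above tracks the write_response4 layout from RFC 7862 once asynchronous copies are possible: a stateid array of length 0 or 1 comes first, then the 64-bit byte count, the 32-bit committed enum, and the 8-byte verifier; anything other than 0 or 1 stateids is rejected as -EREMOTEIO. A self-contained sketch of that wire layout (plain C, big-endian fields; illustration only):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* 0 stateids, count = 4096, committed = 2 (FILE_SYNC4), zero verifier */
        unsigned char wire[4 + 8 + 4 + 8] = {
                0, 0, 0, 0,                 /* number of stateids  */
                0, 0, 0, 0, 0, 0, 0x10, 0,  /* bytes copied (4096) */
                0, 0, 0, 2,                 /* committed           */
        };
        const unsigned char *p = wire;
        uint32_t nstateids, committed, hi, lo;

        memcpy(&nstateids, p, 4); p += 4;
        if (ntohl(nstateids) > 1)
                return 1;                   /* -EREMOTEIO in the kernel */
        p += ntohl(nstateids) * 16;         /* skip the copy stateid    */
        memcpy(&hi, p, 4); memcpy(&lo, p + 4, 4); p += 8;
        memcpy(&committed, p, 4);
        printf("count=%llu committed=%u\n",
               (unsigned long long)(((uint64_t)ntohl(hi) << 32) | ntohl(lo)),
               ntohl(committed));
        return 0;
}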
@@ -413,6 +452,12 @@ static int decode_copy(struct xdr_stream *xdr, struct nfs42_copy_res *res)
return decode_copy_requirements(xdr, res);
}
+static int decode_offload_cancel(struct xdr_stream *xdr,
+ struct nfs42_offload_status_res *res)
+{
+ return decode_op_hdr(xdr, OP_OFFLOAD_CANCEL);
+}
+
static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res)
{
return decode_op_hdr(xdr, OP_DEALLOCATE);
@@ -507,7 +552,34 @@ static int nfs4_xdr_dec_copy(struct rpc_rqst *rqstp,
status = decode_copy(xdr, res);
if (status)
goto out;
- status = decode_commit(xdr, &res->commit_res);
+ if (res->commit_res.verf)
+ status = decode_commit(xdr, &res->commit_res);
+out:
+ return status;
+}
+
+/*
+ * Decode OFFLOAD_CANCEL response
+ */
+static int nfs4_xdr_dec_offload_cancel(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfs42_offload_status_res *res = data;
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->osr_seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_offload_cancel(xdr, res);
+
out:
return status;
}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 137e18abb7e7..3a6904173214 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -163,6 +163,9 @@ enum {
NFS_STATE_RECOVERY_FAILED, /* OPEN stateid state recovery failed */
NFS_STATE_MAY_NOTIFY_LOCK, /* server may CB_NOTIFY_LOCK */
NFS_STATE_CHANGE_WAIT, /* A state changing operation is outstanding */
+#ifdef CONFIG_NFS_V4_2
+ NFS_CLNT_DST_SSC_COPY_STATE, /* dst server open state on client */
+#endif /* CONFIG_NFS_V4_2 */
};
struct nfs4_state {
@@ -258,7 +261,7 @@ extern const struct dentry_operations nfs4_dentry_operations;
/* dir.c */
int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
- unsigned, umode_t, int *);
+ unsigned, umode_t);
/* super.c */
extern struct file_system_type nfs4_fs_type;
@@ -273,6 +276,9 @@ int nfs4_replace_transport(struct nfs_server *server,
/* nfs4proc.c */
extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception *);
+extern int nfs4_async_handle_error(struct rpc_task *task,
+ struct nfs_server *server,
+ struct nfs4_state *state, long *timeout);
extern int nfs4_call_sync(struct rpc_clnt *, struct nfs_server *,
struct rpc_message *, struct nfs4_sequence_args *,
struct nfs4_sequence_res *, int);
@@ -505,7 +511,7 @@ extern int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res);
extern void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp);
-
+extern int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res);
extern const nfs4_stateid zero_stateid;
extern const nfs4_stateid invalid_stateid;
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 979631411a0e..146e30862234 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -156,9 +156,23 @@ nfs4_shutdown_ds_clients(struct nfs_client *clp)
}
}
+static void
+nfs4_cleanup_callback(struct nfs_client *clp)
+{
+ struct nfs4_copy_state *cp_state;
+
+ while (!list_empty(&clp->pending_cb_stateids)) {
+ cp_state = list_entry(clp->pending_cb_stateids.next,
+ struct nfs4_copy_state, copies);
+ list_del(&cp_state->copies);
+ kfree(cp_state);
+ }
+}
+
void nfs41_shutdown_client(struct nfs_client *clp)
{
if (nfs4_has_session(clp)) {
+ nfs4_cleanup_callback(clp);
nfs4_shutdown_ds_clients(clp);
nfs4_destroy_session(clp->cl_session);
nfs4_destroy_clientid(clp);
@@ -202,6 +216,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
#if IS_ENABLED(CONFIG_NFS_V4_1)
init_waitqueue_head(&clp->cl_lock_waitq);
#endif
+ INIT_LIST_HEAD(&clp->pending_cb_stateids);
return clp;
error:
@@ -1127,7 +1142,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
nfs_server_copy_userdata(server, parent_server);
/* Get a client representation */
-#ifdef CONFIG_SUNRPC_XPRT_RDMA
+#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
rpc_set_port(data->addr, NFS_RDMA_PORT);
error = nfs4_set_client(server, data->hostname,
data->addr,
@@ -1139,7 +1154,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
parent_client->cl_net);
if (!error)
goto init_server;
-#endif /* CONFIG_SUNRPC_XPRT_RDMA */
+#endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */
rpc_set_port(data->addr, NFS_PORT);
error = nfs4_set_client(server, data->hostname,
@@ -1153,7 +1168,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (error < 0)
goto error;
-#ifdef CONFIG_SUNRPC_XPRT_RDMA
+#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
init_server:
#endif
error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 6b3b372b59b9..4288a6ecaf75 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -133,10 +133,15 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t count, unsigned int flags)
{
+ ssize_t ret;
+
if (file_inode(file_in) == file_inode(file_out))
return -EINVAL;
-
- return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
+retry:
+ ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
+ if (ret == -EAGAIN)
+ goto retry;
+ return ret;
}
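nfs42_proc_copy() returns -EAGAIN when the copy has to be restarted from scratch (verifier mismatch, or a server reboot interrupting an asynchronous copy), and the hunk above simply loops until it succeeds. A sketch of the same loop with an explicit bound, as one possible hardening (hypothetical; the patch itself retries without a limit):

#include <errno.h>
#include <stdio.h>

/* Succeeds on the third attempt to exercise the loop. */
static long do_copy(void)
{
        static int attempts;

        return ++attempts < 3 ? -EAGAIN : 4096;
}

int main(void)
{
        int tries = 10;          /* explicit bound (the kernel has none) */
        long ret;

        do {
                ret = do_copy();
        } while (ret == -EAGAIN && --tries > 0);
        printf("copied %ld bytes\n", ret);
        return 0;
}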
static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
@@ -149,6 +154,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
ret = nfs42_proc_llseek(filep, offset, whence);
if (ret != -ENOTSUPP)
return ret;
+ /* Fall through */
default:
return nfs_file_llseek(filep, offset, whence);
}
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index b6f9d84ba19b..3f23b6840547 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -506,6 +506,7 @@ static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
switch (token) {
case Opt_find_uid:
im->im_type = IDMAP_TYPE_USER;
+ /* Fall through */
case Opt_find_gid:
im->im_conv = IDMAP_CONV_NAMETOID;
ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ);
@@ -513,9 +514,12 @@ static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
case Opt_find_user:
im->im_type = IDMAP_TYPE_USER;
+ /* Fall through */
case Opt_find_group:
im->im_conv = IDMAP_CONV_IDTONAME;
ret = match_int(&substr, &im->im_id);
+ if (ret)
+ goto out;
break;
default:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f6c4ccd693f4..34830f6457ea 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -449,6 +449,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
stateid);
goto wait_on_recovery;
}
+ /* Fall through */
case -NFS4ERR_OPENMODE:
if (inode) {
int err;
@@ -501,8 +502,10 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
ret = -EBUSY;
break;
}
+ /* Fall through */
case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY);
+ /* Fall through */
case -NFS4ERR_GRACE:
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
@@ -581,12 +584,19 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
ret = -EIO;
return ret;
out_retry:
- if (ret == 0)
+ if (ret == 0) {
exception->retry = 1;
+ /*
+ * For NFS4ERR_MOVED, the client transport will need to
+ * be recomputed after migration recovery has completed.
+ */
+ if (errorcode == -NFS4ERR_MOVED)
+ rpc_task_release_transport(task);
+ }
return ret;
}
-static int
+int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
struct nfs4_state *state, long *timeout)
{
@@ -1071,15 +1081,30 @@ int nfs4_call_sync(struct rpc_clnt *clnt,
return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}
-static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
- unsigned long timestamp)
+static void
+nfs4_inc_nlink_locked(struct inode *inode)
+{
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
+ inc_nlink(inode);
+}
+
+static void
+nfs4_dec_nlink_locked(struct inode *inode)
+{
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
+ drop_nlink(inode);
+}
+
+static void
+update_changeattr_locked(struct inode *dir, struct nfs4_change_info *cinfo,
+ unsigned long timestamp, unsigned long cache_validity)
{
struct nfs_inode *nfsi = NFS_I(dir);
- spin_lock(&dir->i_lock);
nfsi->cache_validity |= NFS_INO_INVALID_CTIME
| NFS_INO_INVALID_MTIME
- | NFS_INO_INVALID_DATA;
+ | NFS_INO_INVALID_DATA
+ | cache_validity;
if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(dir)) {
nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
nfsi->attrtimeo_timestamp = jiffies;
@@ -1092,7 +1117,16 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
inode_set_iversion_raw(dir, cinfo->after);
nfsi->read_cache_jiffies = timestamp;
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
+ nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
nfs_fscache_invalidate(dir);
+}
+
+static void
+update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
+ unsigned long timestamp, unsigned long cache_validity)
+{
+ spin_lock(&dir->i_lock);
+ update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
spin_unlock(&dir->i_lock);
}
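The point of the _locked/unlocked split above is that callers already holding i_lock can fold the change-attribute update and an nlink adjustment into a single critical section, as _nfs4_proc_remove() and nfs4_do_create() do further down. A userspace model of the pattern (pthread mutex standing in for i_lock; illustrative values, not the kernel flags):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long cache_validity;
static unsigned int nlink = 2;

static void update_changeattr_locked(unsigned long extra)
{
        cache_validity |= 0x1 | extra;    /* caller holds i_lock */
}

static void update_changeattr(unsigned long extra)
{
        pthread_mutex_lock(&i_lock);
        update_changeattr_locked(extra);
        pthread_mutex_unlock(&i_lock);
}

int main(void)
{
        /* rmdir-style caller: one lock round-trip covers both updates */
        pthread_mutex_lock(&i_lock);
        update_changeattr_locked(0);
        nlink--;                          /* subdirectory removed */
        pthread_mutex_unlock(&i_lock);

        update_changeattr(0);             /* simple caller */
        printf("nlink=%u validity=%#lx\n", nlink, cache_validity);
        return 0;
}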
@@ -1354,6 +1388,7 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
case NFS4_OPEN_CLAIM_PREVIOUS:
if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
break;
+ /* Fall through */
default:
return 0;
}
@@ -1773,6 +1808,10 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
data->o_res.delegation_type,
&data->o_res.delegation,
data->o_res.pagemod_limit);
+
+ if (data->o_res.do_recall)
+ nfs_async_inode_return_delegation(state->inode,
+ &data->o_res.delegation);
}
/*
@@ -2119,6 +2158,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
if (err)
break;
+ /* Fall through */
case FMODE_READ:
err = nfs4_open_recover_helper(opendata, FMODE_READ);
}
@@ -2248,6 +2288,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
+ /* Fall through */
case NFS4_OPEN_CLAIM_FH:
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
}
@@ -2481,7 +2522,7 @@ static int _nfs4_proc_open(struct nfs4_opendata *data,
if (data->file_created ||
inode_peek_iversion_raw(dir) != o_res->cinfo.after)
update_changeattr(dir, &o_res->cinfo,
- o_res->f_attr->time_start);
+ o_res->f_attr->time_start, 0);
}
if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
server->caps &= ~NFS_CAP_POSIX_LOCK;
@@ -2843,6 +2884,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
nfs_save_change_attribute(d_inode(opendata->dir)));
}
+ /* Parse layoutget results before we check for access */
+ pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
+
ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
if (ret != 0)
goto out;
@@ -2851,8 +2895,6 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
nfs_inode_attach_open_context(ctx);
if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
nfs4_schedule_stateid_recovery(server, state);
- else
- pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
}
out:
@@ -2951,7 +2993,7 @@ static int _nfs4_do_open(struct inode *dir,
}
}
if (opened && opendata->file_created)
- *opened |= FILE_CREATED;
+ *opened = 1;
if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
*ctx_th = opendata->f_attr.mdsthreshold;
@@ -3220,7 +3262,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
calldata->res.lr_res = NULL;
break;
case -NFS4ERR_OLD_STATEID:
- if (nfs4_refresh_layout_stateid(&calldata->arg.lr_args->stateid,
+ if (nfs4_layoutreturn_refresh_stateid(&calldata->arg.lr_args->stateid,
+ &calldata->arg.lr_args->range,
calldata->inode))
goto lr_restart;
/* Fallthrough */
@@ -4236,7 +4279,8 @@ out:
return status;
}
-static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
+static int
+_nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_removeargs args = {
@@ -4255,8 +4299,14 @@ static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
int status;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
- if (status == 0)
- update_changeattr(dir, &res.cinfo, timestamp);
+ if (status == 0) {
+ spin_lock(&dir->i_lock);
+ update_changeattr_locked(dir, &res.cinfo, timestamp, 0);
+ /* Removing a directory decrements nlink in the parent */
+ if (ftype == NF4DIR && dir->i_nlink > 2)
+ nfs4_dec_nlink_locked(dir);
+ spin_unlock(&dir->i_lock);
+ }
return status;
}
@@ -4273,7 +4323,7 @@ static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
nfs4_inode_make_writeable(inode);
}
do {
- err = _nfs4_proc_remove(dir, &dentry->d_name);
+ err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
trace_nfs4_remove(dir, &dentry->d_name, err);
err = nfs4_handle_exception(NFS_SERVER(dir), err,
&exception);
@@ -4287,7 +4337,7 @@ static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
int err;
do {
- err = _nfs4_proc_remove(dir, name);
+ err = _nfs4_proc_remove(dir, name, NF4DIR);
trace_nfs4_remove(dir, name, err);
err = nfs4_handle_exception(NFS_SERVER(dir), err,
&exception);
@@ -4331,7 +4381,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
&data->timeout) == -EAGAIN)
return 0;
if (task->tk_status == 0)
- update_changeattr(dir, &res->cinfo, res->dir_attr->time_start);
+ update_changeattr(dir, &res->cinfo,
+ res->dir_attr->time_start, 0);
return 1;
}
@@ -4373,9 +4424,18 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
return 0;
if (task->tk_status == 0) {
- update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start);
- if (new_dir != old_dir)
- update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start);
+ if (new_dir != old_dir) {
+ /* Note: If we moved a directory, nlink will change */
+ update_changeattr(old_dir, &res->old_cinfo,
+ res->old_fattr->time_start,
+ NFS_INO_INVALID_OTHER);
+ update_changeattr(new_dir, &res->new_cinfo,
+ res->new_fattr->time_start,
+ NFS_INO_INVALID_OTHER);
+ } else
+ update_changeattr(old_dir, &res->old_cinfo,
+ res->old_fattr->time_start,
+ 0);
}
return 1;
}
@@ -4416,7 +4476,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
- update_changeattr(dir, &res.cinfo, res.fattr->time_start);
+ update_changeattr(dir, &res.cinfo, res.fattr->time_start, 0);
status = nfs_post_op_update_inode(inode, res.fattr);
if (!status)
nfs_setsecurity(inode, res.fattr, res.label);
@@ -4491,8 +4551,13 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
- update_changeattr(dir, &data->res.dir_cinfo,
- data->res.fattr->time_start);
+ spin_lock(&dir->i_lock);
+ update_changeattr_locked(dir, &data->res.dir_cinfo,
+ data->res.fattr->time_start, 0);
+ /* Creating a directory bumps nlink in the parent */
+ if (data->arg.ftype == NF4DIR)
+ nfs4_inc_nlink_locked(dir);
+ spin_unlock(&dir->i_lock);
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
}
return status;
@@ -5073,6 +5138,40 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
}
+static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
+ struct nfs_commitres *res)
+{
+ struct inode *dst_inode = file_inode(dst);
+ struct nfs_server *server = NFS_SERVER(dst_inode);
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
+ .rpc_argp = args,
+ .rpc_resp = res,
+ };
+
+ args->fh = NFS_FH(dst_inode);
+ return nfs4_call_sync(server->client, server, &msg,
+ &args->seq_args, &res->seq_res, 1);
+}
+
+int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
+{
+ struct nfs_commitargs args = {
+ .offset = offset,
+ .count = count,
+ };
+ struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
+ struct nfs4_exception exception = { };
+ int status;
+
+ do {
+ status = _nfs4_proc_commit(dst, &args, res);
+ status = nfs4_handle_exception(dst_server, status, &exception);
+ } while (exception.retry);
+
+ return status;
+}
+
struct nfs4_renewdata {
struct nfs_client *client;
unsigned long timestamp;
@@ -5902,7 +6001,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
data->res.lr_res = NULL;
break;
case -NFS4ERR_OLD_STATEID:
- if (nfs4_refresh_layout_stateid(&data->args.lr_args->stateid,
+ if (nfs4_layoutreturn_refresh_stateid(&data->args.lr_args->stateid,
+ &data->args.lr_args->range,
data->inode))
goto lr_restart;
/* Fallthrough */
@@ -6209,11 +6309,13 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
if (nfs4_update_lock_stateid(calldata->lsp,
&calldata->res.stateid))
break;
+ /* Fall through */
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_EXPIRED:
nfs4_free_revoked_stateid(calldata->server,
&calldata->arg.stateid,
task->tk_msg.rpc_cred);
+ /* Fall through */
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
@@ -7727,7 +7829,7 @@ static int nfs4_sp4_select_mode(struct nfs_client *clp,
}
out:
clp->cl_sp4_flags = flags;
- return 0;
+ return ret;
}
struct nfs41_exchange_id_data {
@@ -8168,7 +8270,7 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
args->bc_attrs.max_resp_sz = max_bc_payload;
args->bc_attrs.max_resp_sz_cached = 0;
args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
- args->bc_attrs.max_reqs = min_t(unsigned short, max_session_cb_slots, 1);
+ args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
@@ -8851,7 +8953,8 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
server = NFS_SERVER(lrp->args.inode);
switch (task->tk_status) {
case -NFS4ERR_OLD_STATEID:
- if (nfs4_refresh_layout_stateid(&lrp->args.stateid,
+ if (nfs4_layoutreturn_refresh_stateid(&lrp->args.stateid,
+ &lrp->args.range,
lrp->args.inode))
goto out_restart;
/* Fallthrough */
@@ -9554,6 +9657,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
| NFS_CAP_LGOPEN
| NFS_CAP_ALLOCATE
| NFS_CAP_COPY
+ | NFS_CAP_OFFLOAD_CANCEL
| NFS_CAP_DEALLOCATE
| NFS_CAP_SEEK
| NFS_CAP_LAYOUTSTATS
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 2bf2eaa08ca7..3df0eb52da1c 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -274,7 +274,7 @@ static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
static int nfs4_begin_drain_session(struct nfs_client *clp)
{
struct nfs4_session *ses = clp->cl_session;
- int ret = 0;
+ int ret;
if (clp->cl_slot_tbl)
return nfs4_drain_slot_tbl(clp->cl_slot_tbl);
@@ -1525,6 +1525,7 @@ restart:
default:
pr_err("NFS: %s: unhandled error %d\n",
__func__, status);
+ /* Fall through */
case -ENOMEM:
case -NFS4ERR_DENIED:
case -NFS4ERR_RECLAIM_BAD:
@@ -1588,6 +1589,22 @@ restart:
}
clear_bit(NFS_STATE_RECLAIM_NOGRACE,
&state->flags);
+#ifdef CONFIG_NFS_V4_2
+ if (test_bit(NFS_CLNT_DST_SSC_COPY_STATE, &state->flags)) {
+ struct nfs4_copy_state *copy;
+
+ spin_lock(&sp->so_server->nfs_client->cl_lock);
+ list_for_each_entry(copy, &sp->so_server->ss_copies, copies) {
+ if (memcmp(&state->stateid.other, &copy->parent_state->stateid.other, NFS4_STATEID_SIZE))
+ continue;
+ copy->flags = 1;
+ complete(&copy->completion);
+ printk("AGLO: server rebooted waking up the copy\n");
+ break;
+ }
+ spin_unlock(&sp->so_server->nfs_client->cl_lock);
+ }
+#endif /* CONFIG_NFS_V4_2 */
nfs4_put_open_state(state);
spin_lock(&sp->so_lock);
goto restart;
@@ -1597,6 +1614,7 @@ restart:
default:
printk(KERN_ERR "NFS: %s: unhandled error %d\n",
__func__, status);
+ /* Fall through */
case -ENOENT:
case -ENOMEM:
case -EACCES:
@@ -1608,6 +1626,7 @@ restart:
break;
case -EAGAIN:
ssleep(1);
+ /* Fall through */
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_OLD_STATEID:
@@ -1939,7 +1958,9 @@ static int nfs4_establish_lease(struct nfs_client *clp)
clp->cl_mvops->reboot_recovery_ops;
int status;
- nfs4_begin_drain_session(clp);
+ status = nfs4_begin_drain_session(clp);
+ if (status != 0)
+ return status;
cred = nfs4_get_clid_cred(clp);
if (cred == NULL)
return -ENOENT;
@@ -2027,7 +2048,9 @@ static int nfs4_try_migration(struct nfs_server *server, struct rpc_cred *cred)
goto out;
}
- nfs4_begin_drain_session(clp);
+ status = nfs4_begin_drain_session(clp);
+ if (status != 0)
+ return status;
status = nfs4_replace_transport(server, locations);
if (status != 0) {
@@ -2190,9 +2213,11 @@ again:
case -ETIMEDOUT:
if (clnt->cl_softrtry)
break;
+ /* Fall through */
case -NFS4ERR_DELAY:
case -EAGAIN:
ssleep(1);
+ /* Fall through */
case -NFS4ERR_STALE_CLIENTID:
dprintk("NFS: %s after status %d, retrying\n",
__func__, status);
@@ -2204,6 +2229,7 @@ again:
}
if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX)
break;
+ /* Fall through */
case -NFS4ERR_CLID_INUSE:
case -NFS4ERR_WRONGSEC:
/* No point in retrying if we already used RPC_AUTH_UNIX */
@@ -2374,7 +2400,9 @@ static int nfs4_reset_session(struct nfs_client *clp)
if (!nfs4_has_session(clp))
return 0;
- nfs4_begin_drain_session(clp);
+ status = nfs4_begin_drain_session(clp);
+ if (status != 0)
+ return status;
cred = nfs4_get_clid_cred(clp);
status = nfs4_proc_destroy_session(clp->cl_session, cred);
switch (status) {
@@ -2417,7 +2445,9 @@ static int nfs4_bind_conn_to_session(struct nfs_client *clp)
if (!nfs4_has_session(clp))
return 0;
- nfs4_begin_drain_session(clp);
+ ret = nfs4_begin_drain_session(clp);
+ if (ret != 0)
+ return ret;
cred = nfs4_get_clid_cred(clp);
ret = nfs4_proc_bind_conn_to_session(clp, cred);
if (cred)
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index cd41d2577a04..b7bde12d8cd5 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -7789,6 +7789,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC42(LAYOUTSTATS, enc_layoutstats, dec_layoutstats),
PROC42(CLONE, enc_clone, dec_clone),
PROC42(COPY, enc_copy, dec_copy),
+ PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel),
PROC(LOOKUPP, enc_lookupp, dec_lookupp),
};
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 67d19cd92e44..bb5476a6d264 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -561,6 +561,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
case FLUSH_COND_STABLE:
if (nfs_reqs_to_commit(cinfo))
break;
+ /* fall through */
default:
hdr->args.stable = NFS_FILE_SYNC;
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index bcc3addec3c5..e8f232de484f 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -361,18 +361,32 @@ pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
/*
* Update the seqid of a layout stateid
*/
-bool nfs4_refresh_layout_stateid(nfs4_stateid *dst, struct inode *inode)
+bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
+ struct pnfs_layout_range *dst_range,
+ struct inode *inode)
{
struct pnfs_layout_hdr *lo;
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
bool ret = false;
+ LIST_HEAD(head);
+ int err;
spin_lock(&inode->i_lock);
lo = NFS_I(inode)->layout;
if (lo && nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
- dst->seqid = lo->plh_stateid.seqid;
- ret = true;
+ err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
+ if (err != -EBUSY) {
+ dst->seqid = lo->plh_stateid.seqid;
+ *dst_range = range;
+ ret = true;
+ }
}
spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&head);
return ret;
}
@@ -1018,7 +1032,6 @@ pnfs_alloc_init_layoutget_args(struct inode *ino,
nfs4_stateid_copy(&lgp->args.stateid, stateid);
lgp->gfp_flags = gfp_flags;
lgp->cred = get_rpccred(ctx->cred);
- lgp->callback_count = raw_seqcount_begin(&server->nfs_client->cl_callback_count);
return lgp;
}
@@ -1160,12 +1173,21 @@ static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
struct pnfs_layout_segment *s;
+ enum pnfs_iomode iomode;
+ u32 seq;
if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
return false;
- /* Defer layoutreturn until all lsegs are done */
+ seq = lo->plh_return_seq;
+ iomode = lo->plh_return_iomode;
+
+ /* Defer layoutreturn until all recalled lsegs are done */
list_for_each_entry(s, &lo->plh_segs, pls_list) {
+ if (seq && pnfs_seqid_is_newer(s->pls_seq, seq))
+ continue;
+ if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode)
+ continue;
if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
return false;
}
@@ -1609,7 +1631,7 @@ pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
(range->iomode != ls_range->iomode &&
strict_iomode) ||
!pnfs_lseg_range_intersecting(ls_range, range))
- return 0;
+ return false;
/* range1 covers only the first byte in the range */
range1 = *range;
@@ -1631,7 +1653,6 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
- !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
pnfs_lseg_range_match(&lseg->pls_range, range,
strict_iomode)) {
ret = pnfs_get_lseg(lseg);
@@ -1731,6 +1752,17 @@ static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
TASK_UNINTERRUPTIBLE);
}
+static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
+{
+ atomic_inc(&lo->plh_outstanding);
+}
+
+static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
+{
+ if (atomic_dec_and_test(&lo->plh_outstanding))
+ wake_up_var(&lo->plh_outstanding);
+}
+
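nfs_layoutget_begin()/nfs_layoutget_end() pair an outstanding-operations counter with wake_up_var(), so that pnfs_update_layout() can later wait_var_event() for the count to drain before trusting an empty segment list. A self-contained model of that gate, with a condition variable standing in for wake_up_var()/wait_var_event() (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static int outstanding;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static void layoutget_begin(void)
{
        pthread_mutex_lock(&lock);
        outstanding++;
        pthread_mutex_unlock(&lock);
}

static void layoutget_end(void)
{
        pthread_mutex_lock(&lock);
        if (--outstanding == 0)
                pthread_cond_broadcast(&cond);  /* wake_up_var()      */
        pthread_mutex_unlock(&lock);
}

static void wait_for_idle(void)
{
        pthread_mutex_lock(&lock);
        while (outstanding != 0)                /* wait_var_event()   */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        layoutget_begin();
        layoutget_end();
        wait_for_idle();
        puts("no layoutgets outstanding");
        return 0;
}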
static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
unsigned long *bitlock = &lo->plh_flags;
@@ -1791,12 +1823,6 @@ pnfs_update_layout(struct inode *ino,
goto out;
}
- if (iomode == IOMODE_READ && i_size_read(ino) == 0) {
- trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
- PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
- goto out;
- }
-
if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_MDSTHRESH);
@@ -1830,6 +1856,21 @@ lookup_again:
goto out_unlock;
}
+ /*
+ * If the layout segment list is empty, but there are outstanding
+ * layoutget calls, then they might be subject to a layoutrecall.
+ */
+ if (list_empty(&lo->plh_segs) &&
+ atomic_read(&lo->plh_outstanding) != 0) {
+ spin_unlock(&ino->i_lock);
+ if (wait_var_event_killable(&lo->plh_outstanding,
+ atomic_read(&lo->plh_outstanding) == 0
+ || !list_empty(&lo->plh_segs)))
+ goto out_put_layout_hdr;
+ pnfs_put_layout_hdr(lo);
+ goto lookup_again;
+ }
+
lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
if (lseg) {
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
@@ -1903,7 +1944,7 @@ lookup_again:
PNFS_UPDATE_LAYOUT_BLOCKED);
goto out_unlock;
}
- atomic_inc(&lo->plh_outstanding);
+ nfs_layoutget_begin(lo);
spin_unlock(&ino->i_lock);
_add_to_server_list(lo, server);
@@ -1920,14 +1961,14 @@ lookup_again:
if (!lgp) {
trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
PNFS_UPDATE_LAYOUT_NOMEM);
- atomic_dec(&lo->plh_outstanding);
+ nfs_layoutget_end(lo);
goto out_put_layout_hdr;
}
lseg = nfs4_proc_layoutget(lgp, &timeout);
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
- atomic_dec(&lo->plh_outstanding);
+ nfs_layoutget_end(lo);
if (IS_ERR(lseg)) {
switch(PTR_ERR(lseg)) {
case -EBUSY:
@@ -1935,15 +1976,6 @@ lookup_again:
lseg = NULL;
break;
case -ERECALLCONFLICT:
- /* Huh? We hold no layouts, how is there a recall? */
- if (first) {
- lseg = NULL;
- break;
- }
- /* Destroy the existing layout and start over */
- if (time_after(jiffies, giveup))
- pnfs_destroy_layout(NFS_I(ino));
- /* Fallthrough */
case -EAGAIN:
break;
default:
@@ -2022,7 +2054,7 @@ _pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
goto out_unlock;
if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags))
goto out_unlock;
- atomic_inc(&lo->plh_outstanding);
+ nfs_layoutget_begin(lo);
spin_unlock(&ino->i_lock);
_add_to_server_list(lo, NFS_SERVER(ino));
return lo;
@@ -2146,9 +2178,6 @@ void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
} else
lo = NFS_I(lgp->args.inode)->layout;
- if (read_seqcount_retry(&srv->nfs_client->cl_callback_count,
- lgp->callback_count))
- return;
lseg = pnfs_layout_process(lgp);
if (!IS_ERR(lseg)) {
iomode = lgp->args.range.iomode;
@@ -2163,8 +2192,8 @@ void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
struct inode *inode = lgp->args.inode;
if (inode) {
struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
- atomic_dec(&lo->plh_outstanding);
pnfs_clear_first_layoutget(lo);
+ nfs_layoutget_end(lo);
}
pnfs_layoutget_free(lgp);
}
@@ -2238,15 +2267,31 @@ out_forget:
return ERR_PTR(-EAGAIN);
}
+static int
+mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg,
+ struct list_head *tmp_list)
+{
+ if (!mark_lseg_invalid(lseg, tmp_list))
+ return 0;
+ pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg);
+ return 1;
+}
+
/**
* pnfs_mark_matching_lsegs_return - Free or return matching layout segments
* @lo: pointer to layout header
* @tmp_list: list header to be used with pnfs_free_lseg_list()
* @return_range: describe layout segment ranges to be returned
+ * @seq: stateid seqid to match
*
* This function is mainly intended for use by layoutrecall. It attempts
* to free the layout segment immediately, or else to mark it for return
* as soon as its reference count drops to zero.
+ *
+ * Returns
+ * - 0: a layoutreturn needs to be scheduled.
+ * - EBUSY: there are layout segments that are still in use.
+ * - ENOENT: there are no layout segments that need to be returned.
*/
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
@@ -2259,9 +2304,6 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
dprintk("%s:Begin lo %p\n", __func__, lo);
- if (list_empty(&lo->plh_segs))
- return 0;
-
assert_spin_locked(&lo->plh_inode->i_lock);
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
@@ -2271,16 +2313,23 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
lseg, lseg->pls_range.iomode,
lseg->pls_range.offset,
lseg->pls_range.length);
- if (mark_lseg_invalid(lseg, tmp_list))
+ if (mark_lseg_invalid_or_return(lseg, tmp_list))
continue;
remaining++;
set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
}
- if (remaining)
+ if (remaining) {
pnfs_set_plh_return_info(lo, return_range->iomode, seq);
+ return -EBUSY;
+ }
- return remaining;
+ if (!list_empty(&lo->plh_return_segs)) {
+ pnfs_set_plh_return_info(lo, return_range->iomode, seq);
+ return 0;
+ }
+
+ return -ENOENT;
}
void pnfs_error_mark_layout_for_return(struct inode *inode,
@@ -2305,7 +2354,7 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
* segments at hand when sending layoutreturn. See pnfs_put_lseg()
* for how it works.
*/
- if (!pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0)) {
+ if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0) != -EBUSY) {
nfs4_stateid stateid;
enum pnfs_iomode iomode;
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 3fe81424337d..ece367ebde69 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -259,7 +259,9 @@ int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
bool is_recall);
int pnfs_destroy_layouts_byclid(struct nfs_client *clp,
bool is_recall);
-bool nfs4_refresh_layout_stateid(nfs4_stateid *dst, struct inode *inode);
+bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
+ struct pnfs_layout_range *dst_range,
+ struct inode *inode);
void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo);
void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
const nfs4_stateid *new,
@@ -780,7 +782,8 @@ static inline void nfs4_pnfs_v3_ds_connect_unload(void)
{
}
-static inline bool nfs4_refresh_layout_stateid(nfs4_stateid *dst,
+static inline bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
+ struct pnfs_layout_range *dst_range,
struct inode *inode)
{
return false;
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 32ba2d471853..d5e4d3cd8c7f 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
/* The generic layer is about to remove the req from the commit list.
* If this will make the bucket empty, it will need to put the lseg reference.
- * Note this must be called holding i_lock
+ * Note this must be called holding nfsi->commit_mutex
*/
void
pnfs_generic_clear_request_commit(struct nfs_page *req,
@@ -149,9 +149,7 @@ restart:
if (list_empty(&b->written)) {
freeme = b->wlseg;
b->wlseg = NULL;
- spin_unlock(&cinfo->inode->i_lock);
pnfs_put_lseg(freeme);
- spin_lock(&cinfo->inode->i_lock);
goto restart;
}
}
@@ -167,7 +165,7 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
LIST_HEAD(pages);
int i;
- spin_lock(&cinfo->inode->i_lock);
+ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
for (i = idx; i < fl_cinfo->nbuckets; i++) {
bucket = &fl_cinfo->buckets[i];
if (list_empty(&bucket->committing))
@@ -177,12 +175,12 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
list_for_each(pos, &bucket->committing)
cinfo->ds->ncommitting--;
list_splice_init(&bucket->committing, &pages);
- spin_unlock(&cinfo->inode->i_lock);
+ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
nfs_retry_commit(&pages, freeme, cinfo, i);
pnfs_put_lseg(freeme);
- spin_lock(&cinfo->inode->i_lock);
+ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
}
- spin_unlock(&cinfo->inode->i_lock);
+ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}
static unsigned int
@@ -222,13 +220,13 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
struct list_head *pos;
bucket = &cinfo->ds->buckets[data->ds_commit_index];
- spin_lock(&cinfo->inode->i_lock);
+ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
list_for_each(pos, &bucket->committing)
cinfo->ds->ncommitting--;
list_splice_init(&bucket->committing, pages);
data->lseg = bucket->clseg;
bucket->clseg = NULL;
- spin_unlock(&cinfo->inode->i_lock);
+ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 5e470e233c83..ac4b2f005778 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -884,7 +884,7 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
#endif
seq_printf(m, "\n");
- rpc_print_iostats(m, nfss->client);
+ rpc_clnt_show_stats(m, nfss->client);
return 0;
}
@@ -2899,7 +2899,7 @@ static int param_set_portnr(const char *val, const struct kernel_param *kp)
if (!val)
return -EINVAL;
ret = kstrtoul(val, 0, &num);
- if (ret == -EINVAL || num > NFS_CALLBACK_MAXPORTNR)
+ if (ret || num > NFS_CALLBACK_MAXPORTNR)
return -EINVAL;
*((unsigned int *)kp->arg) = num;
return 0;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index a057b4f45a46..586726a590d8 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1406,6 +1406,8 @@ static void nfs_async_write_error(struct list_head *head)
static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
nfs_async_write_error(&hdr->pages);
+ filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
+ hdr->args.offset + hdr->args.count - 1);
}
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 36358d435cb0..426f55005697 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -102,6 +102,7 @@ struct nfsd_net {
time_t nfsd4_lease;
time_t nfsd4_grace;
+ bool somebody_reclaimed;
bool nfsd_net_up;
bool lockd_up;
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 6259a4b8579f..9eb8086ea841 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -202,7 +202,8 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
fh_copy(&resp->fh, &argp->fh);
resp->committed = argp->stable;
- nvecs = svc_fill_write_vector(rqstp, &argp->first, cnt);
+ nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages,
+ &argp->first, cnt);
if (!nvecs)
RETURN_STATUS(nfserr_io);
nfserr = nfsd_write(rqstp, &resp->fh, argp->offset,
@@ -289,6 +290,7 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp)
RETURN_STATUS(nfserr_nametoolong);
argp->tname = svc_fill_symlink_pathname(rqstp, &argp->first,
+ page_address(rqstp->rq_arg.pages[0]),
argp->tlen);
if (IS_ERR(argp->tname))
RETURN_STATUS(nfserrno(PTR_ERR(argp->tname)));
@@ -302,6 +304,7 @@ nfsd3_proc_symlink(struct svc_rqst *rqstp)
fh_init(&resp->fh, NFS3_FHSIZE);
nfserr = nfsd_symlink(rqstp, &resp->dirfh, argp->fname, argp->flen,
argp->tname, &resp->fh);
+ kfree(argp->tname);
RETURN_STATUS(nfserr);
}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 1f04d2a70d25..601bf33c26a0 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -746,30 +746,17 @@ static int max_cb_time(struct net *net)
return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
}
-static struct rpc_cred *callback_cred;
-
-int set_callback_cred(void)
-{
- if (callback_cred)
- return 0;
- callback_cred = rpc_lookup_machine_cred("nfs");
- if (!callback_cred)
- return -ENOMEM;
- return 0;
-}
-
-void cleanup_callback_cred(void)
-{
- if (callback_cred) {
- put_rpccred(callback_cred);
- callback_cred = NULL;
- }
-}
-
static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
{
if (clp->cl_minorversion == 0) {
- return get_rpccred(callback_cred);
+ char *principal = clp->cl_cred.cr_targ_princ ?
+ clp->cl_cred.cr_targ_princ : "nfs";
+ struct rpc_cred *cred;
+
+ cred = rpc_lookup_machine_cred(principal);
+ if (!IS_ERR(cred))
+ get_rpccred(cred);
+ return cred;
} else {
struct rpc_auth *auth = client->cl_auth;
struct auth_cred acred = {};
@@ -980,6 +967,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
break;
case -ESERVERFAULT:
++session->se_cb_seq_nr;
+ /* Fall through */
case 1:
case -NFS4ERR_BADSESSION:
nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 228faf00a594..2b36aa037ce0 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -133,27 +133,20 @@ void nfsd4_setup_layout_type(struct svc_export *exp)
if (!(exp->ex_flags & NFSEXP_PNFS))
return;
- /*
- * If flex file is configured, use it by default. Otherwise
- * check if the file system supports exporting a block-like layout.
- * If the block device supports reservations prefer the SCSI layout,
- * otherwise advertise the block layout.
- */
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
- /* overwrite flex file layout selection if needed */
if (sb->s_export_op->get_uuid &&
sb->s_export_op->map_blocks &&
sb->s_export_op->commit_blocks)
exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
- /* overwrite block layout selection if needed */
if (sb->s_export_op->map_blocks &&
sb->s_export_op->commit_blocks &&
- sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops)
+ sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops &&
+ blk_queue_scsi_passthrough(sb->s_bdev->bd_disk->queue))
exp->ex_layout_types |= 1 << LAYOUT_SCSI;
#endif
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 5d99e8810b85..b7bc6e1a85ac 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -354,6 +354,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct svc_fh *resfh = NULL;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ bool reclaim = false;
dprintk("NFSD: nfsd4_open filename %.*s op_openowner %p\n",
(int)open->op_fname.len, open->op_fname.data,
@@ -424,6 +425,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
+ reclaim = true;
case NFS4_OPEN_CLAIM_FH:
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
status = do_open_fhandle(rqstp, cstate, open);
@@ -452,6 +454,8 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
WARN(status && open->op_created,
"nfsd4_process_open2 failed to open newly-created file! status=%u\n",
be32_to_cpu(status));
+ if (reclaim && !status)
+ nn->somebody_reclaimed = true;
out:
if (resfh && resfh != &cstate->current_fh) {
fh_dup2(&cstate->current_fh, resfh);
@@ -982,24 +986,6 @@ out:
return status;
}
-static int fill_in_write_vector(struct kvec *vec, struct nfsd4_write *write)
-{
- int i = 1;
- int buflen = write->wr_buflen;
-
- vec[0].iov_base = write->wr_head.iov_base;
- vec[0].iov_len = min_t(int, buflen, write->wr_head.iov_len);
- buflen -= vec[0].iov_len;
-
- while (buflen) {
- vec[i].iov_base = page_address(write->wr_pagelist[i - 1]);
- vec[i].iov_len = min_t(int, PAGE_SIZE, buflen);
- buflen -= vec[i].iov_len;
- i++;
- }
- return i;
-}
-
static __be32
nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -1027,7 +1013,10 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
write->wr_how_written = write->wr_stable_how;
gen_boot_verifier(&write->wr_verifier, SVC_NET(rqstp));
- nvecs = fill_in_write_vector(rqstp->rq_vec, write);
+ nvecs = svc_fill_write_vector(rqstp, write->wr_pagelist,
+ &write->wr_head, write->wr_buflen);
+ if (!nvecs)
+ return nfserr_io;
WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
status = nfsd_vfs_write(rqstp, &cstate->current_fh, filp,
@@ -1599,7 +1588,7 @@ static const char *nfsd4_op_name(unsigned opnum);
*/
static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
{
- struct nfsd4_op *op = &args->ops[0];
+ struct nfsd4_op *first_op = &args->ops[0];
/* These ordering requirements don't apply to NFSv4.0: */
if (args->minorversion == 0)
@@ -1607,12 +1596,17 @@ static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
/* This is weird, but OK, not our problem: */
if (args->opcnt == 0)
return nfs_ok;
- if (op->status == nfserr_op_illegal)
+ if (first_op->status == nfserr_op_illegal)
return nfs_ok;
- if (!(nfsd4_ops[op->opnum].op_flags & ALLOWED_AS_FIRST_OP))
+ if (!(nfsd4_ops[first_op->opnum].op_flags & ALLOWED_AS_FIRST_OP))
return nfserr_op_not_in_session;
- if (op->opnum == OP_SEQUENCE)
+ if (first_op->opnum == OP_SEQUENCE)
return nfs_ok;
+ /*
+ * So first_op is something allowed outside a session, like
+ * EXCHANGE_ID; but then it has to be the only op in the
+ * compound:
+ */
if (args->opcnt != 1)
return nfserr_not_only_op;
return nfs_ok;
@@ -1726,6 +1720,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
if (status) {
op = &args->ops[0];
op->status = status;
+ resp->opcnt = 1;
goto encode_op;
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 857141446d6b..b0ca0efd2875 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1979,8 +1979,10 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source)
target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
target->cr_raw_principal = kstrdup(source->cr_raw_principal,
GFP_KERNEL);
- if ((source->cr_principal && ! target->cr_principal) ||
- (source->cr_raw_principal && ! target->cr_raw_principal))
+ target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
+ if ((source->cr_principal && !target->cr_principal) ||
+ (source->cr_raw_principal && !target->cr_raw_principal) ||
+ (source->cr_targ_princ && !target->cr_targ_princ))
return -ENOMEM;
target->cr_flavor = source->cr_flavor;
@@ -2057,6 +2059,7 @@ same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
return false;
+ /* XXX: check that cr_targ_princ fields match ? */
if (cr1->cr_principal == cr2->cr_principal)
return true;
if (!cr1->cr_principal || !cr2->cr_principal)
@@ -2956,18 +2959,18 @@ out_no_session:
return status;
}
-static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
+static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{
- if (!session)
+ if (!cstate->session)
return false;
- return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
+ return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
}
__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
- struct nfsd4_destroy_session *sessionid = &u->destroy_session;
+ struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
struct nfsd4_session *ses;
__be32 status;
int ref_held_by_me = 0;
@@ -2975,14 +2978,14 @@ nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
status = nfserr_not_only_op;
- if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
+ if (nfsd4_compound_in_session(cstate, sessionid)) {
if (!nfsd4_last_compound_op(r))
goto out;
ref_held_by_me++;
}
- dump_sessionid(__func__, &sessionid->sessionid);
+ dump_sessionid(__func__, sessionid);
spin_lock(&nn->client_lock);
- ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
+ ses = find_in_sessionid_hashtbl(sessionid, net, &status);
if (!ses)
goto out_client_lock;
status = nfserr_wrong_cred;
@@ -3945,9 +3948,9 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
/*
* We're assuming the state code never drops its reference
* without first removing the lease. Since we're in this lease
- * callback (and since the lease code is serialized by the kernel
- * lock) we know the server hasn't removed the lease yet, we know
- * it's safe to take a reference.
+ * callback (and since the lease code is serialized by the
+ * i_lock) we know the server hasn't removed the lease yet, and
+ * we know it's safe to take a reference.
*/
refcount_inc(&dp->dl_stid.sc_count);
nfsd4_run_cb(&dp->dl_recall);
@@ -4693,6 +4696,28 @@ nfsd4_end_grace(struct nfsd_net *nn)
*/
}
+/*
+ * If we've waited a lease period but there are still clients trying to
+ * reclaim, wait a little longer to give them a chance to finish.
+ */
+static bool clients_still_reclaiming(struct nfsd_net *nn)
+{
+ unsigned long now = get_seconds();
+ unsigned long double_grace_period_end = nn->boot_time +
+ 2 * nn->nfsd4_lease;
+
+ if (!nn->somebody_reclaimed)
+ return false;
+ nn->somebody_reclaimed = false;
+ /*
+ * If we've given them *two* lease times to reclaim, and they're
+ * still not done, give up:
+ */
+ if (time_after(now, double_grace_period_end))
+ return false;
+ return true;
+}
+
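A worked example of the cut-off in clients_still_reclaiming(), assuming the 45-second default lease set later in this series: the laundromat keeps rescheduling while clients keep reclaiming, but never past boot_time + 2 * lease = 90 s:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        unsigned long boot_time = 1000, lease = 45;
        unsigned long deadline = boot_time + 2 * lease;   /* +90 s */
        bool somebody_reclaimed = true;   /* reset each pass in the kernel */

        for (unsigned long now = boot_time + lease; ; now += 10) {
                bool extend = somebody_reclaimed && now <= deadline;

                printf("t = +%lus: grace %s\n", now - boot_time,
                       extend ? "extended" : "ends");
                if (!extend)
                        break;
        }
        return 0;
}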
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
@@ -4706,6 +4731,11 @@ nfs4_laundromat(struct nfsd_net *nn)
time_t t, new_timeo = nn->nfsd4_lease;
dprintk("NFSD: laundromat service - starting\n");
+
+ if (clients_still_reclaiming(nn)) {
+ new_timeo = 0;
+ goto out;
+ }
nfsd4_end_grace(nn);
INIT_LIST_HEAD(&reaplist);
spin_lock(&nn->client_lock);
@@ -4803,7 +4833,7 @@ nfs4_laundromat(struct nfsd_net *nn)
posix_unblock_lock(&nbl->nbl_lock);
free_blocked_lock(nbl);
}
-
+out:
new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
return new_timeo;
}
@@ -6053,6 +6083,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
case 0: /* success! */
nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
status = 0;
+ if (lock->lk_reclaim)
+ nn->somebody_reclaimed = true;
break;
case FILE_LOCK_DEFERRED:
nbl = NULL;
@@ -6293,7 +6325,7 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
return status;
}
- inode = file_inode(filp);
+ inode = locks_inode(filp);
flctx = inode->i_flctx;
if (flctx && !list_empty_careful(&flctx->flc_posix)) {
@@ -7199,14 +7231,10 @@ nfs4_state_start(void)
{
int ret;
- ret = set_callback_cred();
- if (ret)
- return ret;
-
laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
if (laundry_wq == NULL) {
ret = -ENOMEM;
- goto out_cleanup_cred;
+ goto out;
}
ret = nfsd4_create_callback_queue();
if (ret)
@@ -7217,8 +7245,7 @@ nfs4_state_start(void)
out_free_laundry:
destroy_workqueue(laundry_wq);
-out_cleanup_cred:
- cleanup_callback_cred();
+out:
return ret;
}
@@ -7255,7 +7282,6 @@ nfs4_state_shutdown(void)
{
destroy_workqueue(laundry_wq);
nfsd4_destroy_callback_queue();
- cleanup_callback_cred();
}
static void
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index a96843c59fc1..418fa9c78186 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1390,10 +1390,8 @@ nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
p += XDR_QUADLEN(dummy);
}
- /* ssp_window and ssp_num_gss_handles */
+ /* ignore ssp_window and ssp_num_gss_handles: */
READ_BUF(8);
- dummy = be32_to_cpup(p++);
- dummy = be32_to_cpup(p++);
break;
default:
goto xdr_error;
@@ -2006,6 +2004,31 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
return p;
}
+/*
+ * ctime (in NFSv4, time_metadata) is not writeable, and the client
+ * doesn't really care what resolution could theoretically be stored by
+ * the filesystem.
+ *
+ * The client cares how close together changes can be while still
+ * guaranteeing ctime changes. For most filesystems (which have
+ * timestamps with nanosecond fields) that is limited by the resolution
+ * of the time returned from current_time() (which I'm assuming to be
+ * 1/HZ).
+ */
+static __be32 *encode_time_delta(__be32 *p, struct inode *inode)
+{
+ struct timespec ts;
+ u32 ns;
+
+ ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran);
+ ts = ns_to_timespec(ns);
+
+ p = xdr_encode_hyper(p, ts.tv_sec);
+ *p++ = cpu_to_be32(ts.tv_nsec);
+
+ return p;
+}
+
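A worked example for encode_time_delta(), assuming HZ = 250 and a filesystem with 1 ns timestamp granularity: ns = max(10^9 / 250, 1) = 4,000,000, so the server now advertises a time_delta of 0.004 s instead of the hard-coded 1 s it encoded before:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL
#define HZ 250UL                 /* assumption for this example */

int main(void)
{
        unsigned long s_time_gran = 1;   /* 1 ns timestamp granularity */
        unsigned long ns = NSEC_PER_SEC / HZ;

        if (s_time_gran > ns)
                ns = s_time_gran;
        printf("time_delta = %lu s %lu ns\n",
               ns / NSEC_PER_SEC, ns % NSEC_PER_SEC);  /* 0 s 4000000 ns */
        return 0;
}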
static __be32 *encode_cinfo(__be32 *p, struct nfsd4_change_info *c)
{
*p++ = cpu_to_be32(c->atomic);
@@ -2797,9 +2820,7 @@ out_acl:
p = xdr_reserve_space(xdr, 12);
if (!p)
goto out_resource;
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(1);
- *p++ = cpu_to_be32(0);
+ p = encode_time_delta(p, d_inode(dentry));
}
if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
p = xdr_reserve_space(xdr, 12);
@@ -2868,6 +2889,16 @@ out_acl:
goto out;
}
+ if (bmval2 & FATTR4_WORD2_CHANGE_ATTR_TYPE) {
+ p = xdr_reserve_space(xdr, 4);
+ if (!p)
+ goto out_resource;
+ if (IS_I_VERSION(d_inode(dentry)))
+ *p++ = cpu_to_be32(NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR);
+ else
+ *p++ = cpu_to_be32(NFS4_CHANGE_TYPE_IS_TIME_METADATA);
+ }
+
if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
status = nfsd4_encode_security_label(xdr, rqstp, context,
contextlen);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index d107b4426f7e..7fb9f7c667b1 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -73,7 +73,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size);
#endif
-static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+static ssize_t (*const write_op[])(struct file *, char *, size_t) = {
[NFSD_Fh] = write_filehandle,
[NFSD_FO_UnlockIP] = write_unlock_ip,
[NFSD_FO_UnlockFS] = write_unlock_fs,
@@ -1237,8 +1237,9 @@ static __net_init int nfsd_init_net(struct net *net)
retval = nfsd_idmap_init(net);
if (retval)
goto out_idmap_error;
- nn->nfsd4_lease = 90; /* default lease time */
- nn->nfsd4_grace = 90;
+ nn->nfsd4_lease = 45; /* default lease time */
+ nn->nfsd4_grace = 45;
+ nn->somebody_reclaimed = false;
nn->clverifier_counter = prandom_u32();
nn->clientid_counter = prandom_u32();
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 3fce905d0365..066899929863 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -360,6 +360,7 @@ void nfsd_lockd_shutdown(void);
#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
(NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
+ FATTR4_WORD2_CHANGE_ATTR_TYPE | \
FATTR4_WORD2_MODE_UMASK | \
NFSD4_2_SECURITY_ATTRS)
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index a008e7634181..b319080288c3 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -451,7 +451,7 @@ static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp)
switch (fsid_type) {
case FSID_DEV:
if (!old_valid_dev(exp_sb(exp)->s_dev))
- return 0;
+ return false;
/* FALL THROUGH */
case FSID_MAJOR_MINOR:
case FSID_ENCODE_DEV:
@@ -461,13 +461,13 @@ static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp)
case FSID_UUID8:
case FSID_UUID16:
if (!is_root_export(exp))
- return 0;
+ return false;
/* fall through */
case FSID_UUID4_INUM:
case FSID_UUID16_INUM:
return exp->ex_uuid != NULL;
}
- return 1;
+ return true;
}
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index f107f9fa8e15..0d20fd161225 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -218,7 +218,8 @@ nfsd_proc_write(struct svc_rqst *rqstp)
SVCFH_fmt(&argp->fh),
argp->len, argp->offset);
- nvecs = svc_fill_write_vector(rqstp, &argp->first, cnt);
+ nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages,
+ &argp->first, cnt);
if (!nvecs)
return nfserr_io;
nfserr = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh),
@@ -453,6 +454,7 @@ nfsd_proc_symlink(struct svc_rqst *rqstp)
return nfserr_nametoolong;
argp->tname = svc_fill_symlink_pathname(rqstp, &argp->first,
+ page_address(rqstp->rq_arg.pages[0]),
argp->tlen);
if (IS_ERR(argp->tname))
return nfserrno(PTR_ERR(argp->tname));
@@ -465,6 +467,7 @@ nfsd_proc_symlink(struct svc_rqst *rqstp)
nfserr = nfsd_symlink(rqstp, &argp->ffh, argp->fname, argp->flen,
argp->tname, &newfh);
+ kfree(argp->tname);
fh_put(&argp->ffh);
fh_put(&newfh);
return nfserr;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index f3772ea8ba0d..0b15dac7e609 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -617,8 +617,6 @@ extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir,
struct nfsd_net *nn);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid,
struct nfsd4_compound_state *cstate, struct nfsd_net *nn);
-extern int set_callback_cred(void);
-extern void cleanup_callback_cred(void);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index b0555d7d8200..55a099e47ba2 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -763,7 +763,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
goto out_nfserr;
}
- host_err = ima_file_check(file, may_flags, 0);
+ host_err = ima_file_check(file, may_flags);
if (host_err) {
fput(file);
goto out_nfserr;
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index c5fa3dee72fc..7da0fac71dc2 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -51,7 +51,7 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
return err;
}
-static int nilfs_page_mkwrite(struct vm_fault *vmf)
+static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page = vmf->page;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 6ffeca84d7c3..1b9067cf4511 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -834,7 +834,7 @@ static int nilfs_setup_super(struct super_block *sb, int is_mount)
sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT);
sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1);
- sbp[0]->s_mtime = cpu_to_le64(get_seconds());
+ sbp[0]->s_mtime = cpu_to_le64(ktime_get_real_seconds());
skip_mount_setup:
sbp[0]->s_state =
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index e2bea2ac5dfb..58d77dc696eb 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -19,6 +19,7 @@
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/dnotify.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -353,7 +354,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
goto out;
}
- __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
+ __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
error = attach_dn(dn, dn_mark, id, fd, filp, mask);
/* !error means that we attached the dn to the dn_mark, so don't free it */
@@ -384,8 +385,9 @@ out_err:
static int __init dnotify_init(void)
{
- dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC);
- dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC);
+ dnotify_struct_cache = KMEM_CACHE(dnotify_struct,
+ SLAB_PANIC|SLAB_ACCOUNT);
+ dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC|SLAB_ACCOUNT);
dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
if (IS_ERR(dnotify_group))
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index f90842efea13..94b52157bf8d 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -8,9 +8,11 @@
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
+#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
+#include <linux/sched/mm.h>
#include "fanotify.h"
@@ -140,8 +142,8 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
struct inode *inode, u32 mask,
const struct path *path)
{
- struct fanotify_event_info *event;
- gfp_t gfp = GFP_KERNEL;
+ struct fanotify_event_info *event = NULL;
+ gfp_t gfp = GFP_KERNEL_ACCOUNT;
/*
* For queues with unlimited length lost events are not expected and
@@ -151,19 +153,22 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
if (group->max_events == UINT_MAX)
gfp |= __GFP_NOFAIL;
+ /* Whoever is interested in the event pays for the allocation. */
+ memalloc_use_memcg(group->memcg);
+
if (fanotify_is_perm_event(mask)) {
struct fanotify_perm_event_info *pevent;
pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
if (!pevent)
- return NULL;
+ goto out;
event = &pevent->fae;
pevent->response = 0;
goto init;
}
event = kmem_cache_alloc(fanotify_event_cachep, gfp);
if (!event)
- return NULL;
+ goto out;
init: __maybe_unused
fsnotify_init_event(&event->fse, inode, mask);
event->tgid = get_pid(task_tgid(current));
@@ -174,6 +179,8 @@ init: __maybe_unused
event->path.mnt = NULL;
event->path.dentry = NULL;
}
+out:
+ memalloc_unuse_memcg();
return event;
}
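The hunk above is one instance of the remote-charging idiom this series applies across fsnotify; a minimal sketch of the pattern, using the same APIs the hunk itself calls:

	memalloc_use_memcg(group->memcg);       /* make group->memcg current->active_memcg */
	event = kmem_cache_alloc(cachep, GFP_KERNEL_ACCOUNT); /* __GFP_ACCOUNT charges it */
	memalloc_unuse_memcg();                 /* restore the previous active memcg */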
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index ec4d8c59d0e3..69054886915b 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -16,6 +16,7 @@
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
+#include <linux/memcontrol.h>
#include <asm/ioctls.h>
@@ -524,17 +525,16 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
return mask & oldmask;
}
-static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
- struct vfsmount *mnt, __u32 mask,
- unsigned int flags)
+static int fanotify_remove_mark(struct fsnotify_group *group,
+ fsnotify_connp_t *connp, __u32 mask,
+ unsigned int flags)
{
struct fsnotify_mark *fsn_mark = NULL;
__u32 removed;
int destroy_mark;
mutex_lock(&group->mark_mutex);
- fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
- group);
+ fsn_mark = fsnotify_find_mark(connp, group);
if (!fsn_mark) {
mutex_unlock(&group->mark_mutex);
return -ENOENT;
@@ -542,47 +542,33 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
&destroy_mark);
- if (removed & real_mount(mnt)->mnt_fsnotify_mask)
- fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
+ if (removed & fsnotify_conn_mask(fsn_mark->connector))
+ fsnotify_recalc_mask(fsn_mark->connector);
if (destroy_mark)
fsnotify_detach_mark(fsn_mark);
mutex_unlock(&group->mark_mutex);
if (destroy_mark)
fsnotify_free_mark(fsn_mark);
+ /* matches the fsnotify_find_mark() */
fsnotify_put_mark(fsn_mark);
return 0;
}
+static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt, __u32 mask,
+ unsigned int flags)
+{
+ return fanotify_remove_mark(group, &real_mount(mnt)->mnt_fsnotify_marks,
+ mask, flags);
+}
+
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
struct inode *inode, __u32 mask,
unsigned int flags)
{
- struct fsnotify_mark *fsn_mark = NULL;
- __u32 removed;
- int destroy_mark;
-
- mutex_lock(&group->mark_mutex);
- fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
- if (!fsn_mark) {
- mutex_unlock(&group->mark_mutex);
- return -ENOENT;
- }
-
- removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
- &destroy_mark);
- if (removed & inode->i_fsnotify_mask)
- fsnotify_recalc_mask(inode->i_fsnotify_marks);
- if (destroy_mark)
- fsnotify_detach_mark(fsn_mark);
- mutex_unlock(&group->mark_mutex);
- if (destroy_mark)
- fsnotify_free_mark(fsn_mark);
-
- /* matches the fsnotify_find_mark() */
- fsnotify_put_mark(fsn_mark);
-
- return 0;
+ return fanotify_remove_mark(group, &inode->i_fsnotify_marks, mask,
+ flags);
}
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
@@ -615,8 +601,8 @@ static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
}
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
- struct inode *inode,
- struct vfsmount *mnt)
+ fsnotify_connp_t *connp,
+ unsigned int type)
{
struct fsnotify_mark *mark;
int ret;
@@ -629,7 +615,7 @@ static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
return ERR_PTR(-ENOMEM);
fsnotify_init_mark(mark, group);
- ret = fsnotify_add_mark_locked(mark, inode, mnt, 0);
+ ret = fsnotify_add_mark_locked(mark, connp, type, 0);
if (ret) {
fsnotify_put_mark(mark);
return ERR_PTR(ret);
@@ -639,39 +625,43 @@ static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
}
-static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
- struct vfsmount *mnt, __u32 mask,
- unsigned int flags)
+static int fanotify_add_mark(struct fsnotify_group *group,
+ fsnotify_connp_t *connp, unsigned int type,
+ __u32 mask, unsigned int flags)
{
struct fsnotify_mark *fsn_mark;
__u32 added;
mutex_lock(&group->mark_mutex);
- fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
- group);
+ fsn_mark = fsnotify_find_mark(connp, group);
if (!fsn_mark) {
- fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
+ fsn_mark = fanotify_add_new_mark(group, connp, type);
if (IS_ERR(fsn_mark)) {
mutex_unlock(&group->mark_mutex);
return PTR_ERR(fsn_mark);
}
}
added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
- if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
- fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
+ if (added & ~fsnotify_conn_mask(fsn_mark->connector))
+ fsnotify_recalc_mask(fsn_mark->connector);
mutex_unlock(&group->mark_mutex);
fsnotify_put_mark(fsn_mark);
return 0;
}
+static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt, __u32 mask,
+ unsigned int flags)
+{
+ return fanotify_add_mark(group, &real_mount(mnt)->mnt_fsnotify_marks,
+ FSNOTIFY_OBJ_TYPE_VFSMOUNT, mask, flags);
+}
+
static int fanotify_add_inode_mark(struct fsnotify_group *group,
struct inode *inode, __u32 mask,
unsigned int flags)
{
- struct fsnotify_mark *fsn_mark;
- __u32 added;
-
pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
/*
@@ -684,22 +674,8 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
(atomic_read(&inode->i_writecount) > 0))
return 0;
- mutex_lock(&group->mark_mutex);
- fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
- if (!fsn_mark) {
- fsn_mark = fanotify_add_new_mark(group, inode, NULL);
- if (IS_ERR(fsn_mark)) {
- mutex_unlock(&group->mark_mutex);
- return PTR_ERR(fsn_mark);
- }
- }
- added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
- if (added & ~inode->i_fsnotify_mask)
- fsnotify_recalc_mask(inode->i_fsnotify_marks);
- mutex_unlock(&group->mark_mutex);
-
- fsnotify_put_mark(fsn_mark);
- return 0;
+ return fanotify_add_mark(group, &inode->i_fsnotify_marks,
+ FSNOTIFY_OBJ_TYPE_INODE, mask, flags);
}
/* fanotify syscalls */
@@ -756,6 +732,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
group->fanotify_data.user = user;
atomic_inc(&user->fanotify_listeners);
+ group->memcg = get_mem_cgroup_from_mm(current->mm);
oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL);
if (unlikely(!oevent)) {
@@ -957,7 +934,8 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
*/
static int __init fanotify_user_setup(void)
{
- fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
+ fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
+ SLAB_PANIC|SLAB_ACCOUNT);
fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
fanotify_perm_event_cachep =
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 10aac1942c9f..86fcf5814279 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -15,7 +15,7 @@
#include <linux/exportfs.h>
#include "inotify/inotify.h"
-#include "../fs/mount.h"
+#include "fsnotify.h"
#if defined(CONFIG_PROC_FS)
@@ -81,7 +81,7 @@ static void inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
return;
inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark);
- inode = igrab(mark->connector->inode);
+ inode = igrab(fsnotify_conn_inode(mark->connector));
if (inode) {
/*
* IN_ALL_EVENTS represents all of the mask bits
@@ -117,7 +117,7 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
mflags |= FAN_MARK_IGNORED_SURV_MODIFY;
if (mark->connector->type == FSNOTIFY_OBJ_TYPE_INODE) {
- inode = igrab(mark->connector->inode);
+ inode = igrab(fsnotify_conn_inode(mark->connector));
if (!inode)
return;
seq_printf(m, "fanotify ino:%lx sdev:%x mflags:%x mask:%x ignored_mask:%x ",
@@ -127,7 +127,7 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
seq_putc(m, '\n');
iput(inode);
} else if (mark->connector->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
- struct mount *mnt = real_mount(mark->connector->mnt);
+ struct mount *mnt = fsnotify_conn_mount(mark->connector);
seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x ignored_mask:%x\n",
mnt->mnt_id, mflags, mark->mask, mark->ignored_mask);
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 34515d2c4ba3..7902653dd577 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -9,6 +9,18 @@
#include "../mount.h"
+static inline struct inode *fsnotify_conn_inode(
+ struct fsnotify_mark_connector *conn)
+{
+ return container_of(conn->obj, struct inode, i_fsnotify_marks);
+}
+
+static inline struct mount *fsnotify_conn_mount(
+ struct fsnotify_mark_connector *conn)
+{
+ return container_of(conn->obj, struct mount, mnt_fsnotify_marks);
+}
+
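The two helpers above carry the core idea of the connector rework; spelled out as a sketch:

	/*
	 * conn->obj points at the fsnotify_connp_t embedded in its owner,
	 * e.g. conn->obj == &inode->i_fsnotify_marks.  container_of()
	 * subtracts offsetof(struct inode, i_fsnotify_marks) to recover
	 * the inode, so the connector no longer needs dedicated
	 * inode/vfsmount pointers (see the conn->obj conversions in
	 * mark.c below).
	 */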
/* destroy all events sitting in this groups notification queue */
extern void fsnotify_flush_notify(struct fsnotify_group *group);
@@ -19,8 +31,8 @@ extern struct srcu_struct fsnotify_mark_srcu;
extern int fsnotify_compare_groups(struct fsnotify_group *a,
struct fsnotify_group *b);
-/* Destroy all marks connected via given connector */
-extern void fsnotify_destroy_marks(struct fsnotify_mark_connector __rcu **connp);
+/* Destroy all marks attached to an object via connector */
+extern void fsnotify_destroy_marks(fsnotify_connp_t *connp);
/* run the list of all marks associated with inode and destroy them */
static inline void fsnotify_clear_marks_by_inode(struct inode *inode)
{
diff --git a/fs/notify/group.c b/fs/notify/group.c
index aa5468f23e45..c03b83662876 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -22,6 +22,7 @@
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/wait.h>
+#include <linux/memcontrol.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
@@ -36,6 +37,8 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
if (group->ops->free_group_priv)
group->ops->free_group_priv(group);
+ mem_cgroup_put(group->memcg);
+
kfree(group);
}
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 9ab6dde38a14..f4184b4f3815 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
+#include <linux/sched/mm.h>
#include "inotify.h"
@@ -98,7 +99,11 @@ int inotify_handle_event(struct fsnotify_group *group,
i_mark = container_of(inode_mark, struct inotify_inode_mark,
fsn_mark);
- event = kmalloc(alloc_len, GFP_KERNEL);
+ /* Whoever is interested in the event pays for the allocation. */
+ memalloc_use_memcg(group->memcg);
+ event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT);
+ memalloc_unuse_memcg();
+
if (unlikely(!event)) {
/*
* Treat lost event due to ENOMEM the same way as queue
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 1cf5b779d862..ac6978d3208c 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -38,6 +38,7 @@
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
+#include <linux/memcontrol.h>
#include "inotify.h"
#include "../fdinfo.h"
@@ -510,6 +511,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
__u32 old_mask, new_mask;
__u32 mask;
int add = (arg & IN_MASK_ADD);
+ int create = (arg & IN_MASK_CREATE);
int ret;
mask = inotify_arg_to_mask(arg);
@@ -517,6 +519,8 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
if (!fsn_mark)
return -ENOENT;
+ else if (create)
+ return -EEXIST;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
@@ -636,6 +640,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
oevent->name_len = 0;
group->max_events = max_events;
+ group->memcg = get_mem_cgroup_from_mm(current->mm);
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
@@ -718,6 +723,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
if (unlikely(!f.file))
return -EBADF;
+ /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
+ if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)))
+ return -EINVAL;
+
/* verify that this is indeed an inotify instance */
if (unlikely(f.file->f_op != &inotify_fops)) {
ret = -EINVAL;
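From userspace, the two inotify hunks above behave roughly as follows (a sketch; it assumes headers that expose the new IN_MASK_CREATE flag):

	int wd = inotify_add_watch(fd, path, IN_MODIFY | IN_MASK_CREATE);
	/* the first call creates the watch; repeating it on the same inode
	 * now fails with errno == EEXIST instead of silently updating the
	 * mask, and IN_MASK_ADD | IN_MASK_CREATE together fails with EINVAL */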
@@ -806,9 +815,10 @@ static int __init inotify_user_setup(void)
BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
- BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);
+ BUG_ON(hweight32(ALL_INOTIFY_BITS) != 22);
- inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
+ inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
+ SLAB_PANIC|SLAB_ACCOUNT);
inotify_max_queued_events = 16384;
init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 61f4c5fa34c7..05506d60131c 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -109,6 +109,23 @@ void fsnotify_get_mark(struct fsnotify_mark *mark)
refcount_inc(&mark->refcnt);
}
+static __u32 *fsnotify_conn_mask_p(struct fsnotify_mark_connector *conn)
+{
+ if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
+ return &fsnotify_conn_inode(conn)->i_fsnotify_mask;
+ else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT)
+ return &fsnotify_conn_mount(conn)->mnt_fsnotify_mask;
+ return NULL;
+}
+
+__u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn)
+{
+ if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
+ return 0;
+
+ return *fsnotify_conn_mask_p(conn);
+}
+
static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
u32 new_mask = 0;
@@ -119,15 +136,15 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
new_mask |= mark->mask;
}
- if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
- conn->inode->i_fsnotify_mask = new_mask;
- else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT)
- real_mount(conn->mnt)->mnt_fsnotify_mask = new_mask;
+ if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
+ return;
+
+ *fsnotify_conn_mask_p(conn) = new_mask;
}
/*
* Calculate mask of events for a list of marks. The caller must make sure
- * connector and connector->inode cannot disappear under us. Callers achieve
+ * connector and connector->obj cannot disappear under us. Callers achieve
* this by holding a mark->lock or mark->group->mark_mutex for a mark on this
* list.
*/
@@ -140,7 +157,8 @@ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
__fsnotify_recalc_mask(conn);
spin_unlock(&conn->lock);
if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
- __fsnotify_update_child_dentry_flags(conn->inode);
+ __fsnotify_update_child_dentry_flags(
+ fsnotify_conn_inode(conn));
}
/* Free all connectors queued for freeing once SRCU period ends */
@@ -166,20 +184,20 @@ static struct inode *fsnotify_detach_connector_from_object(
{
struct inode *inode = NULL;
+ if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED)
+ return NULL;
+
if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
- inode = conn->inode;
- rcu_assign_pointer(inode->i_fsnotify_marks, NULL);
+ inode = fsnotify_conn_inode(conn);
inode->i_fsnotify_mask = 0;
- conn->inode = NULL;
- conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
} else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
- rcu_assign_pointer(real_mount(conn->mnt)->mnt_fsnotify_marks,
- NULL);
- real_mount(conn->mnt)->mnt_fsnotify_mask = 0;
- conn->mnt = NULL;
- conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
+ fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
}
+ rcu_assign_pointer(*(conn->obj), NULL);
+ conn->obj = NULL;
+ conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
+
return inode;
}
@@ -436,11 +454,10 @@ int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
return -1;
}
-static int fsnotify_attach_connector_to_object(
- struct fsnotify_mark_connector __rcu **connp,
- struct inode *inode,
- struct vfsmount *mnt)
+static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
+ unsigned int type)
{
+ struct inode *inode = NULL;
struct fsnotify_mark_connector *conn;
conn = kmem_cache_alloc(fsnotify_mark_connector_cachep, GFP_KERNEL);
@@ -448,13 +465,10 @@ static int fsnotify_attach_connector_to_object(
return -ENOMEM;
spin_lock_init(&conn->lock);
INIT_HLIST_HEAD(&conn->list);
- if (inode) {
- conn->type = FSNOTIFY_OBJ_TYPE_INODE;
- conn->inode = igrab(inode);
- } else {
- conn->type = FSNOTIFY_OBJ_TYPE_VFSMOUNT;
- conn->mnt = mnt;
- }
+ conn->type = type;
+ conn->obj = connp;
+ if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
+ inode = igrab(fsnotify_conn_inode(conn));
/*
* cmpxchg() provides the barrier so that readers of *connp can see
* only initialized structure
@@ -476,7 +490,7 @@ static int fsnotify_attach_connector_to_object(
* they are sure list cannot go away under them.
*/
static struct fsnotify_mark_connector *fsnotify_grab_connector(
- struct fsnotify_mark_connector __rcu **connp)
+ fsnotify_connp_t *connp)
{
struct fsnotify_mark_connector *conn;
int idx;
@@ -503,27 +517,22 @@ out:
* priority, highest number first, and then by the group's location in memory.
*/
static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
- struct inode *inode, struct vfsmount *mnt,
+ fsnotify_connp_t *connp, unsigned int type,
int allow_dups)
{
struct fsnotify_mark *lmark, *last = NULL;
struct fsnotify_mark_connector *conn;
- struct fsnotify_mark_connector __rcu **connp;
int cmp;
int err = 0;
- if (WARN_ON(!inode && !mnt))
+ if (WARN_ON(!fsnotify_valid_obj_type(type)))
return -EINVAL;
- if (inode)
- connp = &inode->i_fsnotify_marks;
- else
- connp = &real_mount(mnt)->mnt_fsnotify_marks;
restart:
spin_lock(&mark->lock);
conn = fsnotify_grab_connector(connp);
if (!conn) {
spin_unlock(&mark->lock);
- err = fsnotify_attach_connector_to_object(connp, inode, mnt);
+ err = fsnotify_attach_connector_to_object(connp, type);
if (err)
return err;
goto restart;
@@ -569,14 +578,13 @@ out_err:
* These marks may be used for the fsnotify backend to determine which
* event types should be delivered to which group.
*/
-int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct inode *inode,
- struct vfsmount *mnt, int allow_dups)
+int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
+ fsnotify_connp_t *connp, unsigned int type,
+ int allow_dups)
{
struct fsnotify_group *group = mark->group;
int ret = 0;
- BUG_ON(inode && mnt);
- BUG_ON(!inode && !mnt);
BUG_ON(!mutex_is_locked(&group->mark_mutex));
/*
@@ -593,7 +601,7 @@ int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct inode *inode,
fsnotify_get_mark(mark); /* for g_list */
spin_unlock(&mark->lock);
- ret = fsnotify_add_mark_list(mark, inode, mnt, allow_dups);
+ ret = fsnotify_add_mark_list(mark, connp, type, allow_dups);
if (ret)
goto err;
@@ -613,14 +621,14 @@ err:
return ret;
}
-int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
- struct vfsmount *mnt, int allow_dups)
+int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp,
+ unsigned int type, int allow_dups)
{
int ret;
struct fsnotify_group *group = mark->group;
mutex_lock(&group->mark_mutex);
- ret = fsnotify_add_mark_locked(mark, inode, mnt, allow_dups);
+ ret = fsnotify_add_mark_locked(mark, connp, type, allow_dups);
mutex_unlock(&group->mark_mutex);
return ret;
}
@@ -629,9 +637,8 @@ int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
* Given a list of marks, find the mark associated with given group. If found
* take a reference to that mark and return it, else return NULL.
*/
-struct fsnotify_mark *fsnotify_find_mark(
- struct fsnotify_mark_connector __rcu **connp,
- struct fsnotify_group *group)
+struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
+ struct fsnotify_group *group)
{
struct fsnotify_mark_connector *conn;
struct fsnotify_mark *mark;
@@ -697,8 +704,8 @@ clear:
}
}
-/* Destroy all marks attached to inode / vfsmount */
-void fsnotify_destroy_marks(struct fsnotify_mark_connector __rcu **connp)
+/* Destroy all marks attached to an object via connector */
+void fsnotify_destroy_marks(fsnotify_connp_t *connp)
{
struct fsnotify_mark_connector *conn;
struct fsnotify_mark *mark, *old_mark = NULL;
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 3a2e509c77c5..8946130c87ad 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -93,13 +93,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
ofs = 0;
if (file_ofs < init_size)
ofs = init_size - file_ofs;
- local_irq_save(flags);
kaddr = kmap_atomic(page);
memset(kaddr + bh_offset(bh) + ofs, 0,
bh->b_size - ofs);
flush_dcache_page(page);
kunmap_atomic(kaddr);
- local_irq_restore(flags);
}
} else {
clear_buffer_uptodate(bh);
@@ -146,13 +144,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
recs = PAGE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
- local_irq_save(flags);
kaddr = kmap_atomic(page);
for (i = 0; i < recs; i++)
post_read_mst_fixup((NTFS_RECORD*)(kaddr +
i * rec_size), rec_size);
kunmap_atomic(kaddr);
- local_irq_restore(flags);
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
@@ -926,7 +922,7 @@ static int ntfs_write_mst_block(struct page *page,
ntfs_volume *vol = ni->vol;
u8 *kaddr;
unsigned int rec_size = ni->itype.index.block_size;
- ntfs_inode *locked_nis[PAGE_SIZE / rec_size];
+ ntfs_inode *locked_nis[PAGE_SIZE / NTFS_BLOCK_SIZE];
struct buffer_head *bh, *head, *tbh, *rec_start_bh;
struct buffer_head *bhs[MAX_BUF_PER_PAGE];
runlist_element *rl;
@@ -935,6 +931,9 @@ static int ntfs_write_mst_block(struct page *page,
bool sync, is_mft, page_is_dirty, rec_is_dirty;
unsigned char bh_size_bits;
+ if (WARN_ON(rec_size < NTFS_BLOCK_SIZE))
+ return -EINVAL;
+
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
"0x%lx.", vi->i_ino, ni->type, page->index);
BUG_ON(!NInoNonResident(ni));
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index fbd0090d7d0c..df7c32b5fac7 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -128,6 +128,7 @@ static inline void handle_bounds_compressed_page(struct page *page,
/**
* ntfs_decompress - decompress a compression block into an array of pages
* @dest_pages: destination array of pages
+ * @completed_pages: scratch space to track completed pages
* @dest_index: current index into @dest_pages (IN/OUT)
* @dest_ofs: current offset within @dest_pages[@dest_index] (IN/OUT)
* @dest_max_index: maximum index into @dest_pages (IN)
@@ -162,10 +163,10 @@ static inline void handle_bounds_compressed_page(struct page *page,
* Note to hackers: This function may not sleep until it has finished accessing
* the compression block @cb_start as it is a per-CPU buffer.
*/
-static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
- int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
- const int xpage, char *xpage_done, u8 *const cb_start,
- const u32 cb_size, const loff_t i_size,
+static int ntfs_decompress(struct page *dest_pages[], int completed_pages[],
+ int *dest_index, int *dest_ofs, const int dest_max_index,
+ const int dest_max_ofs, const int xpage, char *xpage_done,
+ u8 *const cb_start, const u32 cb_size, const loff_t i_size,
const s64 initialized_size)
{
/*
@@ -190,9 +191,6 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
/* Variables for tag and token parsing. */
u8 tag; /* Current tag. */
int token; /* Loop counter for the eight tokens in tag. */
-
- /* Need this because we can't sleep, so need two stages. */
- int completed_pages[dest_max_index - *dest_index + 1];
int nr_completed_pages = 0;
/* Default error code. */
@@ -516,6 +514,7 @@ int ntfs_read_compressed_block(struct page *page)
unsigned int cb_clusters, cb_max_ofs;
int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
struct page **pages;
+ int *completed_pages;
unsigned char xpage_done = 0;
ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
@@ -528,14 +527,16 @@ int ntfs_read_compressed_block(struct page *page)
BUG_ON(ni->name_len);
pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
+ completed_pages = kmalloc_array(nr_pages + 1, sizeof(int), GFP_NOFS);
/* Allocate memory to store the buffer heads we need. */
bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
bhs = kmalloc(bhs_size, GFP_NOFS);
- if (unlikely(!pages || !bhs)) {
+ if (unlikely(!pages || !bhs || !completed_pages)) {
kfree(bhs);
kfree(pages);
+ kfree(completed_pages);
unlock_page(page);
ntfs_error(vol->sb, "Failed to allocate internal buffers.");
return -ENOMEM;
@@ -562,6 +563,7 @@ int ntfs_read_compressed_block(struct page *page)
if (xpage >= max_page) {
kfree(bhs);
kfree(pages);
+ kfree(completed_pages);
zero_user(page, 0, PAGE_SIZE);
ntfs_debug("Compressed read outside i_size - truncated?");
SetPageUptodate(page);
@@ -854,10 +856,10 @@ lock_retry_remap:
unsigned int prev_cur_page = cur_page;
ntfs_debug("Found compressed compression block.");
- err = ntfs_decompress(pages, &cur_page, &cur_ofs,
- cb_max_page, cb_max_ofs, xpage, &xpage_done,
- cb_pos, cb_size - (cb_pos - cb), i_size,
- initialized_size);
+ err = ntfs_decompress(pages, completed_pages, &cur_page,
+ &cur_ofs, cb_max_page, cb_max_ofs, xpage,
+ &xpage_done, cb_pos, cb_size - (cb_pos - cb),
+ i_size, initialized_size);
/*
* We can sleep from now on, lock already dropped by
* ntfs_decompress().
@@ -912,6 +914,7 @@ lock_retry_remap:
/* We no longer need the list of pages. */
kfree(pages);
+ kfree(completed_pages);
/* If we have completed the requested page, we return success. */
if (likely(xpage_done))
@@ -956,5 +959,6 @@ err_out:
}
}
kfree(pages);
+ kfree(completed_pages);
return -EIO;
}
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index decaf75d1cd5..bd3221cbdd95 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -667,18 +667,18 @@ static int ntfs_read_locked_inode(struct inode *vi)
* mtime is the last change of the data within the file. Not changed
* when only metadata is changed, e.g. a rename doesn't affect mtime.
*/
- vi->i_mtime = timespec_to_timespec64(ntfs2utc(si->last_data_change_time));
+ vi->i_mtime = ntfs2utc(si->last_data_change_time);
/*
* ctime is the last change of the metadata of the file. This obviously
* always changes, when mtime is changed. ctime can be changed on its
* own, mtime is then not changed, e.g. when a file is renamed.
*/
- vi->i_ctime = timespec_to_timespec64(ntfs2utc(si->last_mft_change_time));
+ vi->i_ctime = ntfs2utc(si->last_mft_change_time);
/*
* Last access to the data within the file. Not changed during a rename
* for example but changed whenever the file is written to.
*/
- vi->i_atime = timespec_to_timespec64(ntfs2utc(si->last_access_time));
+ vi->i_atime = ntfs2utc(si->last_access_time);
/* Find the attribute list attribute if present. */
ntfs_attr_reinit_search_ctx(ctx);
@@ -2997,7 +2997,7 @@ int __ntfs_write_inode(struct inode *vi, int sync)
si = (STANDARD_INFORMATION*)((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset));
/* Update the access times if they have changed. */
- nt = utc2ntfs(timespec64_to_timespec(vi->i_mtime));
+ nt = utc2ntfs(vi->i_mtime);
if (si->last_data_change_time != nt) {
ntfs_debug("Updating mtime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino, (long long)
@@ -3006,7 +3006,7 @@ int __ntfs_write_inode(struct inode *vi, int sync)
si->last_data_change_time = nt;
modified = true;
}
- nt = utc2ntfs(timespec64_to_timespec(vi->i_ctime));
+ nt = utc2ntfs(vi->i_ctime);
if (si->last_mft_change_time != nt) {
ntfs_debug("Updating ctime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino, (long long)
@@ -3015,7 +3015,7 @@ int __ntfs_write_inode(struct inode *vi, int sync)
si->last_mft_change_time = nt;
modified = true;
}
- nt = utc2ntfs(timespec64_to_timespec(vi->i_atime));
+ nt = utc2ntfs(vi->i_atime);
if (si->last_access_time != nt) {
ntfs_debug("Updating atime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino,
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 32c523cf5a2d..fb14d17666c8 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -35,6 +35,8 @@
#include "mft.h"
#include "ntfs.h"
+#define MAX_BHS (PAGE_SIZE / NTFS_BLOCK_SIZE)
+
/**
* map_mft_record_page - map the page in which a specific mft record resides
* @ni: ntfs inode whose mft record page to map
@@ -469,7 +471,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
struct page *page;
unsigned int blocksize = vol->sb->s_blocksize;
int max_bhs = vol->mft_record_size / blocksize;
- struct buffer_head *bhs[max_bhs];
+ struct buffer_head *bhs[MAX_BHS];
struct buffer_head *bh, *head;
u8 *kmirr;
runlist_element *rl;
@@ -479,6 +481,8 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
ntfs_debug("Entering for inode 0x%lx.", mft_no);
BUG_ON(!max_bhs);
+ if (WARN_ON(max_bhs > MAX_BHS))
+ return -EINVAL;
if (unlikely(!vol->mftmirr_ino)) {
/* This could happen during umount... */
err = ntfs_sync_mft_mirror_umount(vol, mft_no, m);
@@ -674,7 +678,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
unsigned int blocksize = vol->sb->s_blocksize;
unsigned char blocksize_bits = vol->sb->s_blocksize_bits;
int max_bhs = vol->mft_record_size / blocksize;
- struct buffer_head *bhs[max_bhs];
+ struct buffer_head *bhs[MAX_BHS];
struct buffer_head *bh, *head;
runlist_element *rl;
unsigned int block_start, block_end, m_start, m_end;
@@ -684,6 +688,10 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
BUG_ON(NInoAttr(ni));
BUG_ON(!max_bhs);
BUG_ON(!PageLocked(page));
+ if (WARN_ON(max_bhs > MAX_BHS)) {
+ err = -EINVAL;
+ goto err_out;
+ }
/*
* If the ntfs_inode is clean no need to do anything. If it is dirty,
* mark it as clean now so that it can be redirtied later on if needed.
diff --git a/fs/ntfs/time.h b/fs/ntfs/time.h
index 01233989d5d1..24cd719f1fd2 100644
--- a/fs/ntfs/time.h
+++ b/fs/ntfs/time.h
@@ -36,16 +36,16 @@
* Convert the Linux UTC time @ts to its corresponding NTFS time and return
* that in little endian format.
*
- * Linux stores time in a struct timespec consisting of a time_t (long at
- * present) tv_sec and a long tv_nsec where tv_sec is the number of 1-second
- * intervals since 1st January 1970, 00:00:00 UTC and tv_nsec is the number of
- * 1-nano-second intervals since the value of tv_sec.
+ * Linux stores time in a struct timespec64 consisting of a time64_t tv_sec
+ * and a long tv_nsec where tv_sec is the number of 1-second intervals since
+ * 1st January 1970, 00:00:00 UTC and tv_nsec is the number of 1-nano-second
+ * intervals since the value of tv_sec.
*
* NTFS uses Microsoft's standard time format which is stored in a s64 and is
* measured as the number of 100-nano-second intervals since 1st January 1601,
* 00:00:00 UTC.
*/
-static inline sle64 utc2ntfs(const struct timespec ts)
+static inline sle64 utc2ntfs(const struct timespec64 ts)
{
/*
* Convert the seconds to 100ns intervals, add the nano-seconds
@@ -63,7 +63,10 @@ static inline sle64 utc2ntfs(const struct timespec ts)
*/
static inline sle64 get_current_ntfs_time(void)
{
- return utc2ntfs(current_kernel_time());
+ struct timespec64 ts;
+
+ ktime_get_coarse_real_ts64(&ts);
+ return utc2ntfs(ts);
}
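For reference, the conversion these helpers perform, unrolled (the spelled-out constant is assumed to match NTFS_TIME_OFFSET in ntfs.h):

	/*
	 * utc2ntfs(ts) == cpu_to_sle64(ts.tv_sec * 10000000LL
	 *                              + ts.tv_nsec / 100
	 *                              + 116444736000000000LL);
	 * i.e. the Unix epoch (tv_sec == 0) maps to NTFS tick
	 * 116444736000000000 (100ns intervals from 1601 to 1970).
	 */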
/**
@@ -73,18 +76,18 @@ static inline sle64 get_current_ntfs_time(void)
* Convert the little endian NTFS time @time to its corresponding Linux UTC
* time and return that in cpu format.
*
- * Linux stores time in a struct timespec consisting of a time_t (long at
- * present) tv_sec and a long tv_nsec where tv_sec is the number of 1-second
- * intervals since 1st January 1970, 00:00:00 UTC and tv_nsec is the number of
- * 1-nano-second intervals since the value of tv_sec.
+ * Linux stores time in a struct timespec64 consisting of a time64_t tv_sec
+ * and a long tv_nsec where tv_sec is the number of 1-second intervals since
+ * 1st January 1970, 00:00:00 UTC and tv_nsec is the number of 1-nano-second
+ * intervals since the value of tv_sec.
*
* NTFS uses Microsoft's standard time format which is stored in a s64 and is
 * measured as the number of 100-nano-second intervals since 1st January 1601,
* 00:00:00 UTC.
*/
-static inline struct timespec ntfs2utc(const sle64 time)
+static inline struct timespec64 ntfs2utc(const sle64 time)
{
- struct timespec ts;
+ struct timespec64 ts;
/* Subtract the NTFS time offset. */
u64 t = (u64)(sle64_to_cpu(time) - NTFS_TIME_OFFSET);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 0f157bbd3e0f..a342f008e42f 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -932,13 +932,11 @@ static int ocfs2_validate_extent_block(struct super_block *sb,
goto bail;
}
- if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) {
+ if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation)
rc = ocfs2_error(sb,
"Extent block #%llu has an invalid h_fs_generation of #%u\n",
(unsigned long long)bh->b_blocknr,
le32_to_cpu(eb->h_fs_generation));
- goto bail;
- }
bail:
return rc;
}
@@ -1481,19 +1479,17 @@ static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
while(le16_to_cpu(el->l_tree_depth) > 1) {
if (le16_to_cpu(el->l_next_free_rec) == 0) {
- ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
- "Owner %llu has empty extent list (next_free_rec == 0)\n",
- (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
- status = -EIO;
+ status = ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+ "Owner %llu has empty extent list (next_free_rec == 0)\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
goto bail;
}
i = le16_to_cpu(el->l_next_free_rec) - 1;
blkno = le64_to_cpu(el->l_recs[i].e_blkno);
if (!blkno) {
- ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
- "Owner %llu has extent list where extent # %d has no physical block start\n",
- (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i);
- status = -EIO;
+ status = ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+ "Owner %llu has extent list where extent # %d has no physical block start\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i);
goto bail;
}
@@ -1598,10 +1594,8 @@ static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
* the new data. */
ret = ocfs2_add_branch(handle, et, bh, last_eb_bh,
meta_ac);
- if (ret < 0) {
+ if (ret < 0)
mlog_errno(ret);
- goto out;
- }
out:
if (final_depth)
@@ -3214,11 +3208,10 @@ rightmost_no_delete:
goto rightmost_no_delete;
if (le16_to_cpu(el->l_next_free_rec) == 0) {
- ret = -EIO;
- ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
- "Owner %llu has empty extent block at %llu\n",
- (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
- (unsigned long long)le64_to_cpu(eb->h_blkno));
+ ret = ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
+ "Owner %llu has empty extent block at %llu\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+ (unsigned long long)le64_to_cpu(eb->h_blkno));
goto out;
}
@@ -4411,12 +4404,11 @@ static int ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
le16_to_cpu(new_el->l_count)) {
bh = path_leaf_bh(left_path);
eb = (struct ocfs2_extent_block *)bh->b_data;
- ocfs2_error(sb,
- "Extent block #%llu has an invalid l_next_free_rec of %d. It should have matched the l_count of %d\n",
- (unsigned long long)le64_to_cpu(eb->h_blkno),
- le16_to_cpu(new_el->l_next_free_rec),
- le16_to_cpu(new_el->l_count));
- status = -EINVAL;
+ status = ocfs2_error(sb,
+ "Extent block #%llu has an invalid l_next_free_rec of %d. It should have matched the l_count of %d\n",
+ (unsigned long long)le64_to_cpu(eb->h_blkno),
+ le16_to_cpu(new_el->l_next_free_rec),
+ le16_to_cpu(new_el->l_count));
goto free_left_path;
}
rec = &new_el->l_recs[
@@ -4466,11 +4458,10 @@ static int ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
if (le16_to_cpu(new_el->l_next_free_rec) <= 1) {
bh = path_leaf_bh(right_path);
eb = (struct ocfs2_extent_block *)bh->b_data;
- ocfs2_error(sb,
- "Extent block #%llu has an invalid l_next_free_rec of %d\n",
- (unsigned long long)le64_to_cpu(eb->h_blkno),
- le16_to_cpu(new_el->l_next_free_rec));
- status = -EINVAL;
+ status = ocfs2_error(sb,
+ "Extent block #%llu has an invalid l_next_free_rec of %d\n",
+ (unsigned long long)le64_to_cpu(eb->h_blkno),
+ le16_to_cpu(new_el->l_next_free_rec));
goto free_right_path;
}
rec = &new_el->l_recs[1];
@@ -5523,10 +5514,8 @@ static int ocfs2_truncate_rec(handle_t *handle,
ocfs2_journal_dirty(handle, path_leaf_bh(path));
ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
- if (ret) {
+ if (ret)
mlog_errno(ret);
- goto out;
- }
out:
ocfs2_free_path(left_path);
@@ -5659,10 +5648,8 @@ int ocfs2_remove_extent(handle_t *handle,
ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
cpos, len);
- if (ret) {
+ if (ret)
mlog_errno(ret);
- goto out;
- }
}
out:
@@ -5707,7 +5694,6 @@ static int ocfs2_reserve_blocks_for_rec_trunc(struct inode *inode,
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
- goto out;
}
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index ea8c551bcd7e..9b2ed62dd638 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -127,13 +127,13 @@ enum o2hb_heartbeat_modes {
O2HB_HEARTBEAT_NUM_MODES,
};
-char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = {
- "local", /* O2HB_HEARTBEAT_LOCAL */
- "global", /* O2HB_HEARTBEAT_GLOBAL */
+static const char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = {
+ "local", /* O2HB_HEARTBEAT_LOCAL */
+ "global", /* O2HB_HEARTBEAT_GLOBAL */
};
unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
-unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;
+static unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;
/*
* o2hb_dependent_users tracks the number of registered callbacks that depend
@@ -141,7 +141,7 @@ unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;
* However only o2dlm depends on the heartbeat. It does not want the heartbeat
* to stop while a dlm domain is still active.
*/
-unsigned int o2hb_dependent_users;
+static unsigned int o2hb_dependent_users;
/*
* In global heartbeat mode, all regions are pinned if there are one or more
@@ -2486,7 +2486,7 @@ unlock:
return ret;
}
-void o2hb_region_dec_user(const char *region_uuid)
+static void o2hb_region_dec_user(const char *region_uuid)
{
spin_lock(&o2hb_live_lock);
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index da64c3a20eeb..0e4166cc23a0 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -35,9 +35,9 @@
* cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;
-char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
- "reset", /* O2NM_FENCE_RESET */
- "panic", /* O2NM_FENCE_PANIC */
+static const char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
+ "reset", /* O2NM_FENCE_RESET */
+ "panic", /* O2NM_FENCE_PANIC */
};
static inline void o2nm_lock_subsystem(void);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 1296f78ae966..7d9eea7d4a87 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -872,8 +872,6 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
"for type %u key %08x\n", msg_type, key);
}
write_unlock(&o2net_handler_lock);
- if (ret)
- goto out;
out:
if (ret)
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 0ff424c6d17c..8e712b614e6e 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -96,7 +96,7 @@ struct ocfs2_unblock_ctl {
};
/* Lockdep class keys */
-struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
+static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
int new_level);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 255f758af03a..9fa35cb6f6e0 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2537,19 +2537,14 @@ static int ocfs2_file_clone_range(struct file *file_in,
len, false);
}
-static ssize_t ocfs2_file_dedupe_range(struct file *src_file,
- u64 loff,
- u64 len,
- struct file *dst_file,
- u64 dst_loff)
+static int ocfs2_file_dedupe_range(struct file *file_in,
+ loff_t pos_in,
+ struct file *file_out,
+ loff_t pos_out,
+ u64 len)
{
- int error;
-
- error = ocfs2_reflink_remap_range(src_file, loff, dst_file, dst_loff,
+ return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out,
len, true);
- if (error)
- return error;
- return len;
}
const struct inode_operations ocfs2_file_iops = {
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index ddc3e9470c87..79279240fb6e 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -637,10 +637,8 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
handle = NULL;
status = ocfs2_commit_truncate(osb, inode, fe_bh);
- if (status < 0) {
+ if (status < 0)
mlog_errno(status);
- goto out;
- }
}
out:
@@ -1499,7 +1497,6 @@ static int ocfs2_filecheck_validate_inode_block(struct super_block *sb,
(unsigned long long)bh->b_blocknr,
le32_to_cpu(di->i_fs_generation));
rc = -OCFS2_FILECHECK_ERR_GENERATION;
- goto bail;
}
bail:
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index fe0d1f9571bb..7642b6712c39 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -663,11 +663,10 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
#ifdef CONFIG_OCFS2_DEBUG_FS
if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
ocfs2_local_alloc_count_bits(alloc)) {
- ocfs2_error(osb->sb, "local alloc inode %llu says it has %u used bits, but a count shows %u\n",
- (unsigned long long)le64_to_cpu(alloc->i_blkno),
- le32_to_cpu(alloc->id1.bitmap1.i_used),
- ocfs2_local_alloc_count_bits(alloc));
- status = -EIO;
+ status = ocfs2_error(osb->sb, "local alloc inode %llu says it has %u used bits, but a count shows %u\n",
+ (unsigned long long)le64_to_cpu(alloc->i_blkno),
+ le32_to_cpu(alloc->id1.bitmap1.i_used),
+ ocfs2_local_alloc_count_bits(alloc));
goto bail;
}
#endif
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 16c42ed0dca8..b1a8b046f4c2 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -137,14 +137,13 @@ static int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
int rc = 0;
struct buffer_head *tmp = *bh;
- if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
- ocfs2_error(inode->i_sb,
- "Quota file %llu is probably corrupted! Requested to read block %Lu but file has size only %Lu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (unsigned long long)v_block,
- (unsigned long long)i_size_read(inode));
- return -EIO;
- }
+ if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block)
+ return ocfs2_error(inode->i_sb,
+ "Quota file %llu is probably corrupted! Requested to read block %Lu but file has size only %Lu\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ (unsigned long long)v_block,
+ (unsigned long long)i_size_read(inode));
+
rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
ocfs2_validate_quota_block);
if (rc)
diff --git a/fs/open.c b/fs/open.c
index d0e955b558ad..0285ce7dbd51 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -68,7 +68,6 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
long vfs_truncate(const struct path *path, loff_t length)
{
struct inode *inode;
- struct dentry *upperdentry;
long error;
inode = path->dentry->d_inode;
@@ -91,17 +90,7 @@ long vfs_truncate(const struct path *path, loff_t length)
if (IS_APPEND(inode))
goto mnt_drop_write_and_out;
- /*
- * If this is an overlayfs then do as if opening the file so we get
- * write access on the upper inode, not on the overlay inode. For
- * non-overlay filesystems d_real() is an identity function.
- */
- upperdentry = d_real(path->dentry, NULL, O_WRONLY, 0);
- error = PTR_ERR(upperdentry);
- if (IS_ERR(upperdentry))
- goto mnt_drop_write_and_out;
-
- error = get_write_access(upperdentry->d_inode);
+ error = get_write_access(inode);
if (error)
goto mnt_drop_write_and_out;
@@ -120,7 +109,7 @@ long vfs_truncate(const struct path *path, loff_t length)
error = do_truncate(path->dentry, length, 0, NULL);
put_write_and_out:
- put_write_access(upperdentry->d_inode);
+ put_write_access(inode);
mnt_drop_write_and_out:
mnt_drop_write(path->mnt);
out:
@@ -707,12 +696,12 @@ int ksys_fchown(unsigned int fd, uid_t user, gid_t group)
if (!f.file)
goto out;
- error = mnt_want_write_file_path(f.file);
+ error = mnt_want_write_file(f.file);
if (error)
goto out_fput;
audit_file(f.file);
error = chown_common(&f.file->f_path, user, group);
- mnt_drop_write_file_path(f.file);
+ mnt_drop_write_file(f.file);
out_fput:
fdput(f);
out:
@@ -724,27 +713,13 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
return ksys_fchown(fd, user, group);
}
-int open_check_o_direct(struct file *f)
-{
- /* NB: we're sure to have correct a_ops only after f_op->open */
- if (f->f_flags & O_DIRECT) {
- if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
- return -EINVAL;
- }
- return 0;
-}
-
static int do_dentry_open(struct file *f,
struct inode *inode,
- int (*open)(struct inode *, struct file *),
- const struct cred *cred)
+ int (*open)(struct inode *, struct file *))
{
static const struct file_operations empty_fops = {};
int error;
- f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
- FMODE_PREAD | FMODE_PWRITE;
-
path_get(&f->f_path);
f->f_inode = inode;
f->f_mapping = inode->i_mapping;
@@ -753,7 +728,7 @@ static int do_dentry_open(struct file *f,
f->f_wb_err = filemap_sample_wb_err(f->f_mapping);
if (unlikely(f->f_flags & O_PATH)) {
- f->f_mode = FMODE_PATH;
+ f->f_mode = FMODE_PATH | FMODE_OPENED;
f->f_op = &empty_fops;
return 0;
}
@@ -780,7 +755,7 @@ static int do_dentry_open(struct file *f,
goto cleanup_all;
}
- error = security_file_open(f, cred);
+ error = security_file_open(f);
if (error)
goto cleanup_all;
@@ -788,6 +763,8 @@ static int do_dentry_open(struct file *f,
if (error)
goto cleanup_all;
+ /* normally all 3 are set; ->open() can clear them if needed */
+ f->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
if (!open)
open = f->f_op->open;
if (open) {
@@ -795,6 +772,7 @@ static int do_dentry_open(struct file *f,
if (error)
goto cleanup_all;
}
+ f->f_mode |= FMODE_OPENED;
if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_inc(inode);
if ((f->f_mode & FMODE_READ) &&
@@ -809,9 +787,16 @@ static int do_dentry_open(struct file *f,
file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
+ /* NB: we're sure to have correct a_ops only after f_op->open */
+ if (f->f_flags & O_DIRECT) {
+ if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
+ return -EINVAL;
+ }
return 0;
cleanup_all:
+ if (WARN_ON_ONCE(error > 0))
+ error = -EINVAL;
fops_put(f->f_op);
if (f->f_mode & FMODE_WRITER) {
put_write_access(inode);
@@ -847,19 +832,12 @@ cleanup_file:
* Returns zero on success or -errno if the open failed.
*/
int finish_open(struct file *file, struct dentry *dentry,
- int (*open)(struct inode *, struct file *),
- int *opened)
+ int (*open)(struct inode *, struct file *))
{
- int error;
- BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
+ BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */
file->f_path.dentry = dentry;
- error = do_dentry_open(file, d_backing_inode(dentry), open,
- current_cred());
- if (!error)
- *opened |= FILE_OPENED;
-
- return error;
+ return do_dentry_open(file, d_backing_inode(dentry), open);
}
EXPORT_SYMBOL(finish_open);
@@ -874,13 +852,13 @@ EXPORT_SYMBOL(finish_open);
* NB: unlike finish_open() this function does consume the dentry reference and
* the caller need not dput() it.
*
- * Returns "1" which must be the return value of ->atomic_open() after having
+ * Returns "0" which must be the return value of ->atomic_open() after having
* called this function.
*/
int finish_no_open(struct file *file, struct dentry *dentry)
{
file->f_path.dentry = dentry;
- return 1;
+ return 0;
}
EXPORT_SYMBOL(finish_no_open);
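Together with the finish_open() change above, a filesystem's ->atomic_open() now returns 0 through both helpers; a hypothetical sketch (the foo_* names are invented for illustration):

	static int foo_atomic_open(struct inode *dir, struct dentry *dentry,
	                           struct file *file, unsigned open_flag,
	                           umode_t mode)
	{
	        if (!(open_flag & O_CREAT)) {
	                struct dentry *res = foo_lookup(dir, dentry); /* hypothetical */
	                return finish_no_open(file, res);   /* now returns 0, was 1 */
	        }
	        /* ... create the inode ... */
	        return finish_open(file, dentry, foo_open); /* 'opened' arg is gone */
	}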
@@ -896,16 +874,10 @@ EXPORT_SYMBOL(file_path);
* @file: newly allocated file with f_flag initialized
- * @cred: credentials to use
*/
-int vfs_open(const struct path *path, struct file *file,
- const struct cred *cred)
+int vfs_open(const struct path *path, struct file *file)
{
- struct dentry *dentry = d_real(path->dentry, NULL, file->f_flags, 0);
-
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
-
file->f_path = *path;
- return do_dentry_open(file, d_backing_inode(dentry), NULL, cred);
+ return do_dentry_open(file, d_backing_inode(path->dentry), NULL);
}
struct file *dentry_open(const struct path *path, int flags,
@@ -919,19 +891,11 @@ struct file *dentry_open(const struct path *path, int flags,
/* We must always pass in a valid mount pointer. */
BUG_ON(!path->mnt);
- f = get_empty_filp();
+ f = alloc_empty_file(flags, cred);
if (!IS_ERR(f)) {
- f->f_flags = flags;
- error = vfs_open(path, f, cred);
- if (!error) {
- /* from now on we need fput() to dispose of f */
- error = open_check_o_direct(f);
- if (error) {
- fput(f);
- f = ERR_PTR(error);
- }
- } else {
- put_filp(f);
+ error = vfs_open(path, f);
+ if (error) {
+ fput(f);
f = ERR_PTR(error);
}
}
@@ -939,6 +903,24 @@ struct file *dentry_open(const struct path *path, int flags,
}
EXPORT_SYMBOL(dentry_open);
+struct file *open_with_fake_path(const struct path *path, int flags,
+ struct inode *inode, const struct cred *cred)
+{
+ struct file *f = alloc_empty_file_noaccount(flags, cred);
+ if (!IS_ERR(f)) {
+ int error;
+
+ f->f_path = *path;
+ error = do_dentry_open(f, inode, NULL);
+ if (error) {
+ fput(f);
+ f = ERR_PTR(error);
+ }
+ }
+ return f;
+}
+EXPORT_SYMBOL(open_with_fake_path);
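The consumer of this new export is the overlayfs file.c added by this series (see the Makefile hunk below); sketched roughly from that usage, not verbatim:

	/* open the real underlying inode, but keep the overlay f_path so
	 * /proc/<pid>/fd keeps showing the overlay view */
	realfile = open_with_fake_path(&file->f_path, flags | O_NOATIME,
	                               realinode, current_cred());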
+
static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
{
int lookup_flags = 0;
@@ -1063,26 +1045,6 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
}
EXPORT_SYMBOL(file_open_root);
-struct file *filp_clone_open(struct file *oldfile)
-{
- struct file *file;
- int retval;
-
- file = get_empty_filp();
- if (IS_ERR(file))
- return file;
-
- file->f_flags = oldfile->f_flags;
- retval = vfs_open(&oldfile->f_path, file, oldfile->f_cred);
- if (retval) {
- put_filp(file);
- return ERR_PTR(retval);
- }
-
- return file;
-}
-EXPORT_SYMBOL(filp_clone_open);
-
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
struct open_flags op;
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index db0b52187cbc..a5a2fe76568f 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -528,18 +528,19 @@ static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long ar
return ret;
}
-static int orangefs_fault(struct vm_fault *vmf)
+static vm_fault_t orangefs_fault(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
- int rc;
- rc = orangefs_inode_getattr(file->f_mapping->host, 0, 1,
+ int ret;
+
+ ret = orangefs_inode_getattr(file->f_mapping->host, 0, 1,
STATX_SIZE);
- if (rc == -ESTALE)
- rc = -EIO;
- if (rc) {
- gossip_err("%s: orangefs_inode_getattr failed, "
- "rc:%d:.\n", __func__, rc);
- return rc;
+ if (ret == -ESTALE)
+ ret = -EIO;
+ if (ret) {
+ gossip_err("%s: orangefs_inode_getattr failed, ret:%d.\n",
+ __func__, ret);
+ return VM_FAULT_SIGBUS;
}
return filemap_fault(vmf);
}
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 6e4d2af8f5bc..31932879b716 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -251,7 +251,6 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
{
int ret = -ENOENT;
struct inode *inode = path->dentry->d_inode;
- struct orangefs_inode_s *orangefs_inode = NULL;
gossip_debug(GOSSIP_INODE_DEBUG,
"orangefs_getattr: called on %pd\n",
@@ -262,8 +261,6 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
generic_fillattr(inode, stat);
/* override block size reported to stat */
- orangefs_inode = ORANGEFS_I(inode);
-
if (request_mask & STATX_SIZE)
stat->result_mask = STATX_BASIC_STATS;
else
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
index 9384164253ac..2ef91be2a04e 100644
--- a/fs/overlayfs/Kconfig
+++ b/fs/overlayfs/Kconfig
@@ -64,6 +64,7 @@ config OVERLAY_FS_NFS_EXPORT
bool "Overlayfs: turn on NFS export feature by default"
depends on OVERLAY_FS
depends on OVERLAY_FS_INDEX
+ depends on !OVERLAY_FS_METACOPY
help
If this config option is enabled then overlay filesystems will use
the index directory to decode overlay NFS file handles by default.
@@ -103,3 +104,21 @@ config OVERLAY_FS_XINO_AUTO
For more information, see Documentation/filesystems/overlayfs.txt
If unsure, say N.
+
+config OVERLAY_FS_METACOPY
+ bool "Overlayfs: turn on metadata only copy up feature by default"
+ depends on OVERLAY_FS
+ select OVERLAY_FS_REDIRECT_DIR
+ help
+ If this config option is enabled then overlay filesystems will
+ copy up only metadata where appropriate; data copy-up will happen
+ when a file is opened for a WRITE operation. It is still
+ possible to turn off this feature globally with the "metacopy=off"
+ module option or on a filesystem instance basis with the
+ "metacopy=off" mount option.
+
+ Note that this feature is not backward compatible. That is,
+ mounting an overlay which has metacopy-only inodes on a kernel
+ that doesn't support this feature will have unexpected results.
+
+ If unsure, say N.
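As a hedged userspace illustration (not part of this patch), enabling the feature for a single mount uses the "metacopy" mount option named above; the paths here are examples only:

#include <sys/mount.h>

int mount_overlay_metacopy(void)
{
	/* per-instance override of the metacopy default */
	return mount("overlay", "/merged", "overlay", 0,
		     "lowerdir=/lower,upperdir=/upper,workdir=/work,"
		     "metacopy=on");
}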
diff --git a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile
index 30802347a020..46e1ff8ac056 100644
--- a/fs/overlayfs/Makefile
+++ b/fs/overlayfs/Makefile
@@ -4,5 +4,5 @@
obj-$(CONFIG_OVERLAY_FS) += overlay.o
-overlay-objs := super.o namei.o util.o inode.o dir.o readdir.o copy_up.o \
- export.o
+overlay-objs := super.o namei.o util.o inode.o file.o dir.o readdir.o \
+ copy_up.o export.o
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index ddaddb4ce4c3..296037afecdb 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -25,35 +25,20 @@
#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)
-static bool __read_mostly ovl_check_copy_up;
-module_param_named(check_copy_up, ovl_check_copy_up, bool,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(ovl_check_copy_up,
- "Warn on copy-up when causing process also has a R/O fd open");
-
-static int ovl_check_fd(const void *data, struct file *f, unsigned int fd)
+static int ovl_ccup_set(const char *buf, const struct kernel_param *param)
{
- const struct dentry *dentry = data;
-
- if (file_inode(f) == d_inode(dentry))
- pr_warn_ratelimited("overlayfs: Warning: Copying up %pD, but open R/O on fd %u which will cease to be coherent [pid=%d %s]\n",
- f, fd, current->pid, current->comm);
+ pr_warn("overlayfs: \"check_copy_up\" module option is obsolete\n");
return 0;
}
-/*
- * Check the fds open by this process and warn if something like the following
- * scenario is about to occur:
- *
- * fd1 = open("foo", O_RDONLY);
- * fd2 = open("foo", O_RDWR);
- */
-static void ovl_do_check_copy_up(struct dentry *dentry)
+static int ovl_ccup_get(char *buf, const struct kernel_param *param)
{
- if (ovl_check_copy_up)
- iterate_fd(current->files, 0, ovl_check_fd, dentry);
+ return sprintf(buf, "N\n");
}
+module_param_call(check_copy_up, ovl_ccup_set, ovl_ccup_get, NULL, 0644);
+MODULE_PARM_DESC(ovl_check_copy_up, "Obsolete; does nothing");
+
int ovl_copy_xattr(struct dentry *old, struct dentry *new)
{
ssize_t list_size, size, value_size = 0;
@@ -195,6 +180,16 @@ out_fput:
return error;
}
+static int ovl_set_size(struct dentry *upperdentry, struct kstat *stat)
+{
+ struct iattr attr = {
+ .ia_valid = ATTR_SIZE,
+ .ia_size = stat->size,
+ };
+
+ return notify_change(upperdentry, &attr, NULL);
+}
+
static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
{
struct iattr attr = {
@@ -403,6 +398,7 @@ struct ovl_copy_up_ctx {
bool tmpfile;
bool origin;
bool indexed;
+ bool metacopy;
};
static int ovl_link_up(struct ovl_copy_up_ctx *c)
@@ -505,28 +501,10 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
{
int err;
- if (S_ISREG(c->stat.mode)) {
- struct path upperpath;
-
- ovl_path_upper(c->dentry, &upperpath);
- BUG_ON(upperpath.dentry != NULL);
- upperpath.dentry = temp;
-
- err = ovl_copy_up_data(&c->lowerpath, &upperpath, c->stat.size);
- if (err)
- return err;
- }
-
err = ovl_copy_xattr(c->lowerpath.dentry, temp);
if (err)
return err;
- inode_lock(temp->d_inode);
- err = ovl_set_attr(temp, &c->stat);
- inode_unlock(temp->d_inode);
- if (err)
- return err;
-
/*
* Store identifier of lower inode in upper inode xattr to
* allow lookup of the copy up origin inode.
@@ -540,7 +518,34 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
return err;
}
- return 0;
+ if (S_ISREG(c->stat.mode) && !c->metacopy) {
+ struct path upperpath, datapath;
+
+ ovl_path_upper(c->dentry, &upperpath);
+ BUG_ON(upperpath.dentry != NULL);
+ upperpath.dentry = temp;
+
+ ovl_path_lowerdata(c->dentry, &datapath);
+ err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
+ if (err)
+ return err;
+ }
+
+ if (c->metacopy) {
+ err = ovl_check_setxattr(c->dentry, temp, OVL_XATTR_METACOPY,
+ NULL, 0, -EOPNOTSUPP);
+ if (err)
+ return err;
+ }
+
+ inode_lock(temp->d_inode);
+ if (c->metacopy)
+ err = ovl_set_size(temp, &c->stat);
+ if (!err)
+ err = ovl_set_attr(temp, &c->stat);
+ inode_unlock(temp->d_inode);
+
+ return err;
}
static int ovl_copy_up_locked(struct ovl_copy_up_ctx *c)
@@ -575,6 +580,8 @@ static int ovl_copy_up_locked(struct ovl_copy_up_ctx *c)
if (err)
goto out;
+ if (!c->metacopy)
+ ovl_set_upperdata(d_inode(c->dentry));
inode = d_inode(c->dentry);
ovl_inode_update(inode, newdentry);
if (S_ISDIR(inode->i_mode))
@@ -677,6 +684,49 @@ out:
return err;
}
+static bool ovl_need_meta_copy_up(struct dentry *dentry, umode_t mode,
+ int flags)
+{
+ struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+
+ if (!ofs->config.metacopy)
+ return false;
+
+ if (!S_ISREG(mode))
+ return false;
+
+ if (flags && ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC)))
+ return false;
+
+ return true;
+}
+
+/* Copy up data of an inode which was copied up metadata only in the past. */
+static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
+{
+ struct path upperpath, datapath;
+ int err;
+
+ ovl_path_upper(c->dentry, &upperpath);
+ if (WARN_ON(upperpath.dentry == NULL))
+ return -EIO;
+
+ ovl_path_lowerdata(c->dentry, &datapath);
+ if (WARN_ON(datapath.dentry == NULL))
+ return -EIO;
+
+ err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
+ if (err)
+ return err;
+
+ err = vfs_removexattr(upperpath.dentry, OVL_XATTR_METACOPY);
+ if (err)
+ return err;
+
+ ovl_set_upperdata(d_inode(c->dentry));
+ return err;
+}
+
static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
int flags)
{
@@ -698,6 +748,8 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
if (err)
return err;
+ ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags);
+
if (parent) {
ovl_path_upper(parent, &parentpath);
ctx.destdir = parentpath.dentry;
@@ -719,9 +771,8 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
if (IS_ERR(ctx.link))
return PTR_ERR(ctx.link);
}
- ovl_do_check_copy_up(ctx.lowerpath.dentry);
- err = ovl_copy_up_start(dentry);
+ err = ovl_copy_up_start(dentry, flags);
/* err < 0: interrupted, err > 0: raced with another copy-up */
if (unlikely(err)) {
if (err > 0)
@@ -731,6 +782,8 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
err = ovl_do_copy_up(&ctx);
if (!err && parent && !ovl_dentry_has_upper_alias(dentry))
err = ovl_link_up(&ctx);
+ if (!err && ovl_dentry_needs_data_copy_up_locked(dentry, flags))
+ err = ovl_copy_up_meta_inode_data(&ctx);
ovl_copy_up_end(dentry);
}
do_delayed_call(&done);
@@ -756,21 +809,7 @@ int ovl_copy_up_flags(struct dentry *dentry, int flags)
struct dentry *next;
struct dentry *parent = NULL;
- /*
- * Check if copy-up has happened as well as for upper alias (in
- * case of hard links) is there.
- *
- * Both checks are lockless:
- * - false negatives: will recheck under oi->lock
- * - false positives:
- * + ovl_dentry_upper() uses memory barriers to ensure the
- * upper dentry is up-to-date
- * + ovl_dentry_has_upper_alias() relies on locking of
- * upper parent i_rwsem to prevent reordering copy-up
- * with rename.
- */
- if (ovl_dentry_upper(dentry) &&
- (ovl_dentry_has_upper_alias(dentry) || disconnected))
+ if (ovl_already_copied_up(dentry, flags))
break;
next = dget(dentry);
@@ -795,6 +834,41 @@ int ovl_copy_up_flags(struct dentry *dentry, int flags)
return err;
}
+static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
+{
+ /* Copy up of disconnected dentry does not set upper alias */
+ if (ovl_already_copied_up(dentry, flags))
+ return false;
+
+ if (special_file(d_inode(dentry)->i_mode))
+ return false;
+
+ if (!ovl_open_flags_need_copy_up(flags))
+ return false;
+
+ return true;
+}
+
+int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
+{
+ int err = 0;
+
+ if (ovl_open_need_copy_up(dentry, file_flags)) {
+ err = ovl_want_write(dentry);
+ if (!err) {
+ err = ovl_copy_up_flags(dentry, file_flags);
+ ovl_drop_write(dentry);
+ }
+ }
+
+ return err;
+}
+
+int ovl_copy_up_with_data(struct dentry *dentry)
+{
+ return ovl_copy_up_flags(dentry, O_WRONLY);
+}
+
int ovl_copy_up(struct dentry *dentry)
{
return ovl_copy_up_flags(dentry, 0);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index f480b1a2cd2e..276914ae3c60 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -24,6 +24,8 @@ module_param_named(redirect_max, ovl_redirect_max, ushort, 0644);
MODULE_PARM_DESC(ovl_redirect_max,
"Maximum length of absolute redirect xattr value");
+static int ovl_set_redirect(struct dentry *dentry, bool samedir);
+
int ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
{
int err;
@@ -242,7 +244,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
.newinode = inode,
};
- ovl_dentry_version_inc(dentry->d_parent, false);
+ ovl_dir_modified(dentry->d_parent, false);
ovl_dentry_set_upper_alias(dentry);
if (!hardlink) {
/*
@@ -601,6 +603,10 @@ static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
if (!inode)
goto out_drop_write;
+ spin_lock(&inode->i_lock);
+ inode->i_state |= I_CREATING;
+ spin_unlock(&inode->i_lock);
+
inode_init_owner(inode, dentry->d_parent->d_inode, mode);
attr.mode = inode->i_mode;
@@ -657,6 +663,12 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
if (err)
goto out_drop_write;
+ if (ovl_is_metacopy_dentry(old)) {
+ err = ovl_set_redirect(old, false);
+ if (err)
+ goto out_drop_write;
+ }
+
err = ovl_nlink_start(old, &locked);
if (err)
goto out_drop_write;
@@ -722,7 +734,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
if (err)
goto out_d_drop;
- ovl_dentry_version_inc(dentry->d_parent, true);
+ ovl_dir_modified(dentry->d_parent, true);
out_d_drop:
d_drop(dentry);
out_dput_upper:
@@ -767,7 +779,7 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir,
err = vfs_rmdir(dir, upper);
else
err = vfs_unlink(dir, upper, NULL);
- ovl_dentry_version_inc(dentry->d_parent, ovl_type_origin(dentry));
+ ovl_dir_modified(dentry->d_parent, ovl_type_origin(dentry));
/*
* Keeping this dentry hashed would mean having to release
@@ -797,6 +809,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
int err;
bool locked = false;
const struct cred *old_cred;
+ struct dentry *upperdentry;
bool lower_positive = ovl_lower_positive(dentry);
LIST_HEAD(list);
@@ -832,6 +845,17 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
drop_nlink(dentry->d_inode);
}
ovl_nlink_end(dentry, locked);
+
+ /*
+ * Copy ctime
+ *
+ * Note: we fail to update ctime if there was no copy-up, only a
+ * whiteout
+ */
+ upperdentry = ovl_dentry_upper(dentry);
+ if (upperdentry)
+ ovl_copyattr(d_inode(upperdentry), d_inode(dentry));
+
out_drop_write:
ovl_drop_write(dentry);
out:
@@ -862,13 +886,13 @@ static bool ovl_can_move(struct dentry *dentry)
!d_is_dir(dentry) || !ovl_type_merge_or_lower(dentry);
}
-static char *ovl_get_redirect(struct dentry *dentry, bool samedir)
+static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect)
{
char *buf, *ret;
struct dentry *d, *tmp;
int buflen = ovl_redirect_max + 1;
- if (samedir) {
+ if (!abs_redirect) {
ret = kstrndup(dentry->d_name.name, dentry->d_name.len,
GFP_KERNEL);
goto out;
@@ -922,15 +946,43 @@ out:
return ret ? ret : ERR_PTR(-ENOMEM);
}
+static bool ovl_need_absolute_redirect(struct dentry *dentry, bool samedir)
+{
+ struct dentry *lowerdentry;
+
+ if (!samedir)
+ return true;
+
+ if (d_is_dir(dentry))
+ return false;
+
+ /*
+ * For non-dir hardlinked files we need absolute redirects
+ * in general, as two upper hardlinks could be in different
+ * dirs. We could put a relative redirect now and convert
+ * it to an absolute redirect later. But when nlink > 1 and
+ * indexing is on, that relative redirect would need to be
+ * converted to absolute during copy up of another lower
+ * hardlink as well.
+ *
+ * So without optimizing too much, just check whether lower
+ * is a hard link or not. If it is, put an absolute
+ * redirect.
+ */
+ lowerdentry = ovl_dentry_lower(dentry);
+ return (d_inode(lowerdentry)->i_nlink > 1);
+}
+
static int ovl_set_redirect(struct dentry *dentry, bool samedir)
{
int err;
const char *redirect = ovl_dentry_get_redirect(dentry);
+ bool absolute_redirect = ovl_need_absolute_redirect(dentry, samedir);
- if (redirect && (samedir || redirect[0] == '/'))
+ if (redirect && (!absolute_redirect || redirect[0] == '/'))
return 0;
- redirect = ovl_get_redirect(dentry, samedir);
+ redirect = ovl_get_redirect(dentry, absolute_redirect);
if (IS_ERR(redirect))
return PTR_ERR(redirect);
@@ -1106,22 +1158,20 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
goto out_dput;
err = 0;
- if (is_dir) {
- if (ovl_type_merge_or_lower(old))
- err = ovl_set_redirect(old, samedir);
- else if (!old_opaque && ovl_type_merge(new->d_parent))
- err = ovl_set_opaque_xerr(old, olddentry, -EXDEV);
- if (err)
- goto out_dput;
- }
- if (!overwrite && new_is_dir) {
- if (ovl_type_merge_or_lower(new))
- err = ovl_set_redirect(new, samedir);
- else if (!new_opaque && ovl_type_merge(old->d_parent))
- err = ovl_set_opaque_xerr(new, newdentry, -EXDEV);
- if (err)
- goto out_dput;
- }
+ if (ovl_type_merge_or_lower(old))
+ err = ovl_set_redirect(old, samedir);
+ else if (is_dir && !old_opaque && ovl_type_merge(new->d_parent))
+ err = ovl_set_opaque_xerr(old, olddentry, -EXDEV);
+ if (err)
+ goto out_dput;
+
+ if (!overwrite && ovl_type_merge_or_lower(new))
+ err = ovl_set_redirect(new, samedir);
+ else if (!overwrite && new_is_dir && !new_opaque &&
+ ovl_type_merge(old->d_parent))
+ err = ovl_set_opaque_xerr(new, newdentry, -EXDEV);
+ if (err)
+ goto out_dput;
err = ovl_do_rename(old_upperdir->d_inode, olddentry,
new_upperdir->d_inode, newdentry, flags);
@@ -1138,10 +1188,15 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
drop_nlink(d_inode(new));
}
- ovl_dentry_version_inc(old->d_parent, ovl_type_origin(old) ||
- (!overwrite && ovl_type_origin(new)));
- ovl_dentry_version_inc(new->d_parent, ovl_type_origin(old) ||
- (d_inode(new) && ovl_type_origin(new)));
+ ovl_dir_modified(old->d_parent, ovl_type_origin(old) ||
+ (!overwrite && ovl_type_origin(new)));
+ ovl_dir_modified(new->d_parent, ovl_type_origin(old) ||
+ (d_inode(new) && ovl_type_origin(new)));
+
+ /* copy ctime: */
+ ovl_copyattr(d_inode(olddentry), d_inode(old));
+ if (d_inode(new) && ovl_dentry_upper(new))
+ ovl_copyattr(d_inode(newdentry), d_inode(new));
out_dput:
dput(newdentry);
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 9941ece61a14..8fa37cd7818a 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -317,6 +317,9 @@ static struct dentry *ovl_obtain_alias(struct super_block *sb,
return ERR_CAST(inode);
}
+ if (upper)
+ ovl_set_flag(OVL_UPPERDATA, inode);
+
dentry = d_find_any_alias(inode);
if (!dentry) {
dentry = d_alloc_anon(inode->i_sb);
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
new file mode 100644
index 000000000000..32e9282893c9
--- /dev/null
+++ b/fs/overlayfs/file.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (C) 2017 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/cred.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/xattr.h>
+#include <linux/uio.h>
+#include "overlayfs.h"
+
+static char ovl_whatisit(struct inode *inode, struct inode *realinode)
+{
+ if (realinode != ovl_inode_upper(inode))
+ return 'l';
+ if (ovl_has_upperdata(inode))
+ return 'u';
+ else
+ return 'm';
+}
+
+static struct file *ovl_open_realfile(const struct file *file,
+ struct inode *realinode)
+{
+ struct inode *inode = file_inode(file);
+ struct file *realfile;
+ const struct cred *old_cred;
+
+ old_cred = ovl_override_creds(inode->i_sb);
+ realfile = open_with_fake_path(&file->f_path, file->f_flags | O_NOATIME,
+ realinode, current_cred());
+ revert_creds(old_cred);
+
+ pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
+ file, file, ovl_whatisit(inode, realinode), file->f_flags,
+ realfile, IS_ERR(realfile) ? 0 : realfile->f_flags);
+
+ return realfile;
+}
+
+#define OVL_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)
+
+static int ovl_change_flags(struct file *file, unsigned int flags)
+{
+ struct inode *inode = file_inode(file);
+ int err;
+
+ /* No atime modification on the underlying file */
+ flags |= O_NOATIME;
+
+ /* If a flag that cannot be changed has changed, something's amiss */
+ if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK))
+ return -EIO;
+
+ flags &= OVL_SETFL_MASK;
+
+ if (((flags ^ file->f_flags) & O_APPEND) && IS_APPEND(inode))
+ return -EPERM;
+
+ if (flags & O_DIRECT) {
+ if (!file->f_mapping->a_ops ||
+ !file->f_mapping->a_ops->direct_IO)
+ return -EINVAL;
+ }
+
+ if (file->f_op->check_flags) {
+ err = file->f_op->check_flags(flags);
+ if (err)
+ return err;
+ }
+
+ spin_lock(&file->f_lock);
+ file->f_flags = (file->f_flags & ~OVL_SETFL_MASK) | flags;
+ spin_unlock(&file->f_lock);
+
+ return 0;
+}
+
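A hedged userspace-side sketch of what OVL_SETFL_MASK covers: flags changed with fcntl(F_SETFL) on an overlay fd (O_APPEND here) are propagated to the already-open real file by ovl_change_flags() on the next access:

#include <fcntl.h>

static int set_append(int fd)
{
	int fl = fcntl(fd, F_GETFL);

	if (fl < 0)
		return -1;
	/* O_APPEND is within OVL_SETFL_MASK, so the real file follows suit */
	return fcntl(fd, F_SETFL, fl | O_APPEND);
}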
+static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
+ bool allow_meta)
+{
+ struct inode *inode = file_inode(file);
+ struct inode *realinode;
+
+ real->flags = 0;
+ real->file = file->private_data;
+
+ if (allow_meta)
+ realinode = ovl_inode_real(inode);
+ else
+ realinode = ovl_inode_realdata(inode);
+
+ /* Has it been copied up since we opened it? */
+ if (unlikely(file_inode(real->file) != realinode)) {
+ real->flags = FDPUT_FPUT;
+ real->file = ovl_open_realfile(file, realinode);
+
+ return PTR_ERR_OR_ZERO(real->file);
+ }
+
+ /* Did the flags change since open? */
+ if (unlikely((file->f_flags ^ real->file->f_flags) & ~O_NOATIME))
+ return ovl_change_flags(real->file, file->f_flags);
+
+ return 0;
+}
+
+static int ovl_real_fdget(const struct file *file, struct fd *real)
+{
+ return ovl_real_fdget_meta(file, real, false);
+}
+
+static int ovl_open(struct inode *inode, struct file *file)
+{
+ struct dentry *dentry = file_dentry(file);
+ struct file *realfile;
+ int err;
+
+ err = ovl_open_maybe_copy_up(dentry, file->f_flags);
+ if (err)
+ return err;
+
+ /* No longer need these flags, so don't pass them on to underlying fs */
+ file->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
+
+ realfile = ovl_open_realfile(file, ovl_inode_realdata(inode));
+ if (IS_ERR(realfile))
+ return PTR_ERR(realfile);
+
+ /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
+ file->f_mapping = realfile->f_mapping;
+
+ file->private_data = realfile;
+
+ return 0;
+}
+
+static int ovl_release(struct inode *inode, struct file *file)
+{
+ fput(file->private_data);
+
+ return 0;
+}
+
+static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct inode *realinode = ovl_inode_real(file_inode(file));
+
+ return generic_file_llseek_size(file, offset, whence,
+ realinode->i_sb->s_maxbytes,
+ i_size_read(realinode));
+}
+
+static void ovl_file_accessed(struct file *file)
+{
+ struct inode *inode, *upperinode;
+
+ if (file->f_flags & O_NOATIME)
+ return;
+
+ inode = file_inode(file);
+ upperinode = ovl_inode_upper(inode);
+
+ if (!upperinode)
+ return;
+
+ if ((!timespec64_equal(&inode->i_mtime, &upperinode->i_mtime) ||
+ !timespec64_equal(&inode->i_ctime, &upperinode->i_ctime))) {
+ inode->i_mtime = upperinode->i_mtime;
+ inode->i_ctime = upperinode->i_ctime;
+ }
+
+ touch_atime(&file->f_path);
+}
+
+static rwf_t ovl_iocb_to_rwf(struct kiocb *iocb)
+{
+ int ifl = iocb->ki_flags;
+ rwf_t flags = 0;
+
+ if (ifl & IOCB_NOWAIT)
+ flags |= RWF_NOWAIT;
+ if (ifl & IOCB_HIPRI)
+ flags |= RWF_HIPRI;
+ if (ifl & IOCB_DSYNC)
+ flags |= RWF_DSYNC;
+ if (ifl & IOCB_SYNC)
+ flags |= RWF_SYNC;
+
+ return flags;
+}
+
+static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct fd real;
+ const struct cred *old_cred;
+ ssize_t ret;
+
+ if (!iov_iter_count(iter))
+ return 0;
+
+ ret = ovl_real_fdget(file, &real);
+ if (ret)
+ return ret;
+
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
+ ovl_iocb_to_rwf(iocb));
+ revert_creds(old_cred);
+
+ ovl_file_accessed(file);
+
+ fdput(real);
+
+ return ret;
+}
+
+static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct fd real;
+ const struct cred *old_cred;
+ ssize_t ret;
+
+ if (!iov_iter_count(iter))
+ return 0;
+
+ inode_lock(inode);
+ /* Update mode */
+ ovl_copyattr(ovl_inode_real(inode), inode);
+ ret = file_remove_privs(file);
+ if (ret)
+ goto out_unlock;
+
+ ret = ovl_real_fdget(file, &real);
+ if (ret)
+ goto out_unlock;
+
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
+ ovl_iocb_to_rwf(iocb));
+ revert_creds(old_cred);
+
+ /* Update size */
+ ovl_copyattr(ovl_inode_real(inode), inode);
+
+ fdput(real);
+
+out_unlock:
+ inode_unlock(inode);
+
+ return ret;
+}
+
+static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ struct fd real;
+ const struct cred *old_cred;
+ int ret;
+
+ ret = ovl_real_fdget_meta(file, &real, !datasync);
+ if (ret)
+ return ret;
+
+ /* Don't sync the lower file, for fear of receiving an EROFS error */
+ if (file_inode(real.file) == ovl_inode_upper(file_inode(file))) {
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ ret = vfs_fsync_range(real.file, start, end, datasync);
+ revert_creds(old_cred);
+ }
+
+ fdput(real);
+
+ return ret;
+}
+
+static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct file *realfile = file->private_data;
+ const struct cred *old_cred;
+ int ret;
+
+ if (!realfile->f_op->mmap)
+ return -ENODEV;
+
+ if (WARN_ON(file != vma->vm_file))
+ return -EIO;
+
+ vma->vm_file = get_file(realfile);
+
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ ret = call_mmap(vma->vm_file, vma);
+ revert_creds(old_cred);
+
+ if (ret) {
+ /* Drop reference count from new vm_file value */
+ fput(realfile);
+ } else {
+ /* Drop reference count from previous vm_file value */
+ fput(file);
+ }
+
+ ovl_file_accessed(file);
+
+ return ret;
+}
+
+static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+{
+ struct inode *inode = file_inode(file);
+ struct fd real;
+ const struct cred *old_cred;
+ int ret;
+
+ ret = ovl_real_fdget(file, &real);
+ if (ret)
+ return ret;
+
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ ret = vfs_fallocate(real.file, mode, offset, len);
+ revert_creds(old_cred);
+
+ /* Update size */
+ ovl_copyattr(ovl_inode_real(inode), inode);
+
+ fdput(real);
+
+ return ret;
+}
+
+static long ovl_real_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct fd real;
+ const struct cred *old_cred;
+ long ret;
+
+ ret = ovl_real_fdget(file, &real);
+ if (ret)
+ return ret;
+
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ ret = vfs_ioctl(real.file, cmd, arg);
+ revert_creds(old_cred);
+
+ fdput(real);
+
+ return ret;
+}
+
+static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+ struct inode *inode = file_inode(file);
+
+ switch (cmd) {
+ case FS_IOC_GETFLAGS:
+ ret = ovl_real_ioctl(file, cmd, arg);
+ break;
+
+ case FS_IOC_SETFLAGS:
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+
+ ret = ovl_copy_up_with_data(file_dentry(file));
+ if (!ret) {
+ ret = ovl_real_ioctl(file, cmd, arg);
+
+ inode_lock(inode);
+ ovl_copyflags(ovl_inode_real(inode), inode);
+ inode_unlock(inode);
+ }
+
+ mnt_drop_write_file(file);
+ break;
+
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+static long ovl_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return ovl_ioctl(file, cmd, arg);
+}
+
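An illustrative caller for the ioctl path above (hypothetical userspace code): FS_IOC_SETFLAGS through an overlay fd first forces a full data copy-up via ovl_copy_up_with_data(), then propagates the flags to the real inode:

#include <sys/ioctl.h>
#include <linux/fs.h>

static int set_immutable(int fd)
{
	int attr;

	if (ioctl(fd, FS_IOC_GETFLAGS, &attr) < 0)
		return -1;
	attr |= FS_IMMUTABLE_FL;
	return ioctl(fd, FS_IOC_SETFLAGS, &attr);
}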
+enum ovl_copyop {
+ OVL_COPY,
+ OVL_CLONE,
+ OVL_DEDUPE,
+};
+
+static ssize_t ovl_copyfile(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ u64 len, unsigned int flags, enum ovl_copyop op)
+{
+ struct inode *inode_out = file_inode(file_out);
+ struct fd real_in, real_out;
+ const struct cred *old_cred;
+ ssize_t ret;
+
+ ret = ovl_real_fdget(file_out, &real_out);
+ if (ret)
+ return ret;
+
+ ret = ovl_real_fdget(file_in, &real_in);
+ if (ret) {
+ fdput(real_out);
+ return ret;
+ }
+
+ old_cred = ovl_override_creds(file_inode(file_out)->i_sb);
+ switch (op) {
+ case OVL_COPY:
+ ret = vfs_copy_file_range(real_in.file, pos_in,
+ real_out.file, pos_out, len, flags);
+ break;
+
+ case OVL_CLONE:
+ ret = vfs_clone_file_range(real_in.file, pos_in,
+ real_out.file, pos_out, len);
+ break;
+
+ case OVL_DEDUPE:
+ ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
+ real_out.file, pos_out, len);
+ break;
+ }
+ revert_creds(old_cred);
+
+ /* Update size */
+ ovl_copyattr(ovl_inode_real(inode_out), inode_out);
+
+ fdput(real_in);
+ fdput(real_out);
+
+ return ret;
+}
+
+static ssize_t ovl_copy_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ size_t len, unsigned int flags)
+{
+ return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, flags,
+ OVL_COPY);
+}
+
+static int ovl_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, u64 len)
+{
+ return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, 0,
+ OVL_CLONE);
+}
+
+static int ovl_dedupe_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, u64 len)
+{
+ /*
+ * Don't copy up because of a dedupe request; this wouldn't make sense
+ * most of the time (data would be duplicated instead of deduplicated).
+ */
+ if (!ovl_inode_upper(file_inode(file_in)) ||
+ !ovl_inode_upper(file_inode(file_out)))
+ return -EPERM;
+
+ return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, 0,
+ OVL_DEDUPE);
+}
+
+const struct file_operations ovl_file_operations = {
+ .open = ovl_open,
+ .release = ovl_release,
+ .llseek = ovl_llseek,
+ .read_iter = ovl_read_iter,
+ .write_iter = ovl_write_iter,
+ .fsync = ovl_fsync,
+ .mmap = ovl_mmap,
+ .fallocate = ovl_fallocate,
+ .unlocked_ioctl = ovl_ioctl,
+ .compat_ioctl = ovl_compat_ioctl,
+
+ .copy_file_range = ovl_copy_file_range,
+ .clone_file_range = ovl_clone_file_range,
+ .dedupe_file_range = ovl_dedupe_file_range,
+};
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index ed16a898caeb..e0bb217c01e2 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -19,18 +19,10 @@
int ovl_setattr(struct dentry *dentry, struct iattr *attr)
{
int err;
+ bool full_copy_up = false;
struct dentry *upperdentry;
const struct cred *old_cred;
- /*
- * Check for permissions before trying to copy-up. This is redundant
- * since it will be rechecked later by ->setattr() on upper dentry. But
- * without this, copy-up can be triggered by just about anybody.
- *
- * We don't initialize inode->size, which just means that
- * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
- * check for a swapfile (which this won't be anyway).
- */
err = setattr_prepare(dentry, attr);
if (err)
return err;
@@ -39,10 +31,33 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
goto out;
- err = ovl_copy_up(dentry);
+ if (attr->ia_valid & ATTR_SIZE) {
+ struct inode *realinode = d_inode(ovl_dentry_real(dentry));
+
+ err = -ETXTBSY;
+ if (atomic_read(&realinode->i_writecount) < 0)
+ goto out_drop_write;
+
+ /* Truncate should trigger data copy up as well */
+ full_copy_up = true;
+ }
+
+ if (!full_copy_up)
+ err = ovl_copy_up(dentry);
+ else
+ err = ovl_copy_up_with_data(dentry);
if (!err) {
+ struct inode *winode = NULL;
+
upperdentry = ovl_dentry_upper(dentry);
+ if (attr->ia_valid & ATTR_SIZE) {
+ winode = d_inode(upperdentry);
+ err = get_write_access(winode);
+ if (err)
+ goto out_drop_write;
+ }
+
if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
attr->ia_valid &= ~ATTR_MODE;
@@ -53,7 +68,11 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
if (!err)
ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
inode_unlock(upperdentry->d_inode);
+
+ if (winode)
+ put_write_access(winode);
}
+out_drop_write:
ovl_drop_write(dentry);
out:
return err;
@@ -133,6 +152,9 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
bool samefs = ovl_same_sb(dentry->d_sb);
struct ovl_layer *lower_layer = NULL;
int err;
+ bool metacopy_blocks = false;
+
+ metacopy_blocks = ovl_is_metacopy_dentry(dentry);
type = ovl_path_real(dentry, &realpath);
old_cred = ovl_override_creds(dentry->d_sb);
@@ -154,7 +176,8 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
lower_layer = ovl_layer_lower(dentry);
} else if (OVL_TYPE_ORIGIN(type)) {
struct kstat lowerstat;
- u32 lowermask = STATX_INO | (!is_dir ? STATX_NLINK : 0);
+ u32 lowermask = STATX_INO | STATX_BLOCKS |
+ (!is_dir ? STATX_NLINK : 0);
ovl_path_lower(dentry, &realpath);
err = vfs_getattr(&realpath, &lowerstat,
@@ -183,6 +206,35 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
stat->ino = lowerstat.ino;
lower_layer = ovl_layer_lower(dentry);
}
+
+ /*
+ * If we are querying a metacopy dentry and the lower
+ * dentry is the data dentry, then use the blocks we
+ * queried just now; we don't have to do an additional
+ * vfs_getattr(). If lower itself is metacopy, an
+ * additional vfs_getattr() is unavoidable.
+ */
+ if (metacopy_blocks &&
+ realpath.dentry == ovl_dentry_lowerdata(dentry)) {
+ stat->blocks = lowerstat.blocks;
+ metacopy_blocks = false;
+ }
+ }
+
+ if (metacopy_blocks) {
+ /*
+ * If lower is not the same as lowerdata, or if there
+ * was no origin on upper, we can end up here.
+ */
+ struct kstat lowerdatastat;
+ u32 lowermask = STATX_BLOCKS;
+
+ ovl_path_lowerdata(dentry, &realpath);
+ err = vfs_getattr(&realpath, &lowerdatastat,
+ lowermask, flags);
+ if (err)
+ goto out;
+ stat->blocks = lowerdatastat.blocks;
}
}
@@ -304,6 +356,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
}
revert_creds(old_cred);
+ /* copy c/mtime */
+ ovl_copyattr(d_inode(realdentry), inode);
+
out_drop_write:
ovl_drop_write(dentry);
out:
@@ -384,38 +439,6 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type)
return acl;
}
-static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
-{
- /* Copy up of disconnected dentry does not set upper alias */
- if (ovl_dentry_upper(dentry) &&
- (ovl_dentry_has_upper_alias(dentry) ||
- (dentry->d_flags & DCACHE_DISCONNECTED)))
- return false;
-
- if (special_file(d_inode(dentry)->i_mode))
- return false;
-
- if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC))
- return false;
-
- return true;
-}
-
-int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
-{
- int err = 0;
-
- if (ovl_open_need_copy_up(dentry, file_flags)) {
- err = ovl_want_write(dentry);
- if (!err) {
- err = ovl_copy_up_flags(dentry, file_flags);
- ovl_drop_write(dentry);
- }
- }
-
- return err;
-}
-
int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
{
if (flags & S_ATIME) {
@@ -433,6 +456,23 @@ int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
return 0;
}
+static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len)
+{
+ int err;
+ struct inode *realinode = ovl_inode_real(inode);
+ const struct cred *old_cred;
+
+ if (!realinode->i_op->fiemap)
+ return -EOPNOTSUPP;
+
+ old_cred = ovl_override_creds(inode->i_sb);
+ err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
+ revert_creds(old_cred);
+
+ return err;
+}
+
static const struct inode_operations ovl_file_inode_operations = {
.setattr = ovl_setattr,
.permission = ovl_permission,
@@ -440,6 +480,7 @@ static const struct inode_operations ovl_file_inode_operations = {
.listxattr = ovl_listxattr,
.get_acl = ovl_get_acl,
.update_time = ovl_update_time,
+ .fiemap = ovl_fiemap,
};
static const struct inode_operations ovl_symlink_inode_operations = {
@@ -450,6 +491,15 @@ static const struct inode_operations ovl_symlink_inode_operations = {
.update_time = ovl_update_time,
};
+static const struct inode_operations ovl_special_inode_operations = {
+ .setattr = ovl_setattr,
+ .permission = ovl_permission,
+ .getattr = ovl_getattr,
+ .listxattr = ovl_listxattr,
+ .get_acl = ovl_get_acl,
+ .update_time = ovl_update_time,
+};
+
/*
* It is possible to stack an overlayfs instance on top of another
* overlayfs instance as a lower layer. We need to annotate the
@@ -520,6 +570,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
switch (mode & S_IFMT) {
case S_IFREG:
inode->i_op = &ovl_file_inode_operations;
+ inode->i_fop = &ovl_file_operations;
break;
case S_IFDIR:
@@ -532,7 +583,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
break;
default:
- inode->i_op = &ovl_file_inode_operations;
+ inode->i_op = &ovl_special_inode_operations;
init_special_inode(inode, mode, rdev);
break;
}
@@ -769,8 +820,9 @@ struct inode *ovl_get_inode(struct super_block *sb,
bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
oip->index);
int fsid = bylower ? oip->lowerpath->layer->fsid : 0;
- bool is_dir;
+ bool is_dir, metacopy = false;
unsigned long ino = 0;
+ int err = -ENOMEM;
if (!realinode)
realinode = d_inode(lowerdentry);
@@ -787,7 +839,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
inode = ovl_iget5(sb, oip->newinode, key);
if (!inode)
- goto out_nomem;
+ goto out_err;
if (!(inode->i_state & I_NEW)) {
/*
* Verify that the underlying files stored in the inode
@@ -796,11 +848,12 @@ struct inode *ovl_get_inode(struct super_block *sb,
if (!ovl_verify_inode(inode, lowerdentry, upperdentry,
true)) {
iput(inode);
- inode = ERR_PTR(-ESTALE);
- goto out;
+ err = -ESTALE;
+ goto out_err;
}
dput(upperdentry);
+ kfree(oip->redirect);
goto out;
}
@@ -812,11 +865,13 @@ struct inode *ovl_get_inode(struct super_block *sb,
} else {
/* Lower hardlink that will be broken on copy up */
inode = new_inode(sb);
- if (!inode)
- goto out_nomem;
+ if (!inode) {
+ err = -ENOMEM;
+ goto out_err;
+ }
}
ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev, ino, fsid);
- ovl_inode_init(inode, upperdentry, lowerdentry);
+ ovl_inode_init(inode, upperdentry, lowerdentry, oip->lowerdata);
if (upperdentry && ovl_is_impuredir(upperdentry))
ovl_set_flag(OVL_IMPURE, inode);
@@ -824,6 +879,20 @@ struct inode *ovl_get_inode(struct super_block *sb,
if (oip->index)
ovl_set_flag(OVL_INDEX, inode);
+ if (upperdentry) {
+ err = ovl_check_metacopy_xattr(upperdentry);
+ if (err < 0)
+ goto out_err;
+ metacopy = err;
+ if (!metacopy)
+ ovl_set_flag(OVL_UPPERDATA, inode);
+ }
+
+ OVL_I(inode)->redirect = oip->redirect;
+
+ if (bylower)
+ ovl_set_flag(OVL_CONST_INO, inode);
+
/* Check for non-merge dir that may have whiteouts */
if (is_dir) {
if (((upperdentry && lowerdentry) || oip->numlower > 1) ||
@@ -837,7 +906,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
out:
return inode;
-out_nomem:
- inode = ERR_PTR(-ENOMEM);
+out_err:
+ inode = ERR_PTR(err);
goto out;
}
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index c993dd8db739..f28711846dd6 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -24,38 +24,20 @@ struct ovl_lookup_data {
bool stop;
bool last;
char *redirect;
+ bool metacopy;
};
static int ovl_check_redirect(struct dentry *dentry, struct ovl_lookup_data *d,
size_t prelen, const char *post)
{
int res;
- char *s, *next, *buf = NULL;
+ char *buf;
- res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, NULL, 0);
- if (res < 0) {
- if (res == -ENODATA || res == -EOPNOTSUPP)
- return 0;
- goto fail;
- }
- buf = kzalloc(prelen + res + strlen(post) + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ buf = ovl_get_redirect_xattr(dentry, prelen + strlen(post));
+ if (IS_ERR_OR_NULL(buf))
+ return PTR_ERR(buf);
- if (res == 0)
- goto invalid;
-
- res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, buf, res);
- if (res < 0)
- goto fail;
- if (res == 0)
- goto invalid;
if (buf[0] == '/') {
- for (s = buf; *s++ == '/'; s = next) {
- next = strchrnul(s, '/');
- if (s == next)
- goto invalid;
- }
/*
* One of the ancestor path elements in an absolute path
* lookup in ovl_lookup_layer() could have been opaque and
@@ -66,9 +48,7 @@ static int ovl_check_redirect(struct dentry *dentry, struct ovl_lookup_data *d,
*/
d->stop = false;
} else {
- if (strchr(buf, '/') != NULL)
- goto invalid;
-
+ res = strlen(buf) + 1;
memmove(buf + prelen, buf, res);
memcpy(buf, d->name.name, prelen);
}
@@ -80,16 +60,6 @@ static int ovl_check_redirect(struct dentry *dentry, struct ovl_lookup_data *d,
d->name.len = strlen(d->redirect);
return 0;
-
-err_free:
- kfree(buf);
- return 0;
-fail:
- pr_warn_ratelimited("overlayfs: failed to get redirect (%i)\n", res);
- goto err_free;
-invalid:
- pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
- goto err_free;
}
static int ovl_acceptable(void *ctx, struct dentry *dentry)
@@ -252,28 +222,39 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
d->stop = d->opaque = true;
goto put_and_out;
}
- if (!d_can_lookup(this)) {
+ /*
+ * This dentry should be a regular file if the lookup in a
+ * previous layer found a metacopy dentry.
+ */
+ if (last_element && d->metacopy && !d_is_reg(this)) {
d->stop = true;
- if (d->is_dir)
- goto put_and_out;
-
- /*
- * NB: handle failure to lookup non-last element when non-dir
- * redirects become possible
- */
- WARN_ON(!last_element);
- goto out;
+ goto put_and_out;
}
- if (last_element)
- d->is_dir = true;
- if (d->last)
- goto out;
+ if (!d_can_lookup(this)) {
+ if (d->is_dir || !last_element) {
+ d->stop = true;
+ goto put_and_out;
+ }
+ err = ovl_check_metacopy_xattr(this);
+ if (err < 0)
+ goto out_err;
- if (ovl_is_opaquedir(this)) {
- d->stop = true;
+ d->metacopy = err;
+ d->stop = !d->metacopy;
+ if (!d->metacopy || d->last)
+ goto out;
+ } else {
if (last_element)
- d->opaque = true;
- goto out;
+ d->is_dir = true;
+ if (d->last)
+ goto out;
+
+ if (ovl_is_opaquedir(this)) {
+ d->stop = true;
+ if (last_element)
+ d->opaque = true;
+ goto out;
+ }
}
err = ovl_check_redirect(this, d, prelen, post);
if (err)
@@ -823,7 +804,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
struct ovl_entry *poe = dentry->d_parent->d_fsdata;
struct ovl_entry *roe = dentry->d_sb->s_root->d_fsdata;
- struct ovl_path *stack = NULL;
+ struct ovl_path *stack = NULL, *origin_path = NULL;
struct dentry *upperdir, *upperdentry = NULL;
struct dentry *origin = NULL;
struct dentry *index = NULL;
@@ -834,6 +815,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *this;
unsigned int i;
int err;
+ bool metacopy = false;
struct ovl_lookup_data d = {
.name = dentry->d_name,
.is_dir = false,
@@ -841,6 +823,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
.stop = false,
.last = ofs->config.redirect_follow ? false : !poe->numlower,
.redirect = NULL,
+ .metacopy = false,
};
if (dentry->d_name.len > ofs->namelen)
@@ -859,7 +842,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
if (upperdentry && !d.is_dir) {
- BUG_ON(!d.stop || d.redirect);
+ unsigned int origin_ctr = 0;
+
/*
* Lookup copy up origin by decoding origin file handle.
* We may get a disconnected dentry, which is fine,
@@ -870,9 +854,13 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
* number - it's the same as if we held a reference
* to a dentry in lower layer that was moved under us.
*/
- err = ovl_check_origin(ofs, upperdentry, &stack, &ctr);
+ err = ovl_check_origin(ofs, upperdentry, &origin_path,
+ &origin_ctr);
if (err)
goto out_put_upper;
+
+ if (d.metacopy)
+ metacopy = true;
}
if (d.redirect) {
@@ -913,7 +901,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
* If no origin fh is stored in upper of a merge dir, store fh
* of lower dir and set upper parent "impure".
*/
- if (upperdentry && !ctr && !ofs->noxattr) {
+ if (upperdentry && !ctr && !ofs->noxattr && d.is_dir) {
err = ovl_fix_origin(dentry, this, upperdentry);
if (err) {
dput(this);
@@ -925,18 +913,35 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
* When "verify_lower" feature is enabled, do not merge with a
* lower dir that does not match a stored origin xattr. In any
* case, only verified origin is used for index lookup.
+ *
+ * For a non-dir dentry, if index=on, ensure that the origin
+ * matches the dentry found using path-based lookup;
+ * otherwise error out.
*/
- if (upperdentry && !ctr && ovl_verify_lower(dentry->d_sb)) {
+ if (upperdentry && !ctr &&
+ ((d.is_dir && ovl_verify_lower(dentry->d_sb)) ||
+ (!d.is_dir && ofs->config.index && origin_path))) {
err = ovl_verify_origin(upperdentry, this, false);
if (err) {
dput(this);
- break;
+ if (d.is_dir)
+ break;
+ goto out_put;
}
-
- /* Bless lower dir as verified origin */
origin = this;
}
+ if (d.metacopy)
+ metacopy = true;
+ /*
+ * Do not store intermediate metacopy dentries in the chain,
+ * except for the topmost lower metacopy dentry.
+ */
+ if (d.metacopy && ctr) {
+ dput(this);
+ continue;
+ }
+
stack[ctr].dentry = this;
stack[ctr].layer = lower.layer;
ctr++;
@@ -968,13 +973,48 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
}
}
+ if (metacopy) {
+ /*
+ * Found a metacopy dentry but did not find the
+ * corresponding data dentry.
+ */
+ if (d.metacopy) {
+ err = -EIO;
+ goto out_put;
+ }
+
+ err = -EPERM;
+ if (!ofs->config.metacopy) {
+ pr_warn_ratelimited("overlay: refusing to follow metacopy origin for (%pd2)\n",
+ dentry);
+ goto out_put;
+ }
+ } else if (!d.is_dir && upperdentry && !ctr && origin_path) {
+ if (WARN_ON(stack != NULL)) {
+ err = -EIO;
+ goto out_put;
+ }
+ stack = origin_path;
+ ctr = 1;
+ origin_path = NULL;
+ }
+
/*
* Lookup index by lower inode and verify it matches upper inode.
* We only trust dir index if we verified that lower dir matches
* origin, otherwise dir index entries may be inconsistent and we
- * ignore them. Always lookup index of non-dir and non-upper.
+ * ignore them.
+ *
+ * For a non-dir upper metacopy dentry, we already set "origin" if we
+ * verified that lower matched the upper origin. If the upper origin
+ * was not present (because the lower layer did not support fh
+ * encode/decode), or indexing is not enabled, do not set "origin" and
+ * skip looking up the index. This case should be handled the same way
+ * as a non-dir upper without ORIGIN.
+ *
+ * Always look up the index of a non-dir, non-metacopy, non-upper
+ * dentry.
*/
- if (ctr && (!upperdentry || !d.is_dir))
+ if (ctr && (!upperdentry || (!d.is_dir && !metacopy)))
origin = stack[0].dentry;
if (origin && ovl_indexdir(dentry->d_sb) &&
@@ -1000,8 +1040,15 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
if (upperdentry)
ovl_dentry_set_upper_alias(dentry);
- else if (index)
+ else if (index) {
upperdentry = dget(index);
+ upperredirect = ovl_get_redirect_xattr(upperdentry, 0);
+ if (IS_ERR(upperredirect)) {
+ err = PTR_ERR(upperredirect);
+ upperredirect = NULL;
+ goto out_free_oe;
+ }
+ }
if (upperdentry || ctr) {
struct ovl_inode_params oip = {
@@ -1009,22 +1056,22 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
.lowerpath = stack,
.index = index,
.numlower = ctr,
+ .redirect = upperredirect,
+ .lowerdata = (ctr > 1 && !d.is_dir) ?
+ stack[ctr - 1].dentry : NULL,
};
inode = ovl_get_inode(dentry->d_sb, &oip);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_free_oe;
-
- /*
- * NB: handle redirected hard links when non-dir redirects
- * become possible
- */
- WARN_ON(OVL_I(inode)->redirect);
- OVL_I(inode)->redirect = upperredirect;
}
revert_creds(old_cred);
+ if (origin_path) {
+ dput(origin_path->dentry);
+ kfree(origin_path);
+ }
dput(index);
kfree(stack);
kfree(d.redirect);
@@ -1039,6 +1086,10 @@ out_put:
dput(stack[i].dentry);
kfree(stack);
out_put_upper:
+ if (origin_path) {
+ dput(origin_path->dentry);
+ kfree(origin_path);
+ }
dput(upperdentry);
kfree(upperredirect);
out:
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 7538b9b56237..f61839e1054c 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/uuid.h>
+#include <linux/fs.h>
#include "ovl_entry.h"
enum ovl_path_type {
@@ -28,6 +29,7 @@ enum ovl_path_type {
#define OVL_XATTR_IMPURE OVL_XATTR_PREFIX "impure"
#define OVL_XATTR_NLINK OVL_XATTR_PREFIX "nlink"
#define OVL_XATTR_UPPER OVL_XATTR_PREFIX "upper"
+#define OVL_XATTR_METACOPY OVL_XATTR_PREFIX "metacopy"
enum ovl_inode_flag {
/* Pure upper dir that may contain non pure upper entries */
@@ -35,6 +37,9 @@ enum ovl_inode_flag {
/* Non-merge dir that may contain whiteout entries */
OVL_WHITEOUTS,
OVL_INDEX,
+ OVL_UPPERDATA,
+ /* Inode number will remain constant over copy up. */
+ OVL_CONST_INO,
};
enum ovl_entry_flag {
@@ -190,6 +195,14 @@ static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode)
return ret;
}
+static inline bool ovl_open_flags_need_copy_up(int flags)
+{
+ if (!flags)
+ return false;
+
+ return ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC));
+}
+
/* util.c */
int ovl_want_write(struct dentry *dentry);
void ovl_drop_write(struct dentry *dentry);
@@ -206,15 +219,19 @@ bool ovl_dentry_weird(struct dentry *dentry);
enum ovl_path_type ovl_path_type(struct dentry *dentry);
void ovl_path_upper(struct dentry *dentry, struct path *path);
void ovl_path_lower(struct dentry *dentry, struct path *path);
+void ovl_path_lowerdata(struct dentry *dentry, struct path *path);
enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
struct dentry *ovl_dentry_upper(struct dentry *dentry);
struct dentry *ovl_dentry_lower(struct dentry *dentry);
+struct dentry *ovl_dentry_lowerdata(struct dentry *dentry);
struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
struct dentry *ovl_dentry_real(struct dentry *dentry);
struct dentry *ovl_i_dentry_upper(struct inode *inode);
struct inode *ovl_inode_upper(struct inode *inode);
struct inode *ovl_inode_lower(struct inode *inode);
+struct inode *ovl_inode_lowerdata(struct inode *inode);
struct inode *ovl_inode_real(struct inode *inode);
+struct inode *ovl_inode_realdata(struct inode *inode);
struct ovl_dir_cache *ovl_dir_cache(struct inode *inode);
void ovl_set_dir_cache(struct inode *inode, struct ovl_dir_cache *cache);
void ovl_dentry_set_flag(unsigned long flag, struct dentry *dentry);
@@ -225,18 +242,23 @@ bool ovl_dentry_is_whiteout(struct dentry *dentry);
void ovl_dentry_set_opaque(struct dentry *dentry);
bool ovl_dentry_has_upper_alias(struct dentry *dentry);
void ovl_dentry_set_upper_alias(struct dentry *dentry);
+bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags);
+bool ovl_dentry_needs_data_copy_up_locked(struct dentry *dentry, int flags);
+bool ovl_has_upperdata(struct inode *inode);
+void ovl_set_upperdata(struct inode *inode);
bool ovl_redirect_dir(struct super_block *sb);
const char *ovl_dentry_get_redirect(struct dentry *dentry);
void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect);
void ovl_inode_init(struct inode *inode, struct dentry *upperdentry,
- struct dentry *lowerdentry);
+ struct dentry *lowerdentry, struct dentry *lowerdata);
void ovl_inode_update(struct inode *inode, struct dentry *upperdentry);
-void ovl_dentry_version_inc(struct dentry *dentry, bool impurity);
+void ovl_dir_modified(struct dentry *dentry, bool impurity);
u64 ovl_dentry_version_get(struct dentry *dentry);
bool ovl_is_whiteout(struct dentry *dentry);
struct file *ovl_path_open(struct path *path, int flags);
-int ovl_copy_up_start(struct dentry *dentry);
+int ovl_copy_up_start(struct dentry *dentry, int flags);
void ovl_copy_up_end(struct dentry *dentry);
+bool ovl_already_copied_up(struct dentry *dentry, int flags);
bool ovl_check_origin_xattr(struct dentry *dentry);
bool ovl_check_dir_xattr(struct dentry *dentry, const char *name);
int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
@@ -252,6 +274,9 @@ bool ovl_need_index(struct dentry *dentry);
int ovl_nlink_start(struct dentry *dentry, bool *locked);
void ovl_nlink_end(struct dentry *dentry, bool locked);
int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
+int ovl_check_metacopy_xattr(struct dentry *dentry);
+bool ovl_is_metacopy_dentry(struct dentry *dentry);
+char *ovl_get_redirect_xattr(struct dentry *dentry, int padding);
static inline bool ovl_is_impuredir(struct dentry *dentry)
{
@@ -324,7 +349,6 @@ int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
void *value, size_t size);
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
struct posix_acl *ovl_get_acl(struct inode *inode, int type);
-int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags);
bool ovl_is_private_xattr(const char *name);
@@ -334,6 +358,8 @@ struct ovl_inode_params {
struct ovl_path *lowerpath;
struct dentry *index;
unsigned int numlower;
+ char *redirect;
+ struct dentry *lowerdata;
};
struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
@@ -348,6 +374,14 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
to->i_atime = from->i_atime;
to->i_mtime = from->i_mtime;
to->i_ctime = from->i_ctime;
+ i_size_write(to, i_size_read(from));
+}
+
+static inline void ovl_copyflags(struct inode *from, struct inode *to)
+{
+ unsigned int mask = S_SYNC | S_IMMUTABLE | S_APPEND | S_NOATIME;
+
+ inode_set_flags(to, from->i_flags & mask, mask);
}
/* dir.c */
@@ -368,9 +402,14 @@ struct dentry *ovl_create_real(struct inode *dir, struct dentry *newdentry,
int ovl_cleanup(struct inode *dir, struct dentry *dentry);
struct dentry *ovl_create_temp(struct dentry *workdir, struct ovl_cattr *attr);
+/* file.c */
+extern const struct file_operations ovl_file_operations;
+
/* copy_up.c */
int ovl_copy_up(struct dentry *dentry);
+int ovl_copy_up_with_data(struct dentry *dentry);
int ovl_copy_up_flags(struct dentry *dentry, int flags);
+int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
int ovl_copy_xattr(struct dentry *old, struct dentry *new);
int ovl_set_attr(struct dentry *upper, struct kstat *stat);
struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 41655a7d6894..ec237035333a 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -19,6 +19,7 @@ struct ovl_config {
bool index;
bool nfs_export;
int xino;
+ bool metacopy;
};
struct ovl_sb {
@@ -88,7 +89,10 @@ static inline struct ovl_entry *OVL_E(struct dentry *dentry)
}
struct ovl_inode {
- struct ovl_dir_cache *cache;
+ union {
+ struct ovl_dir_cache *cache; /* directory */
+ struct inode *lowerdata; /* regular file */
+ };
const char *redirect;
u64 version;
unsigned long flags;
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index ef1fe42ff7bb..cc8303a806b4 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -668,6 +668,21 @@ static int ovl_fill_real(struct dir_context *ctx, const char *name,
return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}
+static bool ovl_is_impure_dir(struct file *file)
+{
+ struct ovl_dir_file *od = file->private_data;
+ struct inode *dir = d_inode(file->f_path.dentry);
+
+ /*
+ * Only an upper dir can be impure, but if we are in the middle of
+ * iterating a lower real dir, the dir could be copied up and marked
+ * impure. We only want the impure cache if we started iterating
+ * a real upper dir to begin with.
+ */
+ return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
+}
+
static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
int err;
@@ -696,7 +711,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
rdt.parent_ino = stat.ino;
}
- if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) {
+ if (ovl_is_impure_dir(file)) {
rdt.cache = ovl_cache_get_impure(&file->f_path);
if (IS_ERR(rdt.cache))
return PTR_ERR(rdt.cache);
@@ -727,7 +742,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
*/
if (ovl_xino_bits(dentry->d_sb) ||
(ovl_same_sb(dentry->d_sb) &&
- (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
+ (ovl_is_impure_dir(file) ||
OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
return ovl_iterate_real(file, ctx);
}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 704b37311467..2e0fc93c2c06 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -64,6 +64,11 @@ static void ovl_entry_stack_free(struct ovl_entry *oe)
dput(oe->lowerstack[i].dentry);
}
+static bool ovl_metacopy_def = IS_ENABLED(CONFIG_OVERLAY_FS_METACOPY);
+module_param_named(metacopy, ovl_metacopy_def, bool, 0644);
+MODULE_PARM_DESC(ovl_metacopy_def,
+ "Default to on or off for the metadata only copy up feature");
+
static void ovl_dentry_release(struct dentry *dentry)
{
struct ovl_entry *oe = dentry->d_fsdata;
@@ -74,31 +79,14 @@ static void ovl_dentry_release(struct dentry *dentry)
}
}
-static int ovl_check_append_only(struct inode *inode, int flag)
-{
- /*
- * This test was moot in vfs may_open() because overlay inode does
- * not have the S_APPEND flag, so re-check on real upper inode
- */
- if (IS_APPEND(inode)) {
- if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
- return -EPERM;
- if (flag & O_TRUNC)
- return -EPERM;
- }
-
- return 0;
-}
-
static struct dentry *ovl_d_real(struct dentry *dentry,
- const struct inode *inode,
- unsigned int open_flags, unsigned int flags)
+ const struct inode *inode)
{
struct dentry *real;
- int err;
- if (flags & D_REAL_UPPER)
- return ovl_dentry_upper(dentry);
+ /* It's an overlay file */
+ if (inode && d_inode(dentry) == inode)
+ return dentry;
if (!d_is_reg(dentry)) {
if (!inode || inode == d_inode(dentry))
@@ -106,28 +94,19 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
goto bug;
}
- if (open_flags) {
- err = ovl_open_maybe_copy_up(dentry, open_flags);
- if (err)
- return ERR_PTR(err);
- }
-
real = ovl_dentry_upper(dentry);
- if (real && (!inode || inode == d_inode(real))) {
- if (!inode) {
- err = ovl_check_append_only(d_inode(real), open_flags);
- if (err)
- return ERR_PTR(err);
- }
+ if (real && (inode == d_inode(real)))
+ return real;
+
+ if (real && !inode && ovl_has_upperdata(d_inode(dentry)))
return real;
- }
- real = ovl_dentry_lower(dentry);
+ real = ovl_dentry_lowerdata(dentry);
if (!real)
goto bug;
/* Handle recursion */
- real = d_real(real, inode, open_flags, 0);
+ real = d_real(real, inode);
if (!inode || inode == d_inode(real))
return real;
@@ -205,6 +184,7 @@ static struct inode *ovl_alloc_inode(struct super_block *sb)
oi->flags = 0;
oi->__upperdentry = NULL;
oi->lower = NULL;
+ oi->lowerdata = NULL;
mutex_init(&oi->lock);
return &oi->vfs_inode;
@@ -223,8 +203,11 @@ static void ovl_destroy_inode(struct inode *inode)
dput(oi->__upperdentry);
iput(oi->lower);
+ if (S_ISDIR(inode->i_mode))
+ ovl_dir_cache_free(inode);
+ else
+ iput(oi->lowerdata);
kfree(oi->redirect);
- ovl_dir_cache_free(inode);
mutex_destroy(&oi->lock);
call_rcu(&inode->i_rcu, ovl_i_callback);
@@ -376,6 +359,9 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
"on" : "off");
if (ofs->config.xino != ovl_xino_def())
seq_printf(m, ",xino=%s", ovl_xino_str[ofs->config.xino]);
+ if (ofs->config.metacopy != ovl_metacopy_def)
+ seq_printf(m, ",metacopy=%s",
+ ofs->config.metacopy ? "on" : "off");
return 0;
}
@@ -413,6 +399,8 @@ enum {
OPT_XINO_ON,
OPT_XINO_OFF,
OPT_XINO_AUTO,
+ OPT_METACOPY_ON,
+ OPT_METACOPY_OFF,
OPT_ERR,
};
@@ -429,6 +417,8 @@ static const match_table_t ovl_tokens = {
{OPT_XINO_ON, "xino=on"},
{OPT_XINO_OFF, "xino=off"},
{OPT_XINO_AUTO, "xino=auto"},
+ {OPT_METACOPY_ON, "metacopy=on"},
+ {OPT_METACOPY_OFF, "metacopy=off"},
{OPT_ERR, NULL}
};
@@ -481,6 +471,7 @@ static int ovl_parse_redirect_mode(struct ovl_config *config, const char *mode)
static int ovl_parse_opt(char *opt, struct ovl_config *config)
{
char *p;
+ int err;
config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
if (!config->redirect_mode)
@@ -555,6 +546,14 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
config->xino = OVL_XINO_AUTO;
break;
+ case OPT_METACOPY_ON:
+ config->metacopy = true;
+ break;
+
+ case OPT_METACOPY_OFF:
+ config->metacopy = false;
+ break;
+
default:
pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
return -EINVAL;
@@ -569,7 +568,20 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
config->workdir = NULL;
}
- return ovl_parse_redirect_mode(config, config->redirect_mode);
+ err = ovl_parse_redirect_mode(config, config->redirect_mode);
+ if (err)
+ return err;
+
+ /* metacopy feature with upper requires redirect_dir=on */
+ if (config->upperdir && config->metacopy && !config->redirect_dir) {
+ pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=on\", falling back to metacopy=off.\n");
+ config->metacopy = false;
+ } else if (config->metacopy && !config->redirect_follow) {
+ pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=follow\" on non-upper mount, falling back to metacopy=off.\n");
+ config->metacopy = false;
+ }
+
+ return 0;
}
#define OVL_WORKDIR_NAME "work"
@@ -1042,7 +1054,8 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
if (err) {
ofs->noxattr = true;
ofs->config.index = false;
- pr_warn("overlayfs: upper fs does not support xattr, falling back to index=off.\n");
+ ofs->config.metacopy = false;
+ pr_warn("overlayfs: upper fs does not support xattr, falling back to index=off and metacopy=off.\n");
err = 0;
} else {
vfs_removexattr(ofs->workdir, OVL_XATTR_OPAQUE);
@@ -1064,7 +1077,6 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
pr_warn("overlayfs: NFS export requires \"index=on\", falling back to nfs_export=off.\n");
ofs->config.nfs_export = false;
}
-
out:
mnt_drop_write(mnt);
return err;
@@ -1375,6 +1387,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
ofs->config.index = ovl_index_def;
ofs->config.nfs_export = ovl_nfs_export_def;
ofs->config.xino = ovl_xino_def();
+ ofs->config.metacopy = ovl_metacopy_def;
err = ovl_parse_opt((char *) data, &ofs->config);
if (err)
goto out_err;
@@ -1445,6 +1458,11 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
}
}
+ if (ofs->config.metacopy && ofs->config.nfs_export) {
+ pr_warn("overlayfs: NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n");
+ ofs->config.nfs_export = false;
+ }
+
if (ofs->config.nfs_export)
sb->s_export_op = &ovl_export_operations;
@@ -1455,7 +1473,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &ovl_super_operations;
sb->s_xattr = ovl_xattr_handlers;
sb->s_fs_info = ofs;
- sb->s_flags |= SB_POSIXACL | SB_NOREMOTELOCK;
+ sb->s_flags |= SB_POSIXACL;
err = -ENOMEM;
root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
@@ -1474,8 +1492,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
/* Root is always merge -> can have whiteouts */
ovl_set_flag(OVL_WHITEOUTS, d_inode(root_dentry));
ovl_dentry_set_flag(OVL_E_CONNECTED, root_dentry);
+ ovl_set_upperdata(d_inode(root_dentry));
ovl_inode_init(d_inode(root_dentry), upperpath.dentry,
- ovl_dentry_lower(root_dentry));
+ ovl_dentry_lower(root_dentry), NULL);
sb->s_root = root_dentry;
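
The parse-time dependency enforced in ovl_parse_opt() above — metacopy needs redirects it can create on an upper mount (redirect_dir=on), or at least follow on a lower-only mount — reduces to a small sanitization pass over the parsed options. A stand-alone sketch, assuming bool fields that loosely mirror ovl_config:

#include <stdbool.h>
#include <stdio.h>

struct cfg { bool upperdir, metacopy, redirect_dir, redirect_follow; };

static void sanitize(struct cfg *c)
{
	/* Mirrors the two pr_warn() fallbacks in ovl_parse_opt(). */
	if (c->upperdir && c->metacopy && !c->redirect_dir) {
		fprintf(stderr, "metacopy requires redirect_dir=on, disabling\n");
		c->metacopy = false;
	} else if (c->metacopy && !c->redirect_follow) {
		fprintf(stderr, "metacopy requires redirect_dir=follow, disabling\n");
		c->metacopy = false;
	}
}

int main(void)
{
	struct cfg c = { .upperdir = true, .metacopy = true };

	sanitize(&c);	/* falls back: no redirect_dir on an upper mount */
	printf("metacopy=%s\n", c.metacopy ? "on" : "off");
	return 0;
}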
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 6f1078028c66..8cfb62cc8672 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -133,8 +133,10 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
* Non-dir dentry can hold lower dentry of its copy up origin.
*/
if (oe->numlower) {
- type |= __OVL_PATH_ORIGIN;
- if (d_is_dir(dentry))
+ if (ovl_test_flag(OVL_CONST_INO, d_inode(dentry)))
+ type |= __OVL_PATH_ORIGIN;
+ if (d_is_dir(dentry) ||
+ !ovl_has_upperdata(d_inode(dentry)))
type |= __OVL_PATH_MERGE;
}
} else {
@@ -164,6 +166,18 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
}
}
+void ovl_path_lowerdata(struct dentry *dentry, struct path *path)
+{
+ struct ovl_entry *oe = dentry->d_fsdata;
+
+ if (oe->numlower) {
+ path->mnt = oe->lowerstack[oe->numlower - 1].layer->mnt;
+ path->dentry = oe->lowerstack[oe->numlower - 1].dentry;
+ } else {
+ *path = (struct path) { };
+ }
+}
+
enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
{
enum ovl_path_type type = ovl_path_type(dentry);
@@ -195,6 +209,19 @@ struct ovl_layer *ovl_layer_lower(struct dentry *dentry)
return oe->numlower ? oe->lowerstack[0].layer : NULL;
}
+/*
+ * ovl_dentry_lower() could return either a data dentry or metacopy dentry
+ * dependig on what is stored in lowerstack[0]. At times we need to find
+ * lower dentry which has data (and not metacopy dentry). This helper
+ * returns the lower data dentry.
+ */
+struct dentry *ovl_dentry_lowerdata(struct dentry *dentry)
+{
+ struct ovl_entry *oe = dentry->d_fsdata;
+
+ return oe->numlower ? oe->lowerstack[oe->numlower - 1].dentry : NULL;
+}
+
struct dentry *ovl_dentry_real(struct dentry *dentry)
{
return ovl_dentry_upper(dentry) ?: ovl_dentry_lower(dentry);
@@ -222,6 +249,26 @@ struct inode *ovl_inode_real(struct inode *inode)
return ovl_inode_upper(inode) ?: ovl_inode_lower(inode);
}
+/* Return inode which contains lower data. Do not return metacopy */
+struct inode *ovl_inode_lowerdata(struct inode *inode)
+{
+ if (WARN_ON(!S_ISREG(inode->i_mode)))
+ return NULL;
+
+ return OVL_I(inode)->lowerdata ?: ovl_inode_lower(inode);
+}
+
+/* Return real inode which contains data. Does not return metacopy inode */
+struct inode *ovl_inode_realdata(struct inode *inode)
+{
+ struct inode *upperinode;
+
+ upperinode = ovl_inode_upper(inode);
+ if (upperinode && ovl_has_upperdata(inode))
+ return upperinode;
+
+ return ovl_inode_lowerdata(inode);
+}
struct ovl_dir_cache *ovl_dir_cache(struct inode *inode)
{
@@ -279,6 +326,62 @@ void ovl_dentry_set_upper_alias(struct dentry *dentry)
ovl_dentry_set_flag(OVL_E_UPPER_ALIAS, dentry);
}
+static bool ovl_should_check_upperdata(struct inode *inode)
+{
+ if (!S_ISREG(inode->i_mode))
+ return false;
+
+ if (!ovl_inode_lower(inode))
+ return false;
+
+ return true;
+}
+
+bool ovl_has_upperdata(struct inode *inode)
+{
+ if (!ovl_should_check_upperdata(inode))
+ return true;
+
+ if (!ovl_test_flag(OVL_UPPERDATA, inode))
+ return false;
+ /*
+ * Pairs with smp_wmb() in ovl_set_upperdata(). Main user of
+ * ovl_has_upperdata() is ovl_copy_up_meta_inode_data(). Make sure
+ * if setting of OVL_UPPERDATA is visible, then effects of writes
+ * before that are visible too.
+ */
+ smp_rmb();
+ return true;
+}
+
+void ovl_set_upperdata(struct inode *inode)
+{
+ /*
+ * Pairs with smp_rmb() in ovl_has_upperdata(). Make sure
+ * if OVL_UPPERDATA flag is visible, then effects of write operations
+ * before it are visible as well.
+ */
+ smp_wmb();
+ ovl_set_flag(OVL_UPPERDATA, inode);
+}
+
+/* Caller should hold ovl_inode->lock */
+bool ovl_dentry_needs_data_copy_up_locked(struct dentry *dentry, int flags)
+{
+ if (!ovl_open_flags_need_copy_up(flags))
+ return false;
+
+ return !ovl_test_flag(OVL_UPPERDATA, d_inode(dentry));
+}
+
+bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags)
+{
+ if (!ovl_open_flags_need_copy_up(flags))
+ return false;
+
+ return !ovl_has_upperdata(d_inode(dentry));
+}
+
bool ovl_redirect_dir(struct super_block *sb)
{
struct ovl_fs *ofs = sb->s_fs_info;
@@ -300,7 +403,7 @@ void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect)
}
void ovl_inode_init(struct inode *inode, struct dentry *upperdentry,
- struct dentry *lowerdentry)
+ struct dentry *lowerdentry, struct dentry *lowerdata)
{
struct inode *realinode = d_inode(upperdentry ?: lowerdentry);
@@ -308,8 +411,11 @@ void ovl_inode_init(struct inode *inode, struct dentry *upperdentry,
OVL_I(inode)->__upperdentry = upperdentry;
if (lowerdentry)
OVL_I(inode)->lower = igrab(d_inode(lowerdentry));
+ if (lowerdata)
+ OVL_I(inode)->lowerdata = igrab(d_inode(lowerdata));
ovl_copyattr(realinode, inode);
+ ovl_copyflags(realinode, inode);
if (!inode->i_ino)
inode->i_ino = realinode->i_ino;
}
@@ -333,7 +439,7 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
}
}
-void ovl_dentry_version_inc(struct dentry *dentry, bool impurity)
+static void ovl_dentry_version_inc(struct dentry *dentry, bool impurity)
{
struct inode *inode = d_inode(dentry);
@@ -348,6 +454,14 @@ void ovl_dentry_version_inc(struct dentry *dentry, bool impurity)
OVL_I(inode)->version++;
}
+void ovl_dir_modified(struct dentry *dentry, bool impurity)
+{
+ /* Copy mtime/ctime */
+ ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry));
+
+ ovl_dentry_version_inc(dentry, impurity);
+}
+
u64 ovl_dentry_version_get(struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
@@ -368,13 +482,51 @@ struct file *ovl_path_open(struct path *path, int flags)
return dentry_open(path, flags | O_NOATIME, current_cred());
}
-int ovl_copy_up_start(struct dentry *dentry)
+/* Caller should hold ovl_inode->lock */
+static bool ovl_already_copied_up_locked(struct dentry *dentry, int flags)
+{
+ bool disconnected = dentry->d_flags & DCACHE_DISCONNECTED;
+
+ if (ovl_dentry_upper(dentry) &&
+ (ovl_dentry_has_upper_alias(dentry) || disconnected) &&
+ !ovl_dentry_needs_data_copy_up_locked(dentry, flags))
+ return true;
+
+ return false;
+}
+
+bool ovl_already_copied_up(struct dentry *dentry, int flags)
+{
+ bool disconnected = dentry->d_flags & DCACHE_DISCONNECTED;
+
+ /*
+ * Check if copy-up has happened, and whether the upper alias (in
+ * the case of hard links) is there.
+ *
+ * Both checks are lockless:
+ * - false negatives: will recheck under oi->lock
+ * - false positives:
+ * + ovl_dentry_upper() uses memory barriers to ensure the
+ * upper dentry is up-to-date
+ * + ovl_dentry_has_upper_alias() relies on locking of
+ * upper parent i_rwsem to prevent reordering copy-up
+ * with rename.
+ */
+ if (ovl_dentry_upper(dentry) &&
+ (ovl_dentry_has_upper_alias(dentry) || disconnected) &&
+ !ovl_dentry_needs_data_copy_up(dentry, flags))
+ return true;
+
+ return false;
+}
+
+int ovl_copy_up_start(struct dentry *dentry, int flags)
{
struct ovl_inode *oi = OVL_I(d_inode(dentry));
int err;
err = mutex_lock_interruptible(&oi->lock);
- if (!err && ovl_dentry_has_upper_alias(dentry)) {
+ if (!err && ovl_already_copied_up_locked(dentry, flags)) {
err = 1; /* Already copied up */
mutex_unlock(&oi->lock);
}
@@ -675,3 +827,91 @@ err:
pr_err("overlayfs: failed to lock workdir+upperdir\n");
return -EIO;
}
+
+/* err < 0, 0 if no metacopy xattr, 1 if metacopy xattr found */
+int ovl_check_metacopy_xattr(struct dentry *dentry)
+{
+ int res;
+
+ /* Only regular files can have metacopy xattr */
+ if (!S_ISREG(d_inode(dentry)->i_mode))
+ return 0;
+
+ res = vfs_getxattr(dentry, OVL_XATTR_METACOPY, NULL, 0);
+ if (res < 0) {
+ if (res == -ENODATA || res == -EOPNOTSUPP)
+ return 0;
+ goto out;
+ }
+
+ return 1;
+out:
+ pr_warn_ratelimited("overlayfs: failed to get metacopy (%i)\n", res);
+ return res;
+}
+
+bool ovl_is_metacopy_dentry(struct dentry *dentry)
+{
+ struct ovl_entry *oe = dentry->d_fsdata;
+
+ if (!d_is_reg(dentry))
+ return false;
+
+ if (ovl_dentry_upper(dentry)) {
+ if (!ovl_has_upperdata(d_inode(dentry)))
+ return true;
+ return false;
+ }
+
+ return (oe->numlower > 1);
+}
+
+char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
+{
+ int res;
+ char *s, *next, *buf = NULL;
+
+ res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, NULL, 0);
+ if (res < 0) {
+ if (res == -ENODATA || res == -EOPNOTSUPP)
+ return NULL;
+ goto fail;
+ }
+
+ buf = kzalloc(res + padding + 1, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ if (res == 0)
+ goto invalid;
+
+ res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, buf, res);
+ if (res < 0)
+ goto fail;
+ if (res == 0)
+ goto invalid;
+
+ if (buf[0] == '/') {
+ for (s = buf; *s++ == '/'; s = next) {
+ next = strchrnul(s, '/');
+ if (s == next)
+ goto invalid;
+ }
+ } else {
+ if (strchr(buf, '/') != NULL)
+ goto invalid;
+ }
+
+ return buf;
+
+err_free:
+ kfree(buf);
+ return ERR_PTR(res);
+fail:
+ pr_warn_ratelimited("overlayfs: failed to get redirect (%i)\n", res);
+ goto err_free;
+invalid:
+ pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
+ res = -EINVAL;
+ goto err_free;
+}
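
The OVL_UPPERDATA pairing above follows the usual publish/consume discipline: write the copied-up data, smp_wmb(), set the flag; readers test the flag and smp_rmb() before touching the data. A userspace analogue using C11 release/acquire atomics in place of the kernel barriers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int upper_data;			/* stands in for copied-up file data */
static atomic_bool upperdata_flag;	/* stands in for OVL_UPPERDATA */

static void set_upperdata(int data)
{
	upper_data = data;	/* write the data before publishing... */
	/* The release store plays the role of smp_wmb(): if the flag
	 * is observed, the writes before it are observable too. */
	atomic_store_explicit(&upperdata_flag, true, memory_order_release);
}

static bool has_upperdata(int *out)
{
	/* The acquire load plays the role of smp_rmb(). */
	if (!atomic_load_explicit(&upperdata_flag, memory_order_acquire))
		return false;
	*out = upper_data;	/* guaranteed to see the published data */
	return true;
}

int main(void)
{
	int v = 0;
	bool ok;

	set_upperdata(42);
	ok = has_upperdata(&v);
	printf("has=%d v=%d\n", ok, v);
	return 0;
}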
diff --git a/fs/pipe.c b/fs/pipe.c
index 39d6f431da83..bdc5d3c0977d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -741,54 +741,33 @@ fail_inode:
int create_pipe_files(struct file **res, int flags)
{
- int err;
struct inode *inode = get_pipe_inode();
struct file *f;
- struct path path;
if (!inode)
return -ENFILE;
- err = -ENOMEM;
- path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &empty_name);
- if (!path.dentry)
- goto err_inode;
- path.mnt = mntget(pipe_mnt);
-
- d_instantiate(path.dentry, inode);
-
- f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
+ f = alloc_file_pseudo(inode, pipe_mnt, "",
+ O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
+ &pipefifo_fops);
if (IS_ERR(f)) {
- err = PTR_ERR(f);
- goto err_dentry;
+ free_pipe_info(inode->i_pipe);
+ iput(inode);
+ return PTR_ERR(f);
}
- f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
f->private_data = inode->i_pipe;
- res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
+ res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
+ &pipefifo_fops);
if (IS_ERR(res[0])) {
- err = PTR_ERR(res[0]);
- goto err_file;
+ put_pipe_info(inode, inode->i_pipe);
+ fput(f);
+ return PTR_ERR(res[0]);
}
-
- path_get(&path);
res[0]->private_data = inode->i_pipe;
- res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
res[1] = f;
return 0;
-
-err_file:
- put_filp(f);
-err_dentry:
- free_pipe_info(inode->i_pipe);
- path_put(&path);
- return err;
-
-err_inode:
- free_pipe_info(inode->i_pipe);
- iput(inode);
- return err;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
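
With alloc_file_pseudo() and alloc_file_clone(), the error unwinding in create_pipe_files() collapses: the write side owns the inode, the read side is cloned from it, and each failure path drops only what already exists instead of threading through stacked goto labels. A toy model of that ownership shape (stand-in types, not the VFS API):

#include <stdio.h>
#include <stdlib.h>

struct file { struct file *origin; int refs; };

static struct file *alloc_pseudo(void)		/* may fail */
{
	struct file *f = calloc(1, sizeof(*f));

	if (f)
		f->refs = 1;
	return f;
}

static struct file *alloc_clone(struct file *f)	/* shares the origin */
{
	struct file *c = calloc(1, sizeof(*c));

	if (c) {
		c->origin = f;
		f->refs++;
	}
	return c;
}

static int make_pair(struct file **res)
{
	struct file *w = alloc_pseudo();

	if (!w)
		return -1;
	res[0] = alloc_clone(w);
	if (!res[0]) {
		free(w);	/* unwind only the write side */
		return -1;
	}
	res[1] = w;
	return 0;
}

int main(void)
{
	struct file *res[2];

	if (make_pair(res) == 0)
		printf("pair created, write refs=%d\n", res[1]->refs);
	return 0;
}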
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 0eaeb41453f5..817c02b13b1d 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -31,6 +31,7 @@ config PROC_FS
config PROC_KCORE
bool "/proc/kcore support" if !ARM
depends on PROC_FS && MMU
+ select CRASH_CORE
help
Provides a virtual ELF core file of the live kernel. This can
be read with gdb and other ELF tools. No modifications can be
diff --git a/fs/proc/base.c b/fs/proc/base.c
index aaffc0c30216..ccf86f16d9f0 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -463,7 +463,7 @@ static int lstats_show_proc(struct seq_file *m, void *v)
if (!task)
return -ESRCH;
seq_puts(m, "Latency Top version : v0.1\n");
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < LT_SAVECOUNT; i++) {
struct latency_record *lr = &task->latency_record[i];
if (lr->backtrace[0]) {
int q;
@@ -1366,10 +1366,8 @@ static ssize_t proc_fail_nth_read(struct file *file, char __user *buf,
if (!task)
return -ESRCH;
len = snprintf(numbuf, sizeof(numbuf), "%u\n", task->fail_nth);
- len = simple_read_from_buffer(buf, count, ppos, numbuf, len);
put_task_struct(task);
-
- return len;
+ return simple_read_from_buffer(buf, count, ppos, numbuf, len);
}
static const struct file_operations proc_fail_nth_operations = {
@@ -2519,47 +2517,47 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
struct inode * inode = file_inode(file);
+ struct task_struct *task;
void *page;
- ssize_t length;
- struct task_struct *task = get_proc_task(inode);
-
- length = -ESRCH;
- if (!task)
- goto out_no_task;
+ int rv;
+ rcu_read_lock();
+ task = pid_task(proc_pid(inode), PIDTYPE_PID);
+ if (!task) {
+ rcu_read_unlock();
+ return -ESRCH;
+ }
/* A task may only write its own attributes. */
- length = -EACCES;
- if (current != task)
- goto out;
+ if (current != task) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
+ rcu_read_unlock();
if (count > PAGE_SIZE)
count = PAGE_SIZE;
/* No partial writes. */
- length = -EINVAL;
if (*ppos != 0)
- goto out;
+ return -EINVAL;
page = memdup_user(buf, count);
if (IS_ERR(page)) {
- length = PTR_ERR(page);
+ rv = PTR_ERR(page);
goto out;
}
/* Guard against adverse ptrace interaction */
- length = mutex_lock_interruptible(&current->signal->cred_guard_mutex);
- if (length < 0)
+ rv = mutex_lock_interruptible(&current->signal->cred_guard_mutex);
+ if (rv < 0)
goto out_free;
- length = security_setprocattr(file->f_path.dentry->d_name.name,
- page, count);
+ rv = security_setprocattr(file->f_path.dentry->d_name.name, page, count);
mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
kfree(page);
out:
- put_task_struct(task);
-out_no_task:
- return length;
+ return rv;
}
static const struct file_operations proc_pid_attr_operations = {
@@ -3309,12 +3307,12 @@ static const struct pid_entry tid_base_stuff[] = {
REG("cmdline", S_IRUGO, proc_pid_cmdline_ops),
ONE("stat", S_IRUGO, proc_tid_stat),
ONE("statm", S_IRUGO, proc_pid_statm),
- REG("maps", S_IRUGO, proc_tid_maps_operations),
+ REG("maps", S_IRUGO, proc_pid_maps_operations),
#ifdef CONFIG_PROC_CHILDREN
REG("children", S_IRUGO, proc_tid_children_operations),
#endif
#ifdef CONFIG_NUMA
- REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
+ REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations),
#endif
REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations),
LNK("cwd", proc_cwd_link),
@@ -3324,7 +3322,7 @@ static const struct pid_entry tid_base_stuff[] = {
REG("mountinfo", S_IRUGO, proc_mountinfo_operations),
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
- REG("smaps", S_IRUGO, proc_tid_smaps_operations),
+ REG("smaps", S_IRUGO, proc_pid_smaps_operations),
REG("smaps_rollup", S_IRUGO, proc_pid_smaps_rollup_operations),
REG("pagemap", S_IRUSR, proc_pagemap_operations),
#endif
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index bb1c1625b158..8ae109429a88 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -286,9 +286,9 @@ int proc_readdir_de(struct file *file, struct dir_context *ctx,
if (!dir_emit_dots(file, ctx))
return 0;
+ i = ctx->pos - 2;
read_lock(&proc_subdir_lock);
de = pde_subdir_first(de);
- i = ctx->pos - 2;
for (;;) {
if (!de) {
read_unlock(&proc_subdir_lock);
@@ -309,8 +309,8 @@ int proc_readdir_de(struct file *file, struct dir_context *ctx,
pde_put(de);
return 0;
}
- read_lock(&proc_subdir_lock);
ctx->pos++;
+ read_lock(&proc_subdir_lock);
next = pde_subdir_next(de);
pde_put(de);
de = next;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 85ffbd27f288..fc5306a31a1d 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -105,8 +105,10 @@ void __init proc_init_kmemcache(void)
kmem_cache_create("pde_opener", sizeof(struct pde_opener), 0,
SLAB_ACCOUNT|SLAB_PANIC, NULL);
proc_dir_entry_cache = kmem_cache_create_usercopy(
- "proc_dir_entry", SIZEOF_PDE_SLOT, 0, SLAB_PANIC,
- OFFSETOF_PDE_NAME, SIZEOF_PDE_INLINE_NAME, NULL);
+ "proc_dir_entry", SIZEOF_PDE, 0, SLAB_PANIC,
+ offsetof(struct proc_dir_entry, inline_name),
+ SIZEOF_PDE_INLINE_NAME, NULL);
+ BUILD_BUG_ON(sizeof(struct proc_dir_entry) >= SIZEOF_PDE);
}
static int proc_show_options(struct seq_file *seq, struct dentry *root)
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index da3dbfa09e79..5185d7f6a51e 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -65,16 +65,13 @@ struct proc_dir_entry {
char inline_name[];
} __randomize_layout;
-#define OFFSETOF_PDE_NAME offsetof(struct proc_dir_entry, inline_name)
-#define SIZEOF_PDE_SLOT \
- (OFFSETOF_PDE_NAME + 34 <= 64 ? 64 : \
- OFFSETOF_PDE_NAME + 34 <= 128 ? 128 : \
- OFFSETOF_PDE_NAME + 34 <= 192 ? 192 : \
- OFFSETOF_PDE_NAME + 34 <= 256 ? 256 : \
- OFFSETOF_PDE_NAME + 34 <= 512 ? 512 : \
- 0)
-
-#define SIZEOF_PDE_INLINE_NAME (SIZEOF_PDE_SLOT - OFFSETOF_PDE_NAME)
+#define SIZEOF_PDE ( \
+ sizeof(struct proc_dir_entry) < 128 ? 128 : \
+ sizeof(struct proc_dir_entry) < 192 ? 192 : \
+ sizeof(struct proc_dir_entry) < 256 ? 256 : \
+ sizeof(struct proc_dir_entry) < 512 ? 512 : \
+ 0)
+#define SIZEOF_PDE_INLINE_NAME (SIZEOF_PDE - sizeof(struct proc_dir_entry))
extern struct kmem_cache *proc_dir_entry_cache;
void pde_free(struct proc_dir_entry *pde);
@@ -116,12 +113,12 @@ static inline void *__PDE_DATA(const struct inode *inode)
return PDE(inode)->data;
}
-static inline struct pid *proc_pid(struct inode *inode)
+static inline struct pid *proc_pid(const struct inode *inode)
{
return PROC_I(inode)->pid;
}
-static inline struct task_struct *get_proc_task(struct inode *inode)
+static inline struct task_struct *get_proc_task(const struct inode *inode)
{
return get_pid_task(proc_pid(inode), PIDTYPE_PID);
}
@@ -285,7 +282,6 @@ struct proc_maps_private {
struct inode *inode;
struct task_struct *task;
struct mm_struct *mm;
- struct mem_size_stats *rollup;
#ifdef CONFIG_MMU
struct vm_area_struct *tail_vma;
#endif
@@ -297,12 +293,9 @@ struct proc_maps_private {
struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);
extern const struct file_operations proc_pid_maps_operations;
-extern const struct file_operations proc_tid_maps_operations;
extern const struct file_operations proc_pid_numa_maps_operations;
-extern const struct file_operations proc_tid_numa_maps_operations;
extern const struct file_operations proc_pid_smaps_operations;
extern const struct file_operations proc_pid_smaps_rollup_operations;
-extern const struct file_operations proc_tid_smaps_operations;
extern const struct file_operations proc_clear_refs_operations;
extern const struct file_operations proc_pagemap_operations;
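
The reworked SIZEOF_PDE rounds the whole proc_dir_entry up to the next slab-friendly bucket (128, 192, 256 or 512 bytes) and hands the leftover bytes to inline_name; the BUILD_BUG_ON() added in proc_init_kmemcache() rejects the 0 fallback for oversized structs. A userspace rendition of the arithmetic with a stand-in struct:

#include <stdio.h>

/* 76 fixed bytes stand in for the real struct proc_dir_entry. */
struct pde_stub { char fixed[76]; char inline_name[]; };

#define SIZEOF_PDE ( \
	sizeof(struct pde_stub) < 128 ? 128 : \
	sizeof(struct pde_stub) < 192 ? 192 : \
	sizeof(struct pde_stub) < 256 ? 256 : \
	sizeof(struct pde_stub) < 512 ? 512 : 0)
#define SIZEOF_PDE_INLINE_NAME (SIZEOF_PDE - sizeof(struct pde_stub))

int main(void)
{
	/* 76-byte stub -> 128-byte slot, 52 bytes left for the name. */
	printf("slot=%zu inline=%zu\n",
	       (size_t)SIZEOF_PDE, (size_t)SIZEOF_PDE_INLINE_NAME);
	return 0;
}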
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e64ecb9f2720..ad72261ee3fe 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -10,6 +10,7 @@
* Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
*/
+#include <linux/crash_core.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
@@ -49,32 +50,23 @@ static struct proc_dir_entry *proc_root_kcore;
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
-/* An ELF note in memory */
-struct memelfnote
-{
- const char *name;
- int type;
- unsigned int datasz;
- void *data;
-};
-
static LIST_HEAD(kclist_head);
-static DEFINE_RWLOCK(kclist_lock);
+static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;
-void
-kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
+/* This doesn't grab kclist_lock, so it should only be used at init time. */
+void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
+ int type)
{
new->addr = (unsigned long)addr;
new->size = size;
new->type = type;
- write_lock(&kclist_lock);
list_add_tail(&new->list, &kclist_head);
- write_unlock(&kclist_lock);
}
-static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
+static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
+ size_t *data_offset)
{
size_t try, size;
struct kcore_list *m;
@@ -88,53 +80,19 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
size = try;
*nphdr = *nphdr + 1;
}
- *elf_buflen = sizeof(struct elfhdr) +
- (*nphdr + 2)*sizeof(struct elf_phdr) +
- 3 * ((sizeof(struct elf_note)) +
- roundup(sizeof(CORE_STR), 4)) +
- roundup(sizeof(struct elf_prstatus), 4) +
- roundup(sizeof(struct elf_prpsinfo), 4) +
- roundup(arch_task_struct_size, 4);
- *elf_buflen = PAGE_ALIGN(*elf_buflen);
- return size + *elf_buflen;
-}
-
-static void free_kclist_ents(struct list_head *head)
-{
- struct kcore_list *tmp, *pos;
- list_for_each_entry_safe(pos, tmp, head, list) {
- list_del(&pos->list);
- kfree(pos);
- }
+ *phdrs_len = *nphdr * sizeof(struct elf_phdr);
+ *notes_len = (4 * sizeof(struct elf_note) +
+ 3 * ALIGN(sizeof(CORE_STR), 4) +
+ VMCOREINFO_NOTE_NAME_BYTES +
+ ALIGN(sizeof(struct elf_prstatus), 4) +
+ ALIGN(sizeof(struct elf_prpsinfo), 4) +
+ ALIGN(arch_task_struct_size, 4) +
+ ALIGN(vmcoreinfo_size, 4));
+ *data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
+ *notes_len);
+ return *data_offset + size;
}
-/*
- * Replace all KCORE_RAM/KCORE_VMEMMAP information with passed list.
- */
-static void __kcore_update_ram(struct list_head *list)
-{
- int nphdr;
- size_t size;
- struct kcore_list *tmp, *pos;
- LIST_HEAD(garbage);
-
- write_lock(&kclist_lock);
- if (kcore_need_update) {
- list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
- if (pos->type == KCORE_RAM
- || pos->type == KCORE_VMEMMAP)
- list_move(&pos->list, &garbage);
- }
- list_splice_tail(list, &kclist_head);
- } else
- list_splice(list, &garbage);
- kcore_need_update = 0;
- proc_root_kcore->size = get_kcore_size(&nphdr, &size);
- write_unlock(&kclist_lock);
-
- free_kclist_ents(&garbage);
-}
-
#ifdef CONFIG_HIGHMEM
/*
@@ -142,11 +100,9 @@ static void __kcore_update_ram(struct list_head *list)
* because memory hole is not as big as !HIGHMEM case.
* (HIGHMEM is special because part of memory is _invisible_ from the kernel.)
*/
-static int kcore_update_ram(void)
+static int kcore_ram_list(struct list_head *head)
{
- LIST_HEAD(head);
struct kcore_list *ent;
- int ret = 0;
ent = kmalloc(sizeof(*ent), GFP_KERNEL);
if (!ent)
@@ -154,9 +110,8 @@ static int kcore_update_ram(void)
ent->addr = (unsigned long)__va(0);
ent->size = max_low_pfn << PAGE_SHIFT;
ent->type = KCORE_RAM;
- list_add(&ent->list, &head);
- __kcore_update_ram(&head);
- return ret;
+ list_add(&ent->list, head);
+ return 0;
}
#else /* !CONFIG_HIGHMEM */
@@ -255,11 +210,10 @@ free_out:
return 1;
}
-static int kcore_update_ram(void)
+static int kcore_ram_list(struct list_head *list)
{
int nid, ret;
unsigned long end_pfn;
- LIST_HEAD(head);
/* Not initialized... update now */
/* find out "max pfn" */
@@ -271,258 +225,258 @@ static int kcore_update_ram(void)
end_pfn = node_end;
}
/* scan 0 to max_pfn */
- ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
- if (ret) {
- free_kclist_ents(&head);
+ ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
+ if (ret)
return -ENOMEM;
- }
- __kcore_update_ram(&head);
- return ret;
+ return 0;
}
#endif /* CONFIG_HIGHMEM */
-/*****************************************************************************/
-/*
- * determine size of ELF note
- */
-static int notesize(struct memelfnote *en)
-{
- int sz;
-
- sz = sizeof(struct elf_note);
- sz += roundup((strlen(en->name) + 1), 4);
- sz += roundup(en->datasz, 4);
-
- return sz;
-} /* end notesize() */
-
-/*****************************************************************************/
-/*
- * store a note in the header buffer
- */
-static char *storenote(struct memelfnote *men, char *bufp)
+static int kcore_update_ram(void)
{
- struct elf_note en;
-
-#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)
-
- en.n_namesz = strlen(men->name) + 1;
- en.n_descsz = men->datasz;
- en.n_type = men->type;
-
- DUMP_WRITE(&en, sizeof(en));
- DUMP_WRITE(men->name, en.n_namesz);
-
- /* XXX - cast from long long to long to avoid need for libgcc.a */
- bufp = (char*) roundup((unsigned long)bufp,4);
- DUMP_WRITE(men->data, men->datasz);
- bufp = (char*) roundup((unsigned long)bufp,4);
-
-#undef DUMP_WRITE
-
- return bufp;
-} /* end storenote() */
+ LIST_HEAD(list);
+ LIST_HEAD(garbage);
+ int nphdr;
+ size_t phdrs_len, notes_len, data_offset;
+ struct kcore_list *tmp, *pos;
+ int ret = 0;
-/*
- * store an ELF coredump header in the supplied buffer
- * nphdr is the number of elf_phdr to insert
- */
-static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
-{
- struct elf_prstatus prstatus; /* NT_PRSTATUS */
- struct elf_prpsinfo prpsinfo; /* NT_PRPSINFO */
- struct elf_phdr *nhdr, *phdr;
- struct elfhdr *elf;
- struct memelfnote notes[3];
- off_t offset = 0;
- struct kcore_list *m;
+ down_write(&kclist_lock);
+ if (!xchg(&kcore_need_update, 0))
+ goto out;
- /* setup ELF header */
- elf = (struct elfhdr *) bufp;
- bufp += sizeof(struct elfhdr);
- offset += sizeof(struct elfhdr);
- memcpy(elf->e_ident, ELFMAG, SELFMAG);
- elf->e_ident[EI_CLASS] = ELF_CLASS;
- elf->e_ident[EI_DATA] = ELF_DATA;
- elf->e_ident[EI_VERSION]= EV_CURRENT;
- elf->e_ident[EI_OSABI] = ELF_OSABI;
- memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
- elf->e_type = ET_CORE;
- elf->e_machine = ELF_ARCH;
- elf->e_version = EV_CURRENT;
- elf->e_entry = 0;
- elf->e_phoff = sizeof(struct elfhdr);
- elf->e_shoff = 0;
- elf->e_flags = ELF_CORE_EFLAGS;
- elf->e_ehsize = sizeof(struct elfhdr);
- elf->e_phentsize= sizeof(struct elf_phdr);
- elf->e_phnum = nphdr;
- elf->e_shentsize= 0;
- elf->e_shnum = 0;
- elf->e_shstrndx = 0;
-
- /* setup ELF PT_NOTE program header */
- nhdr = (struct elf_phdr *) bufp;
- bufp += sizeof(struct elf_phdr);
- offset += sizeof(struct elf_phdr);
- nhdr->p_type = PT_NOTE;
- nhdr->p_offset = 0;
- nhdr->p_vaddr = 0;
- nhdr->p_paddr = 0;
- nhdr->p_filesz = 0;
- nhdr->p_memsz = 0;
- nhdr->p_flags = 0;
- nhdr->p_align = 0;
-
- /* setup ELF PT_LOAD program header for every area */
- list_for_each_entry(m, &kclist_head, list) {
- phdr = (struct elf_phdr *) bufp;
- bufp += sizeof(struct elf_phdr);
- offset += sizeof(struct elf_phdr);
-
- phdr->p_type = PT_LOAD;
- phdr->p_flags = PF_R|PF_W|PF_X;
- phdr->p_offset = kc_vaddr_to_offset(m->addr) + dataoff;
- phdr->p_vaddr = (size_t)m->addr;
- if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
- phdr->p_paddr = __pa(m->addr);
- else
- phdr->p_paddr = (elf_addr_t)-1;
- phdr->p_filesz = phdr->p_memsz = m->size;
- phdr->p_align = PAGE_SIZE;
+ ret = kcore_ram_list(&list);
+ if (ret) {
+ /* Couldn't get the RAM list, try again next time. */
+ WRITE_ONCE(kcore_need_update, 1);
+ list_splice_tail(&list, &garbage);
+ goto out;
}
- /*
- * Set up the notes in similar form to SVR4 core dumps made
- * with info from their /proc.
- */
- nhdr->p_offset = offset;
-
- /* set up the process status */
- notes[0].name = CORE_STR;
- notes[0].type = NT_PRSTATUS;
- notes[0].datasz = sizeof(struct elf_prstatus);
- notes[0].data = &prstatus;
-
- memset(&prstatus, 0, sizeof(struct elf_prstatus));
-
- nhdr->p_filesz = notesize(&notes[0]);
- bufp = storenote(&notes[0], bufp);
-
- /* set up the process info */
- notes[1].name = CORE_STR;
- notes[1].type = NT_PRPSINFO;
- notes[1].datasz = sizeof(struct elf_prpsinfo);
- notes[1].data = &prpsinfo;
-
- memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
- prpsinfo.pr_state = 0;
- prpsinfo.pr_sname = 'R';
- prpsinfo.pr_zomb = 0;
-
- strcpy(prpsinfo.pr_fname, "vmlinux");
- strlcpy(prpsinfo.pr_psargs, saved_command_line, sizeof(prpsinfo.pr_psargs));
-
- nhdr->p_filesz += notesize(&notes[1]);
- bufp = storenote(&notes[1], bufp);
+ list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
+ if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
+ list_move(&pos->list, &garbage);
+ }
+ list_splice_tail(&list, &kclist_head);
- /* set up the task structure */
- notes[2].name = CORE_STR;
- notes[2].type = NT_TASKSTRUCT;
- notes[2].datasz = arch_task_struct_size;
- notes[2].data = current;
+ proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
+ &data_offset);
- nhdr->p_filesz += notesize(&notes[2]);
- bufp = storenote(&notes[2], bufp);
+out:
+ up_write(&kclist_lock);
+ list_for_each_entry_safe(pos, tmp, &garbage, list) {
+ list_del(&pos->list);
+ kfree(pos);
+ }
+ return ret;
+}
-} /* end elf_kcore_store_hdr() */
+static void append_kcore_note(char *notes, size_t *i, const char *name,
+ unsigned int type, const void *desc,
+ size_t descsz)
+{
+ struct elf_note *note = (struct elf_note *)&notes[*i];
+
+ note->n_namesz = strlen(name) + 1;
+ note->n_descsz = descsz;
+ note->n_type = type;
+ *i += sizeof(*note);
+ memcpy(&notes[*i], name, note->n_namesz);
+ *i = ALIGN(*i + note->n_namesz, 4);
+ memcpy(&notes[*i], desc, descsz);
+ *i = ALIGN(*i + descsz, 4);
+}
-/*****************************************************************************/
-/*
- * read from the ELF header and then kernel memory
- */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
char *buf = file->private_data;
- ssize_t acc = 0;
- size_t size, tsz;
- size_t elf_buflen;
+ size_t phdrs_offset, notes_offset, data_offset;
+ size_t phdrs_len, notes_len;
+ struct kcore_list *m;
+ size_t tsz;
int nphdr;
unsigned long start;
+ size_t orig_buflen = buflen;
+ int ret = 0;
- read_lock(&kclist_lock);
- size = get_kcore_size(&nphdr, &elf_buflen);
+ down_read(&kclist_lock);
+
+ get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
+ phdrs_offset = sizeof(struct elfhdr);
+ notes_offset = phdrs_offset + phdrs_len;
+
+ /* ELF file header. */
+ if (buflen && *fpos < sizeof(struct elfhdr)) {
+ struct elfhdr ehdr = {
+ .e_ident = {
+ [EI_MAG0] = ELFMAG0,
+ [EI_MAG1] = ELFMAG1,
+ [EI_MAG2] = ELFMAG2,
+ [EI_MAG3] = ELFMAG3,
+ [EI_CLASS] = ELF_CLASS,
+ [EI_DATA] = ELF_DATA,
+ [EI_VERSION] = EV_CURRENT,
+ [EI_OSABI] = ELF_OSABI,
+ },
+ .e_type = ET_CORE,
+ .e_machine = ELF_ARCH,
+ .e_version = EV_CURRENT,
+ .e_phoff = sizeof(struct elfhdr),
+ .e_flags = ELF_CORE_EFLAGS,
+ .e_ehsize = sizeof(struct elfhdr),
+ .e_phentsize = sizeof(struct elf_phdr),
+ .e_phnum = nphdr,
+ };
+
+ tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
+ if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
- if (buflen == 0 || *fpos >= size) {
- read_unlock(&kclist_lock);
- return 0;
+ buffer += tsz;
+ buflen -= tsz;
+ *fpos += tsz;
}
- /* trim buflen to not go beyond EOF */
- if (buflen > size - *fpos)
- buflen = size - *fpos;
-
- /* construct an ELF core header if we'll need some of it */
- if (*fpos < elf_buflen) {
- char * elf_buf;
-
- tsz = elf_buflen - *fpos;
- if (buflen < tsz)
- tsz = buflen;
- elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
- if (!elf_buf) {
- read_unlock(&kclist_lock);
- return -ENOMEM;
+ /* ELF program headers. */
+ if (buflen && *fpos < phdrs_offset + phdrs_len) {
+ struct elf_phdr *phdrs, *phdr;
+
+ phdrs = kzalloc(phdrs_len, GFP_KERNEL);
+ if (!phdrs) {
+ ret = -ENOMEM;
+ goto out;
}
- elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
- read_unlock(&kclist_lock);
- if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
- kfree(elf_buf);
- return -EFAULT;
+
+ phdrs[0].p_type = PT_NOTE;
+ phdrs[0].p_offset = notes_offset;
+ phdrs[0].p_filesz = notes_len;
+
+ phdr = &phdrs[1];
+ list_for_each_entry(m, &kclist_head, list) {
+ phdr->p_type = PT_LOAD;
+ phdr->p_flags = PF_R | PF_W | PF_X;
+ phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
+ if (m->type == KCORE_REMAP)
+ phdr->p_vaddr = (size_t)m->vaddr;
+ else
+ phdr->p_vaddr = (size_t)m->addr;
+ if (m->type == KCORE_RAM || m->type == KCORE_REMAP)
+ phdr->p_paddr = __pa(m->addr);
+ else if (m->type == KCORE_TEXT)
+ phdr->p_paddr = __pa_symbol(m->addr);
+ else
+ phdr->p_paddr = (elf_addr_t)-1;
+ phdr->p_filesz = phdr->p_memsz = m->size;
+ phdr->p_align = PAGE_SIZE;
+ phdr++;
}
- kfree(elf_buf);
+
+ tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
+ if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
+ tsz)) {
+ kfree(phdrs);
+ ret = -EFAULT;
+ goto out;
+ }
+ kfree(phdrs);
+
+ buffer += tsz;
buflen -= tsz;
*fpos += tsz;
- buffer += tsz;
- acc += tsz;
+ }
+
+ /* ELF note segment. */
+ if (buflen && *fpos < notes_offset + notes_len) {
+ struct elf_prstatus prstatus = {};
+ struct elf_prpsinfo prpsinfo = {
+ .pr_sname = 'R',
+ .pr_fname = "vmlinux",
+ };
+ char *notes;
+ size_t i = 0;
+
+ strlcpy(prpsinfo.pr_psargs, saved_command_line,
+ sizeof(prpsinfo.pr_psargs));
+
+ notes = kzalloc(notes_len, GFP_KERNEL);
+ if (!notes) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
+ sizeof(prstatus));
+ append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
+ sizeof(prpsinfo));
+ append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
+ arch_task_struct_size);
+ /*
+ * vmcoreinfo_size is mostly constant after init time, but it
+ * can be changed by crash_save_vmcoreinfo(). Racing here with a
+ * panic on another CPU before the machine goes down is insanely
+ * unlikely, but it's better to not leave potential buffer
+ * overflows lying around, regardless.
+ */
+ append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
+ vmcoreinfo_data,
+ min(vmcoreinfo_size, notes_len - i));
+
+ tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
+ if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
+ kfree(notes);
+ ret = -EFAULT;
+ goto out;
+ }
+ kfree(notes);
- /* leave now if filled buffer already */
- if (buflen == 0)
- return acc;
- } else
- read_unlock(&kclist_lock);
+ buffer += tsz;
+ buflen -= tsz;
+ *fpos += tsz;
+ }
/*
* Check to see if our file offset matches with any of
* the addresses in the elf_phdr on our list.
*/
- start = kc_offset_to_vaddr(*fpos - elf_buflen);
+ start = kc_offset_to_vaddr(*fpos - data_offset);
if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
tsz = buflen;
-
- while (buflen) {
- struct kcore_list *m;
- read_lock(&kclist_lock);
- list_for_each_entry(m, &kclist_head, list) {
- if (start >= m->addr && start < (m->addr+m->size))
- break;
+ m = NULL;
+ while (buflen) {
+ /*
+ * If this is the first iteration or the address is not within
+ * the previous entry, search for a matching entry.
+ */
+ if (!m || start < m->addr || start >= m->addr + m->size) {
+ list_for_each_entry(m, &kclist_head, list) {
+ if (start >= m->addr &&
+ start < m->addr + m->size)
+ break;
+ }
}
- read_unlock(&kclist_lock);
if (&m->list == &kclist_head) {
- if (clear_user(buffer, tsz))
- return -EFAULT;
+ if (clear_user(buffer, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else if (m->type == KCORE_VMALLOC) {
vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
- if (copy_to_user(buffer, buf, tsz))
- return -EFAULT;
+ if (copy_to_user(buffer, buf, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else if (m->type == KCORE_USER) {
/* User page is handled prior to normal kernel page: */
- if (copy_to_user(buffer, (char *)start, tsz))
- return -EFAULT;
+ if (copy_to_user(buffer, (char *)start, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else {
if (kern_addr_valid(start)) {
/*
@@ -530,29 +484,37 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
* hardened user copy kernel text checks.
*/
if (probe_kernel_read(buf, (void *) start, tsz)) {
- if (clear_user(buffer, tsz))
- return -EFAULT;
+ if (clear_user(buffer, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else {
- if (copy_to_user(buffer, buf, tsz))
- return -EFAULT;
+ if (copy_to_user(buffer, buf, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
}
} else {
- if (clear_user(buffer, tsz))
- return -EFAULT;
+ if (clear_user(buffer, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
}
}
buflen -= tsz;
*fpos += tsz;
buffer += tsz;
- acc += tsz;
start += tsz;
tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
}
- return acc;
+out:
+ up_read(&kclist_lock);
+ if (ret)
+ return ret;
+ return orig_buflen - buflen;
}
-
static int open_kcore(struct inode *inode, struct file *filp)
{
if (!capable(CAP_SYS_RAWIO))
@@ -592,9 +554,8 @@ static int __meminit kcore_callback(struct notifier_block *self,
switch (action) {
case MEM_ONLINE:
case MEM_OFFLINE:
- write_lock(&kclist_lock);
kcore_need_update = 1;
- write_unlock(&kclist_lock);
+ break;
}
return NOTIFY_OK;
}
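
append_kcore_note() above packs each record as a fixed header, the NUL-terminated name padded to a 4-byte boundary, then the descriptor padded the same way — the layout whose total get_kcore_size() sums into notes_len. A userspace sketch with simplified types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3) & ~(size_t)3)

struct note_hdr { uint32_t n_namesz, n_descsz, n_type; };

static void append_note(char *buf, size_t *i, const char *name,
			uint32_t type, const void *desc, size_t descsz)
{
	struct note_hdr *n = (struct note_hdr *)&buf[*i];

	n->n_namesz = strlen(name) + 1;
	n->n_descsz = descsz;
	n->n_type = type;
	*i += sizeof(*n);
	memcpy(&buf[*i], name, n->n_namesz);
	*i = ALIGN4(*i + n->n_namesz);	/* pad the name to 4 bytes */
	memcpy(&buf[*i], desc, descsz);
	*i = ALIGN4(*i + descsz);	/* pad the descriptor to 4 bytes */
}

int main(void)
{
	char notes[64] = { 0 };
	size_t i = 0;
	uint32_t payload = 42;

	append_note(notes, &i, "CORE", 1, &payload, sizeof(payload));
	/* 12-byte header + "CORE\0" padded to 8 + 4-byte desc = 24 */
	printf("note bytes used: %zu\n", i);
	return 0;
}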
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 2fb04846ed11..edda898714eb 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -7,6 +7,7 @@
#include <linux/mman.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
+#include <linux/percpu.h>
#include <linux/quicklist.h>
#include <linux/seq_file.h>
#include <linux/swap.h>
@@ -121,6 +122,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
(unsigned long)VMALLOC_TOTAL >> 10);
show_val_kb(m, "VmallocUsed: ", 0ul);
show_val_kb(m, "VmallocChunk: ", 0ul);
+ show_val_kb(m, "Percpu: ", pcpu_nr_pages());
#ifdef CONFIG_MEMORY_FAILURE
seq_printf(m, "HardwareCorrupted: %5lu kB\n",
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 59749dfaef67..535eda7857cf 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -183,7 +183,7 @@ static int show_stat(struct seq_file *p, void *v)
static int stat_open(struct inode *inode, struct file *file)
{
- size_t size = 1024 + 128 * num_online_cpus();
+ unsigned int size = 1024 + 128 * num_online_cpus();
/* minimum size to display an interrupt count : 2 bytes */
size += 2 * nr_irqs;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dfd73a4616ce..5ea1d64cb0b4 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -247,7 +247,6 @@ static int proc_map_release(struct inode *inode, struct file *file)
if (priv->mm)
mmdrop(priv->mm);
- kfree(priv->rollup);
return seq_release_private(inode, file);
}
@@ -294,7 +293,7 @@ static void show_vma_header_prefix(struct seq_file *m,
}
static void
-show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
+show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
struct file *file = vma->vm_file;
@@ -357,35 +356,18 @@ done:
seq_putc(m, '\n');
}
-static int show_map(struct seq_file *m, void *v, int is_pid)
+static int show_map(struct seq_file *m, void *v)
{
- show_map_vma(m, v, is_pid);
+ show_map_vma(m, v);
m_cache_vma(m, v);
return 0;
}
-static int show_pid_map(struct seq_file *m, void *v)
-{
- return show_map(m, v, 1);
-}
-
-static int show_tid_map(struct seq_file *m, void *v)
-{
- return show_map(m, v, 0);
-}
-
static const struct seq_operations proc_pid_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
- .show = show_pid_map
-};
-
-static const struct seq_operations proc_tid_maps_op = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = show_tid_map
+ .show = show_map
};
static int pid_maps_open(struct inode *inode, struct file *file)
@@ -393,11 +375,6 @@ static int pid_maps_open(struct inode *inode, struct file *file)
return do_maps_open(inode, file, &proc_pid_maps_op);
}
-static int tid_maps_open(struct inode *inode, struct file *file)
-{
- return do_maps_open(inode, file, &proc_tid_maps_op);
-}
-
const struct file_operations proc_pid_maps_operations = {
.open = pid_maps_open,
.read = seq_read,
@@ -405,13 +382,6 @@ const struct file_operations proc_pid_maps_operations = {
.release = proc_map_release,
};
-const struct file_operations proc_tid_maps_operations = {
- .open = tid_maps_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = proc_map_release,
-};
-
/*
* Proportional Set Size(PSS): my share of RSS.
*
@@ -433,7 +403,6 @@ const struct file_operations proc_tid_maps_operations = {
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
- bool first;
unsigned long resident;
unsigned long shared_clean;
unsigned long shared_dirty;
@@ -447,7 +416,6 @@ struct mem_size_stats {
unsigned long swap;
unsigned long shared_hugetlb;
unsigned long private_hugetlb;
- unsigned long first_vma_start;
u64 pss;
u64 pss_locked;
u64 swap_pss;
@@ -731,14 +699,9 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
}
#endif /* HUGETLB_PAGE */
-#define SEQ_PUT_DEC(str, val) \
- seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
-static int show_smap(struct seq_file *m, void *v, int is_pid)
+static void smap_gather_stats(struct vm_area_struct *vma,
+ struct mem_size_stats *mss)
{
- struct proc_maps_private *priv = m->private;
- struct vm_area_struct *vma = v;
- struct mem_size_stats mss_stack;
- struct mem_size_stats *mss;
struct mm_walk smaps_walk = {
.pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
@@ -746,23 +709,6 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
#endif
.mm = vma->vm_mm,
};
- int ret = 0;
- bool rollup_mode;
- bool last_vma;
-
- if (priv->rollup) {
- rollup_mode = true;
- mss = priv->rollup;
- if (mss->first) {
- mss->first_vma_start = vma->vm_start;
- mss->first = false;
- }
- last_vma = !m_next_vma(priv, vma);
- } else {
- rollup_mode = false;
- memset(&mss_stack, 0, sizeof(mss_stack));
- mss = &mss_stack;
- }
smaps_walk.private = mss;
@@ -794,79 +740,116 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
walk_page_vma(vma, &smaps_walk);
if (vma->vm_flags & VM_LOCKED)
mss->pss_locked += mss->pss;
+}
- if (!rollup_mode) {
- show_map_vma(m, vma, is_pid);
- } else if (last_vma) {
- show_vma_header_prefix(
- m, mss->first_vma_start, vma->vm_end, 0, 0, 0, 0);
- seq_pad(m, ' ');
- seq_puts(m, "[rollup]\n");
- } else {
- ret = SEQ_SKIP;
- }
-
- if (!rollup_mode) {
- SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start);
- SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
- SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma));
- seq_puts(m, " kB\n");
- }
+#define SEQ_PUT_DEC(str, val) \
+ seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
- if (!rollup_mode || last_vma) {
- SEQ_PUT_DEC("Rss: ", mss->resident);
- SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT);
- SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean);
- SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty);
- SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean);
- SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty);
- SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced);
- SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous);
- SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree);
- SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp);
- SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
- SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
- seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
- mss->private_hugetlb >> 10, 7);
- SEQ_PUT_DEC(" kB\nSwap: ", mss->swap);
- SEQ_PUT_DEC(" kB\nSwapPss: ",
- mss->swap_pss >> PSS_SHIFT);
- SEQ_PUT_DEC(" kB\nLocked: ",
- mss->pss_locked >> PSS_SHIFT);
- seq_puts(m, " kB\n");
- }
- if (!rollup_mode) {
- if (arch_pkeys_enabled())
- seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
- show_smap_vma_flags(m, vma);
- }
- m_cache_vma(m, vma);
- return ret;
+/* Show the contents common for smaps and smaps_rollup */
+static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss)
+{
+ SEQ_PUT_DEC("Rss: ", mss->resident);
+ SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT);
+ SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean);
+ SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty);
+ SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean);
+ SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty);
+ SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced);
+ SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous);
+ SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree);
+ SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp);
+ SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
+ SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
+ seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
+ mss->private_hugetlb >> 10, 7);
+ SEQ_PUT_DEC(" kB\nSwap: ", mss->swap);
+ SEQ_PUT_DEC(" kB\nSwapPss: ",
+ mss->swap_pss >> PSS_SHIFT);
+ SEQ_PUT_DEC(" kB\nLocked: ",
+ mss->pss_locked >> PSS_SHIFT);
+ seq_puts(m, " kB\n");
}
-#undef SEQ_PUT_DEC
-static int show_pid_smap(struct seq_file *m, void *v)
+static int show_smap(struct seq_file *m, void *v)
{
- return show_smap(m, v, 1);
+ struct vm_area_struct *vma = v;
+ struct mem_size_stats mss;
+
+ memset(&mss, 0, sizeof(mss));
+
+ smap_gather_stats(vma, &mss);
+
+ show_map_vma(m, vma);
+
+ SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start);
+ SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
+ SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma));
+ seq_puts(m, " kB\n");
+
+ __show_smap(m, &mss);
+
+ if (arch_pkeys_enabled())
+ seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
+ show_smap_vma_flags(m, vma);
+
+ m_cache_vma(m, vma);
+
+ return 0;
}
-static int show_tid_smap(struct seq_file *m, void *v)
+static int show_smaps_rollup(struct seq_file *m, void *v)
{
- return show_smap(m, v, 0);
+ struct proc_maps_private *priv = m->private;
+ struct mem_size_stats mss;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ unsigned long last_vma_end = 0;
+ int ret = 0;
+
+ priv->task = get_proc_task(priv->inode);
+ if (!priv->task)
+ return -ESRCH;
+
+ mm = priv->mm;
+ if (!mm || !mmget_not_zero(mm)) {
+ ret = -ESRCH;
+ goto out_put_task;
+ }
+
+ memset(&mss, 0, sizeof(mss));
+
+ down_read(&mm->mmap_sem);
+ hold_task_mempolicy(priv);
+
+ for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
+ smap_gather_stats(vma, &mss);
+ last_vma_end = vma->vm_end;
+ }
+
+ show_vma_header_prefix(m, priv->mm->mmap->vm_start,
+ last_vma_end, 0, 0, 0, 0);
+ seq_pad(m, ' ');
+ seq_puts(m, "[rollup]\n");
+
+ __show_smap(m, &mss);
+
+ release_task_mempolicy(priv);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+
+out_put_task:
+ put_task_struct(priv->task);
+ priv->task = NULL;
+
+ return ret;
}
+#undef SEQ_PUT_DEC
static const struct seq_operations proc_pid_smaps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
- .show = show_pid_smap
-};
-
-static const struct seq_operations proc_tid_smaps_op = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = show_tid_smap
+ .show = show_smap
};
static int pid_smaps_open(struct inode *inode, struct file *file)
@@ -874,28 +857,45 @@ static int pid_smaps_open(struct inode *inode, struct file *file)
return do_maps_open(inode, file, &proc_pid_smaps_op);
}
-static int pid_smaps_rollup_open(struct inode *inode, struct file *file)
+static int smaps_rollup_open(struct inode *inode, struct file *file)
{
- struct seq_file *seq;
+ int ret;
struct proc_maps_private *priv;
- int ret = do_maps_open(inode, file, &proc_pid_smaps_op);
-
- if (ret < 0)
- return ret;
- seq = file->private_data;
- priv = seq->private;
- priv->rollup = kzalloc(sizeof(*priv->rollup), GFP_KERNEL);
- if (!priv->rollup) {
- proc_map_release(inode, file);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
+ if (!priv)
return -ENOMEM;
+
+ ret = single_open(file, show_smaps_rollup, priv);
+ if (ret)
+ goto out_free;
+
+ priv->inode = inode;
+ priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
+ if (IS_ERR(priv->mm)) {
+ ret = PTR_ERR(priv->mm);
+
+ single_release(inode, file);
+ goto out_free;
}
- priv->rollup->first = true;
+
return 0;
+
+out_free:
+ kfree(priv);
+ return ret;
}
-static int tid_smaps_open(struct inode *inode, struct file *file)
+static int smaps_rollup_release(struct inode *inode, struct file *file)
{
- return do_maps_open(inode, file, &proc_tid_smaps_op);
+ struct seq_file *seq = file->private_data;
+ struct proc_maps_private *priv = seq->private;
+
+ if (priv->mm)
+ mmdrop(priv->mm);
+
+ kfree(priv);
+ return single_release(inode, file);
}
const struct file_operations proc_pid_smaps_operations = {
@@ -906,17 +906,10 @@ const struct file_operations proc_pid_smaps_operations = {
};
const struct file_operations proc_pid_smaps_rollup_operations = {
- .open = pid_smaps_rollup_open,
+ .open = smaps_rollup_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = proc_map_release,
-};
-
-const struct file_operations proc_tid_smaps_operations = {
- .open = tid_smaps_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = proc_map_release,
+ .release = smaps_rollup_release,
};
enum clear_refs_types {
@@ -1728,7 +1721,7 @@ static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
/*
* Display pages allocated per node and memory policy via /proc.
*/
-static int show_numa_map(struct seq_file *m, void *v, int is_pid)
+static int show_numa_map(struct seq_file *m, void *v)
{
struct numa_maps_private *numa_priv = m->private;
struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
@@ -1812,45 +1805,17 @@ out:
return 0;
}
-static int show_pid_numa_map(struct seq_file *m, void *v)
-{
- return show_numa_map(m, v, 1);
-}
-
-static int show_tid_numa_map(struct seq_file *m, void *v)
-{
- return show_numa_map(m, v, 0);
-}
-
static const struct seq_operations proc_pid_numa_maps_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
- .show = show_pid_numa_map,
+ .show = show_numa_map,
};
-static const struct seq_operations proc_tid_numa_maps_op = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = show_tid_numa_map,
-};
-
-static int numa_maps_open(struct inode *inode, struct file *file,
- const struct seq_operations *ops)
-{
- return proc_maps_open(inode, file, ops,
- sizeof(struct numa_maps_private));
-}
-
static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
- return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
-}
-
-static int tid_numa_maps_open(struct inode *inode, struct file *file)
-{
- return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
+ return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
+ sizeof(struct numa_maps_private));
}
const struct file_operations proc_pid_numa_maps_operations = {
@@ -1860,10 +1825,4 @@ const struct file_operations proc_pid_numa_maps_operations = {
.release = proc_map_release,
};
-const struct file_operations proc_tid_numa_maps_operations = {
- .open = tid_numa_maps_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = proc_map_release,
-};
#endif /* CONFIG_NUMA */
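
show_smaps_rollup() above walks every VMA once, folding each into a single accumulator via smap_gather_stats(), then emits one synthetic record spanning from the first VMA's start to the last one's end — the per-VMA rollup/first/last flag juggling the old show_smap() carried is gone. The shape of that loop, with made-up numbers:

#include <stdio.h>

struct vma { unsigned long start, end, rss_kb; struct vma *next; };
struct stats { unsigned long resident; };

/* Analogue of smap_gather_stats(): accumulate, never print. */
static void gather(const struct vma *v, struct stats *s)
{
	s->resident += v->rss_kb;
}

int main(void)
{
	struct vma b = { 0x7000, 0x8000, 8, NULL };
	struct vma a = { 0x1000, 0x2000, 4, &b };
	struct stats mss = { 0 };
	unsigned long last_end = 0;

	for (const struct vma *v = &a; v; v = v->next) {
		gather(v, &mss);
		last_end = v->end;
	}
	/* One rollup line instead of one record per VMA. */
	printf("%08lx-%08lx [rollup] Rss: %lu kB\n",
	       a.start, last_end, mss.resident);
	return 0;
}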
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 5b62f57bd9bc..0b63d68dedb2 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -142,8 +142,7 @@ static int is_stack(struct vm_area_struct *vma)
/*
* display a single VMA to a sequenced file
*/
-static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
- int is_pid)
+static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long ino = 0;
@@ -189,22 +188,11 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
/*
* display mapping lines for a particular process's /proc/pid/maps
*/
-static int show_map(struct seq_file *m, void *_p, int is_pid)
+static int show_map(struct seq_file *m, void *_p)
{
struct rb_node *p = _p;
- return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
- is_pid);
-}
-
-static int show_pid_map(struct seq_file *m, void *_p)
-{
- return show_map(m, _p, 1);
-}
-
-static int show_tid_map(struct seq_file *m, void *_p)
-{
- return show_map(m, _p, 0);
+ return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
}
static void *m_start(struct seq_file *m, loff_t *pos)
@@ -260,14 +248,7 @@ static const struct seq_operations proc_pid_maps_ops = {
.start = m_start,
.next = m_next,
.stop = m_stop,
- .show = show_pid_map
-};
-
-static const struct seq_operations proc_tid_maps_ops = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = show_tid_map
+ .show = show_map
};
static int maps_open(struct inode *inode, struct file *file,
@@ -308,11 +289,6 @@ static int pid_maps_open(struct inode *inode, struct file *file)
return maps_open(inode, file, &proc_pid_maps_ops);
}
-static int tid_maps_open(struct inode *inode, struct file *file)
-{
- return maps_open(inode, file, &proc_tid_maps_ops);
-}
-
const struct file_operations proc_pid_maps_operations = {
.open = pid_maps_open,
.read = seq_read,
@@ -320,10 +296,3 @@ const struct file_operations proc_pid_maps_operations = {
.release = map_release,
};
-const struct file_operations proc_tid_maps_operations = {
- .open = tid_maps_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = map_release,
-};
-
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
index 3f723cb478af..a4c2791ab70b 100644
--- a/fs/proc/uptime.c
+++ b/fs/proc/uptime.c
@@ -9,7 +9,7 @@
static int uptime_proc_show(struct seq_file *m, void *v)
{
- struct timespec uptime;
+ struct timespec64 uptime;
struct timespec64 idle;
u64 nsec;
u32 rem;
@@ -19,7 +19,7 @@ static int uptime_proc_show(struct seq_file *m, void *v)
for_each_possible_cpu(i)
nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
- get_monotonic_boottime(&uptime);
+ ktime_get_boottime_ts64(&uptime);
idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
idle.tv_nsec = rem;
seq_printf(m, "%lu.%02lu %lu.%02lu\n",
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index cfb6674331fd..cbde728f8ac6 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -225,6 +225,7 @@ out_unlock:
return ret;
}
+#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
u64 start, size_t size)
{
@@ -259,6 +260,7 @@ out_unlock:
mutex_unlock(&vmcoredd_mutex);
return ret;
}
+#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
/* Read from the ELF header and then the crash dump. On error, negative value is
@@ -379,7 +381,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
* On s390 the fault handler is used for memory regions that can't be mapped
* directly with remap_pfn_range().
*/
-static int mmap_vmcore_fault(struct vm_fault *vmf)
+static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 09c19ef91526..503086f7f7c1 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -50,12 +50,19 @@ config PSTORE_842_COMPRESS
help
This option enables 842 compression algorithm support.
+config PSTORE_ZSTD_COMPRESS
+ bool "zstd compression"
+ depends on PSTORE
+ select CRYPTO_ZSTD
+ help
+ This option enables zstd compression algorithm support.
+
config PSTORE_COMPRESS
def_bool y
depends on PSTORE
depends on PSTORE_DEFLATE_COMPRESS || PSTORE_LZO_COMPRESS || \
PSTORE_LZ4_COMPRESS || PSTORE_LZ4HC_COMPRESS || \
- PSTORE_842_COMPRESS
+ PSTORE_842_COMPRESS || PSTORE_ZSTD_COMPRESS
choice
prompt "Default pstore compression algorithm"
@@ -65,8 +72,8 @@ choice
This can be changed at boot with "pstore.compress=..." on
the kernel command line.
- Currently, pstore has support for 5 compression algorithms:
- deflate, lzo, lz4, lz4hc and 842.
+ Currently, pstore has support for 6 compression algorithms:
+ deflate, lzo, lz4, lz4hc, 842 and zstd.
The default compression algorithm is deflate.
@@ -85,6 +92,9 @@ choice
config PSTORE_842_COMPRESS_DEFAULT
bool "842" if PSTORE_842_COMPRESS
+ config PSTORE_ZSTD_COMPRESS_DEFAULT
+ bool "zstd" if PSTORE_ZSTD_COMPRESS
+
endchoice
config PSTORE_COMPRESS_DEFAULT
@@ -95,6 +105,7 @@ config PSTORE_COMPRESS_DEFAULT
default "lz4" if PSTORE_LZ4_COMPRESS_DEFAULT
default "lz4hc" if PSTORE_LZ4HC_COMPRESS_DEFAULT
default "842" if PSTORE_842_COMPRESS_DEFAULT
+ default "zstd" if PSTORE_ZSTD_COMPRESS_DEFAULT
config PSTORE_CONSOLE
bool "Log kernel console messages"
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index c238ab8ba31d..15e99d5a681d 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -34,6 +34,9 @@
#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
#include <linux/lz4.h>
#endif
+#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
+#include <linux/zstd.h>
+#endif
#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -192,6 +195,13 @@ static int zbufsize_842(size_t size)
}
#endif
+#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
+static int zbufsize_zstd(size_t size)
+{
+ return ZSTD_compressBound(size);
+}
+#endif
+
static const struct pstore_zbackend *zbackend __ro_after_init;
static const struct pstore_zbackend zbackends[] = {
@@ -225,6 +235,12 @@ static const struct pstore_zbackend zbackends[] = {
.name = "842",
},
#endif
+#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
+ {
+ .zbufsize = zbufsize_zstd,
+ .name = "zstd",
+ },
+#endif
{ }
};
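
A compression backend in this table only has to supply a worst-case output-size hint; pstore sizes its pre-allocated buffer from .zbufsize and, in this era, drives the actual transform through the crypto API using .name as the algorithm name. A minimal sketch of such an entry, mirroring the zstd hunk above (ZSTD_compressBound() is the real bound helper from <linux/zstd.h>; the rest restates the shape of the zbackends[] table):

	#include <linux/zstd.h>

	/* Worst-case compressed size for a @size-byte input; pstore
	 * allocates its output buffer from this before compressing. */
	static int zbufsize_zstd_sketch(size_t size)
	{
		return ZSTD_compressBound(size);
	}

	/* The name doubles as the crypto-API algorithm name pstore
	 * requests, which is why the Kconfig entry selects CRYPTO_ZSTD. */
	static const struct pstore_zbackend zbackend_zstd_sketch = {
		.zbufsize	= zbufsize_zstd_sketch,
		.name		= "zstd",
	};
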
diff --git a/fs/read_write.c b/fs/read_write.c
index 153f8f690490..39b4a21dd933 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1964,6 +1964,44 @@ out_error:
}
EXPORT_SYMBOL(vfs_dedupe_file_range_compare);
+int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
+ struct file *dst_file, loff_t dst_pos, u64 len)
+{
+ s64 ret;
+
+ ret = mnt_want_write_file(dst_file);
+ if (ret)
+ return ret;
+
+ ret = clone_verify_area(dst_file, dst_pos, len, true);
+ if (ret < 0)
+ goto out_drop_write;
+
+ ret = -EINVAL;
+ if (!(capable(CAP_SYS_ADMIN) || (dst_file->f_mode & FMODE_WRITE)))
+ goto out_drop_write;
+
+ ret = -EXDEV;
+ if (src_file->f_path.mnt != dst_file->f_path.mnt)
+ goto out_drop_write;
+
+ ret = -EISDIR;
+ if (S_ISDIR(file_inode(dst_file)->i_mode))
+ goto out_drop_write;
+
+ ret = -EINVAL;
+ if (!dst_file->f_op->dedupe_file_range)
+ goto out_drop_write;
+
+ ret = dst_file->f_op->dedupe_file_range(src_file, src_pos,
+ dst_file, dst_pos, len);
+out_drop_write:
+ mnt_drop_write_file(dst_file);
+
+ return ret;
+}
+EXPORT_SYMBOL(vfs_dedupe_file_range_one);
+
int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
{
struct file_dedupe_range_info *info;
@@ -1972,11 +2010,8 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
u64 len;
int i;
int ret;
- bool is_admin = capable(CAP_SYS_ADMIN);
u16 count = same->dest_count;
- struct file *dst_file;
- loff_t dst_off;
- ssize_t deduped;
+ int deduped;
if (!(file->f_mode & FMODE_READ))
return -EINVAL;
@@ -2003,6 +2038,9 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
if (off + len > i_size_read(src))
return -EINVAL;
+ /* Arbitrary 1G limit on a single dedupe request, can be raised. */
+ len = min_t(u64, len, 1 << 30);
+
/* pre-format output fields to sane values */
for (i = 0; i < count; i++) {
same->info[i].bytes_deduped = 0ULL;
@@ -2010,54 +2048,28 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
}
for (i = 0, info = same->info; i < count; i++, info++) {
- struct inode *dst;
struct fd dst_fd = fdget(info->dest_fd);
+ struct file *dst_file = dst_fd.file;
- dst_file = dst_fd.file;
if (!dst_file) {
info->status = -EBADF;
goto next_loop;
}
- dst = file_inode(dst_file);
-
- ret = mnt_want_write_file(dst_file);
- if (ret) {
- info->status = ret;
- goto next_fdput;
- }
-
- dst_off = info->dest_offset;
- ret = clone_verify_area(dst_file, dst_off, len, true);
- if (ret < 0) {
- info->status = ret;
- goto next_file;
- }
- ret = 0;
if (info->reserved) {
info->status = -EINVAL;
- } else if (!(is_admin || (dst_file->f_mode & FMODE_WRITE))) {
- info->status = -EINVAL;
- } else if (file->f_path.mnt != dst_file->f_path.mnt) {
- info->status = -EXDEV;
- } else if (S_ISDIR(dst->i_mode)) {
- info->status = -EISDIR;
- } else if (dst_file->f_op->dedupe_file_range == NULL) {
- info->status = -EINVAL;
- } else {
- deduped = dst_file->f_op->dedupe_file_range(file, off,
- len, dst_file,
- info->dest_offset);
- if (deduped == -EBADE)
- info->status = FILE_DEDUPE_RANGE_DIFFERS;
- else if (deduped < 0)
- info->status = deduped;
- else
- info->bytes_deduped += deduped;
+ goto next_fdput;
}
-next_file:
- mnt_drop_write_file(dst_file);
+ deduped = vfs_dedupe_file_range_one(file, off, dst_file,
+ info->dest_offset, len);
+ if (deduped == -EBADE)
+ info->status = FILE_DEDUPE_RANGE_DIFFERS;
+ else if (deduped < 0)
+ info->status = deduped;
+ else
+ info->bytes_deduped = len;
+
next_fdput:
fdput(dst_fd);
next_loop:
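
Factoring the per-destination work into vfs_dedupe_file_range_one() gives in-kernel callers a single-range entry point that bundles the write-access, same-mount, directory and ->dedupe_file_range checks. A hedged sketch of a caller (the function name is hypothetical; the src/dst file references are assumed to be held by the caller):

	/* Dedupe one @len-byte range. -EBADE means the contents differ;
	 * other negative values are errnos from the checks in the helper.
	 * On success the ioctl path above reports the full @len as deduped. */
	static int example_dedupe_one(struct file *src, loff_t src_off,
				      struct file *dst, loff_t dst_off,
				      u64 len)
	{
		return vfs_dedupe_file_range_one(src, src_off,
						 dst, dst_off, len);
	}
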
diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
index e3c558d1b78c..3a5a752d96c7 100644
--- a/fs/reiserfs/item_ops.c
+++ b/fs/reiserfs/item_ops.c
@@ -33,30 +33,22 @@ static int sd_is_left_mergeable(struct reiserfs_key *key, unsigned long bsize)
return 0;
}
-static char *print_time(time_t t)
-{
- static char timebuf[256];
-
- sprintf(timebuf, "%ld", t);
- return timebuf;
-}
-
static void sd_print_item(struct item_head *ih, char *item)
{
printk("\tmode | size | nlinks | first direct | mtime\n");
if (stat_data_v1(ih)) {
struct stat_data_v1 *sd = (struct stat_data_v1 *)item;
- printk("\t0%-6o | %6u | %2u | %d | %s\n", sd_v1_mode(sd),
+ printk("\t0%-6o | %6u | %2u | %d | %u\n", sd_v1_mode(sd),
sd_v1_size(sd), sd_v1_nlink(sd),
sd_v1_first_direct_byte(sd),
- print_time(sd_v1_mtime(sd)));
+ sd_v1_mtime(sd));
} else {
struct stat_data *sd = (struct stat_data *)item;
- printk("\t0%-6o | %6llu | %2u | %d | %s\n", sd_v2_mode(sd),
+ printk("\t0%-6o | %6llu | %2u | %d | %u\n", sd_v2_mode(sd),
(unsigned long long)sd_v2_size(sd), sd_v2_nlink(sd),
- sd_v2_rdev(sd), print_time(sd_v2_mtime(sd)));
+ sd_v2_rdev(sd), sd_v2_mtime(sd));
}
}
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 52eb5d293a34..8a76f9d14bc6 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2381,7 +2381,7 @@ static int journal_read(struct super_block *sb)
struct reiserfs_journal_desc *desc;
unsigned int oldest_trans_id = 0;
unsigned int oldest_invalid_trans_id = 0;
- time_t start;
+ time64_t start;
unsigned long oldest_start = 0;
unsigned long cur_dblock = 0;
unsigned long newest_mount_id = 9;
@@ -2395,7 +2395,7 @@ static int journal_read(struct super_block *sb)
cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
reiserfs_info(sb, "checking transaction log (%pg)\n",
journal->j_dev_bd);
- start = get_seconds();
+ start = ktime_get_seconds();
/*
* step 1, read in the journal header block. Check the transaction
@@ -2556,7 +2556,7 @@ start_log_replay:
if (replay_count > 0) {
reiserfs_info(sb,
"replayed %d transactions in %lu seconds\n",
- replay_count, get_seconds() - start);
+ replay_count, ktime_get_seconds() - start);
}
/* needed to satisfy the locking in _update_journal_header_block */
reiserfs_write_lock(sb);
@@ -2914,7 +2914,7 @@ int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
int new_alloc)
{
struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
- time_t now = get_seconds();
+ time64_t now = ktime_get_seconds();
/* cannot restart while nested */
BUG_ON(!th->t_trans_id);
if (th->t_refcount > 1)
@@ -3023,7 +3023,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
struct super_block *sb, unsigned long nblocks,
int join)
{
- time_t now = get_seconds();
+ time64_t now = ktime_get_seconds();
unsigned int old_trans_id;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
struct reiserfs_transaction_handle myth;
@@ -3056,7 +3056,7 @@ relock:
PROC_INFO_INC(sb, journal.journal_relock_writers);
goto relock;
}
- now = get_seconds();
+ now = ktime_get_seconds();
/*
* if there is no room in the journal OR
@@ -3119,7 +3119,7 @@ relock:
}
/* we are the first writer, set trans_id */
if (journal->j_trans_start_time == 0) {
- journal->j_trans_start_time = get_seconds();
+ journal->j_trans_start_time = ktime_get_seconds();
}
atomic_inc(&journal->j_wcount);
journal->j_len_alloc += nblocks;
@@ -3559,11 +3559,11 @@ static void flush_async_commits(struct work_struct *work)
*/
void reiserfs_flush_old_commits(struct super_block *sb)
{
- time_t now;
+ time64_t now;
struct reiserfs_transaction_handle th;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
- now = get_seconds();
+ now = ktime_get_seconds();
/*
* safety check so we don't flush while we are replaying the log during
* mount
@@ -3613,7 +3613,7 @@ void reiserfs_flush_old_commits(struct super_block *sb)
static int check_journal_end(struct reiserfs_transaction_handle *th, int flags)
{
- time_t now;
+ time64_t now;
int flush = flags & FLUSH_ALL;
int commit_now = flags & COMMIT_NOW;
int wait_on_commit = flags & WAIT;
@@ -3694,7 +3694,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, int flags)
}
/* deal with old transactions where we are the last writers */
- now = get_seconds();
+ now = ktime_get_seconds();
if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
commit_now = 1;
journal->j_next_async_flush = 1;
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index e39b3910d24d..f2cf3441fdfc 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -297,6 +297,13 @@ static int show_oidmap(struct seq_file *m, void *unused)
return 0;
}
+static time64_t ktime_mono_to_real_seconds(time64_t mono)
+{
+ ktime_t kt = ktime_set(mono, NSEC_PER_SEC/2);
+
+ return ktime_divns(ktime_mono_to_real(kt), NSEC_PER_SEC);
+}
+
static int show_journal(struct seq_file *m, void *unused)
{
struct super_block *sb = m->private;
@@ -325,7 +332,7 @@ static int show_journal(struct seq_file *m, void *unused)
"j_bcount: \t%lu\n"
"j_first_unflushed_offset: \t%lu\n"
"j_last_flush_trans_id: \t%u\n"
- "j_trans_start_time: \t%li\n"
+ "j_trans_start_time: \t%lli\n"
"j_list_bitmap_index: \t%i\n"
"j_must_wait: \t%i\n"
"j_next_full_flush: \t%i\n"
@@ -366,7 +373,7 @@ static int show_journal(struct seq_file *m, void *unused)
JF(j_bcount),
JF(j_first_unflushed_offset),
JF(j_last_flush_trans_id),
- JF(j_trans_start_time),
+ ktime_mono_to_real_seconds(JF(j_trans_start_time)),
JF(j_list_bitmap_index),
JF(j_must_wait),
JF(j_next_full_flush),
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index ae4811fecc1f..e5ca9ed79e54 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -271,7 +271,7 @@ struct reiserfs_journal_list {
struct mutex j_commit_mutex;
unsigned int j_trans_id;
- time_t j_timestamp;
+ time64_t j_timestamp; /* write-only but useful for crash dump analysis */
struct reiserfs_list_bitmap *j_list_bitmap;
struct buffer_head *j_commit_bh; /* commit buffer head */
struct reiserfs_journal_cnode *j_realblock;
@@ -331,7 +331,7 @@ struct reiserfs_journal {
struct buffer_head *j_header_bh;
- time_t j_trans_start_time; /* time this transaction started */
+ time64_t j_trans_start_time; /* time this transaction started */
struct mutex j_mutex;
struct mutex j_flush_mutex;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index ff94fad477e4..48cdfc81fe10 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -792,8 +792,10 @@ static int listxattr_filler(struct dir_context *ctx, const char *name,
return 0;
size = namelen + 1;
if (b->buf) {
- if (size > b->size)
+ if (b->pos + size > b->size) {
+ b->pos = -ERANGE;
return -ERANGE;
+ }
memcpy(b->buf + b->pos, name, namelen);
b->buf[b->pos + namelen] = 0;
}
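
The fix matters because listxattr_filler() runs once per name with b->pos carrying the running total: the old test compared a single entry against the whole buffer, so a buffer that filled up part-way could be overrun. (Storing -ERANGE in b->pos as well lets the caller tell the overflow apart from a successful walk.) The corrected cumulative-bound pattern, as a generic sketch:

	#include <linux/errno.h>
	#include <linux/string.h>

	struct listxattr_buf_sketch {
		char	*buf;	/* NULL: size-query mode, count only */
		size_t	 size;
		ssize_t	 pos;	/* running total across calls */
	};

	static int filler_sketch(struct listxattr_buf_sketch *b,
				 const char *name, size_t namelen)
	{
		size_t size = namelen + 1;	/* name + NUL */

		if (b->buf) {
			if (b->pos + size > b->size)
				return -ERANGE;	/* cumulative check */
			memcpy(b->buf + b->pos, name, namelen);
			b->buf[b->pos + namelen] = 0;
		}
		b->pos += size;
		return 0;
	}
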
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 4cc090b50cc5..1dea7a8a5255 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -90,23 +90,22 @@ EXPORT_SYMBOL(seq_open);
static int traverse(struct seq_file *m, loff_t offset)
{
- loff_t pos = 0, index;
+ loff_t pos = 0;
int error = 0;
void *p;
m->version = 0;
- index = 0;
+ m->index = 0;
m->count = m->from = 0;
- if (!offset) {
- m->index = index;
+ if (!offset)
return 0;
- }
+
if (!m->buf) {
m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
if (!m->buf)
return -ENOMEM;
}
- p = m->op->start(m, &index);
+ p = m->op->start(m, &m->index);
while (p) {
error = PTR_ERR(p);
if (IS_ERR(p))
@@ -123,20 +122,15 @@ static int traverse(struct seq_file *m, loff_t offset)
if (pos + m->count > offset) {
m->from = offset - pos;
m->count -= m->from;
- m->index = index;
break;
}
pos += m->count;
m->count = 0;
- if (pos == offset) {
- index++;
- m->index = index;
+ p = m->op->next(m, p, &m->index);
+ if (pos == offset)
break;
- }
- p = m->op->next(m, p, &index);
}
m->op->stop(m, p);
- m->index = index;
return error;
Eoverflow:
@@ -160,7 +154,6 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
struct seq_file *m = file->private_data;
size_t copied = 0;
- loff_t pos;
size_t n;
void *p;
int err = 0;
@@ -223,16 +216,12 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
size -= n;
buf += n;
copied += n;
- if (!m->count) {
- m->from = 0;
- m->index++;
- }
if (!size)
goto Done;
}
/* we need at least one record in buffer */
- pos = m->index;
- p = m->op->start(m, &pos);
+ m->from = 0;
+ p = m->op->start(m, &m->index);
while (1) {
err = PTR_ERR(p);
if (!p || IS_ERR(p))
@@ -243,8 +232,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
if (unlikely(err))
m->count = 0;
if (unlikely(!m->count)) {
- p = m->op->next(m, p, &pos);
- m->index = pos;
+ p = m->op->next(m, p, &m->index);
continue;
}
if (m->count < m->size)
@@ -256,29 +244,33 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
if (!m->buf)
goto Enomem;
m->version = 0;
- pos = m->index;
- p = m->op->start(m, &pos);
+ p = m->op->start(m, &m->index);
}
m->op->stop(m, p);
m->count = 0;
goto Done;
Fill:
/* they want more? let's try to get some more */
- while (m->count < size) {
+ while (1) {
size_t offs = m->count;
- loff_t next = pos;
- p = m->op->next(m, p, &next);
+ loff_t pos = m->index;
+
+ p = m->op->next(m, p, &m->index);
+ if (pos == m->index)
+ /* Buggy ->next function */
+ m->index++;
if (!p || IS_ERR(p)) {
err = PTR_ERR(p);
break;
}
+ if (m->count >= size)
+ break;
err = m->op->show(m, p);
if (seq_has_overflowed(m) || err) {
m->count = offs;
if (likely(err <= 0))
break;
}
- pos = next;
}
m->op->stop(m, p);
n = min(m->count, size);
@@ -287,11 +279,7 @@ Fill:
goto Efault;
copied += n;
m->count -= n;
- if (m->count)
- m->from = n;
- else
- pos++;
- m->index = pos;
+ m->from = n;
Done:
if (!copied)
copied = err;
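
With the position kept in m->index throughout, seq_read() now leans harder on the iterator contract: ->next() must advance *pos itself, and the "Buggy ->next function" guard above only papers over implementations that forget. A minimal conforming iterator, sketched over a fixed array (all names here are hypothetical):

	#include <linux/kernel.h>
	#include <linux/seq_file.h>

	static const char *const items[] = { "alpha", "beta", "gamma" };

	static void *sketch_start(struct seq_file *m, loff_t *pos)
	{
		return *pos < ARRAY_SIZE(items) ? (void *)&items[*pos] : NULL;
	}

	static void *sketch_next(struct seq_file *m, void *v, loff_t *pos)
	{
		++*pos;		/* the step the guard above checks for */
		return sketch_start(m, pos);
	}

	static void sketch_stop(struct seq_file *m, void *v) { }

	static int sketch_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%s\n", *(const char *const *)v);
		return 0;
	}

	static const struct seq_operations sketch_ops = {
		.start	= sketch_start,
		.next	= sketch_next,
		.stop	= sketch_stop,
		.show	= sketch_show,
	};
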
diff --git a/fs/statfs.c b/fs/statfs.c
index 5b2a24f0f263..f0216629621d 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -335,7 +335,7 @@ static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstat
return 0;
}
-COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, sz, struct compat_statfs64 __user *, buf)
+int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz, struct compat_statfs64 __user * buf)
{
struct kstatfs tmp;
int error;
@@ -349,7 +349,12 @@ COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, s
return error;
}
-COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct compat_statfs64 __user *, buf)
+COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, sz, struct compat_statfs64 __user *, buf)
+{
+ return kcompat_sys_statfs64(pathname, sz, buf);
+}
+
+int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user * buf)
{
struct kstatfs tmp;
int error;
@@ -363,6 +368,11 @@ COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct co
return error;
}
+COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct compat_statfs64 __user *, buf)
+{
+ return kcompat_sys_fstatfs64(fd, sz, buf);
+}
+
/*
* This is a copy of sys_ustat, just dealing with a structure layout.
* Given how simple this syscall is, that approach is more maintainable
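
Splitting the bodies out of the COMPAT_SYSCALL_DEFINE3() wrappers gives the rest of the kernel a callable symbol; on architectures where syscall wrappers take a pt_regs-style calling convention, the wrapper itself cannot be invoked from C. A sketch of an in-kernel caller (the function is hypothetical; the size argument must match the user structure, since the helper validates it):

	static int sketch_compat_statfs(const char __user *path,
					struct compat_statfs64 __user *buf)
	{
		return kcompat_sys_statfs64(path, sizeof(*buf), buf);
	}
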
diff --git a/fs/super.c b/fs/super.c
index 5a1b22af82db..f3a8c008e164 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -144,6 +144,9 @@ static unsigned long super_cache_count(struct shrinker *shrink,
total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
+ if (!total_objects)
+ return SHRINK_EMPTY;
+
total_objects = vfs_pressure_ratio(total_objects);
return total_objects;
}
@@ -244,10 +247,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
INIT_LIST_HEAD(&s->s_inodes_wb);
spin_lock_init(&s->s_inode_wblist_lock);
- if (list_lru_init_memcg(&s->s_dentry_lru))
- goto fail;
- if (list_lru_init_memcg(&s->s_inode_lru))
- goto fail;
s->s_count = 1;
atomic_set(&s->s_active, 1);
mutex_init(&s->s_vfs_rename_mutex);
@@ -265,6 +264,10 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
if (prealloc_shrinker(&s->s_shrink))
goto fail;
+ if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
+ goto fail;
+ if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
+ goto fail;
return s;
fail:
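
Two coupled changes here: the dentry/inode LRUs are now set up after prealloc_shrinker() so list_lru_init_memcg() can attach them to the superblock's shrinker, and an empty cache reports SHRINK_EMPTY rather than 0, which lets the memcg-aware shrinker infrastructure skip superblocks with nothing cached at all. The count-side pattern, sketched with hypothetical LRUs:

	static unsigned long sketch_cache_count(struct shrinker *shrink,
						struct shrink_control *sc)
	{
		unsigned long nr = 0;

		nr += list_lru_shrink_count(&sketch_dentry_lru, sc);
		nr += list_lru_shrink_count(&sketch_inode_lru, sc);

		/* "Nothing cached at all" is reported distinctly so the
		 * shrinker can be skipped entirely for this memcg. */
		if (!nr)
			return SHRINK_EMPTY;

		return nr;
	}
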
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 58eba92a0e41..feeae8081c22 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -40,6 +40,8 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
{
struct kernfs_node *parent, *kn;
+ kuid_t uid;
+ kgid_t gid;
BUG_ON(!kobj);
@@ -51,8 +53,11 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
if (!parent)
return -ENOENT;
+ kobject_get_ownership(kobj, &uid, &gid);
+
kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
+ S_IRWXU | S_IRUGO | S_IXUGO, uid, gid,
+ kobj, ns);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
sysfs_warn_dup(parent, kobject_name(kobj));
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 5c13f29bfcdb..0a7252aecfa5 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -245,7 +245,7 @@ static const struct kernfs_ops sysfs_bin_kfops_mmap = {
int sysfs_add_file_mode_ns(struct kernfs_node *parent,
const struct attribute *attr, bool is_bin,
- umode_t mode, const void *ns)
+ umode_t mode, kuid_t uid, kgid_t gid, const void *ns)
{
struct lock_class_key *key = NULL;
const struct kernfs_ops *ops;
@@ -302,8 +302,9 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent,
if (!attr->ignore_lockdep)
key = attr->key ?: (struct lock_class_key *)&attr->skey;
#endif
- kn = __kernfs_create_file(parent, attr->name, mode & 0777, size, ops,
- (void *)attr, ns, key);
+
+ kn = __kernfs_create_file(parent, attr->name, mode & 0777, uid, gid,
+ size, ops, (void *)attr, ns, key);
if (IS_ERR(kn)) {
if (PTR_ERR(kn) == -EEXIST)
sysfs_warn_dup(parent, attr->name);
@@ -312,12 +313,6 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent,
return 0;
}
-int sysfs_add_file(struct kernfs_node *parent, const struct attribute *attr,
- bool is_bin)
-{
- return sysfs_add_file_mode_ns(parent, attr, is_bin, attr->mode, NULL);
-}
-
/**
* sysfs_create_file_ns - create an attribute file for an object with custom ns
* @kobj: object we're creating for
@@ -327,9 +322,14 @@ int sysfs_add_file(struct kernfs_node *parent, const struct attribute *attr,
int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns)
{
+ kuid_t uid;
+ kgid_t gid;
+
BUG_ON(!kobj || !kobj->sd || !attr);
- return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, ns);
+ kobject_get_ownership(kobj, &uid, &gid);
+ return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode,
+ uid, gid, ns);
}
EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
@@ -358,6 +358,8 @@ int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{
struct kernfs_node *parent;
+ kuid_t uid;
+ kgid_t gid;
int error;
if (group) {
@@ -370,7 +372,9 @@ int sysfs_add_file_to_group(struct kobject *kobj,
if (!parent)
return -ENOENT;
- error = sysfs_add_file(parent, attr, false);
+ kobject_get_ownership(kobj, &uid, &gid);
+ error = sysfs_add_file_mode_ns(parent, attr, false,
+ attr->mode, uid, gid, NULL);
kernfs_put(parent);
return error;
@@ -406,6 +410,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
EXPORT_SYMBOL_GPL(sysfs_chmod_file);
/**
+ * sysfs_break_active_protection - break "active" protection
+ * @kobj: The kernel object @attr is associated with.
+ * @attr: The attribute to break the "active" protection for.
+ *
+ * With sysfs, just like kernfs, deletion of an attribute is postponed until
+ * all active .show() and .store() callbacks have finished unless this function
+ * is called. Hence this function is useful in methods that implement self
+ * deletion.
+ */
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ struct kernfs_node *kn;
+
+ kobject_get(kobj);
+ kn = kernfs_find_and_get(kobj->sd, attr->name);
+ if (kn)
+ kernfs_break_active_protection(kn);
+ return kn;
+}
+EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
+
+/**
+ * sysfs_unbreak_active_protection - restore "active" protection
+ * @kn: Pointer returned by sysfs_break_active_protection().
+ *
+ * Undo the effects of sysfs_break_active_protection(). Since this function
+ * calls kernfs_put() on the kernfs node that corresponds to the 'attr'
+ * argument passed to sysfs_break_active_protection(), and since that
+ * attribute may have been removed between the sysfs_break_active_protection()
+ * and sysfs_unbreak_active_protection() calls, it is not safe to access @kn
+ * after this function has returned.
+ */
+void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+ struct kobject *kobj = kn->parent->priv;
+
+ kernfs_unbreak_active_protection(kn);
+ kernfs_put(kn);
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection);
+
+/**
* sysfs_remove_file_ns - remove an object attribute with a custom ns tag
* @kobj: object we're acting for
* @attr: attribute descriptor
@@ -486,9 +534,14 @@ EXPORT_SYMBOL_GPL(sysfs_remove_file_from_group);
int sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr)
{
+ kuid_t uid;
+ kgid_t gid;
+
BUG_ON(!kobj || !kobj->sd || !attr);
- return sysfs_add_file(kobj->sd, &attr->attr, true);
+ kobject_get_ownership(kobj, &uid, &gid);
+ return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true,
+ attr->attr.mode, uid, gid, NULL);
}
EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
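
The intended user of the new pair is a ->store() callback that deletes its own attribute or parent object: removal normally waits for all active callbacks to finish, so deleting from inside one would deadlock without first breaking "active" protection. A hedged sketch (device and attribute are hypothetical):

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t delete_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
	{
		struct kernfs_node *kn;

		/* Pins the kobject and lifts "active" protection so the
		 * teardown below need not wait for this very callback. */
		kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);

		/* ... tear down the object, removing this attribute ... */

		if (kn)
			sysfs_unbreak_active_protection(kn);
		return count;
	}
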
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 4802ec0e1e3a..1eb2d6307663 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -31,6 +31,7 @@ static void remove_files(struct kernfs_node *parent,
}
static int create_files(struct kernfs_node *parent, struct kobject *kobj,
+ kuid_t uid, kgid_t gid,
const struct attribute_group *grp, int update)
{
struct attribute *const *attr;
@@ -60,7 +61,7 @@ static int create_files(struct kernfs_node *parent, struct kobject *kobj,
mode &= SYSFS_PREALLOC | 0664;
error = sysfs_add_file_mode_ns(parent, *attr, false,
- mode, NULL);
+ mode, uid, gid, NULL);
if (unlikely(error))
break;
}
@@ -90,7 +91,8 @@ static int create_files(struct kernfs_node *parent, struct kobject *kobj,
mode &= SYSFS_PREALLOC | 0664;
error = sysfs_add_file_mode_ns(parent,
&(*bin_attr)->attr, true,
- mode, NULL);
+ mode,
+ uid, gid, NULL);
if (error)
break;
}
@@ -106,6 +108,8 @@ static int internal_create_group(struct kobject *kobj, int update,
const struct attribute_group *grp)
{
struct kernfs_node *kn;
+ kuid_t uid;
+ kgid_t gid;
int error;
BUG_ON(!kobj || (!update && !kobj->sd));
@@ -118,23 +122,38 @@ static int internal_create_group(struct kobject *kobj, int update,
kobj->name, grp->name ?: "");
return -EINVAL;
}
+ kobject_get_ownership(kobj, &uid, &gid);
if (grp->name) {
- kn = kernfs_create_dir(kobj->sd, grp->name,
- S_IRWXU | S_IRUGO | S_IXUGO, kobj);
- if (IS_ERR(kn)) {
- if (PTR_ERR(kn) == -EEXIST)
- sysfs_warn_dup(kobj->sd, grp->name);
- return PTR_ERR(kn);
+ if (update) {
+ kn = kernfs_find_and_get(kobj->sd, grp->name);
+ if (!kn) {
+ pr_warn("Can't update unknown attr grp name: %s/%s\n",
+ kobj->name, grp->name);
+ return -EINVAL;
+ }
+ } else {
+ kn = kernfs_create_dir_ns(kobj->sd, grp->name,
+ S_IRWXU | S_IRUGO | S_IXUGO,
+ uid, gid, kobj, NULL);
+ if (IS_ERR(kn)) {
+ if (PTR_ERR(kn) == -EEXIST)
+ sysfs_warn_dup(kobj->sd, grp->name);
+ return PTR_ERR(kn);
+ }
}
} else
kn = kobj->sd;
kernfs_get(kn);
- error = create_files(kn, kobj, grp, update);
+ error = create_files(kn, kobj, uid, gid, grp, update);
if (error) {
if (grp->name)
kernfs_remove(kn);
}
kernfs_put(kn);
+
+ if (grp->name && update)
+ kernfs_put(kn);
+
return error;
}
@@ -199,7 +218,8 @@ EXPORT_SYMBOL_GPL(sysfs_create_groups);
* of the attribute files being created already exist. Furthermore,
* if the visibility of the files has changed through the is_visible()
* callback, it will update the permissions and add or remove the
- * relevant files.
+ * relevant files. Changing a group's name (subdirectory name under
+ * kobj's directory in sysfs) is not allowed.
*
* The primary use for this function is to call it after making a change
* that affects group visibility.
@@ -281,6 +301,8 @@ int sysfs_merge_group(struct kobject *kobj,
const struct attribute_group *grp)
{
struct kernfs_node *parent;
+ kuid_t uid;
+ kgid_t gid;
int error = 0;
struct attribute *const *attr;
int i;
@@ -289,8 +311,11 @@ int sysfs_merge_group(struct kobject *kobj,
if (!parent)
return -ENOENT;
+ kobject_get_ownership(kobj, &uid, &gid);
+
for ((i = 0, attr = grp->attrs); *attr && !error; (++i, ++attr))
- error = sysfs_add_file(parent, *attr, false);
+ error = sysfs_add_file_mode_ns(parent, *attr, false,
+ (*attr)->mode, uid, gid, NULL);
if (error) {
while (--i >= 0)
kernfs_remove_by_name(parent, (*--attr)->name);
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index d098e015fcc9..0050cc0c0236 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -27,11 +27,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name);
/*
* file.c
*/
-int sysfs_add_file(struct kernfs_node *parent,
- const struct attribute *attr, bool is_bin);
int sysfs_add_file_mode_ns(struct kernfs_node *parent,
const struct attribute *attr, bool is_bin,
- umode_t amode, const void *ns);
+ umode_t amode, kuid_t uid, kgid_t gid,
+ const void *ns);
/*
* symlink.c
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index bec9f79adb25..499a20a5a010 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -35,7 +35,7 @@
static int sysv_sync_fs(struct super_block *sb, int wait)
{
struct sysv_sb_info *sbi = SYSV_SB(sb);
- unsigned long time = get_seconds(), old_time;
+ u32 time = (u32)ktime_get_real_seconds(), old_time;
mutex_lock(&sbi->s_lock);
@@ -46,8 +46,8 @@ static int sysv_sync_fs(struct super_block *sb, int wait)
*/
old_time = fs32_to_cpu(sbi, *sbi->s_sb_time);
if (sbi->s_type == FSTYPE_SYSV4) {
- if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time))
- *sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38 - time);
+ if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38u - old_time))
+ *sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38u - time);
*sbi->s_sb_time = cpu_to_fs32(sbi, time);
mark_buffer_dirty(sbi->s_bh2);
}
diff --git a/fs/timerfd.c b/fs/timerfd.c
index cdad49da3ff7..d69ad801eb80 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -66,7 +66,7 @@ static void timerfd_triggered(struct timerfd_ctx *ctx)
spin_lock_irqsave(&ctx->wqh.lock, flags);
ctx->expired = 1;
ctx->ticks++;
- wake_up_locked(&ctx->wqh);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
@@ -107,7 +107,7 @@ void timerfd_clock_was_set(void)
if (ctx->moffs != moffs) {
ctx->moffs = KTIME_MAX;
ctx->ticks++;
- wake_up_locked(&ctx->wqh);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
}
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
@@ -345,7 +345,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
spin_lock_irq(&ctx->wqh.lock);
if (!timerfd_canceled(ctx)) {
ctx->ticks = ticks;
- wake_up_locked(&ctx->wqh);
+ wake_up_locked_poll(&ctx->wqh, EPOLLIN);
} else
ret = -ECANCELED;
spin_unlock_irq(&ctx->wqh.lock);
@@ -533,8 +533,8 @@ static int do_timerfd_gettime(int ufd, struct itimerspec64 *t)
}
SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
- const struct itimerspec __user *, utmr,
- struct itimerspec __user *, otmr)
+ const struct __kernel_itimerspec __user *, utmr,
+ struct __kernel_itimerspec __user *, otmr)
{
struct itimerspec64 new, old;
int ret;
@@ -550,7 +550,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
return ret;
}
-SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
+SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *, otmr)
{
struct itimerspec64 kotmr;
int ret = do_timerfd_gettime(ufd, &kotmr);
@@ -559,7 +559,7 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
return put_itimerspec64(&kotmr, otmr) ? -EFAULT : 0;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
const struct compat_itimerspec __user *, utmr,
struct compat_itimerspec __user *, otmr)
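
All three wakeups now pass EPOLLIN as the wakeup key. Keyed waiters, epoll's in particular, filter on that mask in their wake function, whereas the old wake_up_locked() delivered key == NULL and gave them nothing to filter by. A sketch, assuming wake_up_locked_poll() expands to a keyed wakeup as in this era's <linux/wait.h> (the caller holds wqh->lock, as the timerfd paths above do):

	#include <linux/poll.h>
	#include <linux/wait.h>

	static void sketch_signal_readable(wait_queue_head_t *wqh)
	{
		/* Keyed wakeup: a waiter armed only for, say, EPOLLOUT can
		 * now ignore a timer tick instead of being woken blindly. */
		wake_up_locked_poll(wqh, EPOLLIN);
	}
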
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index bea8ad876bf9..7098c49f3693 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -53,7 +53,7 @@ static const struct file_operations tracefs_file_operations = {
static struct tracefs_dir_ops {
int (*mkdir)(const char *name);
int (*rmdir)(const char *name);
-} tracefs_ops;
+} tracefs_ops __ro_after_init;
static char *get_dname(struct dentry *dentry)
{
@@ -478,7 +478,8 @@ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
*
* Returns the dentry of the instances directory.
*/
-struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent,
+__init struct dentry *tracefs_create_instance_dir(const char *name,
+ struct dentry *parent,
int (*mkdir)(const char *name),
int (*rmdir)(const char *name))
{
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 83a961bf7280..bbc78549be4c 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -51,9 +51,20 @@ config UBIFS_ATIME_SUPPORT
If unsure, say 'N'
+config UBIFS_FS_XATTR
+ bool "UBIFS XATTR support"
+ depends on UBIFS_FS
+ default y
+ help
+ Saying Y here includes support for extended attributes (xattrs).
+ Xattrs are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page).
+
+ If unsure, say Y.
+
config UBIFS_FS_ENCRYPTION
bool "UBIFS Encryption"
- depends on UBIFS_FS && BLOCK
+ depends on UBIFS_FS && UBIFS_FS_XATTR && BLOCK
select FS_ENCRYPTION
default n
help
@@ -64,7 +75,7 @@ config UBIFS_FS_ENCRYPTION
config UBIFS_FS_SECURITY
bool "UBIFS Security Labels"
- depends on UBIFS_FS
+ depends on UBIFS_FS && UBIFS_FS_XATTR
default y
help
Security labels provide an access control facility to support Linux
diff --git a/fs/ubifs/Makefile b/fs/ubifs/Makefile
index 9758f709c736..6197d7e539e4 100644
--- a/fs/ubifs/Makefile
+++ b/fs/ubifs/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_UBIFS_FS) += ubifs.o
ubifs-y += shrinker.o journal.o file.o dir.o super.o sb.o io.o
ubifs-y += tnc.o master.o scan.o replay.o log.o commit.o gc.o orphan.o
ubifs-y += budget.o find.o tnc_commit.o compress.o lpt.o lprops.o
-ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o xattr.o debug.o
+ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o debug.o
ubifs-y += misc.o
ubifs-$(CONFIG_UBIFS_FS_ENCRYPTION) += crypto.o
+ubifs-$(CONFIG_UBIFS_FS_XATTR) += xattr.o
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 11a11b32a2a9..7ef22baf9d15 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -439,16 +439,16 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
{
int err, idx_growth, data_growth, dd_growth, retried = 0;
- ubifs_assert(req->new_page <= 1);
- ubifs_assert(req->dirtied_page <= 1);
- ubifs_assert(req->new_dent <= 1);
- ubifs_assert(req->mod_dent <= 1);
- ubifs_assert(req->new_ino <= 1);
- ubifs_assert(req->new_ino_d <= UBIFS_MAX_INO_DATA);
- ubifs_assert(req->dirtied_ino <= 4);
- ubifs_assert(req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4);
- ubifs_assert(!(req->new_ino_d & 7));
- ubifs_assert(!(req->dirtied_ino_d & 7));
+ ubifs_assert(c, req->new_page <= 1);
+ ubifs_assert(c, req->dirtied_page <= 1);
+ ubifs_assert(c, req->new_dent <= 1);
+ ubifs_assert(c, req->mod_dent <= 1);
+ ubifs_assert(c, req->new_ino <= 1);
+ ubifs_assert(c, req->new_ino_d <= UBIFS_MAX_INO_DATA);
+ ubifs_assert(c, req->dirtied_ino <= 4);
+ ubifs_assert(c, req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4);
+ ubifs_assert(c, !(req->new_ino_d & 7));
+ ubifs_assert(c, !(req->dirtied_ino_d & 7));
data_growth = calc_data_growth(c, req);
dd_growth = calc_dd_growth(c, req);
@@ -458,9 +458,9 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
again:
spin_lock(&c->space_lock);
- ubifs_assert(c->bi.idx_growth >= 0);
- ubifs_assert(c->bi.data_growth >= 0);
- ubifs_assert(c->bi.dd_growth >= 0);
+ ubifs_assert(c, c->bi.idx_growth >= 0);
+ ubifs_assert(c, c->bi.data_growth >= 0);
+ ubifs_assert(c, c->bi.dd_growth >= 0);
if (unlikely(c->bi.nospace) && (c->bi.nospace_rp || !can_use_rp(c))) {
dbg_budg("no space");
@@ -526,20 +526,20 @@ again:
*/
void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req)
{
- ubifs_assert(req->new_page <= 1);
- ubifs_assert(req->dirtied_page <= 1);
- ubifs_assert(req->new_dent <= 1);
- ubifs_assert(req->mod_dent <= 1);
- ubifs_assert(req->new_ino <= 1);
- ubifs_assert(req->new_ino_d <= UBIFS_MAX_INO_DATA);
- ubifs_assert(req->dirtied_ino <= 4);
- ubifs_assert(req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4);
- ubifs_assert(!(req->new_ino_d & 7));
- ubifs_assert(!(req->dirtied_ino_d & 7));
+ ubifs_assert(c, req->new_page <= 1);
+ ubifs_assert(c, req->dirtied_page <= 1);
+ ubifs_assert(c, req->new_dent <= 1);
+ ubifs_assert(c, req->mod_dent <= 1);
+ ubifs_assert(c, req->new_ino <= 1);
+ ubifs_assert(c, req->new_ino_d <= UBIFS_MAX_INO_DATA);
+ ubifs_assert(c, req->dirtied_ino <= 4);
+ ubifs_assert(c, req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4);
+ ubifs_assert(c, !(req->new_ino_d & 7));
+ ubifs_assert(c, !(req->dirtied_ino_d & 7));
if (!req->recalculate) {
- ubifs_assert(req->idx_growth >= 0);
- ubifs_assert(req->data_growth >= 0);
- ubifs_assert(req->dd_growth >= 0);
+ ubifs_assert(c, req->idx_growth >= 0);
+ ubifs_assert(c, req->data_growth >= 0);
+ ubifs_assert(c, req->dd_growth >= 0);
}
if (req->recalculate) {
@@ -561,13 +561,13 @@ void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req)
c->bi.dd_growth -= req->dd_growth;
c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
- ubifs_assert(c->bi.idx_growth >= 0);
- ubifs_assert(c->bi.data_growth >= 0);
- ubifs_assert(c->bi.dd_growth >= 0);
- ubifs_assert(c->bi.min_idx_lebs < c->main_lebs);
- ubifs_assert(!(c->bi.idx_growth & 7));
- ubifs_assert(!(c->bi.data_growth & 7));
- ubifs_assert(!(c->bi.dd_growth & 7));
+ ubifs_assert(c, c->bi.idx_growth >= 0);
+ ubifs_assert(c, c->bi.data_growth >= 0);
+ ubifs_assert(c, c->bi.dd_growth >= 0);
+ ubifs_assert(c, c->bi.min_idx_lebs < c->main_lebs);
+ ubifs_assert(c, !(c->bi.idx_growth & 7));
+ ubifs_assert(c, !(c->bi.data_growth & 7));
+ ubifs_assert(c, !(c->bi.dd_growth & 7));
spin_unlock(&c->space_lock);
}
@@ -680,7 +680,7 @@ long long ubifs_get_free_space_nolock(struct ubifs_info *c)
int rsvd_idx_lebs, lebs;
long long available, outstanding, free;
- ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
+ ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
outstanding = c->bi.data_growth + c->bi.dd_growth;
available = ubifs_calc_available(c, c->bi.min_idx_lebs);
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 63f56619991d..591f2c7a48f0 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -91,9 +91,9 @@ static int nothing_to_commit(struct ubifs_info *c)
if (c->nroot && test_bit(DIRTY_CNODE, &c->nroot->flags))
return 0;
- ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0);
- ubifs_assert(c->dirty_pn_cnt == 0);
- ubifs_assert(c->dirty_nn_cnt == 0);
+ ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
+ ubifs_assert(c, c->dirty_pn_cnt == 0);
+ ubifs_assert(c, c->dirty_nn_cnt == 0);
return 1;
}
@@ -113,7 +113,7 @@ static int do_commit(struct ubifs_info *c)
struct ubifs_lp_stats lst;
dbg_cmt("start");
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->ro_error) {
err = -EROFS;
diff --git a/fs/ubifs/crypto.c b/fs/ubifs/crypto.c
index 55c508fe8131..4aaedf2d7f44 100644
--- a/fs/ubifs/crypto.c
+++ b/fs/ubifs/crypto.c
@@ -32,7 +32,7 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
struct page *ret;
unsigned int pad_len = round_up(in_len, UBIFS_CIPHER_BLOCK_SIZE);
- ubifs_assert(pad_len <= *out_len);
+ ubifs_assert(c, pad_len <= *out_len);
dn->compr_size = cpu_to_le16(in_len);
/* pad to full block cipher length */
@@ -63,7 +63,7 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
return -EINVAL;
}
- ubifs_assert(dlen <= UBIFS_BLOCK_SIZE);
+ ubifs_assert(c, dlen <= UBIFS_BLOCK_SIZE);
err = fscrypt_decrypt_page(inode, virt_to_page(&dn->data), dlen,
offset_in_page(&dn->data), block);
if (err) {
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 7cd8a7b95299..564e330d05b1 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -134,7 +134,7 @@ const char *dbg_snprintf_key(const struct ubifs_info *c,
}
} else
len -= snprintf(p, len, "bad key format %d", c->key_fmt);
- ubifs_assert(len > 0);
+ ubifs_assert(c, len > 0);
return p;
}
@@ -276,7 +276,7 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
return;
pr_err("List of directory entries:\n");
- ubifs_assert(!mutex_is_locked(&c->tnc_mutex));
+ ubifs_assert(c, !mutex_is_locked(&c->tnc_mutex));
lowest_dent_key(c, &key, inode->i_ino);
while (1) {
@@ -931,7 +931,7 @@ void ubifs_dump_tnc(struct ubifs_info *c)
pr_err("\n");
pr_err("(pid %d) start dumping TNC tree\n", current->pid);
- znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
+ znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL);
level = znode->level;
pr_err("== Level %d ==\n", level);
while (znode) {
@@ -940,7 +940,7 @@ void ubifs_dump_tnc(struct ubifs_info *c)
pr_err("== Level %d ==\n", level);
}
ubifs_dump_znode(c, znode);
- znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode);
+ znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
}
pr_err("(pid %d) finish dumping TNC tree\n", current->pid);
}
@@ -1183,7 +1183,7 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
union ubifs_key key;
char key_buf[DBG_KEY_BUF_LEN];
- ubifs_assert(!keys_cmp(c, &zbr1->key, &zbr2->key));
+ ubifs_assert(c, !keys_cmp(c, &zbr1->key, &zbr2->key));
dent1 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
if (!dent1)
return -ENOMEM;
@@ -1479,7 +1479,7 @@ int dbg_check_tnc(struct ubifs_info *c, int extra)
if (!dbg_is_chk_index(c))
return 0;
- ubifs_assert(mutex_is_locked(&c->tnc_mutex));
+ ubifs_assert(c, mutex_is_locked(&c->tnc_mutex));
if (!c->zroot.znode)
return 0;
@@ -1505,7 +1505,7 @@ int dbg_check_tnc(struct ubifs_info *c, int extra)
}
prev = znode;
- znode = ubifs_tnc_postorder_next(znode);
+ znode = ubifs_tnc_postorder_next(c, znode);
if (!znode)
break;
@@ -2036,7 +2036,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
long long blk_offs;
struct ubifs_data_node *dn = node;
- ubifs_assert(zbr->len >= UBIFS_DATA_NODE_SZ);
+ ubifs_assert(c, zbr->len >= UBIFS_DATA_NODE_SZ);
/*
* Search the inode node this data node belongs to and insert
@@ -2066,7 +2066,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
struct ubifs_dent_node *dent = node;
struct fsck_inode *fscki1;
- ubifs_assert(zbr->len >= UBIFS_DENT_NODE_SZ);
+ ubifs_assert(c, zbr->len >= UBIFS_DENT_NODE_SZ);
err = ubifs_validate_entry(c, dent);
if (err)
@@ -2461,7 +2461,7 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
{
struct ubifs_debug_info *d = c->dbg;
- ubifs_assert(dbg_is_tst_rcvry(c));
+ ubifs_assert(c, dbg_is_tst_rcvry(c));
if (!d->pc_cnt) {
/* First call - decide delay to the power cut */
@@ -3081,6 +3081,28 @@ void dbg_debugfs_exit(void)
debugfs_remove_recursive(dfs_rootdir);
}
+void ubifs_assert_failed(struct ubifs_info *c, const char *expr,
+ const char *file, int line)
+{
+ ubifs_err(c, "UBIFS assert failed: %s, in %s:%u", expr, file, line);
+
+ switch (c->assert_action) {
+ case ASSACT_PANIC:
+ BUG();
+ break;
+
+ case ASSACT_RO:
+ ubifs_ro_mode(c, -EINVAL);
+ break;
+
+ case ASSACT_REPORT:
+ default:
+ dump_stack();
+ break;
+
+ }
+}
+
/**
* ubifs_debugging_init - initialize UBIFS debugging.
* @c: UBIFS file-system description object
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index e03d5179769a..64c6977c189b 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -148,19 +148,21 @@ struct ubifs_global_debug_info {
unsigned int tst_rcvry:1;
};
-#define ubifs_assert(expr) do { \
+void ubifs_assert_failed(struct ubifs_info *c, const char *expr,
+ const char *file, int line);
+
+#define ubifs_assert(c, expr) do { \
if (unlikely(!(expr))) { \
- pr_crit("UBIFS assert failed in %s at %u (pid %d)\n", \
- __func__, __LINE__, current->pid); \
- dump_stack(); \
+ ubifs_assert_failed((struct ubifs_info *)c, #expr, __FILE__, \
+ __LINE__); \
} \
} while (0)
#define ubifs_assert_cmt_locked(c) do { \
if (unlikely(down_write_trylock(&(c)->commit_sem))) { \
up_write(&(c)->commit_sem); \
- pr_crit("commit lock is not locked!\n"); \
- ubifs_assert(0); \
+ ubifs_err(c, "commit lock is not locked!\n"); \
+ ubifs_assert(c, 0); \
} \
} while (0)
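
Threading the ubifs_info pointer through every assertion is what makes the configurable failure action in debug.c possible: the same check can panic, force the mount read-only, or merely report, according to c->assert_action. Call sites change only by the extra argument, sketched:

	static void sketch_check_leb(const struct ubifs_info *c, int lnum)
	{
		/* On failure this routes through ubifs_assert_failed():
		 * ASSACT_PANIC -> BUG(), ASSACT_RO -> ubifs_ro_mode(),
		 * ASSACT_REPORT (the default) -> dump_stack(). */
		ubifs_assert(c, lnum >= c->main_first && lnum < c->leb_cnt);
	}
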
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 9da224d4f2da..5767b373a8ff 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -240,8 +240,8 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
}
if (nm.hash) {
- ubifs_assert(fname_len(&nm) == 0);
- ubifs_assert(fname_name(&nm) == NULL);
+ ubifs_assert(c, fname_len(&nm) == 0);
+ ubifs_assert(c, fname_name(&nm) == NULL);
dent_key_init_hash(c, &key, dir->i_ino, nm.hash);
err = ubifs_tnc_lookup_dh(c, &key, dent, nm.minor_hash);
} else {
@@ -404,7 +404,7 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
if (whiteout) {
init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
- ubifs_assert(inode->i_op == &ubifs_file_inode_operations);
+ ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations);
}
err = ubifs_init_security(dir, inode, &dentry->d_name);
@@ -421,7 +421,7 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
} else {
d_tmpfile(dentry, inode);
}
- ubifs_assert(ui->dirty);
+ ubifs_assert(c, ui->dirty);
instantiated = 1;
mutex_unlock(&ui->ui_mutex);
@@ -556,7 +556,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
/* File positions 0 and 1 correspond to "." and ".." */
if (ctx->pos < 2) {
- ubifs_assert(!file->private_data);
+ ubifs_assert(c, !file->private_data);
if (!dir_emit_dots(file, ctx)) {
if (encrypted)
fscrypt_fname_free_buffer(&fstr);
@@ -597,7 +597,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
dbg_gen("ino %llu, new f_pos %#x",
(unsigned long long)le64_to_cpu(dent->inum),
key_hash_flash(c, &dent->key));
- ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
+ ubifs_assert(c, le64_to_cpu(dent->ch.sqnum) >
ubifs_inode(dir)->creat_sqnum);
fname_len(&nm) = le16_to_cpu(dent->nlen);
@@ -716,8 +716,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
dbg_gen("dent '%pd' to ino %lu (nlink %d) in dir ino %lu",
dentry, inode->i_ino,
inode->i_nlink, dir->i_ino);
- ubifs_assert(inode_is_locked(dir));
- ubifs_assert(inode_is_locked(inode));
+ ubifs_assert(c, inode_is_locked(dir));
+ ubifs_assert(c, inode_is_locked(inode));
err = fscrypt_prepare_link(old_dentry, dir, dentry);
if (err)
@@ -804,8 +804,8 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
sz_change = CALC_DENT_SIZE(fname_len(&nm));
- ubifs_assert(inode_is_locked(dir));
- ubifs_assert(inode_is_locked(inode));
+ ubifs_assert(c, inode_is_locked(dir));
+ ubifs_assert(c, inode_is_locked(inode));
err = dbg_check_synced_i_size(c, inode);
if (err)
goto out_fname;
@@ -896,8 +896,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
dbg_gen("directory '%pd', ino %lu in dir ino %lu", dentry,
inode->i_ino, dir->i_ino);
- ubifs_assert(inode_is_locked(dir));
- ubifs_assert(inode_is_locked(inode));
+ ubifs_assert(c, inode_is_locked(dir));
+ ubifs_assert(c, inode_is_locked(inode));
err = ubifs_check_dir_empty(d_inode(dentry));
if (err)
return err;
@@ -1123,8 +1123,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
struct ubifs_inode *ui;
struct ubifs_inode *dir_ui = ubifs_inode(dir);
struct ubifs_info *c = dir->i_sb->s_fs_info;
- int err, len = strlen(symname);
- int sz_change = CALC_DENT_SIZE(len);
+ int err, sz_change, len = strlen(symname);
struct fscrypt_str disk_link;
struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
.new_ino_d = ALIGN(len, 8),
@@ -1151,6 +1150,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
if (err)
goto out_budg;
+ sz_change = CALC_DENT_SIZE(fname_len(&nm));
+
inode = ubifs_new_inode(c, dir, S_IFLNK | S_IRWXUGO);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
@@ -1294,7 +1295,7 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
new_dentry, new_dir->i_ino, flags);
if (unlink)
- ubifs_assert(inode_is_locked(new_inode));
+ ubifs_assert(c, inode_is_locked(new_inode));
if (unlink && is_dir) {
err = ubifs_check_dir_empty(new_inode);
@@ -1348,7 +1349,7 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
whiteout_ui = ubifs_inode(whiteout);
whiteout_ui->data = dev;
whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0));
- ubifs_assert(!whiteout_ui->dirty);
+ ubifs_assert(c, !whiteout_ui->dirty);
}
lock_4_inodes(old_dir, new_dir, new_inode, whiteout);
@@ -1508,7 +1509,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
int err;
struct fscrypt_name fst_nm, snd_nm;
- ubifs_assert(fst_inode && snd_inode);
+ ubifs_assert(c, fst_inode && snd_inode);
err = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &fst_nm);
if (err)
@@ -1555,12 +1556,13 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
unsigned int flags)
{
int err;
+ struct ubifs_info *c = old_dir->i_sb->s_fs_info;
if (flags & ~(RENAME_NOREPLACE | RENAME_WHITEOUT | RENAME_EXCHANGE))
return -EINVAL;
- ubifs_assert(inode_is_locked(old_dir));
- ubifs_assert(inode_is_locked(new_dir));
+ ubifs_assert(c, inode_is_locked(old_dir));
+ ubifs_assert(c, inode_is_locked(new_dir));
err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
flags);
@@ -1647,7 +1649,9 @@ const struct inode_operations ubifs_dir_inode_operations = {
.rename = ubifs_rename,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
+#ifdef CONFIG_UBIFS_FS_XATTR
.listxattr = ubifs_listxattr,
+#endif
#ifdef CONFIG_UBIFS_ATIME_SUPPORT
.update_time = ubifs_update_time,
#endif
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index fd7eb6fe9090..1b78f2e09218 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -71,7 +71,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
return err;
}
- ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
+ ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
ubifs_inode(inode)->creat_sqnum);
len = le32_to_cpu(dn->size);
if (len <= 0 || len > UBIFS_BLOCK_SIZE)
@@ -115,12 +115,13 @@ static int do_readpage(struct page *page)
unsigned int block, beyond;
struct ubifs_data_node *dn;
struct inode *inode = page->mapping->host;
+ struct ubifs_info *c = inode->i_sb->s_fs_info;
loff_t i_size = i_size_read(inode);
dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
inode->i_ino, page->index, i_size, page->flags);
- ubifs_assert(!PageChecked(page));
- ubifs_assert(!PagePrivate(page));
+ ubifs_assert(c, !PageChecked(page));
+ ubifs_assert(c, !PagePrivate(page));
addr = kmap(page);
@@ -441,8 +442,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
int skipped_read = 0;
struct page *page;
- ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (unlikely(c->ro_error))
return -EROFS;
@@ -481,7 +482,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
err = allocate_budget(c, page, ui, appending);
if (unlikely(err)) {
- ubifs_assert(err == -ENOSPC);
+ ubifs_assert(c, err == -ENOSPC);
/*
* If we skipped reading the page because we were going to
* write all of it, then it is not up to date.
@@ -498,7 +499,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
* everything and fall-back to slow-path.
*/
if (appending) {
- ubifs_assert(mutex_is_locked(&ui->ui_mutex));
+ ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
mutex_unlock(&ui->ui_mutex);
}
unlock_page(page);
@@ -595,7 +596,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
* '__set_page_dirty_nobuffers()'.
*/
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- ubifs_assert(mutex_is_locked(&ui->ui_mutex));
+ ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
mutex_unlock(&ui->ui_mutex);
}
@@ -648,7 +649,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
dn = bu->buf + (bu->zbranch[nn].offs - offs);
- ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
+ ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
ubifs_inode(inode)->creat_sqnum);
len = le32_to_cpu(dn->size);
@@ -767,8 +768,8 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
bu->zbranch[bu->cnt - 1].len -
bu->zbranch[0].offs;
- ubifs_assert(bu->buf_len > 0);
- ubifs_assert(bu->buf_len <= c->leb_size);
+ ubifs_assert(c, bu->buf_len > 0);
+ ubifs_assert(c, bu->buf_len <= c->leb_size);
bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
if (!bu->buf)
goto out_bu_off;
@@ -920,7 +921,7 @@ static int do_writepage(struct page *page, int len)
#ifdef UBIFS_DEBUG
struct ubifs_inode *ui = ubifs_inode(inode);
spin_lock(&ui->ui_lock);
- ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT);
+ ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
spin_unlock(&ui->ui_lock);
#endif
@@ -949,7 +950,7 @@ static int do_writepage(struct page *page, int len)
ubifs_ro_mode(c, err);
}
- ubifs_assert(PagePrivate(page));
+ ubifs_assert(c, PagePrivate(page));
if (PageChecked(page))
release_new_page_budget(c);
else
@@ -1014,6 +1015,7 @@ static int do_writepage(struct page *page, int len)
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
+ struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
loff_t i_size = i_size_read(inode), synced_i_size;
pgoff_t end_index = i_size >> PAGE_SHIFT;
@@ -1022,7 +1024,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
dbg_gen("ino %lu, pg %lu, pg flags %#lx",
inode->i_ino, page->index, page->flags);
- ubifs_assert(PagePrivate(page));
+ ubifs_assert(c, PagePrivate(page));
/* Is the page fully outside @i_size? (truncate in progress) */
if (page->index > end_index || (page->index == end_index && !len)) {
@@ -1167,7 +1169,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
* 'ubifs_jnl_truncate()' will see an already
* truncated (and up to date) data node.
*/
- ubifs_assert(PagePrivate(page));
+ ubifs_assert(c, PagePrivate(page));
clear_page_dirty_for_io(page);
if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
@@ -1303,7 +1305,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset,
struct inode *inode = page->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
- ubifs_assert(PagePrivate(page));
+ ubifs_assert(c, PagePrivate(page));
if (offset || length < PAGE_SIZE)
/* Partial page remains dirty */
return;
@@ -1365,11 +1367,10 @@ out:
* granularity, they are not updated. This is an optimization.
*/
static inline int mctime_update_needed(const struct inode *inode,
- const struct timespec *now)
+ const struct timespec64 *now)
{
- struct timespec64 now64 = timespec_to_timespec64(*now);
- if (!timespec64_equal(&inode->i_mtime, &now64) ||
- !timespec64_equal(&inode->i_ctime, &now64))
+ if (!timespec64_equal(&inode->i_mtime, now) ||
+ !timespec64_equal(&inode->i_ctime, now))
return 1;
return 0;
}
@@ -1425,7 +1426,7 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time,
*/
static int update_mctime(struct inode *inode)
{
- struct timespec now = timespec64_to_timespec(current_time(inode));
+ struct timespec64 now = current_time(inode);
struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_info *c = inode->i_sb->s_fs_info;
@@ -1462,13 +1463,15 @@ static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
static int ubifs_set_page_dirty(struct page *page)
{
int ret;
+ struct inode *inode = page->mapping->host;
+ struct ubifs_info *c = inode->i_sb->s_fs_info;
ret = __set_page_dirty_nobuffers(page);
/*
* An attempt to dirty a page without budgeting for it - should not
* happen.
*/
- ubifs_assert(ret == 0);
+ ubifs_assert(c, ret == 0);
return ret;
}
@@ -1497,14 +1500,17 @@ static int ubifs_migrate_page(struct address_space *mapping,
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
+ struct inode *inode = page->mapping->host;
+ struct ubifs_info *c = inode->i_sb->s_fs_info;
+
/*
* An attempt to release a dirty page without budgeting for it - should
* not happen.
*/
if (PageWriteback(page))
return 0;
- ubifs_assert(PagePrivate(page));
- ubifs_assert(0);
+ ubifs_assert(c, PagePrivate(page));
+ ubifs_assert(c, 0);
ClearPagePrivate(page);
ClearPageChecked(page);
return 1;
@@ -1519,13 +1525,13 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
struct ubifs_info *c = inode->i_sb->s_fs_info;
- struct timespec now = timespec64_to_timespec(current_time(inode));
+ struct timespec64 now = current_time(inode);
struct ubifs_budget_req req = { .new_page = 1 };
int err, update_time;
dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
i_size_read(inode));
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (unlikely(c->ro_error))
return VM_FAULT_SIGBUS; /* -EROFS */
@@ -1654,7 +1660,9 @@ const struct address_space_operations ubifs_file_address_operations = {
const struct inode_operations ubifs_file_inode_operations = {
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
+#ifdef CONFIG_UBIFS_FS_XATTR
.listxattr = ubifs_listxattr,
+#endif
#ifdef CONFIG_UBIFS_ATIME_SUPPORT
.update_time = ubifs_update_time,
#endif
@@ -1664,7 +1672,9 @@ const struct inode_operations ubifs_symlink_inode_operations = {
.get_link = ubifs_get_link,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
+#ifdef CONFIG_UBIFS_FS_XATTR
.listxattr = ubifs_listxattr,
+#endif
#ifdef CONFIG_UBIFS_ATIME_SUPPORT
.update_time = ubifs_update_time,
#endif
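
Every file.c hunk above follows the one pattern this series applies across UBIFS: ubifs_assert() now takes the struct ubifs_info of the affected filesystem, so a failed assertion can be handled per-superblock (e.g. report, force read-only, or panic) rather than by one global policy. A hedged sketch of such a context-aware assert follows; fs_ctx, fs_assert() and the action codes are illustrative, not the real kernel macro:

    /* Context-aware assert: the filesystem context decides how a
     * failed assertion is handled. */
    #include <stdio.h>
    #include <stdlib.h>

    enum assert_action { ASSERT_REPORT, ASSERT_PANIC };

    struct fs_ctx { const char *name; enum assert_action action; };

    static void fs_assert_failed(const struct fs_ctx *c, const char *expr,
                                 const char *file, int line)
    {
        fprintf(stderr, "%s: assert failed: %s at %s:%d\n",
                c->name, expr, file, line);
        if (c->action == ASSERT_PANIC)
            abort();
    }

    #define fs_assert(c, expr) do {                                 \
        if (!(expr))                                                \
            fs_assert_failed((c), #expr, __FILE__, __LINE__);       \
    } while (0)

    int main(void)
    {
        struct fs_ctx c = { "ubi0:vol", ASSERT_REPORT };
        int used = 1;

        fs_assert(&c, used == 0);   /* reports, keeps running */
        return 0;
    }
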
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
index 9571616b5dda..f9646835b026 100644
--- a/fs/ubifs/find.c
+++ b/fs/ubifs/find.c
@@ -183,18 +183,18 @@ static const struct ubifs_lprops *scan_for_dirty(struct ubifs_info *c,
&data);
if (err)
return ERR_PTR(err);
- ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt);
+ ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt);
c->lscan_lnum = data.lnum;
lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
if (IS_ERR(lprops))
return lprops;
- ubifs_assert(lprops->lnum == data.lnum);
- ubifs_assert(lprops->free + lprops->dirty >= min_space);
- ubifs_assert(lprops->dirty >= c->dead_wm ||
+ ubifs_assert(c, lprops->lnum == data.lnum);
+ ubifs_assert(c, lprops->free + lprops->dirty >= min_space);
+ ubifs_assert(c, lprops->dirty >= c->dead_wm ||
(pick_free &&
lprops->free + lprops->dirty == c->leb_size));
- ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
- ubifs_assert(!exclude_index || !(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(c, !exclude_index || !(lprops->flags & LPROPS_INDEX));
return lprops;
}
@@ -315,7 +315,7 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp,
lp = idx_lp;
if (lp) {
- ubifs_assert(lp->free + lp->dirty >= c->dead_wm);
+ ubifs_assert(c, lp->free + lp->dirty >= c->dead_wm);
goto found;
}
@@ -326,7 +326,7 @@ int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp,
err = PTR_ERR(lp);
goto out;
}
- ubifs_assert(lp->dirty >= c->dead_wm ||
+ ubifs_assert(c, lp->dirty >= c->dead_wm ||
(pick_free && lp->free + lp->dirty == c->leb_size));
found:
@@ -462,15 +462,15 @@ const struct ubifs_lprops *do_find_free_space(struct ubifs_info *c,
&data);
if (err)
return ERR_PTR(err);
- ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt);
+ ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt);
c->lscan_lnum = data.lnum;
lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
if (IS_ERR(lprops))
return lprops;
- ubifs_assert(lprops->lnum == data.lnum);
- ubifs_assert(lprops->free >= min_space);
- ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
- ubifs_assert(!(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, lprops->lnum == data.lnum);
+ ubifs_assert(c, lprops->free >= min_space);
+ ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));
return lprops;
}
@@ -574,7 +574,7 @@ int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *offs,
}
dbg_find("found LEB %d, free %d", lnum, c->leb_size - *offs);
- ubifs_assert(*offs <= c->leb_size - min_space);
+ ubifs_assert(c, *offs <= c->leb_size - min_space);
return lnum;
out:
@@ -642,15 +642,15 @@ static const struct ubifs_lprops *scan_for_leb_for_idx(struct ubifs_info *c)
&data);
if (err)
return ERR_PTR(err);
- ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt);
+ ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt);
c->lscan_lnum = data.lnum;
lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
if (IS_ERR(lprops))
return lprops;
- ubifs_assert(lprops->lnum == data.lnum);
- ubifs_assert(lprops->free + lprops->dirty == c->leb_size);
- ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
- ubifs_assert(!(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, lprops->lnum == data.lnum);
+ ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size);
+ ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));
return lprops;
}
@@ -690,7 +690,7 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c)
*/
if (c->in_a_category_cnt != c->main_lebs ||
c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
- ubifs_assert(c->freeable_cnt == 0);
+ ubifs_assert(c, c->freeable_cnt == 0);
lprops = scan_for_leb_for_idx(c);
if (IS_ERR(lprops)) {
err = PTR_ERR(lprops);
@@ -750,10 +750,7 @@ static int cmp_dirty_idx(const struct ubifs_lprops **a,
static void swap_dirty_idx(struct ubifs_lprops **a, struct ubifs_lprops **b,
int size)
{
- struct ubifs_lprops *t = *a;
-
- *a = *b;
- *b = t;
+ swap(*a, *b);
}
/**
@@ -870,15 +867,15 @@ static int find_dirty_idx_leb(struct ubifs_info *c)
if (err)
return err;
found:
- ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt);
+ ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt);
c->lscan_lnum = data.lnum;
lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
if (IS_ERR(lprops))
return PTR_ERR(lprops);
- ubifs_assert(lprops->lnum == data.lnum);
- ubifs_assert(lprops->free + lprops->dirty >= c->min_idx_node_sz);
- ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
- ubifs_assert((lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, lprops->lnum == data.lnum);
+ ubifs_assert(c, lprops->free + lprops->dirty >= c->min_idx_node_sz);
+ ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(c, (lprops->flags & LPROPS_INDEX));
dbg_find("found dirty LEB %d, free %d, dirty %d, flags %#x",
lprops->lnum, lprops->free, lprops->dirty, lprops->flags);
@@ -947,8 +944,8 @@ static int find_dirtiest_idx_leb(struct ubifs_info *c)
}
dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty,
lp->free, lp->flags);
- ubifs_assert(lp->flags & LPROPS_TAKEN);
- ubifs_assert(lp->flags & LPROPS_INDEX);
+ ubifs_assert(c, lp->flags & LPROPS_TAKEN);
+ ubifs_assert(c, lp->flags & LPROPS_INDEX);
return lnum;
}
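
The swap_dirty_idx() hunk replaces an open-coded three-line exchange with the kernel's generic swap() helper. Its effect is the classic temporary-based swap, sketched below with a GCC-style __typeof__ macro (the kernel's version is equivalent in spirit):

    #include <stdio.h>

    #define swap_vals(a, b) do {            \
        __typeof__(a) __tmp = (a);          \
        (a) = (b);                          \
        (b) = __tmp;                        \
    } while (0)

    int main(void)
    {
        int x = 1, y = 2;
        int *p = &x, *q = &y;

        swap_vals(p, q);    /* swaps the pointers, not the ints */
        printf("*p=%d *q=%d\n", *p, *q);
        return 0;
    }
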
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index a03a47cf880d..d2680e0b4a36 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -83,7 +83,7 @@ static int switch_gc_head(struct ubifs_info *c)
int err, gc_lnum = c->gc_lnum;
struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
- ubifs_assert(gc_lnum != -1);
+ ubifs_assert(c, gc_lnum != -1);
dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
c->leb_size - wbuf->offs - wbuf->used);
@@ -131,10 +131,10 @@ static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
sa = list_entry(a, struct ubifs_scan_node, list);
sb = list_entry(b, struct ubifs_scan_node, list);
- ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
- ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
- ubifs_assert(sa->type == UBIFS_DATA_NODE);
- ubifs_assert(sb->type == UBIFS_DATA_NODE);
+ ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DATA_KEY);
+ ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DATA_KEY);
+ ubifs_assert(c, sa->type == UBIFS_DATA_NODE);
+ ubifs_assert(c, sb->type == UBIFS_DATA_NODE);
inuma = key_inum(c, &sa->key);
inumb = key_inum(c, &sb->key);
@@ -175,9 +175,9 @@ static int nondata_nodes_cmp(void *priv, struct list_head *a,
sa = list_entry(a, struct ubifs_scan_node, list);
sb = list_entry(b, struct ubifs_scan_node, list);
- ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
+ ubifs_assert(c, key_type(c, &sa->key) != UBIFS_DATA_KEY &&
key_type(c, &sb->key) != UBIFS_DATA_KEY);
- ubifs_assert(sa->type != UBIFS_DATA_NODE &&
+ ubifs_assert(c, sa->type != UBIFS_DATA_NODE &&
sb->type != UBIFS_DATA_NODE);
/* Inodes go before directory entries */
@@ -189,13 +189,13 @@ static int nondata_nodes_cmp(void *priv, struct list_head *a,
if (sb->type == UBIFS_INO_NODE)
return 1;
- ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
+ ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DENT_KEY ||
key_type(c, &sa->key) == UBIFS_XENT_KEY);
- ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
+ ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DENT_KEY ||
key_type(c, &sb->key) == UBIFS_XENT_KEY);
- ubifs_assert(sa->type == UBIFS_DENT_NODE ||
+ ubifs_assert(c, sa->type == UBIFS_DENT_NODE ||
sa->type == UBIFS_XENT_NODE);
- ubifs_assert(sb->type == UBIFS_DENT_NODE ||
+ ubifs_assert(c, sb->type == UBIFS_DENT_NODE ||
sb->type == UBIFS_XENT_NODE);
inuma = key_inum(c, &sa->key);
@@ -250,7 +250,7 @@ static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
/* Separate data nodes and non-data nodes */
list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
- ubifs_assert(snod->type == UBIFS_INO_NODE ||
+ ubifs_assert(c, snod->type == UBIFS_INO_NODE ||
snod->type == UBIFS_DATA_NODE ||
snod->type == UBIFS_DENT_NODE ||
snod->type == UBIFS_XENT_NODE ||
@@ -266,7 +266,7 @@ static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
continue;
}
- ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
+ ubifs_assert(c, key_type(c, &snod->key) == UBIFS_DATA_KEY ||
key_type(c, &snod->key) == UBIFS_INO_KEY ||
key_type(c, &snod->key) == UBIFS_DENT_KEY ||
key_type(c, &snod->key) == UBIFS_XENT_KEY);
@@ -469,21 +469,21 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
int err = 0, lnum = lp->lnum;
- ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
+ ubifs_assert(c, c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
c->need_recovery);
- ubifs_assert(c->gc_lnum != lnum);
- ubifs_assert(wbuf->lnum != lnum);
+ ubifs_assert(c, c->gc_lnum != lnum);
+ ubifs_assert(c, wbuf->lnum != lnum);
if (lp->free + lp->dirty == c->leb_size) {
/* Special case - a free LEB */
dbg_gc("LEB %d is free, return it", lp->lnum);
- ubifs_assert(!(lp->flags & LPROPS_INDEX));
+ ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
if (lp->free != c->leb_size) {
/*
* Write buffers must be sync'd before unmapping
* freeable LEBs, because one of them may contain data
- * which obsoletes something in 'lp->pnum'.
+ * which obsoletes something in 'lp->lnum'.
*/
err = gc_sync_wbufs(c);
if (err)
@@ -513,7 +513,7 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
if (IS_ERR(sleb))
return PTR_ERR(sleb);
- ubifs_assert(!list_empty(&sleb->nodes));
+ ubifs_assert(c, !list_empty(&sleb->nodes));
snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
if (snod->type == UBIFS_IDX_NODE) {
@@ -525,7 +525,7 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
struct ubifs_idx_node *idx = snod->node;
int level = le16_to_cpu(idx->level);
- ubifs_assert(snod->type == UBIFS_IDX_NODE);
+ ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
key_read(c, ubifs_idx_key(c, idx), &snod->key);
err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
snod->offs);
@@ -648,7 +648,7 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
ubifs_assert_cmt_locked(c);
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (ubifs_gc_should_commit(c))
return -EAGAIN;
@@ -661,7 +661,7 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
}
/* We expect the write-buffer to be empty on entry */
- ubifs_assert(!wbuf->used);
+ ubifs_assert(c, !wbuf->used);
for (i = 0; ; i++) {
int space_before, space_after;
@@ -752,7 +752,7 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
continue;
}
- ubifs_assert(ret == LEB_RETAINED);
+ ubifs_assert(c, ret == LEB_RETAINED);
space_after = c->leb_size - wbuf->offs - wbuf->used;
dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
space_after - space_before);
@@ -812,8 +812,8 @@ out_unlock:
return ret;
out:
- ubifs_assert(ret < 0);
- ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
+ ubifs_assert(c, ret < 0);
+ ubifs_assert(c, ret != -ENOSPC && ret != -EAGAIN);
ubifs_wbuf_sync_nolock(wbuf);
ubifs_ro_mode(c, ret);
mutex_unlock(&wbuf->io_mutex);
@@ -848,8 +848,8 @@ int ubifs_gc_start_commit(struct ubifs_info *c)
lp = ubifs_fast_find_freeable(c);
if (!lp)
break;
- ubifs_assert(!(lp->flags & LPROPS_TAKEN));
- ubifs_assert(!(lp->flags & LPROPS_INDEX));
+ ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
+ ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
err = ubifs_leb_unmap(c, lp->lnum);
if (err)
goto out;
@@ -858,8 +858,8 @@ int ubifs_gc_start_commit(struct ubifs_info *c)
err = PTR_ERR(lp);
goto out;
}
- ubifs_assert(!(lp->flags & LPROPS_TAKEN));
- ubifs_assert(!(lp->flags & LPROPS_INDEX));
+ ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
+ ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
}
/* Mark GC'd index LEBs OK to unmap after this commit finishes */
@@ -880,8 +880,8 @@ int ubifs_gc_start_commit(struct ubifs_info *c)
err = -ENOMEM;
goto out;
}
- ubifs_assert(!(lp->flags & LPROPS_TAKEN));
- ubifs_assert(lp->flags & LPROPS_INDEX);
+ ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
+ ubifs_assert(c, lp->flags & LPROPS_INDEX);
/* Don't release the LEB until after the next commit */
flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
@@ -890,8 +890,8 @@ int ubifs_gc_start_commit(struct ubifs_info *c)
kfree(idx_gc);
goto out;
}
- ubifs_assert(lp->flags & LPROPS_TAKEN);
- ubifs_assert(!(lp->flags & LPROPS_INDEX));
+ ubifs_assert(c, lp->flags & LPROPS_TAKEN);
+ ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
idx_gc->lnum = lp->lnum;
idx_gc->unmap = 1;
list_add(&idx_gc->list, &c->idx_gc);
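
The comparators above define the order in which garbage collection rewrites surviving nodes: data nodes sort by inode number and then block number, so each inode's data ends up contiguous in the GC head. The same ordering expressed as a standalone qsort() comparator (the node record is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct node { uint32_t inum; uint32_t block; };

    static int data_node_cmp(const void *pa, const void *pb)
    {
        const struct node *a = pa, *b = pb;

        if (a->inum != b->inum)     /* group nodes of one inode together */
            return a->inum < b->inum ? -1 : 1;
        if (a->block != b->block)   /* then by ascending block number */
            return a->block < b->block ? -1 : 1;
        return 0;
    }

    int main(void)
    {
        struct node n[] = { {2, 1}, {1, 7}, {1, 3}, {2, 0} };
        size_t i;

        qsort(n, 4, sizeof(n[0]), data_node_cmp);
        for (i = 0; i < 4; i++)
            printf("ino %u blk %u\n", n[i].inum, n[i].block);
        return 0;
    }
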
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index fe77e9625e84..099bec94b820 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -119,7 +119,7 @@ int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
{
int err;
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->ro_error)
return -EROFS;
if (!dbg_is_tst_rcvry(c))
@@ -139,7 +139,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
{
int err;
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->ro_error)
return -EROFS;
if (!dbg_is_tst_rcvry(c))
@@ -159,7 +159,7 @@ int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
{
int err;
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->ro_error)
return -EROFS;
if (!dbg_is_tst_rcvry(c))
@@ -178,7 +178,7 @@ int ubifs_leb_map(struct ubifs_info *c, int lnum)
{
int err;
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->ro_error)
return -EROFS;
if (!dbg_is_tst_rcvry(c))
@@ -241,8 +241,8 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
uint32_t crc, node_crc, magic;
const struct ubifs_ch *ch = buf;
- ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
- ubifs_assert(!(offs & 7) && offs < c->leb_size);
+ ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+ ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
magic = le32_to_cpu(ch->magic);
if (magic != UBIFS_NODE_MAGIC) {
@@ -319,7 +319,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
uint32_t crc;
- ubifs_assert(pad >= 0 && !(pad & 7));
+ ubifs_assert(c, pad >= 0 && !(pad & 7));
if (pad >= UBIFS_PAD_NODE_SZ) {
struct ubifs_ch *ch = buf;
@@ -382,7 +382,7 @@ void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
struct ubifs_ch *ch = node;
unsigned long long sqnum = next_sqnum(c);
- ubifs_assert(len >= UBIFS_CH_SZ);
+ ubifs_assert(c, len >= UBIFS_CH_SZ);
ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
ch->len = cpu_to_le32(len);
@@ -415,7 +415,7 @@ void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
struct ubifs_ch *ch = node;
unsigned long long sqnum = next_sqnum(c);
- ubifs_assert(len >= UBIFS_CH_SZ);
+ ubifs_assert(c, len >= UBIFS_CH_SZ);
ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
ch->len = cpu_to_le32(len);
@@ -448,9 +448,10 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
/**
* new_wbuf_timer - start new write-buffer timer.
+ * @c: UBIFS file-system description object
* @wbuf: write-buffer descriptor
*/
-static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
+static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
ktime_t softlimit = ms_to_ktime(dirty_writeback_interval * 10);
unsigned long long delta = dirty_writeback_interval;
@@ -458,8 +459,8 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
/* centi to milli, milli to nano, then 10% */
delta *= 10ULL * NSEC_PER_MSEC / 10ULL;
- ubifs_assert(!hrtimer_active(&wbuf->timer));
- ubifs_assert(delta <= ULONG_MAX);
+ ubifs_assert(c, !hrtimer_active(&wbuf->timer));
+ ubifs_assert(c, delta <= ULONG_MAX);
if (wbuf->no_timer)
return;
@@ -508,14 +509,14 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
dbg_io("LEB %d:%d, %d bytes, jhead %s",
wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
- ubifs_assert(!(wbuf->avail & 7));
- ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size);
- ubifs_assert(wbuf->size >= c->min_io_size);
- ubifs_assert(wbuf->size <= c->max_write_size);
- ubifs_assert(wbuf->size % c->min_io_size == 0);
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !(wbuf->avail & 7));
+ ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size);
+ ubifs_assert(c, wbuf->size >= c->min_io_size);
+ ubifs_assert(c, wbuf->size <= c->max_write_size);
+ ubifs_assert(c, wbuf->size % c->min_io_size == 0);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->leb_size - wbuf->offs >= c->max_write_size)
- ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
+ ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));
if (c->ro_error)
return -EROFS;
@@ -576,11 +577,11 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
const struct ubifs_info *c = wbuf->c;
dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
- ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
- ubifs_assert(offs >= 0 && offs <= c->leb_size);
- ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
- ubifs_assert(lnum != wbuf->lnum);
- ubifs_assert(wbuf->used == 0);
+ ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt);
+ ubifs_assert(c, offs >= 0 && offs <= c->leb_size);
+ ubifs_assert(c, offs % c->min_io_size == 0 && !(offs & 7));
+ ubifs_assert(c, lnum != wbuf->lnum);
+ ubifs_assert(c, wbuf->used == 0);
spin_lock(&wbuf->lock);
wbuf->lnum = lnum;
@@ -610,7 +611,7 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
int err, i;
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (!c->need_wbuf_sync)
return 0;
c->need_wbuf_sync = 0;
@@ -686,18 +687,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
dbg_ntype(((struct ubifs_ch *)buf)->node_type),
dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
- ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
- ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
- ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
- ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size);
- ubifs_assert(wbuf->size >= c->min_io_size);
- ubifs_assert(wbuf->size <= c->max_write_size);
- ubifs_assert(wbuf->size % c->min_io_size == 0);
- ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
- ubifs_assert(!c->ro_media && !c->ro_mount);
- ubifs_assert(!c->space_fixup);
+ ubifs_assert(c, len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
+ ubifs_assert(c, wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
+ ubifs_assert(c, !(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
+ ubifs_assert(c, wbuf->avail > 0 && wbuf->avail <= wbuf->size);
+ ubifs_assert(c, wbuf->size >= c->min_io_size);
+ ubifs_assert(c, wbuf->size <= c->max_write_size);
+ ubifs_assert(c, wbuf->size % c->min_io_size == 0);
+ ubifs_assert(c, mutex_is_locked(&wbuf->io_mutex));
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->space_fixup);
if (c->leb_size - wbuf->offs >= c->max_write_size)
- ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
+ ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));
if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
err = -ENOSPC;
@@ -834,7 +835,7 @@ exit:
}
if (wbuf->used)
- new_wbuf_timer_nolock(wbuf);
+ new_wbuf_timer_nolock(c, wbuf);
return 0;
@@ -869,10 +870,10 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
buf_len);
- ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
- ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
- ubifs_assert(!c->ro_media && !c->ro_mount);
- ubifs_assert(!c->space_fixup);
+ ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+ ubifs_assert(c, offs % c->min_io_size == 0 && offs < c->leb_size);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->space_fixup);
if (c->ro_error)
return -EROFS;
@@ -909,9 +910,9 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
- ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
- ubifs_assert(!(offs & 7) && offs < c->leb_size);
- ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
+ ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+ ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
+ ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);
spin_lock(&wbuf->lock);
overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
@@ -984,10 +985,10 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
struct ubifs_ch *ch = buf;
dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
- ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
- ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
- ubifs_assert(!(offs & 7) && offs < c->leb_size);
- ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
+ ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+ ubifs_assert(c, len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
+ ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
+ ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);
err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
if (err && err != -EBADMSG)
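
new_wbuf_timer_nolock() derives its timeout from dirty_writeback_interval, which is kept in centiseconds; the "centi to milli, milli to nano, then 10%" comment collapses to a single multiply by NSEC_PER_MSEC. The arithmetic, worked through in userspace:

    /* centiseconds -> 10% of the interval in nanoseconds:
     * 10 (centi->milli) * NSEC_PER_MSEC (milli->nano) / 10 (take 10%). */
    #include <stdio.h>

    #define NSEC_PER_MSEC 1000000ULL

    int main(void)
    {
        unsigned long long interval_cs = 500;   /* e.g. 5 seconds */
        unsigned long long delta = interval_cs;

        delta *= 10ULL * NSEC_PER_MSEC / 10ULL; /* == interval_cs * 1e6 */
        printf("soft limit %llu ms, slack %llu ns\n",
               interval_cs * 10, delta);
        return 0;
    }
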
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 07b4956e0425..802565a17733 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -111,7 +111,7 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len)
* better to try to allocate space at the ends of eraseblocks. This is
* what the squeeze parameter does.
*/
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
squeeze = (jhead == BASEHD);
again:
mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
@@ -215,7 +215,7 @@ out_unlock:
out_return:
/* An error occurred and the LEB has to be returned to lprops */
- ubifs_assert(err < 0);
+ ubifs_assert(c, err < 0);
err1 = ubifs_return_leb(c, lnum);
if (err1 && err == -EAGAIN)
/*
@@ -246,7 +246,7 @@ static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
{
struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
- ubifs_assert(jhead != GCHD);
+ ubifs_assert(c, jhead != GCHD);
*lnum = c->jheads[jhead].wbuf.lnum;
*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
@@ -278,7 +278,7 @@ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
int err;
struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
- ubifs_assert(jhead != GCHD);
+ ubifs_assert(c, jhead != GCHD);
*lnum = c->jheads[jhead].wbuf.lnum;
*offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
@@ -317,6 +317,7 @@ again:
down_read(&c->commit_sem);
err = reserve_space(c, jhead, len);
if (!err)
+ /* c->commit_sem will get released via finish_reservation(). */
return 0;
up_read(&c->commit_sem);
@@ -548,7 +549,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
struct ubifs_ino_node *ino;
union ubifs_key dent_key, ino_key;
- ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
+ ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
ilen = UBIFS_INO_NODE_SZ;
@@ -664,6 +665,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
spin_lock(&ui->ui_lock);
ui->synced_i_size = ui->ui_size;
spin_unlock(&ui->ui_lock);
+ if (xent) {
+ spin_lock(&host_ui->ui_lock);
+ host_ui->synced_i_size = host_ui->ui_size;
+ spin_unlock(&host_ui->ui_lock);
+ }
mark_inode_clean(c, ui);
mark_inode_clean(c, host_ui);
return 0;
@@ -707,7 +713,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
(unsigned long)key_inum(c, key), key_block(c, key), len);
- ubifs_assert(len <= UBIFS_BLOCK_SIZE);
+ ubifs_assert(c, len <= UBIFS_BLOCK_SIZE);
if (encrypted)
dlen += UBIFS_CIPHER_BLOCK_SIZE;
@@ -738,7 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
- ubifs_assert(compr_len <= UBIFS_BLOCK_SIZE);
+ ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE);
if (encrypted) {
err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key));
@@ -898,7 +904,7 @@ int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
int err;
struct ubifs_inode *ui = ubifs_inode(inode);
- ubifs_assert(inode->i_nlink == 0);
+ ubifs_assert(c, inode->i_nlink == 0);
if (ui->del_cmtno != c->cmt_no)
/* A commit happened for sure */
@@ -953,10 +959,10 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
int twoparents = (fst_dir != snd_dir);
void *p;
- ubifs_assert(ubifs_inode(fst_dir)->data_len == 0);
- ubifs_assert(ubifs_inode(snd_dir)->data_len == 0);
- ubifs_assert(mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
- ubifs_assert(mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));
+ ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0);
+ ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0);
+ ubifs_assert(c, mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
+ ubifs_assert(c, mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));
dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1;
dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1;
@@ -1096,16 +1102,16 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
int move = (old_dir != new_dir);
struct ubifs_inode *uninitialized_var(new_ui);
- ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
- ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
- ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
- ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));
+ ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0);
+ ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0);
+ ubifs_assert(c, mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
+ ubifs_assert(c, mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));
dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1;
dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1;
if (new_inode) {
new_ui = ubifs_inode(new_inode);
- ubifs_assert(mutex_is_locked(&new_ui->ui_mutex));
+ ubifs_assert(c, mutex_is_locked(&new_ui->ui_mutex));
ilen = UBIFS_INO_NODE_SZ;
if (!last_reference)
ilen += new_ui->data_len;
@@ -1282,8 +1288,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
int *new_len)
{
void *buf;
- int err, compr_type;
- u32 dlen, out_len, old_dlen;
+ int err, dlen, compr_type, out_len, old_dlen;
out_len = le32_to_cpu(dn->size);
buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
@@ -1319,7 +1324,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
dn->compr_size = 0;
}
- ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
+ ubifs_assert(c, out_len <= UBIFS_BLOCK_SIZE);
dn->compr_type = cpu_to_le16(compr_type);
dn->size = cpu_to_le32(*new_len);
*new_len = UBIFS_DATA_NODE_SZ + out_len;
@@ -1358,9 +1363,9 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
dbg_jnl("ino %lu, size %lld -> %lld",
(unsigned long)inum, old_size, new_size);
- ubifs_assert(!ui->data_len);
- ubifs_assert(S_ISREG(inode->i_mode));
- ubifs_assert(mutex_is_locked(&ui->ui_mutex));
+ ubifs_assert(c, !ui->data_len);
+ ubifs_assert(c, S_ISREG(inode->i_mode));
+ ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
@@ -1388,7 +1393,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
else if (err)
goto out_free;
else {
- if (le32_to_cpu(dn->size) <= dlen)
+ int dn_len = le32_to_cpu(dn->size);
+
+ if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
+ ubifs_err(c, "bad data node (block %u, inode %lu)",
+ blk, inode->i_ino);
+ ubifs_dump_node(c, dn);
+ goto out_free;
+ }
+
+ if (dn_len <= dlen)
dlen = 0; /* Nothing to do */
else {
err = truncate_data_node(c, inode, blk, dn, &dlen);
@@ -1488,8 +1502,8 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
int sync = IS_DIRSYNC(host);
struct ubifs_inode *host_ui = ubifs_inode(host);
- ubifs_assert(inode->i_nlink == 0);
- ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
+ ubifs_assert(c, inode->i_nlink == 0);
+ ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
/*
* Since we are deleting the inode, we do not bother to attach any data
@@ -1598,9 +1612,9 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
int sync = IS_DIRSYNC(host);
dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
- ubifs_assert(host->i_nlink > 0);
- ubifs_assert(inode->i_nlink > 0);
- ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
+ ubifs_assert(c, host->i_nlink > 0);
+ ubifs_assert(c, inode->i_nlink > 0);
+ ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
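
The new bounds check in ubifs_jnl_truncate() refuses a data node whose on-media size field falls outside (0, UBIFS_BLOCK_SIZE], dumping the node instead of using the bogus length. The same defensive pattern over an untrusted little-endian length field, sketched with stand-in names (le32_to_host() and the 4096-byte block size replace the kernel helpers):

    #include <stdio.h>
    #include <stdint.h>

    #define BLOCK_SIZE 4096

    static uint32_t le32_to_host(const uint8_t b[4])
    {
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
    }

    static int check_data_len(const uint8_t raw_size[4])
    {
        uint32_t len = le32_to_host(raw_size);

        if (len == 0 || len > BLOCK_SIZE)
            return -1;      /* corrupted node: refuse to use it */
        return (int)len;
    }

    int main(void)
    {
        uint8_t ok[4] = { 0x00, 0x10, 0x00, 0x00 };     /* 4096 */
        uint8_t bad[4] = { 0xff, 0xff, 0xff, 0xff };

        printf("ok=%d bad=%d\n", check_data_len(ok), check_data_len(bad));
        return 0;
    }
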
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h
index b1f7c0caa3ac..2feff6cbbb77 100644
--- a/fs/ubifs/key.h
+++ b/fs/ubifs/key.h
@@ -161,8 +161,8 @@ static inline void dent_key_init(const struct ubifs_info *c,
{
uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm));
- ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
- ubifs_assert(!nm->hash && !nm->minor_hash);
+ ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK));
+ ubifs_assert(c, !nm->hash && !nm->minor_hash);
key->u32[0] = inum;
key->u32[1] = hash | (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS);
}
@@ -179,7 +179,7 @@ static inline void dent_key_init_hash(const struct ubifs_info *c,
union ubifs_key *key, ino_t inum,
uint32_t hash)
{
- ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK));
key->u32[0] = inum;
key->u32[1] = hash | (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS);
}
@@ -198,7 +198,7 @@ static inline void dent_key_init_flash(const struct ubifs_info *c, void *k,
union ubifs_key *key = k;
uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm));
- ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK));
key->j32[0] = cpu_to_le32(inum);
key->j32[1] = cpu_to_le32(hash |
(UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS));
@@ -231,7 +231,7 @@ static inline void xent_key_init(const struct ubifs_info *c,
{
uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm));
- ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK));
key->u32[0] = inum;
key->u32[1] = hash | (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS);
}
@@ -249,7 +249,7 @@ static inline void xent_key_init_flash(const struct ubifs_info *c, void *k,
union ubifs_key *key = k;
uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm));
- ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK));
key->j32[0] = cpu_to_le32(inum);
key->j32[1] = cpu_to_le32(hash |
(UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS));
@@ -280,7 +280,7 @@ static inline void data_key_init(const struct ubifs_info *c,
union ubifs_key *key, ino_t inum,
unsigned int block)
{
- ubifs_assert(!(block & ~UBIFS_S_KEY_BLOCK_MASK));
+ ubifs_assert(c, !(block & ~UBIFS_S_KEY_BLOCK_MASK));
key->u32[0] = inum;
key->u32[1] = block | (UBIFS_DATA_KEY << UBIFS_S_KEY_BLOCK_BITS);
}
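
The key.h asserts guard the packing that follows them: a name hash and a key type share one 32-bit word, so the hash must fit under the mask before the type is shifted into the top bits. A sketch of that packing, assuming the simple key format's 29-bit hash and 3-bit type split (the type code below is illustrative):

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>

    #define KEY_HASH_BITS   29
    #define KEY_HASH_MASK   ((1u << KEY_HASH_BITS) - 1)
    #define DENT_KEY        2u      /* illustrative type code */

    static uint32_t pack_key(uint32_t hash, uint32_t type)
    {
        assert(!(hash & ~KEY_HASH_MASK));   /* hash must fit in 29 bits */
        return hash | (type << KEY_HASH_BITS);
    }

    int main(void)
    {
        uint32_t k = pack_key(0x12345678 & KEY_HASH_MASK, DENT_KEY);

        printf("hash %#x type %u\n",
               k & KEY_HASH_MASK, k >> KEY_HASH_BITS);
        return 0;
    }
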
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index 7cffa120a750..86b0828f5499 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -132,7 +132,7 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
while (*p) {
parent = *p;
b = rb_entry(parent, struct ubifs_bud, rb);
- ubifs_assert(bud->lnum != b->lnum);
+ ubifs_assert(c, bud->lnum != b->lnum);
if (bud->lnum < b->lnum)
p = &(*p)->rb_left;
else
@@ -145,7 +145,7 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
jhead = &c->jheads[bud->jhead];
list_add_tail(&bud->list, &jhead->buds_list);
} else
- ubifs_assert(c->replaying && c->ro_mount);
+ ubifs_assert(c, c->replaying && c->ro_mount);
/*
* Note, although this is a new bud, we account this space now anyway,
@@ -189,7 +189,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
}
mutex_lock(&c->log_mutex);
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->ro_error) {
err = -EROFS;
goto out_unlock;
@@ -244,7 +244,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
- ubifs_assert(c->lhead_lnum != c->ltail_lnum);
+ ubifs_assert(c, c->lhead_lnum != c->ltail_lnum);
c->lhead_offs = 0;
}
@@ -301,7 +301,7 @@ static void remove_buds(struct ubifs_info *c)
{
struct rb_node *p;
- ubifs_assert(list_empty(&c->old_buds));
+ ubifs_assert(c, list_empty(&c->old_buds));
c->cmt_bud_bytes = 0;
spin_lock(&c->buds_lock);
p = rb_first(&c->buds);
@@ -409,7 +409,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
/* Switch to the next log LEB */
if (c->lhead_offs) {
c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
- ubifs_assert(c->lhead_lnum != c->ltail_lnum);
+ ubifs_assert(c, c->lhead_lnum != c->ltail_lnum);
c->lhead_offs = 0;
}
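
ubifs_add_bud() above is a textbook descend-and-insert into a tree keyed by LEB number, with the assert forbidding duplicate buds. The same walk over a plain binary tree standing in for the kernel's red-black tree:

    #include <assert.h>
    #include <stdio.h>

    struct bud { int lnum; struct bud *left, *right; };

    static void add_bud(struct bud **root, struct bud *bud)
    {
        struct bud **p = root;

        while (*p) {
            assert(bud->lnum != (*p)->lnum);    /* no duplicate buds */
            p = bud->lnum < (*p)->lnum ? &(*p)->left : &(*p)->right;
        }
        bud->left = bud->right = NULL;
        *p = bud;
    }

    int main(void)
    {
        struct bud *root = NULL;
        struct bud a = { .lnum = 17 }, b = { .lnum = 5 }, x = { .lnum = 23 };

        add_bud(&root, &a);
        add_bud(&root, &b);
        add_bud(&root, &x);
        printf("root LEB %d\n", root->lnum);
        return 0;
    }
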
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index f5a46844340c..fa8d775c9753 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -187,9 +187,9 @@ static int add_to_lpt_heap(struct ubifs_info *c, struct ubifs_lprops *lprops,
/* Compare to some other LEB on the bottom of heap */
/* Pick a position kind of randomly */
cpos = (((size_t)lprops >> 4) & b) + b;
- ubifs_assert(cpos >= b);
- ubifs_assert(cpos < LPT_HEAP_SZ);
- ubifs_assert(cpos < heap->cnt);
+ ubifs_assert(c, cpos >= b);
+ ubifs_assert(c, cpos < LPT_HEAP_SZ);
+ ubifs_assert(c, cpos < heap->cnt);
val1 = get_heap_comp_val(lprops, cat);
val2 = get_heap_comp_val(heap->arr[cpos], cat);
@@ -230,8 +230,8 @@ static void remove_from_lpt_heap(struct ubifs_info *c,
int hpos = lprops->hpos;
heap = &c->lpt_heap[cat - 1];
- ubifs_assert(hpos >= 0 && hpos < heap->cnt);
- ubifs_assert(heap->arr[hpos] == lprops);
+ ubifs_assert(c, hpos >= 0 && hpos < heap->cnt);
+ ubifs_assert(c, heap->arr[hpos] == lprops);
heap->cnt -= 1;
if (hpos < heap->cnt) {
heap->arr[hpos] = heap->arr[heap->cnt];
@@ -296,13 +296,13 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
list_add(&lprops->list, &c->frdi_idx_list);
break;
default:
- ubifs_assert(0);
+ ubifs_assert(c, 0);
}
lprops->flags &= ~LPROPS_CAT_MASK;
lprops->flags |= cat;
c->in_a_category_cnt += 1;
- ubifs_assert(c->in_a_category_cnt <= c->main_lebs);
+ ubifs_assert(c, c->in_a_category_cnt <= c->main_lebs);
}
/**
@@ -324,20 +324,20 @@ static void ubifs_remove_from_cat(struct ubifs_info *c,
break;
case LPROPS_FREEABLE:
c->freeable_cnt -= 1;
- ubifs_assert(c->freeable_cnt >= 0);
+ ubifs_assert(c, c->freeable_cnt >= 0);
/* Fall through */
case LPROPS_UNCAT:
case LPROPS_EMPTY:
case LPROPS_FRDI_IDX:
- ubifs_assert(!list_empty(&lprops->list));
+ ubifs_assert(c, !list_empty(&lprops->list));
list_del(&lprops->list);
break;
default:
- ubifs_assert(0);
+ ubifs_assert(c, 0);
}
c->in_a_category_cnt -= 1;
- ubifs_assert(c->in_a_category_cnt >= 0);
+ ubifs_assert(c, c->in_a_category_cnt >= 0);
}
/**
@@ -369,7 +369,7 @@ void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops,
list_replace(&old_lprops->list, &new_lprops->list);
break;
default:
- ubifs_assert(0);
+ ubifs_assert(c, 0);
}
}
@@ -412,7 +412,7 @@ int ubifs_categorize_lprops(const struct ubifs_info *c,
return LPROPS_UNCAT;
if (lprops->free == c->leb_size) {
- ubifs_assert(!(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));
return LPROPS_EMPTY;
}
@@ -478,7 +478,7 @@ static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops)
*/
int ubifs_calc_dark(const struct ubifs_info *c, int spc)
{
- ubifs_assert(!(spc & 7));
+ ubifs_assert(c, !(spc & 7));
if (spc < c->dark_wm)
return spc;
@@ -543,27 +543,27 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
dbg_lp("LEB %d, free %d, dirty %d, flags %d",
lprops->lnum, free, dirty, flags);
- ubifs_assert(mutex_is_locked(&c->lp_mutex));
- ubifs_assert(c->lst.empty_lebs >= 0 &&
+ ubifs_assert(c, mutex_is_locked(&c->lp_mutex));
+ ubifs_assert(c, c->lst.empty_lebs >= 0 &&
c->lst.empty_lebs <= c->main_lebs);
- ubifs_assert(c->freeable_cnt >= 0);
- ubifs_assert(c->freeable_cnt <= c->main_lebs);
- ubifs_assert(c->lst.taken_empty_lebs >= 0);
- ubifs_assert(c->lst.taken_empty_lebs <= c->lst.empty_lebs);
- ubifs_assert(!(c->lst.total_free & 7) && !(c->lst.total_dirty & 7));
- ubifs_assert(!(c->lst.total_dead & 7) && !(c->lst.total_dark & 7));
- ubifs_assert(!(c->lst.total_used & 7));
- ubifs_assert(free == LPROPS_NC || free >= 0);
- ubifs_assert(dirty == LPROPS_NC || dirty >= 0);
+ ubifs_assert(c, c->freeable_cnt >= 0);
+ ubifs_assert(c, c->freeable_cnt <= c->main_lebs);
+ ubifs_assert(c, c->lst.taken_empty_lebs >= 0);
+ ubifs_assert(c, c->lst.taken_empty_lebs <= c->lst.empty_lebs);
+ ubifs_assert(c, !(c->lst.total_free & 7) && !(c->lst.total_dirty & 7));
+ ubifs_assert(c, !(c->lst.total_dead & 7) && !(c->lst.total_dark & 7));
+ ubifs_assert(c, !(c->lst.total_used & 7));
+ ubifs_assert(c, free == LPROPS_NC || free >= 0);
+ ubifs_assert(c, dirty == LPROPS_NC || dirty >= 0);
if (!is_lprops_dirty(c, lprops)) {
lprops = ubifs_lpt_lookup_dirty(c, lprops->lnum);
if (IS_ERR(lprops))
return lprops;
} else
- ubifs_assert(lprops == ubifs_lpt_lookup_dirty(c, lprops->lnum));
+ ubifs_assert(c, lprops == ubifs_lpt_lookup_dirty(c, lprops->lnum));
- ubifs_assert(!(lprops->free & 7) && !(lprops->dirty & 7));
+ ubifs_assert(c, !(lprops->free & 7) && !(lprops->dirty & 7));
spin_lock(&c->space_lock);
if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size)
@@ -768,15 +768,15 @@ const struct ubifs_lprops *ubifs_fast_find_free(struct ubifs_info *c)
struct ubifs_lprops *lprops;
struct ubifs_lpt_heap *heap;
- ubifs_assert(mutex_is_locked(&c->lp_mutex));
+ ubifs_assert(c, mutex_is_locked(&c->lp_mutex));
heap = &c->lpt_heap[LPROPS_FREE - 1];
if (heap->cnt == 0)
return NULL;
lprops = heap->arr[0];
- ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
- ubifs_assert(!(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));
return lprops;
}
@@ -791,15 +791,15 @@ const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c)
{
struct ubifs_lprops *lprops;
- ubifs_assert(mutex_is_locked(&c->lp_mutex));
+ ubifs_assert(c, mutex_is_locked(&c->lp_mutex));
if (list_empty(&c->empty_list))
return NULL;
lprops = list_entry(c->empty_list.next, struct ubifs_lprops, list);
- ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
- ubifs_assert(!(lprops->flags & LPROPS_INDEX));
- ubifs_assert(lprops->free == c->leb_size);
+ ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, lprops->free == c->leb_size);
return lprops;
}
@@ -814,16 +814,16 @@ const struct ubifs_lprops *ubifs_fast_find_freeable(struct ubifs_info *c)
{
struct ubifs_lprops *lprops;
- ubifs_assert(mutex_is_locked(&c->lp_mutex));
+ ubifs_assert(c, mutex_is_locked(&c->lp_mutex));
if (list_empty(&c->freeable_list))
return NULL;
lprops = list_entry(c->freeable_list.next, struct ubifs_lprops, list);
- ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
- ubifs_assert(!(lprops->flags & LPROPS_INDEX));
- ubifs_assert(lprops->free + lprops->dirty == c->leb_size);
- ubifs_assert(c->freeable_cnt > 0);
+ ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size);
+ ubifs_assert(c, c->freeable_cnt > 0);
return lprops;
}
@@ -838,15 +838,15 @@ const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c)
{
struct ubifs_lprops *lprops;
- ubifs_assert(mutex_is_locked(&c->lp_mutex));
+ ubifs_assert(c, mutex_is_locked(&c->lp_mutex));
if (list_empty(&c->frdi_idx_list))
return NULL;
lprops = list_entry(c->frdi_idx_list.next, struct ubifs_lprops, list);
- ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
- ubifs_assert((lprops->flags & LPROPS_INDEX));
- ubifs_assert(lprops->free + lprops->dirty == c->leb_size);
+ ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(c, (lprops->flags & LPROPS_INDEX));
+ ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size);
return lprops;
}
@@ -1089,10 +1089,6 @@ static int scan_check_cb(struct ubifs_info *c,
}
}
- buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
- if (!buf)
- return -ENOMEM;
-
/*
* After an unclean unmount, empty and freeable LEBs
* may contain garbage - do not scan them.
@@ -1111,6 +1107,10 @@ static int scan_check_cb(struct ubifs_info *c,
return LPT_SCAN_CONTINUE;
}
+ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
sleb = ubifs_scan(c, lnum, 0, buf, 0);
if (IS_ERR(sleb)) {
ret = PTR_ERR(sleb);
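
The scan_check_cb() hunks move the LEB-sized __vmalloc() below the "garbage LEB, do not scan" early return, so the skip path neither pays for nor can leak a buffer it never uses. The reordering as a pattern, with illustrative names:

    /* Run the cheap early-return checks first; allocate the big buffer
     * only on the path that actually uses it. */
    #include <stdio.h>
    #include <stdlib.h>

    static int scan_leb(int is_empty, size_t leb_size)
    {
        char *buf;

        if (is_empty)
            return 0;       /* nothing to scan, and nothing to free */

        buf = malloc(leb_size); /* allocated only when really needed */
        if (!buf)
            return -1;

        /* ... scan the LEB contents here ... */

        free(buf);
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", scan_leb(1, 1 << 17), scan_leb(0, 1 << 17));
        return 0;
    }
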
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index 8e99dad18880..31393370e334 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -225,21 +225,22 @@ static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs,
/**
* pack_bits - pack bit fields end-to-end.
+ * @c: UBIFS file-system description object
* @addr: address at which to pack (passed and next address returned)
* @pos: bit position at which to pack (passed and next position returned)
* @val: value to pack
* @nrbits: number of bits of value to pack (1-32)
*/
-static void pack_bits(uint8_t **addr, int *pos, uint32_t val, int nrbits)
+static void pack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, uint32_t val, int nrbits)
{
uint8_t *p = *addr;
int b = *pos;
- ubifs_assert(nrbits > 0);
- ubifs_assert(nrbits <= 32);
- ubifs_assert(*pos >= 0);
- ubifs_assert(*pos < 8);
- ubifs_assert((val >> nrbits) == 0 || nrbits == 32);
+ ubifs_assert(c, nrbits > 0);
+ ubifs_assert(c, nrbits <= 32);
+ ubifs_assert(c, *pos >= 0);
+ ubifs_assert(c, *pos < 8);
+ ubifs_assert(c, (val >> nrbits) == 0 || nrbits == 32);
if (b) {
*p |= ((uint8_t)val) << b;
nrbits += b;
@@ -274,13 +275,14 @@ static void pack_bits(uint8_t **addr, int *pos, uint32_t val, int nrbits)
/**
* ubifs_unpack_bits - unpack bit fields.
+ * @c: UBIFS file-system description object
* @addr: address at which to unpack (passed and next address returned)
* @pos: bit position at which to unpack (passed and next position returned)
* @nrbits: number of bits of value to unpack (1-32)
*
* This function returns the unpacked value.
*/
-uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits)
+uint32_t ubifs_unpack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, int nrbits)
{
const int k = 32 - nrbits;
uint8_t *p = *addr;
@@ -288,10 +290,10 @@ uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits)
uint32_t uninitialized_var(val);
const int bytes = (nrbits + b + 7) >> 3;
- ubifs_assert(nrbits > 0);
- ubifs_assert(nrbits <= 32);
- ubifs_assert(*pos >= 0);
- ubifs_assert(*pos < 8);
+ ubifs_assert(c, nrbits > 0);
+ ubifs_assert(c, nrbits <= 32);
+ ubifs_assert(c, *pos >= 0);
+ ubifs_assert(c, *pos < 8);
if (b) {
switch (bytes) {
case 2:
@@ -337,7 +339,7 @@ uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits)
p += nrbits >> 3;
*addr = p;
*pos = b;
- ubifs_assert((val >> nrbits) == 0 || nrbits - b == 32);
+ ubifs_assert(c, (val >> nrbits) == 0 || nrbits - b == 32);
return val;
}
@@ -354,24 +356,24 @@ void ubifs_pack_pnode(struct ubifs_info *c, void *buf,
int i, pos = 0;
uint16_t crc;
- pack_bits(&addr, &pos, UBIFS_LPT_PNODE, UBIFS_LPT_TYPE_BITS);
+ pack_bits(c, &addr, &pos, UBIFS_LPT_PNODE, UBIFS_LPT_TYPE_BITS);
if (c->big_lpt)
- pack_bits(&addr, &pos, pnode->num, c->pcnt_bits);
+ pack_bits(c, &addr, &pos, pnode->num, c->pcnt_bits);
for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
- pack_bits(&addr, &pos, pnode->lprops[i].free >> 3,
+ pack_bits(c, &addr, &pos, pnode->lprops[i].free >> 3,
c->space_bits);
- pack_bits(&addr, &pos, pnode->lprops[i].dirty >> 3,
+ pack_bits(c, &addr, &pos, pnode->lprops[i].dirty >> 3,
c->space_bits);
if (pnode->lprops[i].flags & LPROPS_INDEX)
- pack_bits(&addr, &pos, 1, 1);
+ pack_bits(c, &addr, &pos, 1, 1);
else
- pack_bits(&addr, &pos, 0, 1);
+ pack_bits(c, &addr, &pos, 0, 1);
}
crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
c->pnode_sz - UBIFS_LPT_CRC_BYTES);
addr = buf;
pos = 0;
- pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
+ pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS);
}
/**
@@ -387,23 +389,23 @@ void ubifs_pack_nnode(struct ubifs_info *c, void *buf,
int i, pos = 0;
uint16_t crc;
- pack_bits(&addr, &pos, UBIFS_LPT_NNODE, UBIFS_LPT_TYPE_BITS);
+ pack_bits(c, &addr, &pos, UBIFS_LPT_NNODE, UBIFS_LPT_TYPE_BITS);
if (c->big_lpt)
- pack_bits(&addr, &pos, nnode->num, c->pcnt_bits);
+ pack_bits(c, &addr, &pos, nnode->num, c->pcnt_bits);
for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
int lnum = nnode->nbranch[i].lnum;
if (lnum == 0)
lnum = c->lpt_last + 1;
- pack_bits(&addr, &pos, lnum - c->lpt_first, c->lpt_lnum_bits);
- pack_bits(&addr, &pos, nnode->nbranch[i].offs,
+ pack_bits(c, &addr, &pos, lnum - c->lpt_first, c->lpt_lnum_bits);
+ pack_bits(c, &addr, &pos, nnode->nbranch[i].offs,
c->lpt_offs_bits);
}
crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
c->nnode_sz - UBIFS_LPT_CRC_BYTES);
addr = buf;
pos = 0;
- pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
+ pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS);
}
/**
@@ -419,16 +421,16 @@ void ubifs_pack_ltab(struct ubifs_info *c, void *buf,
int i, pos = 0;
uint16_t crc;
- pack_bits(&addr, &pos, UBIFS_LPT_LTAB, UBIFS_LPT_TYPE_BITS);
+ pack_bits(c, &addr, &pos, UBIFS_LPT_LTAB, UBIFS_LPT_TYPE_BITS);
for (i = 0; i < c->lpt_lebs; i++) {
- pack_bits(&addr, &pos, ltab[i].free, c->lpt_spc_bits);
- pack_bits(&addr, &pos, ltab[i].dirty, c->lpt_spc_bits);
+ pack_bits(c, &addr, &pos, ltab[i].free, c->lpt_spc_bits);
+ pack_bits(c, &addr, &pos, ltab[i].dirty, c->lpt_spc_bits);
}
crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
c->ltab_sz - UBIFS_LPT_CRC_BYTES);
addr = buf;
pos = 0;
- pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
+ pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS);
}
/**
@@ -443,14 +445,14 @@ void ubifs_pack_lsave(struct ubifs_info *c, void *buf, int *lsave)
int i, pos = 0;
uint16_t crc;
- pack_bits(&addr, &pos, UBIFS_LPT_LSAVE, UBIFS_LPT_TYPE_BITS);
+ pack_bits(c, &addr, &pos, UBIFS_LPT_LSAVE, UBIFS_LPT_TYPE_BITS);
for (i = 0; i < c->lsave_cnt; i++)
- pack_bits(&addr, &pos, lsave[i], c->lnum_bits);
+ pack_bits(c, &addr, &pos, lsave[i], c->lnum_bits);
crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
c->lsave_sz - UBIFS_LPT_CRC_BYTES);
addr = buf;
pos = 0;
- pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
+ pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS);
}
/**
@@ -465,7 +467,7 @@ void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty)
return;
dbg_lp("LEB %d add %d to %d",
lnum, dirty, c->ltab[lnum - c->lpt_first].dirty);
- ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last);
+ ubifs_assert(c, lnum >= c->lpt_first && lnum <= c->lpt_last);
c->ltab[lnum - c->lpt_first].dirty += dirty;
}
@@ -481,7 +483,7 @@ static void set_ltab(struct ubifs_info *c, int lnum, int free, int dirty)
dbg_lp("LEB %d free %d dirty %d to %d %d",
lnum, c->ltab[lnum - c->lpt_first].free,
c->ltab[lnum - c->lpt_first].dirty, free, dirty);
- ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last);
+ ubifs_assert(c, lnum >= c->lpt_first && lnum <= c->lpt_last);
c->ltab[lnum - c->lpt_first].free = free;
c->ltab[lnum - c->lpt_first].dirty = dirty;
}
@@ -639,7 +641,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
goto out;
}
- ubifs_assert(!c->ltab);
+ ubifs_assert(c, !c->ltab);
c->ltab = ltab; /* Needed by set_ltab */
/* Initialize LPT's own lprops */
@@ -918,7 +920,7 @@ static int check_lpt_crc(const struct ubifs_info *c, void *buf, int len)
uint8_t *addr = buf;
uint16_t crc, calc_crc;
- crc = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_CRC_BITS);
+ crc = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_CRC_BITS);
calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
len - UBIFS_LPT_CRC_BYTES);
if (crc != calc_crc) {
@@ -944,7 +946,7 @@ static int check_lpt_type(const struct ubifs_info *c, uint8_t **addr,
{
int node_type;
- node_type = ubifs_unpack_bits(addr, pos, UBIFS_LPT_TYPE_BITS);
+ node_type = ubifs_unpack_bits(c, addr, pos, UBIFS_LPT_TYPE_BITS);
if (node_type != type) {
ubifs_err(c, "invalid type (%d) in LPT node type %d",
node_type, type);
@@ -972,16 +974,16 @@ static int unpack_pnode(const struct ubifs_info *c, void *buf,
if (err)
return err;
if (c->big_lpt)
- pnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits);
+ pnode->num = ubifs_unpack_bits(c, &addr, &pos, c->pcnt_bits);
for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
struct ubifs_lprops * const lprops = &pnode->lprops[i];
- lprops->free = ubifs_unpack_bits(&addr, &pos, c->space_bits);
+ lprops->free = ubifs_unpack_bits(c, &addr, &pos, c->space_bits);
lprops->free <<= 3;
- lprops->dirty = ubifs_unpack_bits(&addr, &pos, c->space_bits);
+ lprops->dirty = ubifs_unpack_bits(c, &addr, &pos, c->space_bits);
lprops->dirty <<= 3;
- if (ubifs_unpack_bits(&addr, &pos, 1))
+ if (ubifs_unpack_bits(c, &addr, &pos, 1))
lprops->flags = LPROPS_INDEX;
else
lprops->flags = 0;
@@ -1009,16 +1011,16 @@ int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
if (err)
return err;
if (c->big_lpt)
- nnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits);
+ nnode->num = ubifs_unpack_bits(c, &addr, &pos, c->pcnt_bits);
for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
int lnum;
- lnum = ubifs_unpack_bits(&addr, &pos, c->lpt_lnum_bits) +
+ lnum = ubifs_unpack_bits(c, &addr, &pos, c->lpt_lnum_bits) +
c->lpt_first;
if (lnum == c->lpt_last + 1)
lnum = 0;
nnode->nbranch[i].lnum = lnum;
- nnode->nbranch[i].offs = ubifs_unpack_bits(&addr, &pos,
+ nnode->nbranch[i].offs = ubifs_unpack_bits(c, &addr, &pos,
c->lpt_offs_bits);
}
err = check_lpt_crc(c, buf, c->nnode_sz);
@@ -1041,8 +1043,8 @@ static int unpack_ltab(const struct ubifs_info *c, void *buf)
if (err)
return err;
for (i = 0; i < c->lpt_lebs; i++) {
- int free = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits);
- int dirty = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits);
+ int free = ubifs_unpack_bits(c, &addr, &pos, c->lpt_spc_bits);
+ int dirty = ubifs_unpack_bits(c, &addr, &pos, c->lpt_spc_bits);
if (free < 0 || free > c->leb_size || dirty < 0 ||
dirty > c->leb_size || free + dirty > c->leb_size)
@@ -1073,7 +1075,7 @@ static int unpack_lsave(const struct ubifs_info *c, void *buf)
if (err)
return err;
for (i = 0; i < c->lsave_cnt; i++) {
- int lnum = ubifs_unpack_bits(&addr, &pos, c->lnum_bits);
+ int lnum = ubifs_unpack_bits(c, &addr, &pos, c->lnum_bits);
if (lnum < c->main_first || lnum >= c->leb_cnt)
return -EINVAL;
@@ -1515,7 +1517,7 @@ static struct ubifs_nnode *dirty_cow_nnode(struct ubifs_info *c,
branch->cnode->parent = n;
}
- ubifs_assert(!test_bit(OBSOLETE_CNODE, &nnode->flags));
+ ubifs_assert(c, !test_bit(OBSOLETE_CNODE, &nnode->flags));
__set_bit(OBSOLETE_CNODE, &nnode->flags);
c->dirty_nn_cnt += 1;
@@ -1558,7 +1560,7 @@ static struct ubifs_pnode *dirty_cow_pnode(struct ubifs_info *c,
__clear_bit(COW_CNODE, &p->flags);
replace_cats(c, pnode, p);
- ubifs_assert(!test_bit(OBSOLETE_CNODE, &pnode->flags));
+ ubifs_assert(c, !test_bit(OBSOLETE_CNODE, &pnode->flags));
__set_bit(OBSOLETE_CNODE, &pnode->flags);
c->dirty_pn_cnt += 1;
@@ -1613,7 +1615,7 @@ struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum)
dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum,
pnode->lprops[iip].free, pnode->lprops[iip].dirty,
pnode->lprops[iip].flags);
- ubifs_assert(test_bit(DIRTY_CNODE, &pnode->flags));
+ ubifs_assert(c, test_bit(DIRTY_CNODE, &pnode->flags));
return &pnode->lprops[iip];
}
@@ -1889,9 +1891,9 @@ static struct ubifs_pnode *scan_get_pnode(struct ubifs_info *c,
lprops->flags = ubifs_categorize_lprops(c, lprops);
}
} else {
- ubifs_assert(branch->lnum >= c->lpt_first &&
+ ubifs_assert(c, branch->lnum >= c->lpt_first &&
branch->lnum <= c->lpt_last);
- ubifs_assert(branch->offs >= 0 && branch->offs < c->leb_size);
+ ubifs_assert(c, branch->offs >= 0 && branch->offs < c->leb_size);
err = ubifs_leb_read(c, branch->lnum, buf, branch->offs,
c->pnode_sz, 1);
if (err)
@@ -1935,8 +1937,8 @@ int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum,
start_lnum = c->main_first;
}
- ubifs_assert(start_lnum >= c->main_first && start_lnum < c->leb_cnt);
- ubifs_assert(end_lnum >= c->main_first && end_lnum < c->leb_cnt);
+ ubifs_assert(c, start_lnum >= c->main_first && start_lnum < c->leb_cnt);
+ ubifs_assert(c, end_lnum >= c->main_first && end_lnum < c->leb_cnt);
if (!c->nroot) {
err = ubifs_read_nnode(c, NULL, 0);
@@ -2055,7 +2057,7 @@ again:
iip = pnode->iip;
while (1) {
h -= 1;
- ubifs_assert(h >= 0);
+ ubifs_assert(c, h >= 0);
nnode = path[h].ptr.nnode;
if (iip + 1 < UBIFS_LPT_FANOUT)
break;
@@ -2234,7 +2236,7 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
return 0;
while (cnode) {
- ubifs_assert(row >= 0);
+ ubifs_assert(c, row >= 0);
nnode = cnode->parent;
if (cnode->level) {
/* cnode is a nnode */
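
pack_bits() and ubifs_unpack_bits() stream variable-width fields through an (addr, pos) cursor so LPT nodes carry no padding between fields; the asserts pin nrbits to 1..32 and keep the bit position inside a byte. A minimal LSB-first cursor in the same spirit (not the kernel implementation):

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct bitcur { uint8_t *buf; int bit; };

    static void put_bits(struct bitcur *c, uint32_t val, int nrbits)
    {
        int i;

        assert(nrbits > 0 && nrbits <= 32);
        for (i = 0; i < nrbits; i++, c->bit++)
            if (val >> i & 1)
                c->buf[c->bit >> 3] |= 1u << (c->bit & 7);
    }

    static uint32_t get_bits(struct bitcur *c, int nrbits)
    {
        uint32_t val = 0;
        int i;

        assert(nrbits > 0 && nrbits <= 32);
        for (i = 0; i < nrbits; i++, c->bit++)
            val |= (uint32_t)(c->buf[c->bit >> 3] >> (c->bit & 7) & 1) << i;
        return val;
    }

    int main(void)
    {
        uint8_t buf[8];
        struct bitcur w = { buf, 0 }, r = { buf, 0 };

        memset(buf, 0, sizeof(buf));
        put_bits(&w, 5, 4);     /* 4-bit type field */
        put_bits(&w, 300, 10);  /* 10-bit offset field, no padding */
        printf("%u %u\n", get_bits(&r, 4), get_bits(&r, 10));
        return 0;
    }
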
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index 78da65b2fb85..7ce30994bbba 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -34,13 +34,14 @@ static int dbg_populate_lsave(struct ubifs_info *c);
/**
* first_dirty_cnode - find first dirty cnode.
+ * @c: UBIFS file-system description object
* @nnode: nnode at which to start
*
* This function returns the first dirty cnode or %NULL if there is not one.
*/
-static struct ubifs_cnode *first_dirty_cnode(struct ubifs_nnode *nnode)
+static struct ubifs_cnode *first_dirty_cnode(const struct ubifs_info *c, struct ubifs_nnode *nnode)
{
- ubifs_assert(nnode);
+ ubifs_assert(c, nnode);
while (1) {
int i, cont = 0;
@@ -64,16 +65,17 @@ static struct ubifs_cnode *first_dirty_cnode(struct ubifs_nnode *nnode)
/**
* next_dirty_cnode - find next dirty cnode.
+ * @c: UBIFS file-system description object
* @cnode: cnode from which to begin searching
*
* This function returns the next dirty cnode or %NULL if there is not one.
*/
-static struct ubifs_cnode *next_dirty_cnode(struct ubifs_cnode *cnode)
+static struct ubifs_cnode *next_dirty_cnode(const struct ubifs_info *c, struct ubifs_cnode *cnode)
{
struct ubifs_nnode *nnode;
int i;
- ubifs_assert(cnode);
+ ubifs_assert(c, cnode);
nnode = cnode->parent;
if (!nnode)
return NULL;
@@ -83,7 +85,7 @@ static struct ubifs_cnode *next_dirty_cnode(struct ubifs_cnode *cnode)
if (cnode->level == 0)
return cnode; /* cnode is a pnode */
/* cnode is a nnode */
- return first_dirty_cnode((struct ubifs_nnode *)cnode);
+ return first_dirty_cnode(c, (struct ubifs_nnode *)cnode);
}
}
return (struct ubifs_cnode *)nnode;
@@ -106,15 +108,15 @@ static int get_cnodes_to_commit(struct ubifs_info *c)
if (!test_bit(DIRTY_CNODE, &c->nroot->flags))
return 0;
- c->lpt_cnext = first_dirty_cnode(c->nroot);
+ c->lpt_cnext = first_dirty_cnode(c, c->nroot);
cnode = c->lpt_cnext;
if (!cnode)
return 0;
cnt += 1;
while (1) {
- ubifs_assert(!test_bit(COW_CNODE, &cnode->flags));
+ ubifs_assert(c, !test_bit(COW_CNODE, &cnode->flags));
__set_bit(COW_CNODE, &cnode->flags);
- cnext = next_dirty_cnode(cnode);
+ cnext = next_dirty_cnode(c, cnode);
if (!cnext) {
cnode->cnext = c->lpt_cnext;
break;
@@ -125,7 +127,7 @@ static int get_cnodes_to_commit(struct ubifs_info *c)
}
dbg_cmt("committing %d cnodes", cnt);
dbg_lp("committing %d cnodes", cnt);
- ubifs_assert(cnt == c->dirty_nn_cnt + c->dirty_pn_cnt);
+ ubifs_assert(c, cnt == c->dirty_nn_cnt + c->dirty_pn_cnt);
return cnt;
}
@@ -141,7 +143,7 @@ static void upd_ltab(struct ubifs_info *c, int lnum, int free, int dirty)
dbg_lp("LEB %d free %d dirty %d to %d +%d",
lnum, c->ltab[lnum - c->lpt_first].free,
c->ltab[lnum - c->lpt_first].dirty, free, dirty);
- ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last);
+ ubifs_assert(c, lnum >= c->lpt_first && lnum <= c->lpt_last);
c->ltab[lnum - c->lpt_first].free = free;
c->ltab[lnum - c->lpt_first].dirty += dirty;
}
@@ -237,7 +239,7 @@ static int layout_cnodes(struct ubifs_info *c)
if (err)
goto no_space;
offs = 0;
- ubifs_assert(lnum >= c->lpt_first &&
+ ubifs_assert(c, lnum >= c->lpt_first &&
lnum <= c->lpt_last);
/* Try to place lsave and ltab nicely */
if (!done_lsave) {
@@ -280,7 +282,7 @@ static int layout_cnodes(struct ubifs_info *c)
if (err)
goto no_space;
offs = 0;
- ubifs_assert(lnum >= c->lpt_first &&
+ ubifs_assert(c, lnum >= c->lpt_first &&
lnum <= c->lpt_last);
}
done_lsave = 1;
@@ -300,7 +302,7 @@ static int layout_cnodes(struct ubifs_info *c)
if (err)
goto no_space;
offs = 0;
- ubifs_assert(lnum >= c->lpt_first &&
+ ubifs_assert(c, lnum >= c->lpt_first &&
lnum <= c->lpt_last);
}
c->ltab_lnum = lnum;
@@ -423,7 +425,7 @@ static int write_cnodes(struct ubifs_info *c)
if (err)
goto no_space;
offs = from = 0;
- ubifs_assert(lnum >= c->lpt_first &&
+ ubifs_assert(c, lnum >= c->lpt_first &&
lnum <= c->lpt_last);
err = ubifs_leb_unmap(c, lnum);
if (err)
@@ -480,7 +482,7 @@ static int write_cnodes(struct ubifs_info *c)
if (err)
goto no_space;
offs = from = 0;
- ubifs_assert(lnum >= c->lpt_first &&
+ ubifs_assert(c, lnum >= c->lpt_first &&
lnum <= c->lpt_last);
err = ubifs_leb_unmap(c, lnum);
if (err)
@@ -506,7 +508,7 @@ static int write_cnodes(struct ubifs_info *c)
if (err)
goto no_space;
offs = from = 0;
- ubifs_assert(lnum >= c->lpt_first &&
+ ubifs_assert(c, lnum >= c->lpt_first &&
lnum <= c->lpt_last);
err = ubifs_leb_unmap(c, lnum);
if (err)
@@ -806,7 +808,7 @@ static void populate_lsave(struct ubifs_info *c)
struct ubifs_lpt_heap *heap;
int i, cnt = 0;
- ubifs_assert(c->big_lpt);
+ ubifs_assert(c, c->big_lpt);
if (!(c->lpt_drty_flgs & LSAVE_DIRTY)) {
c->lpt_drty_flgs |= LSAVE_DIRTY;
ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz);
@@ -1095,8 +1097,8 @@ static int get_lpt_node_type(const struct ubifs_info *c, uint8_t *buf,
uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
int pos = 0, node_type;
- node_type = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_TYPE_BITS);
- *node_num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits);
+ node_type = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_TYPE_BITS);
+ *node_num = ubifs_unpack_bits(c, &addr, &pos, c->pcnt_bits);
return node_type;
}
@@ -1116,7 +1118,7 @@ static int is_a_node(const struct ubifs_info *c, uint8_t *buf, int len)
if (len < UBIFS_LPT_CRC_BYTES + (UBIFS_LPT_TYPE_BITS + 7) / 8)
return 0;
- node_type = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_TYPE_BITS);
+ node_type = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_TYPE_BITS);
if (node_type == UBIFS_LPT_NOT_A_NODE)
return 0;
node_len = get_lpt_node_len(c, node_type);
@@ -1124,7 +1126,7 @@ static int is_a_node(const struct ubifs_info *c, uint8_t *buf, int len)
return 0;
pos = 0;
addr = buf;
- crc = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_CRC_BITS);
+ crc = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_CRC_BITS);
calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
node_len - UBIFS_LPT_CRC_BYTES);
if (crc != calc_crc)
@@ -1170,7 +1172,7 @@ static int lpt_gc_lnum(struct ubifs_info *c, int lnum)
node_type = get_lpt_node_type(c, buf, &node_num);
node_len = get_lpt_node_len(c, node_type);
offs = c->leb_size - len;
- ubifs_assert(node_len != 0);
+ ubifs_assert(c, node_len != 0);
mutex_lock(&c->lp_mutex);
err = make_node_dirty(c, node_type, node_num, lnum, offs);
mutex_unlock(&c->lp_mutex);
@@ -1195,7 +1197,7 @@ static int lpt_gc(struct ubifs_info *c)
mutex_lock(&c->lp_mutex);
for (i = 0; i < c->lpt_lebs; i++) {
- ubifs_assert(!c->ltab[i].tgc);
+ ubifs_assert(c, !c->ltab[i].tgc);
if (i + c->lpt_first == c->nhead_lnum ||
c->ltab[i].free + c->ltab[i].dirty == c->leb_size)
continue;
@@ -1271,7 +1273,7 @@ int ubifs_lpt_start_commit(struct ubifs_info *c)
populate_lsave(c);
cnt = get_cnodes_to_commit(c);
- ubifs_assert(cnt != 0);
+ ubifs_assert(c, cnt != 0);
err = layout_cnodes(c);
if (err)
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
index c6a5e39e2ba5..9df4a41bba52 100644
--- a/fs/ubifs/master.c
+++ b/fs/ubifs/master.c
@@ -360,7 +360,7 @@ int ubifs_write_master(struct ubifs_info *c)
{
int err, lnum, offs, len;
- ubifs_assert(!c->ro_media && !c->ro_mount);
+ ubifs_assert(c, !c->ro_media && !c->ro_mount);
if (c->ro_error)
return -EROFS;
diff --git a/fs/ubifs/misc.c b/fs/ubifs/misc.c
index 586fd5b578a7..cd23de0f211d 100644
--- a/fs/ubifs/misc.c
+++ b/fs/ubifs/misc.c
@@ -56,3 +56,14 @@ void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...)
va_end(args);
}
+
+static const char * const assert_names[] = {
+ [ASSACT_REPORT] = "report",
+ [ASSACT_RO] = "read-only",
+ [ASSACT_PANIC] = "panic",
+};
+
+const char *ubifs_assert_action_name(struct ubifs_info *c)
+{
+ return assert_names[c->assert_action];
+}
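The table above is indexed by c->assert_action; the ASSACT_* values themselves are introduced in the ubifs.h hunk further down. The failure handler is not part of this excerpt, but a minimal sketch of how a two-argument ubifs_assert() presumably dispatches on the chosen action looks like this (macro and function bodies are illustrative assumptions, not the exact code of this series):

	#define ubifs_assert(c, expr) do {                                    \
		if (unlikely(!(expr)))                                        \
			ubifs_assert_failed((struct ubifs_info *)(c), #expr,  \
					    __FILE__, __LINE__);              \
	} while (0)

	void ubifs_assert_failed(struct ubifs_info *c, const char *expr,
				 const char *file, int line)
	{
		ubifs_err(c, "UBIFS assert failed: %s, in %s:%u", expr, file, line);

		switch (c->assert_action) {
		case ASSACT_PANIC:
			BUG();				/* assumption: "panic" calls BUG() */
			break;
		case ASSACT_RO:
			ubifs_ro_mode(c, -EINVAL);	/* assumption: force read-only */
			break;
		case ASSACT_REPORT:
		default:
			dump_stack();			/* report only, keep going */
			break;
		}
	}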
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h
index caf83d68fb38..21d35d7dd975 100644
--- a/fs/ubifs/misc.h
+++ b/fs/ubifs/misc.h
@@ -105,25 +105,27 @@ static inline struct ubifs_inode *ubifs_inode(const struct inode *inode)
/**
* ubifs_compr_present - check if compressor was compiled in.
+ * @c: the UBIFS file-system description object
* @compr_type: compressor type to check
*
* This function returns %1 if the compressor of type @compr_type is present, and
* %0 if not.
*/
-static inline int ubifs_compr_present(int compr_type)
+static inline int ubifs_compr_present(struct ubifs_info *c, int compr_type)
{
- ubifs_assert(compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT);
+ ubifs_assert(c, compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT);
return !!ubifs_compressors[compr_type]->capi_name;
}
/**
* ubifs_compr_name - get compressor name string by its type.
+ * @c: the UBIFS file-system description object
* @compr_type: compressor type
*
* This function returns the compressor name string.
*/
-static inline const char *ubifs_compr_name(int compr_type)
+static inline const char *ubifs_compr_name(struct ubifs_info *c, int compr_type)
{
- ubifs_assert(compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT);
+ ubifs_assert(c, compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT);
return ubifs_compressors[compr_type]->name;
}
@@ -262,8 +264,8 @@ static inline void ubifs_get_lprops(struct ubifs_info *c)
*/
static inline void ubifs_release_lprops(struct ubifs_info *c)
{
- ubifs_assert(mutex_is_locked(&c->lp_mutex));
- ubifs_assert(c->lst.empty_lebs >= 0 &&
+ ubifs_assert(c, mutex_is_locked(&c->lp_mutex));
+ ubifs_assert(c, c->lst.empty_lebs >= 0 &&
c->lst.empty_lebs <= c->main_lebs);
mutex_unlock(&c->lp_mutex);
}
@@ -285,4 +287,6 @@ static inline int ubifs_next_log_lnum(const struct ubifs_info *c, int lnum)
return lnum;
}
+const char *ubifs_assert_action_name(struct ubifs_info *c);
+
#endif /* __UBIFS_MISC_H__ */
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index caf2d123e9ee..8f70494efb0c 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -172,8 +172,8 @@ int ubifs_orphan_start_commit(struct ubifs_info *c)
spin_lock(&c->orphan_lock);
last = &c->orph_cnext;
list_for_each_entry(orphan, &c->orph_new, new_list) {
- ubifs_assert(orphan->new);
- ubifs_assert(!orphan->cmt);
+ ubifs_assert(c, orphan->new);
+ ubifs_assert(c, !orphan->cmt);
orphan->new = 0;
orphan->cmt = 1;
*last = orphan;
@@ -244,7 +244,7 @@ static int do_write_orph_node(struct ubifs_info *c, int len, int atomic)
int err = 0;
if (atomic) {
- ubifs_assert(c->ohead_offs == 0);
+ ubifs_assert(c, c->ohead_offs == 0);
ubifs_prepare_node(c, c->orph_buf, len, 1);
len = ALIGN(len, c->min_io_size);
err = ubifs_leb_change(c, c->ohead_lnum, c->orph_buf, len);
@@ -276,7 +276,7 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
struct ubifs_orph_node *orph;
int gap, err, len, cnt, i;
- ubifs_assert(c->cmt_orphans > 0);
+ ubifs_assert(c, c->cmt_orphans > 0);
gap = c->leb_size - c->ohead_offs;
if (gap < UBIFS_ORPH_NODE_SZ + sizeof(__le64)) {
c->ohead_lnum += 1;
@@ -295,14 +295,14 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
if (cnt > c->cmt_orphans)
cnt = c->cmt_orphans;
len = UBIFS_ORPH_NODE_SZ + cnt * sizeof(__le64);
- ubifs_assert(c->orph_buf);
+ ubifs_assert(c, c->orph_buf);
orph = c->orph_buf;
orph->ch.node_type = UBIFS_ORPH_NODE;
spin_lock(&c->orphan_lock);
cnext = c->orph_cnext;
for (i = 0; i < cnt; i++) {
orphan = cnext;
- ubifs_assert(orphan->cmt);
+ ubifs_assert(c, orphan->cmt);
orph->inos[i] = cpu_to_le64(orphan->inum);
orphan->cmt = 0;
cnext = orphan->cnext;
@@ -316,9 +316,9 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
else
/* Mark the last node of the commit */
orph->cmt_no = cpu_to_le64((c->cmt_no) | (1ULL << 63));
- ubifs_assert(c->ohead_offs + len <= c->leb_size);
- ubifs_assert(c->ohead_lnum >= c->orph_first);
- ubifs_assert(c->ohead_lnum <= c->orph_last);
+ ubifs_assert(c, c->ohead_offs + len <= c->leb_size);
+ ubifs_assert(c, c->ohead_lnum >= c->orph_first);
+ ubifs_assert(c, c->ohead_lnum <= c->orph_last);
err = do_write_orph_node(c, len, atomic);
c->ohead_offs += ALIGN(len, c->min_io_size);
c->ohead_offs = ALIGN(c->ohead_offs, 8);
@@ -388,7 +388,7 @@ static int consolidate(struct ubifs_info *c)
cnt += 1;
}
*last = NULL;
- ubifs_assert(cnt == c->tot_orphans - c->new_orphans);
+ ubifs_assert(c, cnt == c->tot_orphans - c->new_orphans);
c->cmt_orphans = cnt;
c->ohead_lnum = c->orph_first;
c->ohead_offs = 0;
@@ -415,7 +415,7 @@ static int commit_orphans(struct ubifs_info *c)
{
int avail, atomic = 0, err;
- ubifs_assert(c->cmt_orphans > 0);
+ ubifs_assert(c, c->cmt_orphans > 0);
avail = avail_orphs(c);
if (avail < c->cmt_orphans) {
/* Not enough space to write new orphans, so consolidate */
@@ -446,8 +446,8 @@ static void erase_deleted(struct ubifs_info *c)
while (dnext) {
orphan = dnext;
dnext = orphan->dnext;
- ubifs_assert(!orphan->new);
- ubifs_assert(orphan->del);
+ ubifs_assert(c, !orphan->new);
+ ubifs_assert(c, orphan->del);
rb_erase(&orphan->rb, &c->orph_tree);
list_del(&orphan->list);
c->tot_orphans -= 1;
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 3af4472061cc..984e30e83c0b 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -444,7 +444,7 @@ static void clean_buf(const struct ubifs_info *c, void **buf, int lnum,
dbg_rcvry("cleaning corruption at %d:%d", lnum, *offs);
- ubifs_assert(!(*offs & 7));
+ ubifs_assert(c, !(*offs & 7));
empty_offs = ALIGN(*offs, c->min_io_size);
pad_len = empty_offs - *offs;
ubifs_pad(c, *buf, pad_len);
@@ -644,7 +644,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
if (IS_ERR(sleb))
return sleb;
- ubifs_assert(len >= 8);
+ ubifs_assert(c, len >= 8);
while (len >= 8) {
dbg_scan("look at LEB %d:%d (%d bytes left)",
lnum, offs, len);
@@ -966,7 +966,7 @@ int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf)
{
int err;
- ubifs_assert(!c->ro_mount || c->remounting_rw);
+ ubifs_assert(c, !c->ro_mount || c->remounting_rw);
dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs);
err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf);
@@ -1187,8 +1187,8 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c)
return grab_empty_leb(c);
}
- ubifs_assert(!(lp.flags & LPROPS_INDEX));
- ubifs_assert(lp.free + lp.dirty >= wbuf->offs);
+ ubifs_assert(c, !(lp.flags & LPROPS_INDEX));
+ ubifs_assert(c, lp.free + lp.dirty >= wbuf->offs);
/*
* We run the commit before garbage collection otherwise subsequent
@@ -1216,7 +1216,7 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c)
return err;
}
- ubifs_assert(err == LEB_RETAINED);
+ ubifs_assert(c, err == LEB_RETAINED);
if (err != LEB_RETAINED)
return -EINVAL;
@@ -1507,7 +1507,7 @@ int ubifs_recover_size(struct ubifs_info *c)
struct inode *inode;
struct ubifs_inode *ui;
- ubifs_assert(!e->inode);
+ ubifs_assert(c, !e->inode);
inode = ubifs_iget(c->vfs_sb, e->inum);
if (IS_ERR(inode))
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 85c2a43082b7..4844538eb926 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -273,6 +273,7 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
static int replay_entries_cmp(void *priv, struct list_head *a,
struct list_head *b)
{
+ struct ubifs_info *c = priv;
struct replay_entry *ra, *rb;
cond_resched();
@@ -281,7 +282,7 @@ static int replay_entries_cmp(void *priv, struct list_head *a,
ra = list_entry(a, struct replay_entry, list);
rb = list_entry(b, struct replay_entry, list);
- ubifs_assert(ra->sqnum != rb->sqnum);
+ ubifs_assert(c, ra->sqnum != rb->sqnum);
if (ra->sqnum > rb->sqnum)
return 1;
return -1;
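The new priv-to-c assignment works because list_sort() hands its first argument back to the comparator as @priv on every call. The call site is outside this hunk; presumably it passes the ubifs_info along these lines (the replay_list name is taken from the surrounding file and assumed here):

	list_sort(c, &c->replay_list, replay_entries_cmp);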
@@ -668,9 +669,9 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
goto out;
}
- ubifs_assert(ubifs_search_bud(c, lnum));
- ubifs_assert(sleb->endpt - offs >= used);
- ubifs_assert(sleb->endpt % c->min_io_size == 0);
+ ubifs_assert(c, ubifs_search_bud(c, lnum));
+ ubifs_assert(c, sleb->endpt - offs >= used);
+ ubifs_assert(c, sleb->endpt % c->min_io_size == 0);
b->dirty = sleb->endpt - offs - used;
b->free = c->leb_size - sleb->endpt;
@@ -706,7 +707,7 @@ static int replay_buds(struct ubifs_info *c)
if (err)
return err;
- ubifs_assert(b->sqnum > prev_sqnum);
+ ubifs_assert(c, b->sqnum > prev_sqnum);
prev_sqnum = b->sqnum;
}
@@ -1067,7 +1068,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
c->bi.uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt);
c->bi.uncommitted_idx *= c->max_idx_node_sz;
- ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
+ ubifs_assert(c, c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, highest_inum %lu",
c->lhead_lnum, c->lhead_offs, c->max_sqnum,
(unsigned long)c->highest_inum);
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 8c25081a5109..bf17f58908ff 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -85,7 +85,7 @@ static int create_default_filesystem(struct ubifs_info *c)
long long tmp64, main_bytes;
__le64 tmp_le64;
__le32 tmp_le32;
- struct timespec ts;
+ struct timespec64 ts;
/* Some functions called from here depend on the @c->key_len field */
c->key_len = UBIFS_SK_LEN;
@@ -301,8 +301,8 @@ static int create_default_filesystem(struct ubifs_info *c)
ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
ino->nlink = cpu_to_le32(2);
- ktime_get_real_ts(&ts);
- ts = timespec_trunc(ts, DEFAULT_TIME_GRAN);
+ ktime_get_real_ts64(&ts);
+ ts = timespec64_trunc(ts, DEFAULT_TIME_GRAN);
tmp_le64 = cpu_to_le64(ts.tv_sec);
ino->atime_sec = tmp_le64;
ino->ctime_sec = tmp_le64;
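The ktime_get_real_ts()/timespec_trunc() to ktime_get_real_ts64()/timespec64_trunc() switch follows the kernel-wide y2038 cleanup: struct timespec stores tv_sec as a long, which is 32 bits on 32-bit architectures and overflows in January 2038, while struct timespec64 uses time64_t everywhere. The pattern in isolation (a sketch; DEFAULT_TIME_GRAN is UBIFS's timestamp granularity constant):

	struct timespec64 ts;

	ktime_get_real_ts64(&ts);			/* wall clock, 64-bit seconds */
	ts = timespec64_trunc(ts, DEFAULT_TIME_GRAN);	/* round tv_nsec down to the granularity */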
@@ -563,7 +563,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
* due to the unavailability of time-travelling equipment.
*/
if (c->fmt_version > UBIFS_FORMAT_VERSION) {
- ubifs_assert(!c->ro_media || c->ro_mount);
+ ubifs_assert(c, !c->ro_media || c->ro_mount);
if (!c->ro_mount ||
c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
ubifs_err(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
@@ -705,9 +705,9 @@ static int fixup_leb(struct ubifs_info *c, int lnum, int len)
{
int err;
- ubifs_assert(len >= 0);
- ubifs_assert(len % c->min_io_size == 0);
- ubifs_assert(len < c->leb_size);
+ ubifs_assert(c, len >= 0);
+ ubifs_assert(c, len % c->min_io_size == 0);
+ ubifs_assert(c, len < c->leb_size);
if (len == 0) {
dbg_mnt("unmap empty LEB %d", lnum);
@@ -817,8 +817,8 @@ int ubifs_fixup_free_space(struct ubifs_info *c)
int err;
struct ubifs_sb_node *sup;
- ubifs_assert(c->space_fixup);
- ubifs_assert(!c->ro_mount);
+ ubifs_assert(c, c->space_fixup);
+ ubifs_assert(c, !c->ro_mount);
ubifs_msg(c, "start fixing up free space");
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c
index 16f03d9929e5..ea88926163f4 100644
--- a/fs/ubifs/scan.c
+++ b/fs/ubifs/scan.c
@@ -176,7 +176,7 @@ void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
int lnum, int offs)
{
dbg_scan("stop scanning LEB %d at offset %d", lnum, offs);
- ubifs_assert(offs % c->min_io_size == 0);
+ ubifs_assert(c, offs % c->min_io_size == 0);
sleb->endpt = ALIGN(offs, c->min_io_size);
}
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 9a9fb94a41c6..5eb5958723d4 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -71,10 +71,10 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
{
int total_freed = 0;
struct ubifs_znode *znode, *zprev;
- int time = get_seconds();
+ time64_t time = ktime_get_seconds();
- ubifs_assert(mutex_is_locked(&c->umount_mutex));
- ubifs_assert(mutex_is_locked(&c->tnc_mutex));
+ ubifs_assert(c, mutex_is_locked(&c->umount_mutex));
+ ubifs_assert(c, mutex_is_locked(&c->tnc_mutex));
if (!c->zroot.znode || atomic_long_read(&c->clean_zn_cnt) == 0)
return 0;
@@ -89,7 +89,7 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
* changed only when the 'c->tnc_mutex' is held.
*/
zprev = NULL;
- znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
+ znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL);
while (znode && total_freed < nr &&
atomic_long_read(&c->clean_zn_cnt) > 0) {
int freed;
@@ -125,7 +125,7 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
else
c->zroot.znode = NULL;
- freed = ubifs_destroy_tnc_subtree(znode);
+ freed = ubifs_destroy_tnc_subtree(c, znode);
atomic_long_sub(freed, &ubifs_clean_zn_cnt);
atomic_long_sub(freed, &c->clean_zn_cnt);
total_freed += freed;
@@ -136,7 +136,7 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)
break;
zprev = znode;
- znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode);
+ znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode);
cond_resched();
}
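get_seconds() returned the wall clock, which can jump when the system time is set; ktime_get_seconds() returns time64_t seconds of the monotonic clock, so znode age comparisons are both y2038-safe and immune to clock steps. An illustrative aging test (an assumption -- the real comparison sits in the body of shrink_tnc(), outside the context lines shown):

	time64_t now = ktime_get_seconds();
	bool old_enough = znode->time + age <= now;	/* last use at least @age seconds ago */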
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index c5466c70d620..23e7042666a7 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -89,9 +89,9 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode)
if (ui->xattr && !S_ISREG(inode->i_mode))
return 5;
- if (!ubifs_compr_present(ui->compr_type)) {
+ if (!ubifs_compr_present(c, ui->compr_type)) {
ubifs_warn(c, "inode %lu uses '%s' compression, but it was not compiled in",
- inode->i_ino, ubifs_compr_name(ui->compr_type));
+ inode->i_ino, ubifs_compr_name(c, ui->compr_type));
}
err = dbg_check_dir(c, inode);
@@ -296,7 +296,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
- ubifs_assert(!ui->xattr);
+ ubifs_assert(c, !ui->xattr);
if (is_bad_inode(inode))
return 0;
@@ -349,7 +349,7 @@ static void ubifs_evict_inode(struct inode *inode)
goto out;
dbg_gen("inode %lu, mode %#x", inode->i_ino, (int)inode->i_mode);
- ubifs_assert(!atomic_read(&inode->i_count));
+ ubifs_assert(c, !atomic_read(&inode->i_count));
truncate_inode_pages_final(&inode->i_data);
@@ -384,9 +384,10 @@ done:
static void ubifs_dirty_inode(struct inode *inode, int flags)
{
+ struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
- ubifs_assert(mutex_is_locked(&ui->ui_mutex));
+ ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
if (!ui->dirty) {
ui->dirty = 1;
dbg_gen("inode %lu", inode->i_ino);
@@ -416,7 +417,7 @@ static int ubifs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = UBIFS_MAX_NLEN;
buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]);
buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]);
- ubifs_assert(buf->f_bfree <= c->block_cnt);
+ ubifs_assert(c, buf->f_bfree <= c->block_cnt);
return 0;
}
@@ -441,9 +442,10 @@ static int ubifs_show_options(struct seq_file *s, struct dentry *root)
if (c->mount_opts.override_compr) {
seq_printf(s, ",compr=%s",
- ubifs_compr_name(c->mount_opts.compr_type));
+ ubifs_compr_name(c, c->mount_opts.compr_type));
}
+ seq_printf(s, ",assert=%s", ubifs_assert_action_name(c));
seq_printf(s, ",ubi=%d,vol=%d", c->vi.ubi_num, c->vi.vol_id);
return 0;
@@ -921,6 +923,7 @@ static int check_volume_empty(struct ubifs_info *c)
* Opt_chk_data_crc: check CRCs when reading data nodes
* Opt_no_chk_data_crc: do not check CRCs when reading data nodes
* Opt_override_compr: override default compressor
+ * Opt_assert: set ubifs_assert() action
* Opt_err: just end of array marker
*/
enum {
@@ -931,6 +934,7 @@ enum {
Opt_chk_data_crc,
Opt_no_chk_data_crc,
Opt_override_compr,
+ Opt_assert,
Opt_ignore,
Opt_err,
};
@@ -945,6 +949,7 @@ static const match_table_t tokens = {
{Opt_override_compr, "compr=%s"},
{Opt_ignore, "ubi=%s"},
{Opt_ignore, "vol=%s"},
+ {Opt_assert, "assert=%s"},
{Opt_err, NULL},
};
@@ -1045,6 +1050,26 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
c->default_compr = c->mount_opts.compr_type;
break;
}
+ case Opt_assert:
+ {
+ char *act = match_strdup(&args[0]);
+
+ if (!act)
+ return -ENOMEM;
+ if (!strcmp(act, "report"))
+ c->assert_action = ASSACT_REPORT;
+ else if (!strcmp(act, "read-only"))
+ c->assert_action = ASSACT_RO;
+ else if (!strcmp(act, "panic"))
+ c->assert_action = ASSACT_PANIC;
+ else {
+ ubifs_err(c, "unknown assert action \"%s\"", act);
+ kfree(act);
+ return -EINVAL;
+ }
+ kfree(act);
+ break;
+ }
case Opt_ignore:
break;
default:
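With the parser in place the action becomes a mount-time choice, e.g. mount -t ubifs -o assert=panic ubi0:rootfs /mnt (device and mount point illustrative), and it round-trips through /proc/mounts via the show_options hook touched earlier in this file. Unknown values reject the mount with -EINVAL, and the match_strdup() buffer is freed on both the error and success paths.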
@@ -1103,7 +1128,7 @@ static void destroy_journal(struct ubifs_info *c)
*/
static void bu_init(struct ubifs_info *c)
{
- ubifs_assert(c->bulk_read == 1);
+ ubifs_assert(c, c->bulk_read == 1);
if (c->bu.buf)
return; /* Already initialized */
@@ -1134,7 +1159,7 @@ again:
*/
static int check_free_space(struct ubifs_info *c)
{
- ubifs_assert(c->dark_wm > 0);
+ ubifs_assert(c, c->dark_wm > 0);
if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) {
ubifs_err(c, "insufficient free space to mount in R/W mode");
ubifs_dump_budg(c, &c->bi);
@@ -1234,9 +1259,9 @@ static int mount_ubifs(struct ubifs_info *c)
* Make sure the compressor which is set as default in the superblock
* or overridden by mount options is actually compiled in.
*/
- if (!ubifs_compr_present(c->default_compr)) {
+ if (!ubifs_compr_present(c, c->default_compr)) {
ubifs_err(c, "'compressor \"%s\" is not compiled in",
- ubifs_compr_name(c->default_compr));
+ ubifs_compr_name(c, c->default_compr));
err = -ENOTSUPP;
goto out_free;
}
@@ -1396,10 +1421,10 @@ static int mount_ubifs(struct ubifs_info *c)
* the journal head LEBs may also be accounted as
* "empty taken" if they are empty.
*/
- ubifs_assert(c->lst.taken_empty_lebs > 0);
+ ubifs_assert(c, c->lst.taken_empty_lebs > 0);
}
} else
- ubifs_assert(c->lst.taken_empty_lebs > 0);
+ ubifs_assert(c, c->lst.taken_empty_lebs > 0);
err = dbg_check_filesystem(c);
if (err)
@@ -1429,7 +1454,7 @@ static int mount_ubifs(struct ubifs_info *c)
UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION, c->uuid,
c->big_lpt ? ", big LPT model" : ", small LPT model");
- dbg_gen("default compressor: %s", ubifs_compr_name(c->default_compr));
+ dbg_gen("default compressor: %s", ubifs_compr_name(c, c->default_compr));
dbg_gen("data journal heads: %d",
c->jhead_cnt - NONDATA_JHEADS_CNT);
dbg_gen("log LEBs: %d (%d - %d)",
@@ -1610,7 +1635,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
goto out;
} else {
/* A readonly mount is not allowed to have orphans */
- ubifs_assert(c->tot_orphans == 0);
+ ubifs_assert(c, c->tot_orphans == 0);
err = ubifs_clear_orphans(c);
if (err)
goto out;
@@ -1727,8 +1752,8 @@ static void ubifs_remount_ro(struct ubifs_info *c)
{
int i, err;
- ubifs_assert(!c->need_recovery);
- ubifs_assert(!c->ro_mount);
+ ubifs_assert(c, !c->need_recovery);
+ ubifs_assert(c, !c->ro_mount);
mutex_lock(&c->umount_mutex);
if (c->bgt) {
@@ -1778,9 +1803,9 @@ static void ubifs_put_super(struct super_block *sb)
* to write them back because of I/O errors.
*/
if (!c->ro_error) {
- ubifs_assert(c->bi.idx_growth == 0);
- ubifs_assert(c->bi.dd_growth == 0);
- ubifs_assert(c->bi.data_growth == 0);
+ ubifs_assert(c, c->bi.idx_growth == 0);
+ ubifs_assert(c, c->bi.dd_growth == 0);
+ ubifs_assert(c, c->bi.data_growth == 0);
}
/*
@@ -1887,7 +1912,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
mutex_unlock(&c->bu_mutex);
}
- ubifs_assert(c->lst.taken_empty_lebs > 0);
+ ubifs_assert(c, c->lst.taken_empty_lebs > 0);
return 0;
}
@@ -2002,6 +2027,7 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
INIT_LIST_HEAD(&c->orph_list);
INIT_LIST_HEAD(&c->orph_new);
c->no_chk_data_crc = 1;
+ c->assert_action = ASSACT_RO;
c->highest_inum = UBIFS_FIRST_INO;
c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
@@ -2053,7 +2079,9 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
if (c->max_inode_sz > MAX_LFS_FILESIZE)
sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;
sb->s_op = &ubifs_super_operations;
+#ifdef CONFIG_UBIFS_FS_XATTR
sb->s_xattr = ubifs_xattr_handlers;
+#endif
#ifdef CONFIG_UBIFS_FS_ENCRYPTION
sb->s_cop = &ubifs_crypt_operations;
#endif
@@ -2061,7 +2089,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
mutex_lock(&c->umount_mutex);
err = mount_ubifs(c);
if (err) {
- ubifs_assert(err < 0);
+ ubifs_assert(c, err < 0);
goto out_unlock;
}
@@ -2304,8 +2332,8 @@ late_initcall(ubifs_init);
static void __exit ubifs_exit(void)
{
- ubifs_assert(list_empty(&ubifs_infos));
- ubifs_assert(atomic_long_read(&ubifs_clean_zn_cnt) == 0);
+	WARN_ON(!list_empty(&ubifs_infos));
+	WARN_ON(atomic_long_read(&ubifs_clean_zn_cnt) != 0);
dbg_debugfs_exit();
ubifs_compressors_exit();
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 4a21e7f75e7a..bf416e512743 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -211,7 +211,7 @@ static struct ubifs_znode *copy_znode(struct ubifs_info *c,
__set_bit(DIRTY_ZNODE, &zn->flags);
__clear_bit(COW_ZNODE, &zn->flags);
- ubifs_assert(!ubifs_zn_obsolete(znode));
+ ubifs_assert(c, !ubifs_zn_obsolete(znode));
__set_bit(OBSOLETE_ZNODE, &znode->flags);
if (znode->level != 0) {
@@ -321,9 +321,9 @@ static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
void *lnc_node;
const struct ubifs_dent_node *dent = node;
- ubifs_assert(!zbr->leaf);
- ubifs_assert(zbr->len != 0);
- ubifs_assert(is_hash_key(c, &zbr->key));
+ ubifs_assert(c, !zbr->leaf);
+ ubifs_assert(c, zbr->len != 0);
+ ubifs_assert(c, is_hash_key(c, &zbr->key));
err = ubifs_validate_entry(c, dent);
if (err) {
@@ -355,8 +355,8 @@ static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
{
int err;
- ubifs_assert(!zbr->leaf);
- ubifs_assert(zbr->len != 0);
+ ubifs_assert(c, !zbr->leaf);
+ ubifs_assert(c, zbr->len != 0);
err = ubifs_validate_entry(c, node);
if (err) {
@@ -398,11 +398,11 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
{
int err;
- ubifs_assert(is_hash_key(c, &zbr->key));
+ ubifs_assert(c, is_hash_key(c, &zbr->key));
if (zbr->leaf) {
/* Read from the leaf node cache */
- ubifs_assert(zbr->len != 0);
+ ubifs_assert(c, zbr->len != 0);
memcpy(node, zbr->leaf, zbr->len);
return 0;
}
@@ -721,7 +721,7 @@ static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
while (1) {
err = tnc_prev(c, zn, n);
if (err == -ENOENT) {
- ubifs_assert(*n == 0);
+ ubifs_assert(c, *n == 0);
*n = -1;
return 0;
}
@@ -761,12 +761,12 @@ static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
err = tnc_next(c, zn, n);
if (err) {
/* Should be impossible */
- ubifs_assert(0);
+ ubifs_assert(c, 0);
if (err == -ENOENT)
err = -EINVAL;
return err;
}
- ubifs_assert(*n == 0);
+ ubifs_assert(c, *n == 0);
*n = -1;
}
return 0;
@@ -778,7 +778,7 @@ static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
return 0;
if (err == NAME_MATCHES)
return 1;
- ubifs_assert(err == NAME_GREATER);
+ ubifs_assert(c, err == NAME_GREATER);
}
} else {
int nn = *n;
@@ -802,7 +802,7 @@ static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
*n = nn;
if (err == NAME_MATCHES)
return 1;
- ubifs_assert(err == NAME_LESS);
+ ubifs_assert(c, err == NAME_LESS);
}
}
}
@@ -843,7 +843,7 @@ static int fallible_matches_name(struct ubifs_info *c,
err = NOT_ON_MEDIA;
goto out_free;
}
- ubifs_assert(err == 1);
+ ubifs_assert(c, err == 1);
err = lnc_add_directly(c, zbr, dent);
if (err)
@@ -923,7 +923,7 @@ static int fallible_resolve_collision(struct ubifs_info *c,
while (1) {
err = tnc_prev(c, zn, n);
if (err == -ENOENT) {
- ubifs_assert(*n == 0);
+ ubifs_assert(c, *n == 0);
*n = -1;
break;
}
@@ -935,12 +935,12 @@ static int fallible_resolve_collision(struct ubifs_info *c,
err = tnc_next(c, zn, n);
if (err) {
/* Should be impossible */
- ubifs_assert(0);
+ ubifs_assert(c, 0);
if (err == -ENOENT)
err = -EINVAL;
return err;
}
- ubifs_assert(*n == 0);
+ ubifs_assert(c, *n == 0);
*n = -1;
}
break;
@@ -1100,8 +1100,8 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
struct ubifs_znode *zp;
int *path = c->bottom_up_buf, p = 0;
- ubifs_assert(c->zroot.znode);
- ubifs_assert(znode);
+ ubifs_assert(c, c->zroot.znode);
+ ubifs_assert(c, znode);
if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
kfree(c->bottom_up_buf);
c->bottom_up_buf = kmalloc_array(c->zroot.znode->level,
@@ -1120,7 +1120,7 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
if (!zp)
break;
n = znode->iip;
- ubifs_assert(p < c->zroot.znode->level);
+ ubifs_assert(c, p < c->zroot.znode->level);
path[p++] = n;
if (!zp->cnext && ubifs_zn_dirty(znode))
break;
@@ -1134,18 +1134,18 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
zp = znode->parent;
if (zp) {
- ubifs_assert(path[p - 1] >= 0);
- ubifs_assert(path[p - 1] < zp->child_cnt);
+ ubifs_assert(c, path[p - 1] >= 0);
+ ubifs_assert(c, path[p - 1] < zp->child_cnt);
zbr = &zp->zbranch[path[--p]];
znode = dirty_cow_znode(c, zbr);
} else {
- ubifs_assert(znode == c->zroot.znode);
+ ubifs_assert(c, znode == c->zroot.znode);
znode = dirty_cow_znode(c, &c->zroot);
}
if (IS_ERR(znode) || !p)
break;
- ubifs_assert(path[p - 1] >= 0);
- ubifs_assert(path[p - 1] < znode->child_cnt);
+ ubifs_assert(c, path[p - 1] >= 0);
+ ubifs_assert(c, path[p - 1] < znode->child_cnt);
znode = znode->zbranch[path[p - 1]].znode;
}
@@ -1179,10 +1179,10 @@ int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
{
int err, exact;
struct ubifs_znode *znode;
- unsigned long time = get_seconds();
+ time64_t time = ktime_get_seconds();
dbg_tnck(key, "search key ");
- ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
+ ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY);
znode = c->zroot.znode;
if (unlikely(!znode)) {
@@ -1315,7 +1315,7 @@ static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
{
int err, exact;
struct ubifs_znode *znode;
- unsigned long time = get_seconds();
+ time64_t time = ktime_get_seconds();
dbg_tnck(key, "search and dirty key ");
@@ -1658,9 +1658,9 @@ static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
int rlen, overlap;
dbg_io("LEB %d:%d, length %d", lnum, offs, len);
- ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
- ubifs_assert(!(offs & 7) && offs < c->leb_size);
- ubifs_assert(offs + len <= c->leb_size);
+ ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+ ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
+ ubifs_assert(c, offs + len <= c->leb_size);
spin_lock(&wbuf->lock);
overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
@@ -1824,7 +1824,7 @@ static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
goto out_unlock;
}
- ubifs_assert(n >= 0);
+ ubifs_assert(c, n >= 0);
err = resolve_collision(c, key, &znode, &n, nm);
dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
@@ -1922,7 +1922,7 @@ static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
struct ubifs_znode *znode;
union ubifs_key start_key;
- ubifs_assert(is_hash_key(c, key));
+ ubifs_assert(c, is_hash_key(c, key));
lowest_dent_key(c, &start_key, key_inum(c, key));
@@ -1993,8 +1993,8 @@ static void correct_parent_keys(const struct ubifs_info *c,
{
union ubifs_key *key, *key1;
- ubifs_assert(znode->parent);
- ubifs_assert(znode->iip == 0);
+ ubifs_assert(c, znode->parent);
+ ubifs_assert(c, znode->iip == 0);
key = &znode->zbranch[0].key;
key1 = &znode->parent->zbranch[0].key;
@@ -2011,6 +2011,7 @@ static void correct_parent_keys(const struct ubifs_info *c,
/**
* insert_zbranch - insert a zbranch into a znode.
+ * @c: UBIFS file-system description object
* @znode: znode into which to insert
* @zbr: zbranch to insert
* @n: slot number to insert to
@@ -2020,12 +2021,12 @@ static void correct_parent_keys(const struct ubifs_info *c,
* zbranch has to be inserted to the @znode->zbranches[]' array at the @n-th
* slot, zbranches starting from @n have to be moved right.
*/
-static void insert_zbranch(struct ubifs_znode *znode,
+static void insert_zbranch(struct ubifs_info *c, struct ubifs_znode *znode,
const struct ubifs_zbranch *zbr, int n)
{
int i;
- ubifs_assert(ubifs_zn_dirty(znode));
+ ubifs_assert(c, ubifs_zn_dirty(znode));
if (znode->level) {
for (i = znode->child_cnt; i > n; i--) {
@@ -2079,16 +2080,16 @@ static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
int i, keep, move, appending = 0;
union ubifs_key *key = &zbr->key, *key1;
- ubifs_assert(n >= 0 && n <= c->fanout);
+ ubifs_assert(c, n >= 0 && n <= c->fanout);
/* Implement naive insert for now */
again:
zp = znode->parent;
if (znode->child_cnt < c->fanout) {
- ubifs_assert(n != c->fanout);
+ ubifs_assert(c, n != c->fanout);
dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level);
- insert_zbranch(znode, zbr, n);
+ insert_zbranch(c, znode, zbr, n);
/* Ensure parent's key is correct */
if (n == 0 && zp && znode->iip == 0)
@@ -2197,7 +2198,7 @@ do_split:
/* Insert new key and branch */
dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level);
- insert_zbranch(zi, zbr, n);
+ insert_zbranch(c, zi, zbr, n);
/* Insert new znode (produced by splitting) into the parent */
if (zp) {
@@ -2495,8 +2496,8 @@ static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
int i, err;
/* Delete without merge for now */
- ubifs_assert(znode->level == 0);
- ubifs_assert(n >= 0 && n < c->fanout);
+ ubifs_assert(c, znode->level == 0);
+ ubifs_assert(c, n >= 0 && n < c->fanout);
dbg_tnck(&znode->zbranch[n].key, "deleting key ");
zbr = &znode->zbranch[n];
@@ -2522,8 +2523,8 @@ static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
*/
do {
- ubifs_assert(!ubifs_zn_obsolete(znode));
- ubifs_assert(ubifs_zn_dirty(znode));
+ ubifs_assert(c, !ubifs_zn_obsolete(znode));
+ ubifs_assert(c, ubifs_zn_dirty(znode));
zp = znode->parent;
n = znode->iip;
@@ -2545,7 +2546,7 @@ static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
/* Remove from znode, entry n - 1 */
znode->child_cnt -= 1;
- ubifs_assert(znode->level != 0);
+ ubifs_assert(c, znode->level != 0);
for (i = n; i < znode->child_cnt; i++) {
znode->zbranch[i] = znode->zbranch[i + 1];
if (znode->zbranch[i].znode)
@@ -2578,8 +2579,8 @@ static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
c->zroot.offs = zbr->offs;
c->zroot.len = zbr->len;
c->zroot.znode = znode;
- ubifs_assert(!ubifs_zn_obsolete(zp));
- ubifs_assert(ubifs_zn_dirty(zp));
+ ubifs_assert(c, !ubifs_zn_obsolete(zp));
+ ubifs_assert(c, ubifs_zn_dirty(zp));
atomic_long_dec(&c->dirty_zn_cnt);
if (zp->cnext) {
@@ -2944,7 +2945,7 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
union ubifs_key *dkey;
dbg_tnck(key, "key ");
- ubifs_assert(is_hash_key(c, key));
+ ubifs_assert(c, is_hash_key(c, key));
mutex_lock(&c->tnc_mutex);
err = ubifs_lookup_level0(c, key, &znode, &n);
@@ -3031,7 +3032,7 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
if (!c->cnext)
return;
- ubifs_assert(c->cmt_state == COMMIT_BROKEN);
+ ubifs_assert(c, c->cmt_state == COMMIT_BROKEN);
cnext = c->cnext;
do {
struct ubifs_znode *znode = cnext;
@@ -3053,8 +3054,8 @@ void ubifs_tnc_close(struct ubifs_info *c)
long n, freed;
n = atomic_long_read(&c->clean_zn_cnt);
- freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
- ubifs_assert(freed == n);
+ freed = ubifs_destroy_tnc_subtree(c, c->zroot.znode);
+ ubifs_assert(c, freed == n);
atomic_long_sub(n, &ubifs_clean_zn_cnt);
}
kfree(c->gap_lebs);
@@ -3167,7 +3168,7 @@ static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
struct ubifs_znode *znode, *zn;
int n, nn;
- ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
+ ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY);
/*
* The arguments have probably been read off flash, so don't assume
@@ -3206,7 +3207,7 @@ static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
if (IS_ERR(znode))
return znode;
ubifs_search_zbranch(c, znode, key, &n);
- ubifs_assert(n >= 0);
+ ubifs_assert(c, n >= 0);
}
if (znode->level == level + 1)
break;
@@ -3497,7 +3498,7 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
if (err < 0)
goto out_unlock;
- ubifs_assert(err == 0);
+ ubifs_assert(c, err == 0);
key = &znode->zbranch[n].key;
if (!key_in_range(c, key, &from_key, &to_key))
goto out_unlock;
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index a9df94ad46a3..dba87d09b989 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -87,8 +87,8 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
atomic_long_dec(&c->dirty_zn_cnt);
- ubifs_assert(ubifs_zn_dirty(znode));
- ubifs_assert(ubifs_zn_cow(znode));
+ ubifs_assert(c, ubifs_zn_dirty(znode));
+ ubifs_assert(c, ubifs_zn_cow(znode));
/*
* Note, unlike 'write_index()' we do not add memory barriers here
@@ -115,9 +115,9 @@ static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
{
int len, gap_remains, gap_pos, written, pad_len;
- ubifs_assert((gap_start & 7) == 0);
- ubifs_assert((gap_end & 7) == 0);
- ubifs_assert(gap_end >= gap_start);
+ ubifs_assert(c, (gap_start & 7) == 0);
+ ubifs_assert(c, (gap_end & 7) == 0);
+ ubifs_assert(c, gap_end >= gap_start);
gap_remains = gap_end - gap_start;
if (!gap_remains)
@@ -131,7 +131,7 @@ static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
const int alen = ALIGN(len, 8);
int err;
- ubifs_assert(alen <= gap_remains);
+ ubifs_assert(c, alen <= gap_remains);
err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
lnum, gap_pos, len);
if (err)
@@ -259,7 +259,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
struct ubifs_idx_node *idx;
int in_use, level;
- ubifs_assert(snod->type == UBIFS_IDX_NODE);
+ ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
idx = snod->node;
key_read(c, ubifs_idx_key(c, idx), &snod->key);
level = le16_to_cpu(idx->level);
@@ -373,7 +373,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
p = c->gap_lebs;
do {
- ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
+ ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
written = layout_leb_in_gaps(c, p);
if (written < 0) {
err = written;
@@ -639,7 +639,7 @@ static int get_znodes_to_commit(struct ubifs_info *c)
}
cnt += 1;
while (1) {
- ubifs_assert(!ubifs_zn_cow(znode));
+ ubifs_assert(c, !ubifs_zn_cow(znode));
__set_bit(COW_ZNODE, &znode->flags);
znode->alt = 0;
cnext = find_next_dirty(znode);
@@ -652,7 +652,7 @@ static int get_znodes_to_commit(struct ubifs_info *c)
cnt += 1;
}
dbg_cmt("committing %d znodes", cnt);
- ubifs_assert(cnt == atomic_long_read(&c->dirty_zn_cnt));
+ ubifs_assert(c, cnt == atomic_long_read(&c->dirty_zn_cnt));
return cnt;
}
@@ -760,7 +760,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
err = layout_commit(c, no_space, cnt);
if (err)
goto out_free;
- ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0);
+ ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
err = free_unused_idx_lebs(c);
if (err)
goto out;
@@ -781,7 +781,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
* budgeting subsystem to assume the index is already committed,
* even though it is not.
*/
- ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
+ ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
c->bi.old_idx_sz = c->calc_idx_sz;
c->bi.uncommitted_idx = 0;
c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
@@ -887,8 +887,8 @@ static int write_index(struct ubifs_info *c)
/* Grab some stuff from znode while we still can */
cnext = znode->cnext;
- ubifs_assert(ubifs_zn_dirty(znode));
- ubifs_assert(ubifs_zn_cow(znode));
+ ubifs_assert(c, ubifs_zn_dirty(znode));
+ ubifs_assert(c, ubifs_zn_cow(znode));
/*
* It is important that other threads should see %DIRTY_ZNODE
diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c
index 93f5b7859e6f..d90ee01076a9 100644
--- a/fs/ubifs/tnc_misc.c
+++ b/fs/ubifs/tnc_misc.c
@@ -31,19 +31,21 @@
/**
* ubifs_tnc_levelorder_next - next TNC tree element in levelorder traversal.
+ * @c: UBIFS file-system description object
* @zr: root of the subtree to traverse
* @znode: previous znode
*
* This function implements levelorder TNC traversal. The LNC is ignored.
* Returns the next element or %NULL if @znode is already the last one.
*/
-struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr,
+struct ubifs_znode *ubifs_tnc_levelorder_next(const struct ubifs_info *c,
+ struct ubifs_znode *zr,
struct ubifs_znode *znode)
{
int level, iip, level_search = 0;
struct ubifs_znode *zn;
- ubifs_assert(zr);
+ ubifs_assert(c, zr);
if (unlikely(!znode))
return zr;
@@ -58,7 +60,7 @@ struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr,
iip = znode->iip;
while (1) {
- ubifs_assert(znode->level <= zr->level);
+ ubifs_assert(c, znode->level <= zr->level);
/*
* First walk up until there is a znode with next branch to
@@ -85,7 +87,7 @@ struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr,
level_search = 1;
iip = -1;
znode = ubifs_tnc_find_child(zr, 0);
- ubifs_assert(znode);
+ ubifs_assert(c, znode);
}
/* Switch to the next index */
@@ -111,7 +113,7 @@ struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr,
}
if (zn) {
- ubifs_assert(zn->level >= 0);
+ ubifs_assert(c, zn->level >= 0);
return zn;
}
}
@@ -140,7 +142,7 @@ int ubifs_search_zbranch(const struct ubifs_info *c,
int uninitialized_var(cmp);
const struct ubifs_zbranch *zbr = &znode->zbranch[0];
- ubifs_assert(end > beg);
+ ubifs_assert(c, end > beg);
while (end > beg) {
mid = (beg + end) >> 1;
@@ -158,13 +160,13 @@ int ubifs_search_zbranch(const struct ubifs_info *c,
*n = end - 1;
/* The insert point is after *n */
- ubifs_assert(*n >= -1 && *n < znode->child_cnt);
+ ubifs_assert(c, *n >= -1 && *n < znode->child_cnt);
if (*n == -1)
- ubifs_assert(keys_cmp(c, key, &zbr[0].key) < 0);
+ ubifs_assert(c, keys_cmp(c, key, &zbr[0].key) < 0);
else
- ubifs_assert(keys_cmp(c, key, &zbr[*n].key) > 0);
+ ubifs_assert(c, keys_cmp(c, key, &zbr[*n].key) > 0);
if (*n + 1 < znode->child_cnt)
- ubifs_assert(keys_cmp(c, key, &zbr[*n + 1].key) < 0);
+ ubifs_assert(c, keys_cmp(c, key, &zbr[*n + 1].key) < 0);
return 0;
}
@@ -195,16 +197,18 @@ struct ubifs_znode *ubifs_tnc_postorder_first(struct ubifs_znode *znode)
/**
* ubifs_tnc_postorder_next - next TNC tree element in postorder traversal.
+ * @c: UBIFS file-system description object
* @znode: previous znode
*
* This function implements postorder TNC traversal. The LNC is ignored.
* Returns the next element or %NULL if @znode is already the last one.
*/
-struct ubifs_znode *ubifs_tnc_postorder_next(struct ubifs_znode *znode)
+struct ubifs_znode *ubifs_tnc_postorder_next(const struct ubifs_info *c,
+ struct ubifs_znode *znode)
{
struct ubifs_znode *zn;
- ubifs_assert(znode);
+ ubifs_assert(c, znode);
if (unlikely(!znode->parent))
return NULL;
@@ -220,18 +224,20 @@ struct ubifs_znode *ubifs_tnc_postorder_next(struct ubifs_znode *znode)
/**
* ubifs_destroy_tnc_subtree - destroy all znodes connected to a subtree.
+ * @c: UBIFS file-system description object
* @znode: znode defining subtree to destroy
*
* This function destroys subtree of the TNC tree. Returns number of clean
* znodes in the subtree.
*/
-long ubifs_destroy_tnc_subtree(struct ubifs_znode *znode)
+long ubifs_destroy_tnc_subtree(const struct ubifs_info *c,
+ struct ubifs_znode *znode)
{
struct ubifs_znode *zn = ubifs_tnc_postorder_first(znode);
long clean_freed = 0;
int n;
- ubifs_assert(zn);
+ ubifs_assert(c, zn);
while (1) {
for (n = 0; n < zn->child_cnt; n++) {
if (!zn->zbranch[n].znode)
@@ -252,7 +258,7 @@ long ubifs_destroy_tnc_subtree(struct ubifs_znode *znode)
return clean_freed;
}
- zn = ubifs_tnc_postorder_next(zn);
+ zn = ubifs_tnc_postorder_next(c, zn);
}
}
@@ -410,7 +416,7 @@ struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c,
int err;
struct ubifs_znode *znode;
- ubifs_assert(!zbr->znode);
+ ubifs_assert(c, !zbr->znode);
/*
* A slab cache is not presently used for znodes because the znode size
* depends on the fanout which is stored in the superblock.
@@ -435,7 +441,7 @@ struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c,
zbr->znode = znode;
znode->parent = parent;
- znode->time = get_seconds();
+ znode->time = ktime_get_seconds();
znode->iip = iip;
return znode;
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 04bf84d71e7b..4368cde476b0 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -258,6 +258,18 @@ enum {
LEB_RETAINED,
};
+/*
+ * Action taken upon a failed ubifs_assert().
+ * @ASSACT_REPORT: just report the failed assertion
+ * @ASSACT_RO: switch to read-only mode
+ * @ASSACT_PANIC: call BUG() and possibly panic the kernel
+ */
+enum {
+ ASSACT_REPORT = 0,
+ ASSACT_RO,
+ ASSACT_PANIC,
+};
+
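The three actions must fit in the 2-bit assert_action field added to struct ubifs_info below; a compile-time guard along these lines (illustrative, not part of the patch, and it would live inside an init function such as ubifs_init()) documents that invariant:

	BUILD_BUG_ON(ASSACT_PANIC > 3);	/* assert_action is a 2-bit field */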
/**
* struct ubifs_old_idx - index node obsoleted since last commit start.
* @rb: rb-tree node
@@ -758,7 +770,7 @@ struct ubifs_znode {
struct ubifs_znode *parent;
struct ubifs_znode *cnext;
unsigned long flags;
- unsigned long time;
+ time64_t time;
int level;
int child_cnt;
int iip;
@@ -1015,6 +1027,7 @@ struct ubifs_debug_info;
* @bulk_read: enable bulk-reads
* @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc)
* @rw_incompat: the media is not R/W compatible
+ * @assert_action: action to take when a ubifs_assert() fails
*
* @tnc_mutex: protects the Tree Node Cache (TNC), @zroot, @cnext, @enext, and
* @calc_idx_sz
@@ -1256,6 +1269,7 @@ struct ubifs_info {
unsigned int bulk_read:1;
unsigned int default_compr:2;
unsigned int rw_incompat:1;
+ unsigned int assert_action:2;
struct mutex tnc_mutex;
struct ubifs_zbranch zroot;
@@ -1608,14 +1622,17 @@ int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu);
int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu);
/* tnc_misc.c */
-struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr,
+struct ubifs_znode *ubifs_tnc_levelorder_next(const struct ubifs_info *c,
+ struct ubifs_znode *zr,
struct ubifs_znode *znode);
int ubifs_search_zbranch(const struct ubifs_info *c,
const struct ubifs_znode *znode,
const union ubifs_key *key, int *n);
struct ubifs_znode *ubifs_tnc_postorder_first(struct ubifs_znode *znode);
-struct ubifs_znode *ubifs_tnc_postorder_next(struct ubifs_znode *znode);
-long ubifs_destroy_tnc_subtree(struct ubifs_znode *zr);
+struct ubifs_znode *ubifs_tnc_postorder_next(const struct ubifs_info *c,
+ struct ubifs_znode *znode);
+long ubifs_destroy_tnc_subtree(const struct ubifs_info *c,
+ struct ubifs_znode *zr);
struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c,
struct ubifs_zbranch *zbr,
struct ubifs_znode *parent, int iip);
@@ -1698,7 +1715,7 @@ struct ubifs_nnode *ubifs_get_nnode(struct ubifs_info *c,
int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip);
void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty);
void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode);
-uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits);
+uint32_t ubifs_unpack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, int nrbits);
struct ubifs_nnode *ubifs_first_nnode(struct ubifs_info *c, int *hght);
/* Needed only in debugging code in lpt_commit.c */
int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
@@ -1755,7 +1772,13 @@ int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
size_t size, int flags, bool check_lock);
ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
size_t size);
+
+#ifdef CONFIG_UBIFS_FS_XATTR
void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum);
+#else
+static inline void ubifs_evict_xattr_inode(struct ubifs_info *c,
+ ino_t xattr_inum) { }
+#endif
#ifdef CONFIG_UBIFS_FS_SECURITY
extern int ubifs_init_security(struct inode *dentry, struct inode *inode,
@@ -1812,14 +1835,16 @@ static inline int ubifs_encrypt(const struct inode *inode,
unsigned int in_len, unsigned int *out_len,
int block)
{
- ubifs_assert(0);
+ struct ubifs_info *c = inode->i_sb->s_fs_info;
+ ubifs_assert(c, 0);
return -EOPNOTSUPP;
}
static inline int ubifs_decrypt(const struct inode *inode,
struct ubifs_data_node *dn,
unsigned int *out_len, int block)
{
- ubifs_assert(0);
+ struct ubifs_info *c = inode->i_sb->s_fs_info;
+ ubifs_assert(c, 0);
return -EOPNOTSUPP;
}
#else
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 6f720fdf5020..61afdfee4b28 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -152,6 +152,12 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
ui->data_len = size;
mutex_lock(&host_ui->ui_mutex);
+
+ if (!host->i_nlink) {
+ err = -ENOENT;
+ goto out_noent;
+ }
+
host->i_ctime = current_time(host);
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
@@ -184,6 +190,7 @@ out_cancel:
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
host_ui->xattr_names -= fname_len(nm);
host_ui->flags &= ~UBIFS_CRYPT_FL;
+out_noent:
mutex_unlock(&host_ui->ui_mutex);
out_free:
make_bad_inode(inode);
@@ -216,7 +223,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
struct ubifs_budget_req req = { .dirtied_ino = 2,
.dirtied_ino_d = ALIGN(size, 8) + ALIGN(host_ui->data_len, 8) };
- ubifs_assert(ui->data_len == inode->i_size);
+ ubifs_assert(c, ui->data_len == inode->i_size);
err = ubifs_budget_space(c, &req);
if (err)
return err;
@@ -235,6 +242,12 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
mutex_unlock(&ui->ui_mutex);
mutex_lock(&host_ui->ui_mutex);
+
+ if (!host->i_nlink) {
+ err = -ENOENT;
+ goto out_noent;
+ }
+
host->i_ctime = current_time(host);
host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -256,6 +269,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
out_cancel:
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
+out_noent:
mutex_unlock(&host_ui->ui_mutex);
make_bad_inode(inode);
out_free:
@@ -291,7 +305,7 @@ int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
int err;
if (check_lock)
- ubifs_assert(inode_is_locked(host));
+ ubifs_assert(c, inode_is_locked(host));
if (size > UBIFS_MAX_INO_DATA)
return -ERANGE;
@@ -374,8 +388,8 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
}
ui = ubifs_inode(inode);
- ubifs_assert(inode->i_size == ui->data_len);
- ubifs_assert(ubifs_inode(host)->xattr_size > ui->data_len);
+ ubifs_assert(c, inode->i_size == ui->data_len);
+ ubifs_assert(c, ubifs_inode(host)->xattr_size > ui->data_len);
mutex_lock(&ui->ui_mutex);
if (buf) {
@@ -462,7 +476,7 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
return err;
}
- ubifs_assert(written <= size);
+ ubifs_assert(c, written <= size);
return written;
}
@@ -475,13 +489,19 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
struct ubifs_budget_req req = { .dirtied_ino = 2, .mod_dent = 1,
.dirtied_ino_d = ALIGN(host_ui->data_len, 8) };
- ubifs_assert(ui->data_len == inode->i_size);
+ ubifs_assert(c, ui->data_len == inode->i_size);
err = ubifs_budget_space(c, &req);
if (err)
return err;
mutex_lock(&host_ui->ui_mutex);
+
+ if (!host->i_nlink) {
+ err = -ENOENT;
+ goto out_noent;
+ }
+
host->i_ctime = current_time(host);
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
@@ -501,6 +521,7 @@ out_cancel:
host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
host_ui->xattr_names += fname_len(nm);
+out_noent:
mutex_unlock(&host_ui->ui_mutex);
ubifs_release_budget(c, &req);
make_bad_inode(inode);
@@ -538,7 +559,10 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
union ubifs_key key;
int err;
- ubifs_assert(inode_is_locked(host));
+ ubifs_assert(c, inode_is_locked(host));
+
+ if (!host->i_nlink)
+ return -ENOENT;
if (fname_len(&nm) > UBIFS_MAX_NLEN)
return -ENAMETOOLONG;
@@ -561,7 +585,7 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
goto out_free;
}
- ubifs_assert(inode->i_nlink == 1);
+ ubifs_assert(c, inode->i_nlink == 1);
clear_nlink(inode);
err = remove_xattr(c, host, inode, &nm);
if (err)
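The three host->i_nlink checks added above are one recurring guard: these paths take host_ui->ui_mutex and then charge xattr accounting to the host inode, but by the time the mutex is acquired the host may already have been unlinked, and updating a dying inode would leave orphaned xattr state behind. The shape of the guard in isolation:

	mutex_lock(&host_ui->ui_mutex);

	if (!host->i_nlink) {		/* host already unlinked */
		err = -ENOENT;
		goto out_noent;		/* unlock, roll back budgeting */
	}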
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 56569023783b..f8e5872f7cc2 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -125,7 +125,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode)
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
- iinfo->i_crtime = timespec64_to_timespec(inode->i_mtime);
+ iinfo->i_crtime = inode->i_mtime;
if (unlikely(insert_inode_locked(inode) < 0)) {
make_bad_inode(inode);
iput(inode);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 9915a58fbabd..5df554a9f9c9 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1270,7 +1270,6 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
struct udf_inode_info *iinfo = UDF_I(inode);
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
struct kernel_lb_addr *iloc = &iinfo->i_location;
- struct timespec ts;
unsigned int link_count;
unsigned int indirections = 0;
int bs = inode->i_sb->s_blocksize;
@@ -1443,12 +1442,9 @@ reread:
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
- udf_disk_stamp_to_time(&ts, fe->accessTime);
- inode->i_atime = timespec_to_timespec64(ts);
- udf_disk_stamp_to_time(&ts, fe->modificationTime);
- inode->i_mtime = timespec_to_timespec64(ts);
- udf_disk_stamp_to_time(&ts, fe->attrTime);
- inode->i_ctime = timespec_to_timespec64(ts);
+ udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
+ udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
+ udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime);
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
@@ -1458,13 +1454,10 @@ reread:
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
- udf_disk_stamp_to_time(&ts, efe->accessTime);
- inode->i_atime = timespec_to_timespec64(ts);
- udf_disk_stamp_to_time(&ts, efe->modificationTime);
- inode->i_mtime = timespec_to_timespec64(ts);
+ udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
+ udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
- udf_disk_stamp_to_time(&ts, efe->attrTime);
- inode->i_ctime = timespec_to_timespec64(ts);
+ udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime);
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
@@ -1601,7 +1594,7 @@ static int udf_sync_inode(struct inode *inode)
return udf_update_inode(inode, 1);
}
-static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec time)
+static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec64 time)
{
if (iinfo->i_crtime.tv_sec > time.tv_sec ||
(iinfo->i_crtime.tv_sec == time.tv_sec &&
@@ -1714,12 +1707,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
- udf_time_to_disk_stamp(&fe->accessTime,
- timespec64_to_timespec(inode->i_atime));
- udf_time_to_disk_stamp(&fe->modificationTime,
- timespec64_to_timespec(inode->i_mtime));
- udf_time_to_disk_stamp(&fe->attrTime,
- timespec64_to_timespec(inode->i_ctime));
+ udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
+ udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
+ udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
memset(&(fe->impIdent), 0, sizeof(struct regid));
strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1738,17 +1728,14 @@ static int udf_update_inode(struct inode *inode, int do_sync)
efe->objectSize = cpu_to_le64(inode->i_size);
efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
- udf_adjust_time(iinfo, timespec64_to_timespec(inode->i_atime));
- udf_adjust_time(iinfo, timespec64_to_timespec(inode->i_mtime));
- udf_adjust_time(iinfo, timespec64_to_timespec(inode->i_ctime));
+ udf_adjust_time(iinfo, inode->i_atime);
+ udf_adjust_time(iinfo, inode->i_mtime);
+ udf_adjust_time(iinfo, inode->i_ctime);
- udf_time_to_disk_stamp(&efe->accessTime,
- timespec64_to_timespec(inode->i_atime));
- udf_time_to_disk_stamp(&efe->modificationTime,
- timespec64_to_timespec(inode->i_mtime));
+ udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
+ udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
- udf_time_to_disk_stamp(&efe->attrTime,
- timespec64_to_timespec(inode->i_ctime));
+ udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
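
These udf inode hunks drop the struct timespec bounce buffer: udf_disk_stamp_to_time() now writes straight into the inode's timespec64 fields, and the timespec_to_timespec64()/timespec64_to_timespec() round trips go away. The shape of the change, taken from the hunks above:

    /* before: decode into a 32-bit-limited timespec, then widen */
    struct timespec ts;
    udf_disk_stamp_to_time(&ts, fe->accessTime);
    inode->i_atime = timespec_to_timespec64(ts);

    /* after: decode directly into the 64-bit destination */
    udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
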
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 06f37ddd2997..58cc2414992b 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -602,8 +602,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
if (unlikely(!fi)) {
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -694,8 +693,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err);
if (!fi) {
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
goto out;
}
set_nlink(inode, 2);
@@ -713,8 +711,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (!fi) {
clear_nlink(inode);
mark_inode_dirty(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
goto out;
}
cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -1041,8 +1038,7 @@ out:
out_no_entry:
up_write(&iinfo->i_data_sem);
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
goto out;
}
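
Every failure path in udf/namei.c that used to unlock and drop a freshly allocated inode by hand now calls discard_new_inode(). The helper covers the same unlock_new_inode() plus iput() work and, as far as I can tell, also ensures a concurrent lookup sees the inode as dead rather than picking up a half-constructed one. The pattern these hunks converge on (new_inode_for_entry and the failure condition are illustrative placeholders):

    inode = new_inode_for_entry(dir, mode);     /* still I_NEW, locked */
    if (add_directory_entry_failed) {
            inode_dec_link_count(inode);
            discard_new_inode(inode);   /* was: unlock_new_inode(); iput(); */
            return err;
    }
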
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 0c504c8031d3..3040dc2a32f6 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1980,7 +1980,7 @@ static void udf_open_lvid(struct super_block *sb)
struct buffer_head *bh = sbi->s_lvid_bh;
struct logicalVolIntegrityDesc *lvid;
struct logicalVolIntegrityDescImpUse *lvidiu;
- struct timespec ts;
+ struct timespec64 ts;
if (!bh)
return;
@@ -1992,7 +1992,7 @@ static void udf_open_lvid(struct super_block *sb)
mutex_lock(&sbi->s_alloc_mutex);
lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
- ktime_get_real_ts(&ts);
+ ktime_get_real_ts64(&ts);
udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
@@ -2017,7 +2017,7 @@ static void udf_close_lvid(struct super_block *sb)
struct buffer_head *bh = sbi->s_lvid_bh;
struct logicalVolIntegrityDesc *lvid;
struct logicalVolIntegrityDescImpUse *lvidiu;
- struct timespec ts;
+ struct timespec64 ts;
if (!bh)
return;
@@ -2029,7 +2029,7 @@ static void udf_close_lvid(struct super_block *sb)
mutex_lock(&sbi->s_alloc_mutex);
lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
- ktime_get_real_ts(&ts);
+ ktime_get_real_ts64(&ts);
udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index 630426ffb775..2ef0e212f08a 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -28,7 +28,7 @@ struct udf_ext_cache {
*/
struct udf_inode_info {
- struct timespec i_crtime;
+ struct timespec64 i_crtime;
/* Physical address of inode */
struct kernel_lb_addr i_location;
__u64 i_unique;
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 9dd3e1b9619e..9424d7cab790 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -138,7 +138,7 @@ struct udf_sb_info {
rwlock_t s_cred_lock;
/* Root Info */
- struct timespec s_record_time;
+ struct timespec64 s_record_time;
/* Fileset Info */
__u16 s_serial_number;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 84c47dde4d26..ee246769dee4 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -258,8 +258,8 @@ extern struct long_ad *udf_get_filelongad(uint8_t *, int, uint32_t *, int);
extern struct short_ad *udf_get_fileshortad(uint8_t *, int, uint32_t *, int);
/* udftime.c */
-extern void udf_disk_stamp_to_time(struct timespec *dest,
+extern void udf_disk_stamp_to_time(struct timespec64 *dest,
struct timestamp src);
-extern void udf_time_to_disk_stamp(struct timestamp *dest, struct timespec src);
+extern void udf_time_to_disk_stamp(struct timestamp *dest, struct timespec64 src);
#endif /* __UDF_DECL_H */
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index 67b33ac5d41b..fce4ad976c8c 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -41,7 +41,7 @@
#include <linux/time.h>
void
-udf_disk_stamp_to_time(struct timespec *dest, struct timestamp src)
+udf_disk_stamp_to_time(struct timespec64 *dest, struct timestamp src)
{
u16 typeAndTimezone = le16_to_cpu(src.typeAndTimezone);
u16 year = le16_to_cpu(src.year);
@@ -70,9 +70,9 @@ udf_disk_stamp_to_time(struct timespec *dest, struct timestamp src)
}
void
-udf_time_to_disk_stamp(struct timestamp *dest, struct timespec ts)
+udf_time_to_disk_stamp(struct timestamp *dest, struct timespec64 ts)
{
- long seconds;
+ time64_t seconds;
int16_t offset;
struct tm tm;
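
Taken together, the udf changes above are a y2038 conversion: the conversion helpers declared in udfdecl.h and defined in udftime.c, i_crtime in udf_inode_info, and s_record_time in udf_sb_info all move from timespec to timespec64, and the seconds local becomes time64_t, since long is only 32 bits on 32-bit architectures. A standalone demo of why the width matters, with int32_t standing in for a 32-bit long:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t secs = 2147483648LL;     /* one second past 03:14:07 UTC, 19 Jan 2038 */
            int32_t narrow = (int32_t)secs;  /* what a 32-bit 'long' would store */

            /* on two's-complement targets the truncated value wraps
             * negative, i.e. back to 1901 */
            printf("64-bit: %lld  truncated: %d\n", (long long)secs, (int)narrow);
            return 0;
    }
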
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index e727ee07dbe4..075d3d9114c8 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -547,7 +547,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
/*
* Block can be extended
*/
- ucg->cg_time = cpu_to_fs32(sb, get_seconds());
+ ucg->cg_time = ufs_get_seconds(sb);
for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
break;
@@ -639,7 +639,7 @@ cg_found:
if (!ufs_cg_chkmagic(sb, ucg))
ufs_panic (sb, "ufs_alloc_fragments",
"internal error, bad magic number on cg %u", cgno);
- ucg->cg_time = cpu_to_fs32(sb, get_seconds());
+ ucg->cg_time = ufs_get_seconds(sb);
if (count == uspi->s_fpb) {
result = ufs_alloccg_block (inode, ucpi, goal, err);
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index e1ef0f0a1353..969fd60436d3 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -89,7 +89,7 @@ void ufs_free_inode (struct inode * inode)
if (!ufs_cg_chkmagic(sb, ucg))
ufs_panic (sb, "ufs_free_fragments", "internal error, bad cg magic number");
- ucg->cg_time = cpu_to_fs32(sb, get_seconds());
+ ucg->cg_time = ufs_get_seconds(sb);
is_directory = S_ISDIR(inode->i_mode);
@@ -343,8 +343,7 @@ cg_found:
fail_remove_inode:
mutex_unlock(&sbi->s_lock);
clear_nlink(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
UFSD("EXIT (FAILED): err %d\n", err);
return ERR_PTR(err);
failed:
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index d5f43ba76c59..9ef40f100415 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -43,8 +43,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
return 0;
}
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
@@ -142,8 +141,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
out_fail:
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput(inode);
+ discard_new_inode(inode);
return err;
}
@@ -198,8 +196,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
out_fail:
inode_dec_link_count(inode);
inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput (inode);
+ discard_new_inode(inode);
out_dir:
inode_dec_link_count(dir);
return err;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 488088141451..a4e07e910f1b 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -698,7 +698,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
usb1 = ubh_get_usb_first(uspi);
usb3 = ubh_get_usb_third(uspi);
- usb1->fs_time = cpu_to_fs32(sb, get_seconds());
+ usb1->fs_time = ufs_get_seconds(sb);
if ((flags & UFS_ST_MASK) == UFS_ST_SUN ||
(flags & UFS_ST_MASK) == UFS_ST_SUNOS ||
(flags & UFS_ST_MASK) == UFS_ST_SUNx86)
@@ -1342,7 +1342,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
*/
if (*mount_flags & SB_RDONLY) {
ufs_put_super_internal(sb);
- usb1->fs_time = cpu_to_fs32(sb, get_seconds());
+ usb1->fs_time = ufs_get_seconds(sb);
if ((flags & UFS_ST_MASK) == UFS_ST_SUN
|| (flags & UFS_ST_MASK) == UFS_ST_SUNOS
|| (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 1907be6d5808..1fd3011ea623 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -590,3 +590,17 @@ static inline int ufs_is_data_ptr_zero(struct ufs_sb_private_info *uspi,
else
return *(__fs32 *)p == 0;
}
+
+static inline __fs32 ufs_get_seconds(struct super_block *sbp)
+{
+ time64_t now = ktime_get_real_seconds();
+
+ /* Signed 32-bit interpretation wraps around in 2038, which
+ * affects ufs1 inode stamps but not ufs2, which uses 64-bit
+ * stamps. For the superblock and block groups, assume
+ * unsigned 32-bit stamps, which are good until y2106.
+ * Wrap around rather than clamping here so that dirty
+ * filesystem detection via the superblock stamp keeps
+ * working.
+ */
+ return cpu_to_fs32(sbp, lower_32_bits(now));
+}
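
The new ufs_get_seconds() helper replaces the four open-coded cpu_to_fs32(sb, get_seconds()) call sites above. get_seconds() returned a native unsigned long; the helper reads a full time64_t via ktime_get_real_seconds() and then deliberately truncates with lower_32_bits(), so the unsigned on-disk stamps wrap in 2106 rather than saturating. A quick demo of wrap versus clamp:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lower32(uint64_t v) { return (uint32_t)v; }

    int main(void)
    {
            uint64_t past_y2106 = (1ULL << 32) + 5;  /* five seconds past the u32 limit */

            /* truncation yields 5; clamping instead would pin every later
             * stamp to 0xffffffff, making all post-2106 stamps compare equal
             * and defeating the superblock dirty check */
            printf("%u\n", lower32(past_y2106));
            return 0;
    }
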
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index bad9cea37f12..bfa0ec69f924 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -340,17 +340,15 @@ out:
* fatal_signal_pending()s, and the mmap_sem must be released before
* returning it.
*/
-int handle_userfault(struct vm_fault *vmf, unsigned long reason)
+vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
struct mm_struct *mm = vmf->vma->vm_mm;
struct userfaultfd_ctx *ctx;
struct userfaultfd_wait_queue uwq;
- int ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
bool must_wait, return_to_userland;
long blocking_state;
- ret = VM_FAULT_SIGBUS;
-
/*
* We don't do userfault handling for the final child pid update.
*
@@ -910,7 +908,7 @@ wakeup:
*/
spin_lock(&ctx->fault_pending_wqh.lock);
__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
- __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
+ __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
spin_unlock(&ctx->fault_pending_wqh.lock);
/* Flush pending events that may still wait on event_wqh */
@@ -1066,7 +1064,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
* anyway.
*/
list_del(&uwq->wq.entry);
- __add_wait_queue(&ctx->fault_wqh, &uwq->wq);
+ add_wait_queue(&ctx->fault_wqh, &uwq->wq);
write_seqcount_end(&ctx->refile_seq);
@@ -1215,7 +1213,7 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx,
__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
range);
if (waitqueue_active(&ctx->fault_wqh))
- __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
+ __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
spin_unlock(&ctx->fault_pending_wqh.lock);
}
@@ -1849,17 +1847,14 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
{
struct userfaultfd_ctx *ctx = f->private_data;
wait_queue_entry_t *wq;
- struct userfaultfd_wait_queue *uwq;
unsigned long pending = 0, total = 0;
spin_lock(&ctx->fault_pending_wqh.lock);
list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
- uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
pending++;
total++;
}
list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
- uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
total++;
}
spin_unlock(&ctx->fault_pending_wqh.lock);
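
Three independent cleanups in userfaultfd: handle_userfault() returns vm_fault_t instead of int, so fault codes cannot silently be mixed with errno-style values; the fault_wqh waitqueue is now driven through the regular add_wait_queue()/__wake_up() helpers, which take fault_wqh's own lock, rather than the __-prefixed variants that relied on fault_pending_wqh.lock covering both queues; and the fdinfo counting loops drop a container_of() into a never-read uwq variable, since only the entry counts matter:

    /* counting queued faults no longer materializes each entry */
    list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
            pending++;
            total++;
    }
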
diff --git a/fs/xattr.c b/fs/xattr.c
index f9cb1db187b7..daa732550088 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -23,7 +23,6 @@
#include <linux/posix_acl_xattr.h>
#include <linux/uaccess.h>
-#include "internal.h"
static const char *
strcmp_prefix(const char *a, const char *a_prefix)
@@ -501,10 +500,10 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
if (!f.file)
return error;
audit_file(f.file);
- error = mnt_want_write_file_path(f.file);
+ error = mnt_want_write_file(f.file);
if (!error) {
error = setxattr(f.file->f_path.dentry, name, value, size, flags);
- mnt_drop_write_file_path(f.file);
+ mnt_drop_write_file(f.file);
}
fdput(f);
return error;
@@ -539,7 +538,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
if (error > 0) {
if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
(strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
- posix_acl_fix_xattr_to_user(kvalue, size);
+ posix_acl_fix_xattr_to_user(kvalue, error);
if (size && copy_to_user(value, kvalue, error))
error = -EFAULT;
} else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
@@ -733,10 +732,10 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
if (!f.file)
return error;
audit_file(f.file);
- error = mnt_want_write_file_path(f.file);
+ error = mnt_want_write_file(f.file);
if (!error) {
error = removexattr(f.file->f_path.dentry, name);
- mnt_drop_write_file_path(f.file);
+ mnt_drop_write_file(f.file);
}
fdput(f);
return error;
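
Two fixes in fs/xattr.c: the fd-based setxattr/removexattr syscalls go back to plain mnt_want_write_file()/mnt_drop_write_file(), which presumably makes the _file_path variants (and the "internal.h" include) unnecessary, and getxattr() now hands posix_acl_fix_xattr_to_user() the number of bytes actually returned (error) rather than the caller's buffer size. With size, the ACL fixup could walk past the end of the value the filesystem filled in. The corrected flow, with is_posix_acl_name() as an illustrative stand-in for the two strcmp() checks:

    error = vfs_getxattr(d, kname, kvalue, size);
    if (error > 0) {
            if (is_posix_acl_name(kname))
                    posix_acl_fix_xattr_to_user(kvalue, error);  /* not size */
            if (size && copy_to_user(value, kvalue, error))
                    error = -EFAULT;
    }
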
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 2f3f75a7f180..7f96bdadc372 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -158,6 +158,7 @@ xfs-$(CONFIG_XFS_QUOTA) += scrub/quota.o
ifeq ($(CONFIG_XFS_ONLINE_REPAIR),y)
xfs-y += $(addprefix scrub/, \
agheader_repair.o \
+ bitmap.o \
repair.o \
)
endif
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index fecd187fcf2c..e701ebc36c06 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -248,7 +248,8 @@ __xfs_ag_resv_init(
/* Create a per-AG block reservation. */
int
xfs_ag_resv_init(
- struct xfs_perag *pag)
+ struct xfs_perag *pag,
+ struct xfs_trans *tp)
{
struct xfs_mount *mp = pag->pag_mount;
xfs_agnumber_t agno = pag->pag_agno;
@@ -260,11 +261,11 @@ xfs_ag_resv_init(
if (pag->pag_meta_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
+ error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask, &used);
if (error)
goto out;
- error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
+ error = xfs_finobt_calc_reserves(mp, tp, agno, &ask, &used);
if (error)
goto out;
@@ -282,7 +283,7 @@ xfs_ag_resv_init(
mp->m_inotbt_nores = true;
- error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+ error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask,
&used);
if (error)
goto out;
@@ -298,7 +299,7 @@ xfs_ag_resv_init(
if (pag->pag_rmapbt_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
+ error = xfs_rmapbt_calc_reserves(mp, tp, agno, &ask, &used);
if (error)
goto out;
@@ -309,7 +310,7 @@ xfs_ag_resv_init(
#ifdef DEBUG
/* need to read in the AGF for the ASSERT below to work */
- error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
+ error = xfs_alloc_pagf_init(pag->pag_mount, tp, pag->pag_agno, 0);
if (error)
return error;
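
xfs_ag_resv_init() and the refcountbt/finobt/rmapbt reserve calculators it calls gain a struct xfs_trans parameter, so the AGF/AGI and btree reads they issue are associated with the caller's transaction rather than always being transactionless; the DEBUG-only xfs_alloc_pagf_init() call is updated to match. The likely motivation is callers that initialize per-AG reservations inside an already-running transaction, where an untracked buffer read could collide with buffers the transaction holds. The call shape after the change:

    /* tp is the caller's transaction context */
    error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask, &used);
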
diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h
index 4619b554ee90..c0352edc8e41 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.h
+++ b/fs/xfs/libxfs/xfs_ag_resv.h
@@ -7,7 +7,7 @@
#define __XFS_AG_RESV_H__
int xfs_ag_resv_free(struct xfs_perag *pag);
-int xfs_ag_resv_init(struct xfs_perag *pag);
+int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);
bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
xfs_extlen_t xfs_ag_resv_needed(struct xfs_perag *pag,
@@ -28,7 +28,7 @@ xfs_ag_resv_rmapbt_alloc(
struct xfs_mount *mp,
xfs_agnumber_t agno)
{
- struct xfs_alloc_arg args = {0};
+ struct xfs_alloc_arg args = { NULL };
struct xfs_perag *pag;
args.len = 1;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 75dbdc14c45f..e1c0c0d2f1b0 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2198,12 +2198,12 @@ xfs_agfl_reset(
*/
STATIC void
xfs_defer_agfl_block(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_fsblock_t agbno,
struct xfs_owner_info *oinfo)
{
+ struct xfs_mount *mp = tp->t_mountp;
struct xfs_extent_free_item *new; /* new element */
ASSERT(xfs_bmap_free_item_zone != NULL);
@@ -2216,7 +2216,7 @@ xfs_defer_agfl_block(
trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
+ xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
}
/*
@@ -2323,16 +2323,8 @@ xfs_alloc_fix_freelist(
if (error)
goto out_agbp_relse;
- /* defer agfl frees if dfops is provided */
- if (tp->t_agfl_dfops) {
- xfs_defer_agfl_block(mp, tp->t_agfl_dfops, args->agno,
- bno, &targs.oinfo);
- } else {
- error = xfs_free_agfl_block(tp, args->agno, bno, agbp,
- &targs.oinfo);
- if (error)
- goto out_agbp_relse;
- }
+ /* defer agfl frees */
+ xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
}
targs.tp = tp;
@@ -2755,9 +2747,6 @@ xfs_alloc_read_agf(
pag->pagf_levels[XFS_BTNUM_RMAPi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
- spin_lock_init(&pag->pagb_lock);
- pag->pagb_count = 0;
- pag->pagb_tree = RB_ROOT;
pag->pagf_init = 1;
pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
}
@@ -2784,16 +2773,16 @@ xfs_alloc_read_agf(
*/
int /* error */
xfs_alloc_vextent(
- xfs_alloc_arg_t *args) /* allocation argument structure */
+ struct xfs_alloc_arg *args) /* allocation argument structure */
{
- xfs_agblock_t agsize; /* allocation group size */
- int error;
- int flags; /* XFS_ALLOC_FLAG_... locking flags */
- xfs_mount_t *mp; /* mount structure pointer */
- xfs_agnumber_t sagno; /* starting allocation group number */
- xfs_alloctype_t type; /* input allocation type */
- int bump_rotor = 0;
- xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
+ xfs_agblock_t agsize; /* allocation group size */
+ int error;
+ int flags; /* XFS_ALLOC_FLAG_... locking flags */
+ struct xfs_mount *mp; /* mount structure pointer */
+ xfs_agnumber_t sagno; /* starting allocation group number */
+ xfs_alloctype_t type; /* input allocation type */
+ int bump_rotor = 0;
+ xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
mp = args->mp;
type = args->otype = args->type;
@@ -2914,7 +2903,7 @@ xfs_alloc_vextent(
* locking of AGF, which might cause deadlock.
*/
if (++(args->agno) == mp->m_sb.sb_agcount) {
- if (args->firstblock != NULLFSBLOCK)
+ if (args->tp->t_firstblock != NULLFSBLOCK)
args->agno = sagno;
else
args->agno = 0;
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index e716c993ac4c..00cd5ec4cb6b 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -74,7 +74,6 @@ typedef struct xfs_alloc_arg {
int datatype; /* mask defining data type treatment */
char wasdel; /* set if allocation was prev delayed */
char wasfromfl; /* set if allocation is from freelist */
- xfs_fsblock_t firstblock; /* io first block allocated */
struct xfs_owner_info oinfo; /* owner of blocks being allocated */
enum xfs_ag_resv_type resv; /* block reservation to use */
} xfs_alloc_arg_t;
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 99590f61d624..1e671d4eb6fa 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -202,9 +202,7 @@ xfs_attr_set(
struct xfs_mount *mp = dp->i_mount;
struct xfs_buf *leaf_bp = NULL;
struct xfs_da_args args;
- struct xfs_defer_ops dfops;
struct xfs_trans_res tres;
- xfs_fsblock_t firstblock;
int rsvd = (flags & ATTR_ROOT) != 0;
int error, err2, local;
@@ -219,8 +217,6 @@ xfs_attr_set(
args.value = value;
args.valuelen = valuelen;
- args.firstblock = &firstblock;
- args.dfops = &dfops;
args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
args.total = xfs_attr_calc_size(&args, &local);
@@ -315,21 +311,18 @@ xfs_attr_set(
* It won't fit in the shortform, transform to a leaf block.
* GROT: another possible req'mt for a double-split btree op.
*/
- xfs_defer_init(args.dfops, args.firstblock);
error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
if (error)
- goto out_defer_cancel;
+ goto out;
/*
* Prevent the leaf buffer from being unlocked so that a
* concurrent AIL push cannot grab the half-baked leaf
* buffer and run into problems with the write verifier.
*/
xfs_trans_bhold(args.trans, leaf_bp);
- xfs_defer_bjoin(args.dfops, leaf_bp);
- xfs_defer_ijoin(args.dfops, dp);
- error = xfs_defer_finish(&args.trans, args.dfops);
+ error = xfs_defer_finish(&args.trans);
if (error)
- goto out_defer_cancel;
+ goto out;
/*
* Commit the leaf transformation. We'll need another (linked)
@@ -369,8 +362,6 @@ xfs_attr_set(
return error;
-out_defer_cancel:
- xfs_defer_cancel(&dfops);
out:
if (leaf_bp)
xfs_trans_brelse(args.trans, leaf_bp);
@@ -392,8 +383,6 @@ xfs_attr_remove(
{
struct xfs_mount *mp = dp->i_mount;
struct xfs_da_args args;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t firstblock;
int error;
XFS_STATS_INC(mp, xs_attr_remove);
@@ -405,9 +394,6 @@ xfs_attr_remove(
if (error)
return error;
- args.firstblock = &firstblock;
- args.dfops = &dfops;
-
/*
* we have no control over the attribute names that userspace passes us
* to remove, so we have to allow the name lookup prior to attribute
@@ -536,11 +522,12 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
STATIC int
-xfs_attr_leaf_addname(xfs_da_args_t *args)
+xfs_attr_leaf_addname(
+ struct xfs_da_args *args)
{
- xfs_inode_t *dp;
- struct xfs_buf *bp;
- int retval, error, forkoff;
+ struct xfs_inode *dp;
+ struct xfs_buf *bp;
+ int retval, error, forkoff;
trace_xfs_attr_leaf_addname(args);
@@ -598,14 +585,12 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* Commit that transaction so that the node_addname() call
* can manage its own transactions.
*/
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ return error;
/*
* Commit the current trans (including the inode) and start
@@ -687,15 +672,13 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ return error;
}
/*
@@ -711,7 +694,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
}
return error;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
+ xfs_defer_cancel(args->trans);
return error;
}
@@ -722,11 +705,12 @@ out_defer_cancel:
* if bmap_one_block() says there is only one block (ie: no remote blks).
*/
STATIC int
-xfs_attr_leaf_removename(xfs_da_args_t *args)
+xfs_attr_leaf_removename(
+ struct xfs_da_args *args)
{
- xfs_inode_t *dp;
- struct xfs_buf *bp;
- int error, forkoff;
+ struct xfs_inode *dp;
+ struct xfs_buf *bp;
+ int error, forkoff;
trace_xfs_attr_leaf_removename(args);
@@ -751,19 +735,17 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ return error;
}
return 0;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
+ xfs_defer_cancel(args->trans);
return error;
}
@@ -814,13 +796,14 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
* add a whole extra layer of confusion on top of that.
*/
STATIC int
-xfs_attr_node_addname(xfs_da_args_t *args)
+xfs_attr_node_addname(
+ struct xfs_da_args *args)
{
- xfs_da_state_t *state;
- xfs_da_state_blk_t *blk;
- xfs_inode_t *dp;
- xfs_mount_t *mp;
- int retval, error;
+ struct xfs_da_state *state;
+ struct xfs_da_state_blk *blk;
+ struct xfs_inode *dp;
+ struct xfs_mount *mp;
+ int retval, error;
trace_xfs_attr_node_addname(args);
@@ -879,14 +862,12 @@ restart:
*/
xfs_da_state_free(state);
state = NULL;
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ goto out;
/*
* Commit the node conversion and start the next
@@ -905,14 +886,12 @@ restart:
* in the index/blkno/rmtblkno/rmtblkcnt fields and
* in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
*/
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_split(state);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ goto out;
} else {
/*
* Addition succeeded, update Btree hashvals.
@@ -1003,14 +982,12 @@ restart:
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_join(state);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ goto out;
}
/*
@@ -1037,7 +1014,7 @@ out:
return error;
return retval;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
+ xfs_defer_cancel(args->trans);
goto out;
}
@@ -1049,13 +1026,14 @@ out_defer_cancel:
* the root node (a special case of an intermediate node).
*/
STATIC int
-xfs_attr_node_removename(xfs_da_args_t *args)
+xfs_attr_node_removename(
+ struct xfs_da_args *args)
{
- xfs_da_state_t *state;
- xfs_da_state_blk_t *blk;
- xfs_inode_t *dp;
- struct xfs_buf *bp;
- int retval, error, forkoff;
+ struct xfs_da_state *state;
+ struct xfs_da_state_blk *blk;
+ struct xfs_inode *dp;
+ struct xfs_buf *bp;
+ int retval, error, forkoff;
trace_xfs_attr_node_removename(args);
@@ -1127,14 +1105,12 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_join(state);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ goto out;
/*
* Commit the Btree join operation and start a new trans.
*/
@@ -1159,15 +1135,13 @@ xfs_attr_node_removename(xfs_da_args_t *args)
goto out;
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ goto out;
} else
xfs_trans_brelse(args->trans, bp);
}
@@ -1177,7 +1151,7 @@ out:
xfs_da_state_free(state);
return error;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
+ xfs_defer_cancel(args->trans);
goto out;
}
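
With dfops embedded in the transaction, the attr code no longer seeds args.firstblock/args.dfops or wraps each btree operation in xfs_defer_init(): every step is simply followed by xfs_defer_finish(&args->trans), and cleanup funnels through xfs_defer_cancel(args->trans). Note the asymmetry the new error paths encode: a failing xfs_defer_finish() returns directly, which suggests it already cancels its own pending work on failure, while a failing btree operation still jumps to out_defer_cancel. The recurring pattern:

    error = xfs_attr3_leaf_to_node(args);
    if (error)
            goto out_defer_cancel;      /* unfinished deferred work to drop */
    error = xfs_defer_finish(&args->trans);
    if (error)
            return error;               /* defer_finish cleaned up after itself */
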
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 76e90046731c..6fc5425b1474 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -242,8 +242,9 @@ xfs_attr3_leaf_verify(
struct xfs_attr3_icleaf_hdr ichdr;
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_attr_leafblock *leaf = bp->b_addr;
- struct xfs_perag *pag = bp->b_pag;
struct xfs_attr_leaf_entry *entries;
+ uint16_t end;
+ int i;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -268,7 +269,7 @@ xfs_attr3_leaf_verify(
* because we may have transitioned an empty shortform attr to a leaf
* if the attr didn't fit in shortform.
*/
- if (pag && pag->pagf_init && ichdr.count == 0)
+ if (!xfs_log_in_recovery(mp) && ichdr.count == 0)
return __this_address;
/*
@@ -289,6 +290,26 @@ xfs_attr3_leaf_verify(
/* XXX: need to range check rest of attr header values */
/* XXX: hash order check? */
+ /*
+ * Quickly check the freemap information. Attribute data has to be
+ * aligned to 4-byte boundaries, and likewise for the free space.
+ */
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
+ return __this_address;
+ if (ichdr.freemap[i].base & 0x3)
+ return __this_address;
+ if (ichdr.freemap[i].size > mp->m_attr_geo->blksize)
+ return __this_address;
+ if (ichdr.freemap[i].size & 0x3)
+ return __this_address;
+ end = ichdr.freemap[i].base + ichdr.freemap[i].size;
+ if (end < ichdr.freemap[i].base)
+ return __this_address;
+ if (end > mp->m_attr_geo->blksize)
+ return __this_address;
+ }
+
return NULL;
}
@@ -506,7 +527,7 @@ xfs_attr_shortform_create(xfs_da_args_t *args)
{
xfs_attr_sf_hdr_t *hdr;
xfs_inode_t *dp;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
trace_xfs_attr_sf_create(args);
@@ -541,7 +562,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
int i, offset, size;
xfs_mount_t *mp;
xfs_inode_t *dp;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
trace_xfs_attr_sf_add(args);
@@ -682,7 +703,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
xfs_attr_shortform_t *sf;
xfs_attr_sf_entry_t *sfe;
int i;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
trace_xfs_attr_sf_lookup(args);
@@ -747,18 +768,18 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
*/
int
xfs_attr_shortform_to_leaf(
- struct xfs_da_args *args,
- struct xfs_buf **leaf_bp)
+ struct xfs_da_args *args,
+ struct xfs_buf **leaf_bp)
{
- xfs_inode_t *dp;
- xfs_attr_shortform_t *sf;
- xfs_attr_sf_entry_t *sfe;
- xfs_da_args_t nargs;
- char *tmpbuffer;
- int error, i, size;
- xfs_dablk_t blkno;
- struct xfs_buf *bp;
- xfs_ifork_t *ifp;
+ struct xfs_inode *dp;
+ struct xfs_attr_shortform *sf;
+ struct xfs_attr_sf_entry *sfe;
+ struct xfs_da_args nargs;
+ char *tmpbuffer;
+ int error, i, size;
+ xfs_dablk_t blkno;
+ struct xfs_buf *bp;
+ struct xfs_ifork *ifp;
trace_xfs_attr_sf_to_leaf(args);
@@ -802,8 +823,6 @@ xfs_attr_shortform_to_leaf(
memset((char *)&nargs, 0, sizeof(nargs));
nargs.dp = dp;
nargs.geo = args->geo;
- nargs.firstblock = args->firstblock;
- nargs.dfops = args->dfops;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
@@ -1006,8 +1025,6 @@ xfs_attr3_leaf_to_shortform(
memset((char *)&nargs, 0, sizeof(nargs));
nargs.geo = args->geo;
nargs.dp = dp;
- nargs.firstblock = args->firstblock;
- nargs.dfops = args->dfops;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
@@ -1570,17 +1587,10 @@ xfs_attr3_leaf_rebalance(
*/
swap = 0;
if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) {
- struct xfs_da_state_blk *tmp_blk;
- struct xfs_attr3_icleaf_hdr tmp_ichdr;
-
- tmp_blk = blk1;
- blk1 = blk2;
- blk2 = tmp_blk;
+ swap(blk1, blk2);
- /* struct copies to swap them rather than reconverting */
- tmp_ichdr = ichdr1;
- ichdr1 = ichdr2;
- ichdr2 = tmp_ichdr;
+ /* swap structures rather than reconverting them */
+ swap(ichdr1, ichdr2);
leaf1 = blk1->bp->b_addr;
leaf2 = blk2->bp->b_addr;
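
Besides the xfs_ifork_t-to-struct-xfs_ifork and open-coded-swap-to-swap() cleanups, two verifier changes stand out in xfs_attr_leaf.c: the empty-leaf check is now gated on log recovery state instead of per-AG state hanging off the buffer, and the verifier validates every freemap entry, requiring base and size to be 4-byte aligned and within the block, with end < base catching uint16_t wraparound in base + size. A standalone version of that overflow-aware range check:

    #include <stdbool.h>
    #include <stdint.h>

    /* end is computed in 16-bit space, so a wrapped sum lands below
     * base and is rejected before the upper-bound comparison. */
    static bool freemap_entry_ok(uint16_t base, uint16_t size, uint16_t blksize)
    {
            uint16_t end = base + size;

            if ((base | size) & 0x3)                /* 4-byte alignment */
                    return false;
            if (base > blksize || size > blksize)
                    return false;
            return end >= base && end <= blksize;   /* no wrap, in bounds */
    }
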
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index bf2e0371149b..af094063e402 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -480,17 +480,15 @@ xfs_attr_rmtval_set(
* extent and then crash then the block may not contain the
* correct metadata after log recovery occurs.
*/
- xfs_defer_init(args->dfops, args->firstblock);
nmap = 1;
error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
- blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
- args->total, &map, &nmap, args->dfops);
+ blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
+ &nmap);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ return error;
ASSERT(nmap == 1);
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
@@ -522,7 +520,6 @@ xfs_attr_rmtval_set(
ASSERT(blkcnt > 0);
- xfs_defer_init(args->dfops, args->firstblock);
nmap = 1;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
blkcnt, &map, &nmap,
@@ -557,8 +554,7 @@ xfs_attr_rmtval_set(
ASSERT(valuelen == 0);
return 0;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
- args->trans = NULL;
+ xfs_defer_cancel(args->trans);
return error;
}
@@ -626,16 +622,13 @@ xfs_attr_rmtval_remove(
blkcnt = args->rmtblkcnt;
done = 0;
while (!done) {
- xfs_defer_init(args->dfops, args->firstblock);
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
- XFS_BMAPI_ATTRFORK, 1, args->firstblock,
- args->dfops, &done);
+ XFS_BMAPI_ATTRFORK, 1, &done);
if (error)
goto out_defer_cancel;
- xfs_defer_ijoin(args->dfops, args->dp);
- error = xfs_defer_finish(&args->trans, args->dfops);
+ error = xfs_defer_finish(&args->trans);
if (error)
- goto out_defer_cancel;
+ return error;
/*
* Close out trans and start the next one in the chain.
@@ -646,7 +639,6 @@ xfs_attr_rmtval_remove(
}
return 0;
out_defer_cancel:
- xfs_defer_cancel(args->dfops);
- args->trans = NULL;
+ xfs_defer_cancel(args->trans);
return error;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 7205268b30bc..2760314fdf7f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -326,7 +326,7 @@ xfs_bmap_check_leaf_extents(
xfs_buf_t *bp; /* buffer for "block" */
int error; /* error return value */
xfs_extnum_t i=0, j; /* index into the extents list */
- xfs_ifork_t *ifp; /* fork structure */
+ struct xfs_ifork *ifp; /* fork structure */
int level; /* btree level, for checking */
xfs_mount_t *mp; /* file system mount structure */
__be64 *pp; /* pointer to block address */
@@ -533,8 +533,7 @@ xfs_bmap_validate_ret(
*/
void
__xfs_bmap_add_free(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_fsblock_t bno,
xfs_filblks_t len,
struct xfs_owner_info *oinfo,
@@ -542,8 +541,9 @@ __xfs_bmap_add_free(
{
struct xfs_extent_free_item *new; /* new element */
#ifdef DEBUG
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
ASSERT(bno != NULLFSBLOCK);
ASSERT(len > 0);
@@ -566,9 +566,10 @@ __xfs_bmap_add_free(
else
xfs_rmap_skip_owner_update(&new->xefi_oinfo);
new->xefi_skip_discard = skip_discard;
- trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
- XFS_FSB_TO_AGBNO(mp, bno), len);
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
+ trace_xfs_bmap_free_defer(tp->t_mountp,
+ XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
+ XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
+ xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
/*
@@ -594,7 +595,7 @@ xfs_bmap_btree_to_extents(
xfs_fsblock_t cbno; /* child block number */
xfs_buf_t *cbp; /* child block's buffer */
int error; /* error return value */
- xfs_ifork_t *ifp; /* inode fork data */
+ struct xfs_ifork *ifp; /* inode fork data */
xfs_mount_t *mp; /* mount point structure */
__be64 *pp; /* ptr to block address */
struct xfs_btree_block *rblock;/* root btree block */
@@ -624,7 +625,7 @@ xfs_bmap_btree_to_extents(
if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
return error;
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
- xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
+ xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
ip->i_d.di_nblocks--;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(tp, cbp);
@@ -644,25 +645,23 @@ xfs_bmap_btree_to_extents(
*/
STATIC int /* error */
xfs_bmap_extents_to_btree(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first-block-allocated */
- struct xfs_defer_ops *dfops, /* blocks freed in xaction */
- xfs_btree_cur_t **curp, /* cursor returned to caller */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_inode *ip, /* incore inode pointer */
+ struct xfs_btree_cur **curp, /* cursor returned to caller */
int wasdel, /* converting a delayed alloc */
int *logflagsp, /* inode logging flags */
int whichfork) /* data or attr fork */
{
struct xfs_btree_block *ablock; /* allocated (child) bt block */
- xfs_buf_t *abp; /* buffer for ablock */
- xfs_alloc_arg_t args; /* allocation arguments */
- xfs_bmbt_rec_t *arp; /* child record pointer */
+ struct xfs_buf *abp; /* buffer for ablock */
+ struct xfs_alloc_arg args; /* allocation arguments */
+ struct xfs_bmbt_rec *arp; /* child record pointer */
struct xfs_btree_block *block; /* btree root block */
- xfs_btree_cur_t *cur; /* bmap btree cursor */
+ struct xfs_btree_cur *cur; /* bmap btree cursor */
int error; /* error return value */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_bmbt_key_t *kp; /* root block key pointer */
- xfs_mount_t *mp; /* mount structure */
+ struct xfs_ifork *ifp; /* inode fork pointer */
+ struct xfs_bmbt_key *kp; /* root block key pointer */
+ struct xfs_mount *mp; /* mount structure */
xfs_bmbt_ptr_t *pp; /* root block address pointer */
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec rec;
@@ -690,8 +689,6 @@ xfs_bmap_extents_to_btree(
* Need a cursor. Can't allocate until bb_level is filled in.
*/
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
/*
* Convert to a btree with two levels, one record in root.
@@ -701,45 +698,43 @@ xfs_bmap_extents_to_btree(
args.tp = tp;
args.mp = mp;
xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
- args.firstblock = *firstblock;
- if (*firstblock == NULLFSBLOCK) {
+ if (tp->t_firstblock == NULLFSBLOCK) {
args.type = XFS_ALLOCTYPE_START_BNO;
args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
- } else if (dfops->dop_low) {
+ } else if (tp->t_flags & XFS_TRANS_LOWMODE) {
args.type = XFS_ALLOCTYPE_START_BNO;
- args.fsbno = *firstblock;
+ args.fsbno = tp->t_firstblock;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
- args.fsbno = *firstblock;
+ args.fsbno = tp->t_firstblock;
}
args.minlen = args.maxlen = args.prod = 1;
args.wasdel = wasdel;
*logflagsp = 0;
if ((error = xfs_alloc_vextent(&args))) {
- xfs_iroot_realloc(ip, -1, whichfork);
ASSERT(ifp->if_broot == NULL);
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- return error;
+ goto err1;
}
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
- xfs_iroot_realloc(ip, -1, whichfork);
ASSERT(ifp->if_broot == NULL);
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- return -ENOSPC;
+ error = -ENOSPC;
+ goto err1;
}
/*
* Allocation can't fail, the space was reserved.
*/
- ASSERT(*firstblock == NULLFSBLOCK ||
- args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
- *firstblock = cur->bc_private.b.firstblock = args.fsbno;
+ ASSERT(tp->t_firstblock == NULLFSBLOCK ||
+ args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
+ tp->t_firstblock = args.fsbno;
cur->bc_private.b.allocated++;
ip->i_d.di_nblocks++;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
+ if (!abp) {
+ error = -ENOSPC;
+ goto err2;
+ }
/*
* Fill in the child block.
*/
@@ -779,6 +774,15 @@ xfs_bmap_extents_to_btree(
*curp = cur;
*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
return 0;
+
+err2:
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
+err1:
+ xfs_iroot_realloc(ip, -1, whichfork);
+ XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+
+ return error;
}
/*
@@ -812,7 +816,6 @@ STATIC int /* error */
xfs_bmap_local_to_extents(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated in xaction */
xfs_extlen_t total, /* total blocks needed by transaction */
int *logflagsp, /* inode logging flags */
int whichfork,
@@ -823,7 +826,7 @@ xfs_bmap_local_to_extents(
{
int error = 0;
int flags; /* logging flags returned */
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
xfs_alloc_arg_t args; /* allocation arguments */
xfs_buf_t *bp; /* buffer for extent block */
struct xfs_bmbt_irec rec;
@@ -850,16 +853,15 @@ xfs_bmap_local_to_extents(
args.tp = tp;
args.mp = ip->i_mount;
xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
- args.firstblock = *firstblock;
/*
* Allocate a block. We know we need only one, since the
* file currently fits in an inode.
*/
- if (*firstblock == NULLFSBLOCK) {
+ if (tp->t_firstblock == NULLFSBLOCK) {
args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
args.type = XFS_ALLOCTYPE_START_BNO;
} else {
- args.fsbno = *firstblock;
+ args.fsbno = tp->t_firstblock;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
}
args.total = total;
@@ -871,7 +873,7 @@ xfs_bmap_local_to_extents(
/* Can't fail, the space was reserved. */
ASSERT(args.fsbno != NULLFSBLOCK);
ASSERT(args.len == 1);
- *firstblock = args.fsbno;
+ tp->t_firstblock = args.fsbno;
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
/*
@@ -917,8 +919,6 @@ STATIC int /* error */
xfs_bmap_add_attrfork_btree(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated */
- struct xfs_defer_ops *dfops, /* blocks to free at commit */
int *flags) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
@@ -931,8 +931,6 @@ xfs_bmap_add_attrfork_btree(
*flags |= XFS_ILOG_DBROOT;
else {
cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
- cur->bc_private.b.dfops = dfops;
- cur->bc_private.b.firstblock = *firstblock;
error = xfs_bmbt_lookup_first(cur, &stat);
if (error)
goto error0;
@@ -944,7 +942,6 @@ xfs_bmap_add_attrfork_btree(
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
return -ENOSPC;
}
- *firstblock = cur->bc_private.b.firstblock;
cur->bc_private.b.allocated = 0;
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
}
@@ -959,10 +956,8 @@ error0:
*/
STATIC int /* error */
xfs_bmap_add_attrfork_extents(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated */
- struct xfs_defer_ops *dfops, /* blocks to free at commit */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_inode *ip, /* incore inode pointer */
int *flags) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* bmap btree cursor */
@@ -971,12 +966,11 @@ xfs_bmap_add_attrfork_extents(
if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
return 0;
cur = NULL;
- error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
- flags, XFS_DATA_FORK);
+ error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
+ XFS_DATA_FORK);
if (cur) {
cur->bc_private.b.allocated = 0;
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
}
return error;
}
@@ -994,13 +988,11 @@ xfs_bmap_add_attrfork_extents(
*/
STATIC int /* error */
xfs_bmap_add_attrfork_local(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_fsblock_t *firstblock, /* first block allocated */
- struct xfs_defer_ops *dfops, /* blocks to free at commit */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_inode *ip, /* incore inode pointer */
int *flags) /* inode logging flags */
{
- xfs_da_args_t dargs; /* args for dir/attr code */
+ struct xfs_da_args dargs; /* args for dir/attr code */
if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
return 0;
@@ -1009,8 +1001,6 @@ xfs_bmap_add_attrfork_local(
memset(&dargs, 0, sizeof(dargs));
dargs.geo = ip->i_mount->m_dir_geo;
dargs.dp = ip;
- dargs.firstblock = firstblock;
- dargs.dfops = dfops;
dargs.total = dargs.geo->fsbcount;
dargs.whichfork = XFS_DATA_FORK;
dargs.trans = tp;
@@ -1018,8 +1008,8 @@ xfs_bmap_add_attrfork_local(
}
if (S_ISLNK(VFS_I(ip)->i_mode))
- return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
- flags, XFS_DATA_FORK,
+ return xfs_bmap_local_to_extents(tp, ip, 1, flags,
+ XFS_DATA_FORK,
xfs_symlink_local_to_remote);
/* should only be called for types that support local format data */
@@ -1037,8 +1027,6 @@ xfs_bmap_add_attrfork(
int size, /* space new attribute needs */
int rsvd) /* xact may use reserved blks */
{
- xfs_fsblock_t firstblock; /* 1st block/ag allocated */
- struct xfs_defer_ops dfops; /* freed extent records */
xfs_mount_t *mp; /* mount structure */
xfs_trans_t *tp; /* transaction pointer */
int blks; /* space reservation */
@@ -1104,19 +1092,15 @@ xfs_bmap_add_attrfork(
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
ip->i_afp->if_flags = XFS_IFEXTENTS;
logflags = 0;
- xfs_defer_init(&dfops, &firstblock);
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_LOCAL:
- error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
- &logflags);
+ error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
break;
case XFS_DINODE_FMT_EXTENTS:
- error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
- &dfops, &logflags);
+ error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
break;
case XFS_DINODE_FMT_BTREE:
- error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
- &logflags);
+ error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
break;
default:
error = 0;
@@ -1125,7 +1109,7 @@ xfs_bmap_add_attrfork(
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
if (error)
- goto bmap_cancel;
+ goto trans_cancel;
if (!xfs_sb_version_hasattr(&mp->m_sb) ||
(!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
bool log_sb = false;
@@ -1144,15 +1128,10 @@ xfs_bmap_add_attrfork(
xfs_log_sb(tp);
}
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto bmap_cancel;
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
-bmap_cancel:
- xfs_defer_cancel(&dfops);
trans_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1501,7 +1480,7 @@ xfs_bmap_one_block(
xfs_inode_t *ip, /* incore inode */
int whichfork) /* data or attr fork */
{
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
int rval; /* return value */
xfs_bmbt_irec_t s; /* internal version of extent */
struct xfs_iext_cursor icur;
@@ -1539,7 +1518,7 @@ xfs_bmap_add_extent_delay_real(
struct xfs_bmbt_irec *new = &bma->got;
int error; /* error return value */
int i; /* temp state */
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
xfs_fileoff_t new_endoff; /* end offset of new entry */
xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
/* left is 0, right is 1, prev is 2 */
@@ -1809,7 +1788,6 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->dfops,
&bma->cur, 1, &tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
@@ -1887,8 +1865,7 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->dfops, &bma->cur, 1,
- &tmp_rval, whichfork);
+ &bma->cur, 1, &tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
goto done;
@@ -1968,8 +1945,7 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->dfops, &bma->cur,
- 1, &tmp_rval, whichfork);
+ &bma->cur, 1, &tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
goto done;
@@ -1994,8 +1970,7 @@ xfs_bmap_add_extent_delay_real(
/* add reverse mapping unless caller opted out */
if (!(bma->flags & XFS_BMAPI_NORMAP)) {
- error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip,
- whichfork, new);
+ error = xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
if (error)
goto done;
}
@@ -2006,8 +1981,8 @@ xfs_bmap_add_extent_delay_real(
ASSERT(bma->cur == NULL);
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->dfops, &bma->cur,
- da_old > 0, &tmp_logflags, whichfork);
+ &bma->cur, da_old > 0, &tmp_logflags,
+ whichfork);
bma->logflags |= tmp_logflags;
if (error)
goto done;
@@ -2046,14 +2021,12 @@ xfs_bmap_add_extent_unwritten_real(
struct xfs_iext_cursor *icur,
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
- xfs_fsblock_t *first, /* pointer to firstblock variable */
- struct xfs_defer_ops *dfops, /* list of extents to be freed */
int *logflagsp) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
int error; /* error return value */
int i; /* temp state */
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
xfs_fileoff_t new_endoff; /* end offset of new entry */
xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
/* left is 0, right is 1, prev is 2 */
@@ -2479,7 +2452,7 @@ xfs_bmap_add_extent_unwritten_real(
}
/* update reverse mappings */
- error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
+ error = xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
if (error)
goto done;
@@ -2488,8 +2461,8 @@ xfs_bmap_add_extent_unwritten_real(
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
- 0, &tmp_logflags, whichfork);
+ error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
+ &tmp_logflags, whichfork);
*logflagsp |= tmp_logflags;
if (error)
goto done;
@@ -2520,7 +2493,7 @@ xfs_bmap_add_extent_hole_delay(
struct xfs_iext_cursor *icur,
xfs_bmbt_irec_t *new) /* new data to add to file extents */
{
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
xfs_bmbt_irec_t left; /* left neighbor extent entry */
xfs_filblks_t newlen=0; /* new indirect size */
xfs_filblks_t oldlen=0; /* old indirect size */
@@ -2660,8 +2633,6 @@ xfs_bmap_add_extent_hole_real(
struct xfs_iext_cursor *icur,
struct xfs_btree_cur **curp,
struct xfs_bmbt_irec *new,
- xfs_fsblock_t *first,
- struct xfs_defer_ops *dfops,
int *logflagsp,
int flags)
{
@@ -2842,7 +2813,7 @@ xfs_bmap_add_extent_hole_real(
/* add reverse mapping unless caller opted out */
if (!(flags & XFS_BMAPI_NORMAP)) {
- error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
+ error = xfs_rmap_map_extent(tp, ip, whichfork, new);
if (error)
goto done;
}
@@ -2852,8 +2823,8 @@ xfs_bmap_add_extent_hole_real(
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
- 0, &tmp_logflags, whichfork);
+ error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
+ &tmp_logflags, whichfork);
*logflagsp |= tmp_logflags;
cur = *curp;
if (error)
@@ -3071,10 +3042,11 @@ xfs_bmap_adjacent(
XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
mp = ap->ip->i_mount;
- nullfb = *ap->firstblock == NULLFSBLOCK;
+ nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
rt = XFS_IS_REALTIME_INODE(ap->ip) &&
xfs_alloc_is_userdata(ap->datatype);
- fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
+ fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
+ ap->tp->t_firstblock);
/*
* If allocating at eof, and there's a previous real block,
* try to use its last block as our starting point.
@@ -3432,8 +3404,9 @@ xfs_bmap_btalloc(
}
- nullfb = *ap->firstblock == NULLFSBLOCK;
- fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
+ nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
+ fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
+ ap->tp->t_firstblock);
if (nullfb) {
if (xfs_alloc_is_userdata(ap->datatype) &&
xfs_inode_is_filestream(ap->ip)) {
@@ -3444,7 +3417,7 @@ xfs_bmap_btalloc(
ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
}
} else
- ap->blkno = *ap->firstblock;
+ ap->blkno = ap->tp->t_firstblock;
xfs_bmap_adjacent(ap);
@@ -3455,7 +3428,7 @@ xfs_bmap_btalloc(
if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
;
else
- ap->blkno = *ap->firstblock;
+ ap->blkno = ap->tp->t_firstblock;
/*
* Normal allocation, done through xfs_alloc_vextent.
*/
@@ -3468,7 +3441,6 @@ xfs_bmap_btalloc(
/* Trim the allocation back to the maximum an AG can fit. */
args.maxlen = min(ap->length, mp->m_ag_max_usable);
- args.firstblock = *ap->firstblock;
blen = 0;
if (nullfb) {
/*
@@ -3483,7 +3455,7 @@ xfs_bmap_btalloc(
error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
if (error)
return error;
- } else if (ap->dfops->dop_low) {
+ } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
if (xfs_inode_is_filestream(ap->ip))
args.type = XFS_ALLOCTYPE_FIRST_AG;
else
@@ -3518,7 +3490,7 @@ xfs_bmap_btalloc(
* is >= the stripe unit and the allocation offset is
* at the end of file.
*/
- if (!ap->dfops->dop_low && ap->aeof) {
+ if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
if (!ap->offset) {
args.alignment = stripe_align;
atype = args.type;
@@ -3610,20 +3582,20 @@ xfs_bmap_btalloc(
args.total = ap->minlen;
if ((error = xfs_alloc_vextent(&args)))
return error;
- ap->dfops->dop_low = true;
+ ap->tp->t_flags |= XFS_TRANS_LOWMODE;
}
if (args.fsbno != NULLFSBLOCK) {
/*
* check the allocation happened at the same or higher AG than
* the first block that was allocated.
*/
- ASSERT(*ap->firstblock == NULLFSBLOCK ||
- XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
+ ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
+ XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
XFS_FSB_TO_AGNO(mp, args.fsbno));
ap->blkno = args.fsbno;
- if (*ap->firstblock == NULLFSBLOCK)
- *ap->firstblock = args.fsbno;
+ if (ap->tp->t_firstblock == NULLFSBLOCK)
+ ap->tp->t_firstblock = args.fsbno;
ASSERT(nullfb || fb_agno <= args.agno);
ap->length = args.len;
/*
@@ -3788,8 +3760,7 @@ xfs_bmapi_update_map(
mval[-1].br_startblock != HOLESTARTBLOCK &&
mval->br_startblock == mval[-1].br_startblock +
mval[-1].br_blockcount &&
- ((flags & XFS_BMAPI_IGSTATE) ||
- mval[-1].br_state == mval->br_state)) {
+ mval[-1].br_state == mval->br_state) {
ASSERT(mval->br_startoff ==
mval[-1].br_startoff + mval[-1].br_blockcount);
mval[-1].br_blockcount += mval->br_blockcount;
@@ -3834,7 +3805,7 @@ xfs_bmapi_read(
ASSERT(*nmap >= 1);
ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
- XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
+ XFS_BMAPI_COWFORK)));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
if (unlikely(XFS_TEST_ERROR(
@@ -4079,15 +4050,10 @@ xfs_bmapi_allocate(
if (error)
return error;
- if (bma->cur)
- bma->cur->bc_private.b.firstblock = *bma->firstblock;
if (bma->blkno == NULLFSBLOCK)
return 0;
- if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
+ if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
- bma->cur->bc_private.b.firstblock = *bma->firstblock;
- bma->cur->bc_private.b.dfops = bma->dfops;
- }
/*
* Bump the number of extents we've allocated
* in this call.
@@ -4122,8 +4088,7 @@ xfs_bmapi_allocate(
else
error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
whichfork, &bma->icur, &bma->cur, &bma->got,
- bma->firstblock, bma->dfops, &bma->logflags,
- bma->flags);
+ &bma->logflags, bma->flags);
bma->logflags |= tmp_logflags;
if (error)
@@ -4174,8 +4139,6 @@ xfs_bmapi_convert_unwritten(
if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
bma->ip, whichfork);
- bma->cur->bc_private.b.firstblock = *bma->firstblock;
- bma->cur->bc_private.b.dfops = bma->dfops;
}
mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
@@ -4192,8 +4155,7 @@ xfs_bmapi_convert_unwritten(
}
error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
- &bma->icur, &bma->cur, mval, bma->firstblock,
- bma->dfops, &tmp_logflags);
+ &bma->icur, &bma->cur, mval, &tmp_logflags);
/*
* Log the inode core unconditionally in the unwritten extent conversion
* path because the conversion might not have done so (e.g., if the
@@ -4231,12 +4193,6 @@ xfs_bmapi_convert_unwritten(
* extent state if necessary. Details behaviour is controlled by the flags
* parameter. Only allocates blocks from a single allocation group, to avoid
* locking problems.
- *
- * The returned value in "firstblock" from the first call in a transaction
- * must be remembered and presented to subsequent calls in "firstblock".
- * An upper bound for the number of blocks to be allocated is supplied to
- * the first call in "total"; if no allocation group has that many free
- * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
*/
int
xfs_bmapi_write(
@@ -4245,12 +4201,9 @@ xfs_bmapi_write(
xfs_fileoff_t bno, /* starting file offs. mapped */
xfs_filblks_t len, /* length to map in file */
int flags, /* XFS_BMAPI_... */
- xfs_fsblock_t *firstblock, /* first allocated block
- controls a.g. for allocs */
xfs_extlen_t total, /* total blocks needed */
struct xfs_bmbt_irec *mval, /* output: map values */
- int *nmap, /* i/o: mval size/count */
- struct xfs_defer_ops *dfops) /* i/o: list extents to free */
+ int *nmap) /* i/o: mval size/count */
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
@@ -4279,7 +4232,6 @@ xfs_bmapi_write(
ASSERT(*nmap >= 1);
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
- ASSERT(!(flags & XFS_BMAPI_IGSTATE));
ASSERT(tp != NULL ||
(flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
(XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
@@ -4315,7 +4267,7 @@ xfs_bmapi_write(
XFS_STATS_INC(mp, xs_blk_mapw);
- if (*firstblock == NULLFSBLOCK) {
+ if (!tp || tp->t_firstblock == NULLFSBLOCK) {
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
else
@@ -4342,8 +4294,6 @@ xfs_bmapi_write(
bma.ip = ip;
bma.total = total;
bma.datatype = 0;
- bma.dfops = dfops;
- bma.firstblock = firstblock;
while (bno < end && n < *nmap) {
bool need_alloc = false, wasdelay = false;
@@ -4419,7 +4369,7 @@ xfs_bmapi_write(
* the refcount btree for orphan recovery.
*/
if (whichfork == XFS_COW_FORK) {
- error = xfs_refcount_alloc_cow_extent(mp, dfops,
+ error = xfs_refcount_alloc_cow_extent(tp,
bma.blkno, bma.length);
if (error)
goto error0;
@@ -4493,15 +4443,7 @@ error0:
xfs_trans_log_inode(tp, ip, bma.logflags);
if (bma.cur) {
- if (!error) {
- ASSERT(*firstblock == NULLFSBLOCK ||
- XFS_FSB_TO_AGNO(mp, *firstblock) <=
- XFS_FSB_TO_AGNO(mp,
- bma.cur->bc_private.b.firstblock));
- *firstblock = bma.cur->bc_private.b.firstblock;
- }
- xfs_btree_del_cursor(bma.cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(bma.cur, error);
}
if (!error)
xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
@@ -4516,13 +4458,11 @@ xfs_bmapi_remap(
xfs_fileoff_t bno,
xfs_filblks_t len,
xfs_fsblock_t startblock,
- struct xfs_defer_ops *dfops,
int flags)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
struct xfs_btree_cur *cur = NULL;
- xfs_fsblock_t firstblock = NULLFSBLOCK;
struct xfs_bmbt_irec got;
struct xfs_iext_cursor icur;
int whichfork = xfs_bmapi_whichfork(flags);
@@ -4565,8 +4505,6 @@ xfs_bmapi_remap(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = firstblock;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
}
@@ -4579,7 +4517,7 @@ xfs_bmapi_remap(
got.br_state = XFS_EXT_NORM;
error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
- &cur, &got, &firstblock, dfops, &logflags, flags);
+ &cur, &got, &logflags, flags);
if (error)
goto error0;
@@ -4599,10 +4537,8 @@ error0:
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
- if (cur) {
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
- }
+ if (cur)
+ xfs_btree_del_cursor(cur, error);
return error;
}
@@ -4898,7 +4834,6 @@ xfs_bmap_del_extent_real(
xfs_inode_t *ip, /* incore inode pointer */
xfs_trans_t *tp, /* current transaction pointer */
struct xfs_iext_cursor *icur,
- struct xfs_defer_ops *dfops, /* list of extents to be freed */
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *del, /* data to remove from extents */
int *logflagsp, /* inode logging flags */
@@ -4913,7 +4848,7 @@ xfs_bmap_del_extent_real(
struct xfs_bmbt_irec got; /* current extent entry */
xfs_fileoff_t got_endoff; /* first offset past got */
int i; /* temp state */
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_ifork *ifp; /* inode fork pointer */
xfs_mount_t *mp; /* mount structure */
xfs_filblks_t nblks; /* quota/sb block count */
xfs_bmbt_irec_t new; /* new record to be inserted */
@@ -5104,7 +5039,7 @@ xfs_bmap_del_extent_real(
}
/* remove reverse mapping */
- error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
+ error = xfs_rmap_unmap_extent(tp, ip, whichfork, del);
if (error)
goto done;
@@ -5113,11 +5048,11 @@ xfs_bmap_del_extent_real(
*/
if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
- error = xfs_refcount_decrease_extent(mp, dfops, del);
+ error = xfs_refcount_decrease_extent(tp, del);
if (error)
goto done;
} else {
- __xfs_bmap_add_free(mp, dfops, del->br_startblock,
+ __xfs_bmap_add_free(tp, del->br_startblock,
del->br_blockcount, NULL,
(bflags & XFS_BMAPI_NODISCARD) ||
del->br_state == XFS_EXT_UNWRITTEN);
@@ -5148,26 +5083,23 @@ done:
*/
int /* error */
__xfs_bunmapi(
- xfs_trans_t *tp, /* transaction pointer */
+ struct xfs_trans *tp, /* transaction pointer */
struct xfs_inode *ip, /* incore inode */
xfs_fileoff_t start, /* first file offset deleted */
xfs_filblks_t *rlen, /* i/o: amount remaining */
int flags, /* misc flags */
- xfs_extnum_t nexts, /* number of extents max */
- xfs_fsblock_t *firstblock, /* first allocated block
- controls a.g. for allocs */
- struct xfs_defer_ops *dfops) /* i/o: deferred updates */
+ xfs_extnum_t nexts) /* number of extents max */
{
- xfs_btree_cur_t *cur; /* bmap btree cursor */
- xfs_bmbt_irec_t del; /* extent being deleted */
+ struct xfs_btree_cur *cur; /* bmap btree cursor */
+ struct xfs_bmbt_irec del; /* extent being deleted */
int error; /* error return value */
xfs_extnum_t extno; /* extent number in list */
- xfs_bmbt_irec_t got; /* current extent record */
- xfs_ifork_t *ifp; /* inode fork pointer */
+ struct xfs_bmbt_irec got; /* current extent record */
+ struct xfs_ifork *ifp; /* inode fork pointer */
int isrt; /* freeing in rt area */
int logflags; /* transaction logging flags */
xfs_extlen_t mod; /* rt extent offset */
- xfs_mount_t *mp; /* mount structure */
+ struct xfs_mount *mp; /* mount structure */
int tmp_logflags; /* partial logging flags */
int wasdel; /* was a delayed alloc extent */
int whichfork; /* data or attribute fork */
@@ -5230,8 +5162,6 @@ __xfs_bunmapi(
if (ifp->if_flags & XFS_IFBROOT) {
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
} else
cur = NULL;
@@ -5347,7 +5277,7 @@ __xfs_bunmapi(
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp, ip,
whichfork, &icur, &cur, &del,
- firstblock, dfops, &logflags);
+ &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5404,8 +5334,7 @@ __xfs_bunmapi(
prev.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp,
ip, whichfork, &icur, &cur,
- &prev, firstblock, dfops,
- &logflags);
+ &prev, &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5414,8 +5343,7 @@ __xfs_bunmapi(
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp,
ip, whichfork, &icur, &cur,
- &del, firstblock, dfops,
- &logflags);
+ &del, &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5427,8 +5355,8 @@ delete:
error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
&got, &del);
} else {
- error = xfs_bmap_del_extent_real(ip, tp, &icur, dfops,
- cur, &del, &tmp_logflags, whichfork,
+ error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
+ &del, &tmp_logflags, whichfork,
flags);
logflags |= tmp_logflags;
}
@@ -5462,8 +5390,8 @@ nodelete:
*/
if (xfs_bmap_needs_btree(ip, whichfork)) {
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
- &cur, 0, &tmp_logflags, whichfork);
+ error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
+ &tmp_logflags, whichfork);
logflags |= tmp_logflags;
if (error)
goto error0;
@@ -5501,12 +5429,9 @@ error0:
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
if (cur) {
- if (!error) {
- *firstblock = cur->bc_private.b.firstblock;
+ if (!error)
cur->bc_private.b.allocated = 0;
- }
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
}
return error;
}
@@ -5520,14 +5445,11 @@ xfs_bunmapi(
xfs_filblks_t len,
int flags,
xfs_extnum_t nexts,
- xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops,
int *done)
{
int error;
- error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
- dfops);
+ error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
*done = (len == 0);
return error;
}
@@ -5570,6 +5492,7 @@ xfs_bmse_can_merge(
*/
STATIC int
xfs_bmse_merge(
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
xfs_fileoff_t shift, /* shift fsb */
@@ -5577,8 +5500,7 @@ xfs_bmse_merge(
struct xfs_bmbt_irec *got, /* extent to shift */
struct xfs_bmbt_irec *left, /* preceding extent */
struct xfs_btree_cur *cur,
- int *logflags, /* output */
- struct xfs_defer_ops *dfops)
+ int *logflags) /* output */
{
struct xfs_bmbt_irec new;
xfs_filblks_t blockcount;
@@ -5634,23 +5556,23 @@ done:
&new);
/* update reverse mapping. rmap functions merge the rmaps for us */
- error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
+ error = xfs_rmap_unmap_extent(tp, ip, whichfork, got);
if (error)
return error;
memcpy(&new, got, sizeof(new));
new.br_startoff = left->br_startoff + left->br_blockcount;
- return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
+ return xfs_rmap_map_extent(tp, ip, whichfork, &new);
}
static int
xfs_bmap_shift_update_extent(
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_iext_cursor *icur,
struct xfs_bmbt_irec *got,
struct xfs_btree_cur *cur,
int *logflags,
- struct xfs_defer_ops *dfops,
xfs_fileoff_t startoff)
{
struct xfs_mount *mp = ip->i_mount;
@@ -5678,10 +5600,10 @@ xfs_bmap_shift_update_extent(
got);
/* update reverse mapping */
- error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &prev);
+ error = xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
if (error)
return error;
- return xfs_rmap_map_extent(mp, dfops, ip, whichfork, got);
+ return xfs_rmap_map_extent(tp, ip, whichfork, got);
}
int
@@ -5690,9 +5612,7 @@ xfs_bmap_collapse_extents(
struct xfs_inode *ip,
xfs_fileoff_t *next_fsb,
xfs_fileoff_t offset_shift_fsb,
- bool *done,
- xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops)
+ bool *done)
{
int whichfork = XFS_DATA_FORK;
struct xfs_mount *mp = ip->i_mount;
@@ -5725,8 +5645,6 @@ xfs_bmap_collapse_extents(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
}
@@ -5745,9 +5663,9 @@ xfs_bmap_collapse_extents(
}
if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
- error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
- &icur, &got, &prev, cur, &logflags,
- dfops);
+ error = xfs_bmse_merge(tp, ip, whichfork,
+ offset_shift_fsb, &icur, &got, &prev,
+ cur, &logflags);
if (error)
goto del_cursor;
goto done;
@@ -5759,8 +5677,8 @@ xfs_bmap_collapse_extents(
}
}
- error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
- &logflags, dfops, new_startoff);
+ error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
+ cur, &logflags, new_startoff);
if (error)
goto del_cursor;
@@ -5773,8 +5691,7 @@ done:
*next_fsb = got.br_startoff;
del_cursor:
if (cur)
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
return error;
@@ -5813,9 +5730,7 @@ xfs_bmap_insert_extents(
xfs_fileoff_t *next_fsb,
xfs_fileoff_t offset_shift_fsb,
bool *done,
- xfs_fileoff_t stop_fsb,
- xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops)
+ xfs_fileoff_t stop_fsb)
{
int whichfork = XFS_DATA_FORK;
struct xfs_mount *mp = ip->i_mount;
@@ -5848,8 +5763,6 @@ xfs_bmap_insert_extents(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
}
@@ -5891,8 +5804,8 @@ xfs_bmap_insert_extents(
WARN_ON_ONCE(1);
}
- error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
- &logflags, dfops, new_startoff);
+ error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
+ cur, &logflags, new_startoff);
if (error)
goto del_cursor;
@@ -5905,8 +5818,7 @@ xfs_bmap_insert_extents(
*next_fsb = got.br_startoff;
del_cursor:
if (cur)
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
return error;
@@ -5922,9 +5834,7 @@ STATIC int
xfs_bmap_split_extent_at(
struct xfs_trans *tp,
struct xfs_inode *ip,
- xfs_fileoff_t split_fsb,
- xfs_fsblock_t *firstfsb,
- struct xfs_defer_ops *dfops)
+ xfs_fileoff_t split_fsb)
{
int whichfork = XFS_DATA_FORK;
struct xfs_btree_cur *cur = NULL;
@@ -5973,8 +5883,6 @@ xfs_bmap_split_extent_at(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
- cur->bc_private.b.firstblock = *firstfsb;
- cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
@@ -6018,16 +5926,15 @@ xfs_bmap_split_extent_at(
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
- &cur, 0, &tmp_logflags, whichfork);
+ error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
+ &tmp_logflags, whichfork);
logflags |= tmp_logflags;
}
del_cursor:
if (cur) {
cur->bc_private.b.allocated = 0;
- xfs_btree_del_cursor(cur,
- error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
}
if (logflags)
@@ -6042,8 +5949,6 @@ xfs_bmap_split_extent(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t firstfsb;
int error;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
@@ -6054,21 +5959,13 @@ xfs_bmap_split_extent(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_defer_init(&dfops, &firstfsb);
-
- error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
- &firstfsb, &dfops);
- if (error)
- goto out;
-
- error = xfs_defer_finish(&tp, &dfops);
+ error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
if (error)
goto out;
return xfs_trans_commit(tp);
out:
- xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
return error;
}
@@ -6085,20 +5982,18 @@ xfs_bmap_is_update_needed(
/* Record a bmap intent. */
static int
__xfs_bmap_add(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
enum xfs_bmap_intent_type type,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *bmap)
{
- int error;
struct xfs_bmap_intent *bi;
- trace_xfs_bmap_defer(mp,
- XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
+ trace_xfs_bmap_defer(tp->t_mountp,
+ XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
type,
- XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
+ XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
ip->i_ino, whichfork,
bmap->br_startoff,
bmap->br_blockcount,
@@ -6111,44 +6006,34 @@ __xfs_bmap_add(
bi->bi_whichfork = whichfork;
bi->bi_bmap = *bmap;
- error = xfs_defer_ijoin(dfops, bi->bi_owner);
- if (error) {
- kmem_free(bi);
- return error;
- }
-
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
+ xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
return 0;
}
/* Map an extent into a file. */
int
xfs_bmap_map_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_bmap_is_update_needed(PREV))
return 0;
- return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
- XFS_DATA_FORK, PREV);
+ return __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
}
/* Unmap an extent out of a file. */
int
xfs_bmap_unmap_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_bmbt_irec *PREV)
{
if (!xfs_bmap_is_update_needed(PREV))
return 0;
- return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
- XFS_DATA_FORK, PREV);
+ return __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
}
/*
@@ -6158,7 +6043,6 @@ xfs_bmap_unmap_extent(
int
xfs_bmap_finish_one(
struct xfs_trans *tp,
- struct xfs_defer_ops *dfops,
struct xfs_inode *ip,
enum xfs_bmap_intent_type type,
int whichfork,
@@ -6167,17 +6051,9 @@ xfs_bmap_finish_one(
xfs_filblks_t *blockcount,
xfs_exntst_t state)
{
- xfs_fsblock_t firstfsb;
int error = 0;
- /*
- * firstfsb is tied to the transaction lifetime and is used to
- * ensure correct AG locking order and schedule work item
- * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
- * to only making one bmap call per transaction, so it should
- * be safe to have it as a local variable here.
- */
- firstfsb = NULLFSBLOCK;
+ ASSERT(tp->t_firstblock == NULLFSBLOCK);
trace_xfs_bmap_deferred(tp->t_mountp,
XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
@@ -6194,12 +6070,12 @@ xfs_bmap_finish_one(
switch (type) {
case XFS_BMAP_MAP:
error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
- startblock, dfops, 0);
+ startblock, 0);
*blockcount = 0;
break;
case XFS_BMAP_UNMAP:
error = __xfs_bunmapi(tp, ip, startoff, blockcount,
- XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
+ XFS_BMAPI_REMAP, 1);
break;
default:
ASSERT(0);
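
The dominant pattern across this xfs_bmap.c hunk set is state relocation: the firstblock hint and the deferred-ops list, which every caller previously had to allocate and thread through as extra parameters, now live in the transaction itself (tp->t_firstblock, tp->t_dfops, and the XFS_TRANS_LOWMODE flag). A minimal stand-alone sketch of the idea, with invented types and an invented low-mode trigger, purely to show the call-shape change:

#include <stdint.h>
#include <stdio.h>

#define NULLFSBLOCK	((uint64_t)-1)
#define TRANS_LOWMODE	(1u << 0)

struct sketch_trans {
	uint64_t	t_firstblock;	/* AG-ordering hint, was a caller out-param */
	unsigned int	t_flags;	/* low-space mode, was dfops->dop_low */
};

/* After the refactor: a helper needs only the transaction pointer. */
static uint64_t alloc_block(struct sketch_trans *tp, uint64_t wanted)
{
	uint64_t fsbno = wanted;	/* pretend the allocator honored the hint */

	if (tp->t_firstblock == NULLFSBLOCK)
		tp->t_firstblock = fsbno;	/* first block of this transaction */
	else if (fsbno < tp->t_firstblock)
		tp->t_flags |= TRANS_LOWMODE;	/* invented trigger, stands in for
						 * the real AG-order fallback */
	return fsbno;
}

int main(void)
{
	struct sketch_trans tp = { .t_firstblock = NULLFSBLOCK };

	alloc_block(&tp, 100);
	alloc_block(&tp, 42);
	printf("firstblock=%llu low=%d\n",
	       (unsigned long long)tp.t_firstblock,
	       !!(tp.t_flags & TRANS_LOWMODE));
	return 0;
}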
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 9b49ddf99c41..b6e9b639e731 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -19,8 +19,6 @@ extern kmem_zone_t *xfs_bmap_free_item_zone;
* Argument structure for xfs_bmap_alloc.
*/
struct xfs_bmalloca {
- xfs_fsblock_t *firstblock; /* i/o first block allocated */
- struct xfs_defer_ops *dfops; /* bmap freelist */
struct xfs_trans *tp; /* transaction pointer */
struct xfs_inode *ip; /* incore inode pointer */
struct xfs_bmbt_irec prev; /* extent before the new one */
@@ -68,8 +66,6 @@ struct xfs_extent_free_item
#define XFS_BMAPI_METADATA 0x002 /* mapping metadata not user data */
#define XFS_BMAPI_ATTRFORK 0x004 /* use attribute fork not data */
#define XFS_BMAPI_PREALLOC 0x008 /* preallocation op: unwritten space */
-#define XFS_BMAPI_IGSTATE 0x010 /* Ignore state - */
- /* combine contig. space */
#define XFS_BMAPI_CONTIG 0x020 /* must allocate only one extent */
/*
* unwritten extent conversion - this needs write cache flushing and no additional
@@ -116,7 +112,6 @@ struct xfs_extent_free_item
{ XFS_BMAPI_METADATA, "METADATA" }, \
{ XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
{ XFS_BMAPI_PREALLOC, "PREALLOC" }, \
- { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
{ XFS_BMAPI_CONTIG, "CONTIG" }, \
{ XFS_BMAPI_CONVERT, "CONVERT" }, \
{ XFS_BMAPI_ZERO, "ZERO" }, \
@@ -189,9 +184,9 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
-void __xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- xfs_fsblock_t bno, xfs_filblks_t len,
- struct xfs_owner_info *oinfo, bool skip_discard);
+void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
+ xfs_filblks_t len, struct xfs_owner_info *oinfo,
+ bool skip_discard);
void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
@@ -205,17 +200,13 @@ int xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
int *nmap, int flags);
int xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
- xfs_fsblock_t *firstblock, xfs_extlen_t total,
- struct xfs_bmbt_irec *mval, int *nmap,
- struct xfs_defer_ops *dfops);
+ xfs_extlen_t total, struct xfs_bmbt_irec *mval, int *nmap);
int __xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t *rlen, int flags,
- xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops);
+ xfs_extnum_t nexts);
int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
- xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops, int *done);
+ xfs_extnum_t nexts, int *done);
int xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *got,
struct xfs_bmbt_irec *del);
@@ -225,14 +216,12 @@ void xfs_bmap_del_extent_cow(struct xfs_inode *ip,
uint xfs_default_attroffset(struct xfs_inode *ip);
int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
- bool *done, xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops);
+ bool *done);
int xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
xfs_fileoff_t shift);
int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
- bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops);
+ bool *done, xfs_fileoff_t stop_fsb);
int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
@@ -241,13 +230,12 @@ int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
static inline void
xfs_bmap_add_free(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_fsblock_t bno,
xfs_filblks_t len,
struct xfs_owner_info *oinfo)
{
- __xfs_bmap_add_free(mp, dfops, bno, len, oinfo, false);
+ __xfs_bmap_add_free(tp, bno, len, oinfo, false);
}
enum xfs_bmap_intent_type {
@@ -263,14 +251,14 @@ struct xfs_bmap_intent {
struct xfs_bmbt_irec bi_bmap;
};
-int xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_defer_ops *dfops,
- struct xfs_inode *ip, enum xfs_bmap_intent_type type,
- int whichfork, xfs_fileoff_t startoff, xfs_fsblock_t startblock,
+int xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_inode *ip,
+ enum xfs_bmap_intent_type type, int whichfork,
+ xfs_fileoff_t startoff, xfs_fsblock_t startblock,
xfs_filblks_t *blockcount, xfs_exntst_t state);
-int xfs_bmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
-int xfs_bmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
+int xfs_bmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap);
+int xfs_bmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap);
static inline int xfs_bmap_fork_to_state(int whichfork)
{
@@ -289,6 +277,6 @@ xfs_failaddr_t xfs_bmap_validate_extent(struct xfs_inode *ip, int whichfork,
int xfs_bmapi_remap(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, xfs_fsblock_t startblock,
- struct xfs_defer_ops *dfops, int flags);
+ int flags);
#endif /* __XFS_BMAP_H__ */
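
One small idiom worth noting in this header: __xfs_bmap_add_free() keeps the full parameter list while the xfs_bmap_add_free() static inline supplies the common default for skip_discard. A stand-alone sketch of that wrapper pattern, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static void __add_free(unsigned long long bno, unsigned long long len,
		       bool skip_discard)
{
	printf("free bno=%llu len=%llu skip_discard=%d\n", bno, len,
	       skip_discard);
}

/* Common case: callers that never skip discard use the short form. */
static inline void add_free(unsigned long long bno, unsigned long long len)
{
	__add_free(bno, len, false);
}

int main(void)
{
	add_free(10, 4);		/* default path */
	__add_free(20, 8, true);	/* explicit control when needed */
	return 0;
}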
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index e1a2d9ceb615..cdb74d2e2a43 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -175,8 +175,6 @@ xfs_bmbt_dup_cursor(
 * Copy the flags value, since init cursor doesn't get it.
*/
- new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
- new->bc_private.b.dfops = cur->bc_private.b.dfops;
new->bc_private.b.flags = cur->bc_private.b.flags;
return new;
@@ -187,12 +185,11 @@ xfs_bmbt_update_cursor(
struct xfs_btree_cur *src,
struct xfs_btree_cur *dst)
{
- ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
+ ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) ||
(dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
- ASSERT(dst->bc_private.b.dfops == src->bc_private.b.dfops);
dst->bc_private.b.allocated += src->bc_private.b.allocated;
- dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
+ dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock;
src->bc_private.b.allocated = 0;
}
@@ -210,8 +207,7 @@ xfs_bmbt_alloc_block(
memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
- args.fsbno = cur->bc_private.b.firstblock;
- args.firstblock = args.fsbno;
+ args.fsbno = cur->bc_tp->t_firstblock;
xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
cur->bc_private.b.whichfork);
@@ -230,7 +226,7 @@ xfs_bmbt_alloc_block(
* block allocation here and corrupt the filesystem.
*/
args.minleft = args.tp->t_blk_res;
- } else if (cur->bc_private.b.dfops->dop_low) {
+ } else if (cur->bc_tp->t_flags & XFS_TRANS_LOWMODE) {
args.type = XFS_ALLOCTYPE_START_BNO;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -257,7 +253,7 @@ xfs_bmbt_alloc_block(
error = xfs_alloc_vextent(&args);
if (error)
goto error0;
- cur->bc_private.b.dfops->dop_low = true;
+ cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
}
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
*stat = 0;
@@ -265,7 +261,7 @@ xfs_bmbt_alloc_block(
}
ASSERT(args.len == 1);
- cur->bc_private.b.firstblock = args.fsbno;
+ cur->bc_tp->t_firstblock = args.fsbno;
cur->bc_private.b.allocated++;
cur->bc_private.b.ip->i_d.di_nblocks++;
xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
@@ -293,7 +289,7 @@ xfs_bmbt_free_block(
struct xfs_owner_info oinfo;
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
- xfs_bmap_add_free(mp, cur->bc_private.b.dfops, fsbno, 1, &oinfo);
+ xfs_bmap_add_free(cur->bc_tp, fsbno, 1, &oinfo);
ip->i_d.di_nblocks--;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
@@ -564,8 +560,6 @@ xfs_bmbt_init_cursor(
cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
cur->bc_private.b.ip = ip;
- cur->bc_private.b.firstblock = NULLFSBLOCK;
- cur->bc_private.b.dfops = NULL;
cur->bc_private.b.allocated = 0;
cur->bc_private.b.flags = 0;
cur->bc_private.b.whichfork = whichfork;
@@ -645,7 +639,7 @@ xfs_bmbt_change_owner(
cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;
error = xfs_btree_change_owner(cur, new_owner, buffer_list);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
return error;
}
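
Several hunks above also collapse xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR) into xfs_btree_del_cursor(cur, error). Pushing the error/no-error decision into the callee removes a ternary from every call site; a stand-alone sketch with stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct cursor { int allocated; };

/* New-style teardown: any nonzero error selects the error path. */
static void del_cursor(struct cursor *cur, int error)
{
	if (error)
		fprintf(stderr, "discarding cursor state after error %d\n",
			error);
	free(cur);
}

int main(void)
{
	struct cursor *cur = calloc(1, sizeof(*cur));
	int error = 0;

	/* Old call sites: del_cursor(cur, error ? ERROR : NOERROR); */
	del_cursor(cur, error);		/* new: pass the error straight through */
	return 0;
}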
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 0a4fdf7f11a7..e3b3e9dce5da 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -7,7 +7,6 @@
#define __XFS_BTREE_H__
struct xfs_buf;
-struct xfs_defer_ops;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
@@ -209,14 +208,11 @@ typedef struct xfs_btree_cur
union {
struct { /* needed for BNO, CNT, INO */
struct xfs_buf *agbp; /* agf/agi buffer pointer */
- struct xfs_defer_ops *dfops; /* deferred updates */
xfs_agnumber_t agno; /* ag number */
union xfs_btree_cur_private priv;
} a;
struct { /* needed for BMAP */
struct xfs_inode *ip; /* pointer to our inode */
- struct xfs_defer_ops *dfops; /* deferred updates */
- xfs_fsblock_t firstblock; /* 1st blk allocated */
int allocated; /* count of alloced */
short forksize; /* fork's inode space */
char whichfork; /* data or attr fork */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 8a301402bbc4..376bee94b5dd 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -1481,6 +1481,7 @@ xfs_da3_node_lookup_int(
int error;
int retval;
unsigned int expected_level = 0;
+ uint16_t magic;
struct xfs_inode *dp = state->args->dp;
args = state->args;
@@ -1505,25 +1506,27 @@ xfs_da3_node_lookup_int(
return error;
}
curr = blk->bp->b_addr;
- blk->magic = be16_to_cpu(curr->magic);
+ magic = be16_to_cpu(curr->magic);
- if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
- blk->magic == XFS_ATTR3_LEAF_MAGIC) {
+ if (magic == XFS_ATTR_LEAF_MAGIC ||
+ magic == XFS_ATTR3_LEAF_MAGIC) {
blk->magic = XFS_ATTR_LEAF_MAGIC;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
break;
}
- if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
- blk->magic == XFS_DIR3_LEAFN_MAGIC) {
+ if (magic == XFS_DIR2_LEAFN_MAGIC ||
+ magic == XFS_DIR3_LEAFN_MAGIC) {
blk->magic = XFS_DIR2_LEAFN_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
blk->bp, NULL);
break;
}
- blk->magic = XFS_DA_NODE_MAGIC;
+ if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC)
+ return -EFSCORRUPTED;
+ blk->magic = XFS_DA_NODE_MAGIC;
/*
* Search an intermediate node for a match.
@@ -2059,11 +2062,9 @@ xfs_da_grow_inode_int(
* Try mapping it in one filesystem block.
*/
nmap = 1;
- ASSERT(args->firstblock != NULL);
error = xfs_bmapi_write(tp, dp, *bno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
- args->firstblock, args->total, &map, &nmap,
- args->dfops);
+ args->total, &map, &nmap);
if (error)
return error;
@@ -2085,8 +2086,7 @@ xfs_da_grow_inode_int(
c = (int)(*bno + count - b);
error = xfs_bmapi_write(tp, dp, b, c,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
- args->firstblock, args->total,
- &mapp[mapi], &nmap, args->dfops);
+ args->total, &mapp[mapi], &nmap);
if (error)
goto out_free_map;
if (nmap < 1)
@@ -2375,13 +2375,13 @@ done:
*/
int
xfs_da_shrink_inode(
- xfs_da_args_t *args,
- xfs_dablk_t dead_blkno,
- struct xfs_buf *dead_buf)
+ struct xfs_da_args *args,
+ xfs_dablk_t dead_blkno,
+ struct xfs_buf *dead_buf)
{
- xfs_inode_t *dp;
- int done, error, w, count;
- xfs_trans_t *tp;
+ struct xfs_inode *dp;
+ int done, error, w, count;
+ struct xfs_trans *tp;
trace_xfs_da_shrink_inode(args);
@@ -2395,8 +2395,7 @@ xfs_da_shrink_inode(
* the last block to the place we want to kill.
*/
error = xfs_bunmapi(tp, dp, dead_blkno, count,
- xfs_bmapi_aflag(w), 0, args->firstblock,
- args->dfops, &done);
+ xfs_bmapi_aflag(w), 0, &done);
if (error == -ENOSPC) {
if (w != XFS_DATA_FORK)
break;
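
The xfs_da3_node_lookup_int() change above is a validate-before-trust fix: the on-disk magic is read into a local, matched against every known value, and anything unrecognized now fails with -EFSCORRUPTED instead of being silently treated as a node block. A stand-alone sketch with made-up constants:

#include <stdint.h>
#include <stdio.h>

#define EFSCORRUPTED	117
#define ATTR_LEAF	0xfbee
#define DIR_LEAFN	0xd2ff
#define DA_NODE		0xfebe

static int classify_block(uint16_t magic)
{
	if (magic == ATTR_LEAF)
		return 1;		/* attribute leaf */
	if (magic == DIR_LEAFN)
		return 2;		/* directory leaf */
	if (magic != DA_NODE)
		return -EFSCORRUPTED;	/* unknown magic: don't guess */
	return 3;			/* intermediate node */
}

int main(void)
{
	printf("%d\n", classify_block(DA_NODE));	/* 3 */
	printf("%d\n", classify_block(0xdead));		/* -117 */
	return 0;
}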
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index 28260073ae71..84dd865b6c3d 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -7,7 +7,6 @@
#ifndef __XFS_DA_BTREE_H__
#define __XFS_DA_BTREE_H__
-struct xfs_defer_ops;
struct xfs_inode;
struct xfs_trans;
struct zone;
@@ -57,8 +56,6 @@ typedef struct xfs_da_args {
xfs_dahash_t hashval; /* hash value of name */
xfs_ino_t inumber; /* input/output inode number */
struct xfs_inode *dp; /* directory inode to manipulate */
- xfs_fsblock_t *firstblock; /* ptr to firstblock for bmap calls */
- struct xfs_defer_ops *dfops; /* ptr to freelist for bmap_finish */
struct xfs_trans *trans; /* current trans (changes over time) */
xfs_extlen_t total; /* total blocks needed, for 1st bmap */
int whichfork; /* data or attribute fork */
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index c3e5bffda4f5..e792b167150a 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -14,6 +14,9 @@
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_inode.h"
+#include "xfs_inode_item.h"
#include "xfs_trace.h"
/*
@@ -177,146 +180,157 @@ static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];
* the pending list.
*/
STATIC void
-xfs_defer_intake_work(
- struct xfs_trans *tp,
- struct xfs_defer_ops *dop)
+xfs_defer_create_intents(
+ struct xfs_trans *tp)
{
struct list_head *li;
struct xfs_defer_pending *dfp;
- list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
+ list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
dfp->dfp_count);
- trace_xfs_defer_intake_work(tp->t_mountp, dfp);
+ trace_xfs_defer_create_intent(tp->t_mountp, dfp);
list_sort(tp->t_mountp, &dfp->dfp_work,
dfp->dfp_type->diff_items);
list_for_each(li, &dfp->dfp_work)
dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
}
-
- list_splice_tail_init(&dop->dop_intake, &dop->dop_pending);
}
/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
- int error)
+ struct list_head *dop_pending)
{
struct xfs_defer_pending *dfp;
- trace_xfs_defer_trans_abort(tp->t_mountp, dop, _RET_IP_);
+ trace_xfs_defer_trans_abort(tp, _RET_IP_);
/* Abort intent items that don't have a done item. */
- list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
+ list_for_each_entry(dfp, dop_pending, dfp_list) {
trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
if (dfp->dfp_intent && !dfp->dfp_done) {
dfp->dfp_type->abort_intent(dfp->dfp_intent);
dfp->dfp_intent = NULL;
}
}
-
- /* Shut down FS. */
- xfs_force_shutdown(tp->t_mountp, (error == -EFSCORRUPTED) ?
- SHUTDOWN_CORRUPT_INCORE : SHUTDOWN_META_IO_ERROR);
}
/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
- struct xfs_trans **tp,
- struct xfs_defer_ops *dop)
+ struct xfs_trans **tpp)
{
+ struct xfs_trans *tp = *tpp;
+ struct xfs_buf_log_item *bli;
+ struct xfs_inode_log_item *ili;
+ struct xfs_log_item *lip;
+ struct xfs_buf *bplist[XFS_DEFER_OPS_NR_BUFS];
+ struct xfs_inode *iplist[XFS_DEFER_OPS_NR_INODES];
+ int bpcount = 0, ipcount = 0;
int i;
int error;
- /* Log all the joined inodes. */
- for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
- xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);
-
- /* Hold the (previously bjoin'd) buffer locked across the roll. */
- for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
- xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);
+ list_for_each_entry(lip, &tp->t_items, li_trans) {
+ switch (lip->li_type) {
+ case XFS_LI_BUF:
+ bli = container_of(lip, struct xfs_buf_log_item,
+ bli_item);
+ if (bli->bli_flags & XFS_BLI_HOLD) {
+ if (bpcount >= XFS_DEFER_OPS_NR_BUFS) {
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+ xfs_trans_dirty_buf(tp, bli->bli_buf);
+ bplist[bpcount++] = bli->bli_buf;
+ }
+ break;
+ case XFS_LI_INODE:
+ ili = container_of(lip, struct xfs_inode_log_item,
+ ili_item);
+ if (ili->ili_lock_flags == 0) {
+ if (ipcount >= XFS_DEFER_OPS_NR_INODES) {
+ ASSERT(0);
+ return -EFSCORRUPTED;
+ }
+ xfs_trans_log_inode(tp, ili->ili_inode,
+ XFS_ILOG_CORE);
+ iplist[ipcount++] = ili->ili_inode;
+ }
+ break;
+ default:
+ break;
+ }
+ }
- trace_xfs_defer_trans_roll((*tp)->t_mountp, dop, _RET_IP_);
+ trace_xfs_defer_trans_roll(tp, _RET_IP_);
/* Roll the transaction. */
- error = xfs_trans_roll(tp);
+ error = xfs_trans_roll(tpp);
+ tp = *tpp;
if (error) {
- trace_xfs_defer_trans_roll_error((*tp)->t_mountp, dop, error);
- xfs_defer_trans_abort(*tp, dop, error);
+ trace_xfs_defer_trans_roll_error(tp, error);
return error;
}
- dop->dop_committed = true;
/* Rejoin the joined inodes. */
- for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
- xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);
+ for (i = 0; i < ipcount; i++)
+ xfs_trans_ijoin(tp, iplist[i], 0);
/* Rejoin the buffers and dirty them so the log moves forward. */
- for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
- xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
- xfs_trans_bhold(*tp, dop->dop_bufs[i]);
+ for (i = 0; i < bpcount; i++) {
+ xfs_trans_bjoin(tp, bplist[i]);
+ xfs_trans_bhold(tp, bplist[i]);
}
return error;
}
-/* Do we have any work items to finish? */
-bool
-xfs_defer_has_unfinished_work(
- struct xfs_defer_ops *dop)
-{
- return !list_empty(&dop->dop_pending) || !list_empty(&dop->dop_intake);
-}
-
/*
- * Add this inode to the deferred op. Each joined inode is relogged
- * each time we roll the transaction.
+ * Reset an already used dfops after finish.
*/
-int
-xfs_defer_ijoin(
- struct xfs_defer_ops *dop,
- struct xfs_inode *ip)
+static void
+xfs_defer_reset(
+ struct xfs_trans *tp)
{
- int i;
-
- for (i = 0; i < XFS_DEFER_OPS_NR_INODES; i++) {
- if (dop->dop_inodes[i] == ip)
- return 0;
- else if (dop->dop_inodes[i] == NULL) {
- dop->dop_inodes[i] = ip;
- return 0;
- }
- }
+ ASSERT(list_empty(&tp->t_dfops));
- ASSERT(0);
- return -EFSCORRUPTED;
+ /*
+ * Low mode state transfers across transaction rolls to mirror dfops
+ * lifetime. Clear it now that dfops is reset.
+ */
+ tp->t_flags &= ~XFS_TRANS_LOWMODE;
}
/*
- * Add this buffer to the deferred op. Each joined buffer is relogged
- * each time we roll the transaction.
+ * Free up any items left in the list.
*/
-int
-xfs_defer_bjoin(
- struct xfs_defer_ops *dop,
- struct xfs_buf *bp)
+static void
+xfs_defer_cancel_list(
+ struct xfs_mount *mp,
+ struct list_head *dop_list)
{
- int i;
+ struct xfs_defer_pending *dfp;
+ struct xfs_defer_pending *pli;
+ struct list_head *pwi;
+ struct list_head *n;
- for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
- if (dop->dop_bufs[i] == bp)
- return 0;
- else if (dop->dop_bufs[i] == NULL) {
- dop->dop_bufs[i] = bp;
- return 0;
+ /*
+ * Free the pending items. Caller should already have arranged
+ * for the intent items to be released.
+ */
+ list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
+ trace_xfs_defer_cancel_list(mp, dfp);
+ list_del(&dfp->dfp_list);
+ list_for_each_safe(pwi, n, &dfp->dfp_work) {
+ list_del(pwi);
+ dfp->dfp_count--;
+ dfp->dfp_type->cancel_item(pwi);
}
+ ASSERT(dfp->dfp_count == 0);
+ kmem_free(dfp);
}
-
- ASSERT(0);
- return -EFSCORRUPTED;
}
/*
@@ -328,9 +342,8 @@ xfs_defer_bjoin(
* If an inode is provided, relog it to the new transaction.
*/
int
-xfs_defer_finish(
- struct xfs_trans **tp,
- struct xfs_defer_ops *dop)
+xfs_defer_finish_noroll(
+ struct xfs_trans **tp)
{
struct xfs_defer_pending *dfp;
struct list_head *li;
@@ -338,35 +351,28 @@ xfs_defer_finish(
void *state;
int error = 0;
void (*cleanup_fn)(struct xfs_trans *, void *, int);
- struct xfs_defer_ops *orig_dop;
+ LIST_HEAD(dop_pending);
ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
- trace_xfs_defer_finish((*tp)->t_mountp, dop, _RET_IP_);
-
- /*
- * Attach dfops to the transaction during deferred ops processing. This
- * explicitly causes calls into the allocator to defer AGFL block frees.
- * Note that this code can go away once all dfops users attach to the
- * associated tp.
- */
- ASSERT(!(*tp)->t_agfl_dfops || ((*tp)->t_agfl_dfops == dop));
- orig_dop = (*tp)->t_agfl_dfops;
- (*tp)->t_agfl_dfops = dop;
+ trace_xfs_defer_finish(*tp, _RET_IP_);
/* Until we run out of pending work to finish... */
- while (xfs_defer_has_unfinished_work(dop)) {
- /* Log intents for work items sitting in the intake. */
- xfs_defer_intake_work(*tp, dop);
-
- /* Roll the transaction. */
- error = xfs_defer_trans_roll(tp, dop);
+ while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
+ /* log intents and pull in intake items */
+ xfs_defer_create_intents(*tp);
+ list_splice_tail_init(&(*tp)->t_dfops, &dop_pending);
+
+ /*
+ * Roll the transaction.
+ */
+ error = xfs_defer_trans_roll(tp);
if (error)
goto out;
/* Log an intent-done item for the first pending item. */
- dfp = list_first_entry(&dop->dop_pending,
- struct xfs_defer_pending, dfp_list);
+ dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
+ dfp_list);
trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
dfp->dfp_count);
@@ -377,7 +383,7 @@ xfs_defer_finish(
list_for_each_safe(li, n, &dfp->dfp_work) {
list_del(li);
dfp->dfp_count--;
- error = dfp->dfp_type->finish_item(*tp, dop, li,
+ error = dfp->dfp_type->finish_item(*tp, li,
dfp->dfp_done, &state);
if (error == -EAGAIN) {
/*
@@ -396,7 +402,6 @@ xfs_defer_finish(
*/
if (cleanup_fn)
cleanup_fn(*tp, state, error);
- xfs_defer_trans_abort(*tp, dop, error);
goto out;
}
}
@@ -425,72 +430,72 @@ xfs_defer_finish(
}
out:
- (*tp)->t_agfl_dfops = orig_dop;
- if (error)
- trace_xfs_defer_finish_error((*tp)->t_mountp, dop, error);
- else
- trace_xfs_defer_finish_done((*tp)->t_mountp, dop, _RET_IP_);
- return error;
+ if (error) {
+ xfs_defer_trans_abort(*tp, &dop_pending);
+ xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
+ trace_xfs_defer_finish_error(*tp, error);
+ xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
+ xfs_defer_cancel(*tp);
+ return error;
+ }
+
+ trace_xfs_defer_finish_done(*tp, _RET_IP_);
+ return 0;
}
-/*
- * Free up any items left in the list.
- */
-void
-xfs_defer_cancel(
- struct xfs_defer_ops *dop)
+int
+xfs_defer_finish(
+ struct xfs_trans **tp)
{
- struct xfs_defer_pending *dfp;
- struct xfs_defer_pending *pli;
- struct list_head *pwi;
- struct list_head *n;
-
- trace_xfs_defer_cancel(NULL, dop, _RET_IP_);
+ int error;
/*
- * Free the pending items. Caller should already have arranged
- * for the intent items to be released.
+ * Finish and roll the transaction once more to avoid returning to the
+ * caller with a dirty transaction.
*/
- list_for_each_entry_safe(dfp, pli, &dop->dop_intake, dfp_list) {
- trace_xfs_defer_intake_cancel(NULL, dfp);
- list_del(&dfp->dfp_list);
- list_for_each_safe(pwi, n, &dfp->dfp_work) {
- list_del(pwi);
- dfp->dfp_count--;
- dfp->dfp_type->cancel_item(pwi);
- }
- ASSERT(dfp->dfp_count == 0);
- kmem_free(dfp);
- }
- list_for_each_entry_safe(dfp, pli, &dop->dop_pending, dfp_list) {
- trace_xfs_defer_pending_cancel(NULL, dfp);
- list_del(&dfp->dfp_list);
- list_for_each_safe(pwi, n, &dfp->dfp_work) {
- list_del(pwi);
- dfp->dfp_count--;
- dfp->dfp_type->cancel_item(pwi);
+ error = xfs_defer_finish_noroll(tp);
+ if (error)
+ return error;
+ if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
+ error = xfs_defer_trans_roll(tp);
+ if (error) {
+ xfs_force_shutdown((*tp)->t_mountp,
+ SHUTDOWN_CORRUPT_INCORE);
+ return error;
}
- ASSERT(dfp->dfp_count == 0);
- kmem_free(dfp);
}
+ xfs_defer_reset(*tp);
+ return 0;
+}
+
+void
+xfs_defer_cancel(
+ struct xfs_trans *tp)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+
+ trace_xfs_defer_cancel(tp, _RET_IP_);
+ xfs_defer_cancel_list(mp, &tp->t_dfops);
}
/* Add an item for later deferred processing. */
void
xfs_defer_add(
- struct xfs_defer_ops *dop,
+ struct xfs_trans *tp,
enum xfs_defer_ops_type type,
struct list_head *li)
{
struct xfs_defer_pending *dfp = NULL;
+ ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+
/*
* Add the item to a pending item at the end of the intake list.
* If the last pending item has the same type, reuse it. Else,
* create a new pending item at the end of the intake list.
*/
- if (!list_empty(&dop->dop_intake)) {
- dfp = list_last_entry(&dop->dop_intake,
+ if (!list_empty(&tp->t_dfops)) {
+ dfp = list_last_entry(&tp->t_dfops,
struct xfs_defer_pending, dfp_list);
if (dfp->dfp_type->type != type ||
(dfp->dfp_type->max_items &&
@@ -505,7 +510,7 @@ xfs_defer_add(
dfp->dfp_done = NULL;
dfp->dfp_count = 0;
INIT_LIST_HEAD(&dfp->dfp_work);
- list_add_tail(&dfp->dfp_list, &dop->dop_intake);
+ list_add_tail(&dfp->dfp_list, &tp->t_dfops);
}
list_add_tail(li, &dfp->dfp_work);
@@ -520,15 +525,25 @@ xfs_defer_init_op_type(
defer_op_types[type->type] = type;
}
-/* Initialize a deferred operation. */
+/*
+ * Move deferred ops from one transaction to another and reset the source to
+ * initial state. This is primarily used to carry state forward across
+ * transaction rolls with pending dfops.
+ */
void
-xfs_defer_init(
- struct xfs_defer_ops *dop,
- xfs_fsblock_t *fbp)
+xfs_defer_move(
+ struct xfs_trans *dtp,
+ struct xfs_trans *stp)
{
- memset(dop, 0, sizeof(struct xfs_defer_ops));
- *fbp = NULLFSBLOCK;
- INIT_LIST_HEAD(&dop->dop_intake);
- INIT_LIST_HEAD(&dop->dop_pending);
- trace_xfs_defer_init(NULL, dop, _RET_IP_);
+ list_splice_init(&stp->t_dfops, &dtp->t_dfops);
+
+ /*
+ * Low free space mode was historically controlled by a dfops field.
+ * This meant that low mode state potentially carried across multiple
+ * transaction rolls. Transfer low mode on a dfops move to preserve
+ * that behavior.
+ */
+ dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);
+
+ xfs_defer_reset(stp);
}
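
The reworked xfs_defer_finish_noroll() above drains work in a loop: log intents for newly queued items, splice them from the transaction's intake list onto a local pending list, roll the transaction, then finish the head pending item -- and repeat, because finishing one item (say, an unmap) can queue more work (frees, rmap updates). A stand-alone sketch of that drain loop, with a plain array queue standing in for the intent lists and the transaction roll elided:

#include <stdio.h>

#define MAXQ 16

struct queue { int items[MAXQ]; int n; };

static void splice(struct queue *dst, struct queue *src)
{
	for (int i = 0; i < src->n; i++)
		dst->items[dst->n++] = src->items[i];
	src->n = 0;
}

/* Finishing an item may defer more work, like a bunmapi queueing frees. */
static void finish_item(int item, struct queue *intake)
{
	printf("finish %d\n", item);
	if (item == 1 && intake->n < MAXQ)
		intake->items[intake->n++] = 99;	/* follow-on work */
}

int main(void)
{
	struct queue intake = { {1, 2}, 2 }, pending = { {0}, 0 };

	while (intake.n || pending.n) {
		splice(&pending, &intake);	/* "create intents" + splice */
		/* a transaction roll would happen here */
		int head = pending.items[0];
		for (int i = 1; i < pending.n; i++)
			pending.items[i - 1] = pending.items[i];
		pending.n--;
		finish_item(head, &intake);
	}
	return 0;
}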
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index a02b2b748b6d..2584a5b95b0d 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -24,17 +24,6 @@ struct xfs_defer_pending {
/*
* Header for deferred operation list.
- *
- * dop_low is used by the allocator to activate the lowspace algorithm -
- * when free space is running low the extent allocator may choose to
- * allocate an extent from an AG without leaving sufficient space for
- * a btree split when inserting the new extent. In this case the allocator
- * will enable the lowspace algorithm which is supposed to allow further
- * allocations (such as btree splits and newroots) to allocate from
- * sequential AGs. In order to avoid locking AGs out of order the lowspace
- * algorithm will start searching for free space from AG 0. If the correct
- * transaction reservations have been made then this algorithm will eventually
- * find all the space it needs.
*/
enum xfs_defer_ops_type {
XFS_DEFER_OPS_TYPE_BMAP,
@@ -45,28 +34,12 @@ enum xfs_defer_ops_type {
XFS_DEFER_OPS_TYPE_MAX,
};
-#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
-#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */
-
-struct xfs_defer_ops {
- bool dop_committed; /* did any trans commit? */
- bool dop_low; /* alloc in low mode */
- struct list_head dop_intake; /* unlogged pending work */
- struct list_head dop_pending; /* logged pending work */
-
- /* relog these with each roll */
- struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES];
- struct xfs_buf *dop_bufs[XFS_DEFER_OPS_NR_BUFS];
-};
-
-void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
+void xfs_defer_add(struct xfs_trans *tp, enum xfs_defer_ops_type type,
struct list_head *h);
-int xfs_defer_finish(struct xfs_trans **tp, struct xfs_defer_ops *dop);
-void xfs_defer_cancel(struct xfs_defer_ops *dop);
-void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
-bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
-int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
-int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
+int xfs_defer_finish_noroll(struct xfs_trans **tp);
+int xfs_defer_finish(struct xfs_trans **tp);
+void xfs_defer_cancel(struct xfs_trans *);
+void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp);
/* Description of a deferred type. */
struct xfs_defer_op_type {
@@ -74,8 +47,8 @@ struct xfs_defer_op_type {
unsigned int max_items;
void (*abort_intent)(void *);
void *(*create_done)(struct xfs_trans *, void *, unsigned int);
- int (*finish_item)(struct xfs_trans *, struct xfs_defer_ops *,
- struct list_head *, void *, void **);
+ int (*finish_item)(struct xfs_trans *, struct list_head *, void *,
+ void **);
void (*finish_cleanup)(struct xfs_trans *, void *, int);
void (*cancel_item)(struct list_head *);
int (*diff_items)(void *, struct list_head *, struct list_head *);
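
struct xfs_defer_op_type is effectively a vtable: the generic deferral code dispatches through per-type callbacks (create_intent, finish_item, cancel_item, ...) without knowing the concrete work item. A stand-alone sketch of the pattern, with invented names and payloads:

#include <stdio.h>

struct op_type {
	const char	*name;
	int		(*finish_item)(int payload);
	void		(*cancel_item)(int payload);
};

static int free_finish(int payload)
{
	printf("freeing extent %d\n", payload);
	return 0;
}

static void free_cancel(int payload)
{
	printf("dropping queued free %d\n", payload);
}

static const struct op_type free_ops = {
	.name		= "extent_free",
	.finish_item	= free_finish,
	.cancel_item	= free_cancel,
};

/* Generic code only ever sees the vtable, never the concrete type. */
static int run_one(const struct op_type *ops, int payload, int cancel)
{
	if (cancel) {
		ops->cancel_item(payload);
		return 0;
	}
	return ops->finish_item(payload);
}

int main(void)
{
	run_one(&free_ops, 7, 0);
	run_one(&free_ops, 8, 1);
	return 0;
}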
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 59169aff30fe..229152cd1a24 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -239,12 +239,10 @@ xfs_dir_init(
*/
int
xfs_dir_createname(
- xfs_trans_t *tp,
- xfs_inode_t *dp,
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
struct xfs_name *name,
xfs_ino_t inum, /* new entry inode number */
- xfs_fsblock_t *first, /* bmap's firstblock */
- struct xfs_defer_ops *dfops, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
@@ -252,6 +250,7 @@ xfs_dir_createname(
int v; /* type-checking value */
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
+
if (inum) {
rval = xfs_dir_ino_validate(tp->t_mountp, inum);
if (rval)
@@ -270,8 +269,6 @@ xfs_dir_createname(
args->hashval = dp->i_mount->m_dirnameops->hashname(name);
args->inumber = inum;
args->dp = dp;
- args->firstblock = first;
- args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -416,17 +413,15 @@ out_free:
*/
int
xfs_dir_removename(
- xfs_trans_t *tp,
- xfs_inode_t *dp,
- struct xfs_name *name,
- xfs_ino_t ino,
- xfs_fsblock_t *first, /* bmap's firstblock */
- struct xfs_defer_ops *dfops, /* bmap's freeblock list */
- xfs_extlen_t total) /* bmap's total block count */
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
+ struct xfs_name *name,
+ xfs_ino_t ino,
+ xfs_extlen_t total) /* bmap's total block count */
{
- struct xfs_da_args *args;
- int rval;
- int v; /* type-checking value */
+ struct xfs_da_args *args;
+ int rval;
+ int v; /* type-checking value */
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
XFS_STATS_INC(dp->i_mount, xs_dir_remove);
@@ -442,8 +437,6 @@ xfs_dir_removename(
args->hashval = dp->i_mount->m_dirnameops->hashname(name);
args->inumber = ino;
args->dp = dp;
- args->firstblock = first;
- args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -478,17 +471,15 @@ out_free:
*/
int
xfs_dir_replace(
- xfs_trans_t *tp,
- xfs_inode_t *dp,
- struct xfs_name *name, /* name of entry to replace */
- xfs_ino_t inum, /* new inode number */
- xfs_fsblock_t *first, /* bmap's firstblock */
- struct xfs_defer_ops *dfops, /* bmap's freeblock list */
- xfs_extlen_t total) /* bmap's total block count */
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
+ struct xfs_name *name, /* name of entry to replace */
+ xfs_ino_t inum, /* new inode number */
+ xfs_extlen_t total) /* bmap's total block count */
{
- struct xfs_da_args *args;
- int rval;
- int v; /* type-checking value */
+ struct xfs_da_args *args;
+ int rval;
+ int v; /* type-checking value */
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
@@ -507,8 +498,6 @@ xfs_dir_replace(
args->hashval = dp->i_mount->m_dirnameops->hashname(name);
args->inumber = inum;
args->dp = dp;
- args->firstblock = first;
- args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -547,7 +536,7 @@ xfs_dir_canenter(
xfs_inode_t *dp,
struct xfs_name *name) /* name of entry to add */
{
- return xfs_dir_createname(tp, dp, name, 0, NULL, NULL, 0);
+ return xfs_dir_createname(tp, dp, name, 0, 0);
}
/*
@@ -645,17 +634,17 @@ xfs_dir2_isleaf(
*/
int
xfs_dir2_shrink_inode(
- xfs_da_args_t *args,
- xfs_dir2_db_t db,
- struct xfs_buf *bp)
+ struct xfs_da_args *args,
+ xfs_dir2_db_t db,
+ struct xfs_buf *bp)
{
- xfs_fileoff_t bno; /* directory file offset */
- xfs_dablk_t da; /* directory file offset */
- int done; /* bunmap is finished */
- xfs_inode_t *dp;
- int error;
- xfs_mount_t *mp;
- xfs_trans_t *tp;
+ xfs_fileoff_t bno; /* directory file offset */
+ xfs_dablk_t da; /* directory file offset */
+ int done; /* bunmap is finished */
+ struct xfs_inode *dp;
+ int error;
+ struct xfs_mount *mp;
+ struct xfs_trans *tp;
trace_xfs_dir2_shrink_inode(args, db);
@@ -665,8 +654,7 @@ xfs_dir2_shrink_inode(
da = xfs_dir2_db_to_da(args->geo, db);
/* Unmap the fsblock(s). */
- error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount, 0, 0,
- args->firstblock, args->dfops, &done);
+ error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount, 0, 0, &done);
if (error) {
/*
* ENOSPC actually can happen if we're in a removename with no
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index ed385316c7dc..c3e3f6b813d8 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -9,7 +9,6 @@
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-struct xfs_defer_ops;
struct xfs_da_args;
struct xfs_inode;
struct xfs_mount;
@@ -118,19 +117,16 @@ extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_inode *pdp);
extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
- xfs_fsblock_t *first,
- struct xfs_defer_ops *dfops, xfs_extlen_t tot);
+ xfs_extlen_t tot);
extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t *inum,
struct xfs_name *ci_name);
extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t ino,
- xfs_fsblock_t *first,
- struct xfs_defer_ops *dfops, xfs_extlen_t tot);
+ xfs_extlen_t tot);
extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
- xfs_fsblock_t *first,
- struct xfs_defer_ops *dfops, xfs_extlen_t tot);
+ xfs_extlen_t tot);
extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name);
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 2daf874969ab..f1bb3434f51c 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -1012,7 +1012,7 @@ xfs_dir2_leafn_rebalance(
int oldstale; /* old count of stale leaves */
#endif
int oldsum; /* old total leaf count */
- int swap; /* swapped leaf blocks */
+ int swap_blocks; /* swapped leaf blocks */
struct xfs_dir2_leaf_entry *ents1;
struct xfs_dir2_leaf_entry *ents2;
struct xfs_dir3_icleaf_hdr hdr1;
@@ -1023,13 +1023,10 @@ xfs_dir2_leafn_rebalance(
/*
* If the block order is wrong, swap the arguments.
*/
- if ((swap = xfs_dir2_leafn_order(dp, blk1->bp, blk2->bp))) {
- xfs_da_state_blk_t *tmp; /* temp for block swap */
+ swap_blocks = xfs_dir2_leafn_order(dp, blk1->bp, blk2->bp);
+ if (swap_blocks)
+ swap(blk1, blk2);
- tmp = blk1;
- blk1 = blk2;
- blk2 = tmp;
- }
leaf1 = blk1->bp->b_addr;
leaf2 = blk2->bp->b_addr;
dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
@@ -1093,11 +1090,11 @@ xfs_dir2_leafn_rebalance(
* Mark whether we're inserting into the old or new leaf.
*/
if (hdr1.count < hdr2.count)
- state->inleaf = swap;
+ state->inleaf = swap_blocks;
else if (hdr1.count > hdr2.count)
- state->inleaf = !swap;
+ state->inleaf = !swap_blocks;
else
- state->inleaf = swap ^ (blk1->index <= hdr1.count);
+ state->inleaf = swap_blocks ^ (blk1->index <= hdr1.count);
/*
* Adjust the expected index for insertion.
*/
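
The xfs_dir2_leafn_rebalance() hunk replaces an open-coded three-line temporary swap with the kernel's swap() helper. A stand-alone equivalent of that macro (GNU C typeof, as in the kernel):

#include <stdio.h>

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct blk { int index; };

int main(void)
{
	struct blk b1 = { 1 }, b2 = { 2 };
	struct blk *blk1 = &b1, *blk2 = &b2;

	if (blk2->index < blk1->index)	/* blocks out of order? */
		swap(blk1, blk2);
	printf("%d %d\n", blk1->index, blk2->index);	/* 1 2 */
	return 0;
}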
diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h
index b9974e7a8e6e..66077a105cbb 100644
--- a/fs/xfs/libxfs/xfs_errortag.h
+++ b/fs/xfs/libxfs/xfs_errortag.h
@@ -53,7 +53,8 @@
#define XFS_ERRTAG_LOG_ITEM_PIN 30
#define XFS_ERRTAG_BUF_LRU_REF 31
#define XFS_ERRTAG_FORCE_SCRUB_REPAIR 32
-#define XFS_ERRTAG_MAX 33
+#define XFS_ERRTAG_FORCE_SUMMARY_RECALC 33
+#define XFS_ERRTAG_MAX 34
/*
* Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -91,5 +92,6 @@
#define XFS_RANDOM_LOG_ITEM_PIN 1
#define XFS_RANDOM_BUF_LRU_REF 2
#define XFS_RANDOM_FORCE_SCRUB_REPAIR 1
+#define XFS_RANDOM_FORCE_SUMMARY_RECALC 1
#endif /* __XFS_ERRORTAG_H_ */
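
For context on the new XFS_RANDOM_FORCE_SUMMARY_RECALC value: a tag's random factor of N makes the injected error fire roughly once per N evaluations, so 1 means always and 2 means half the time. A stand-alone sketch, with rand() standing in for the kernel's pseudo-random helper:

#include <stdio.h>
#include <stdlib.h>

static int should_inject(unsigned int factor)
{
	if (factor == 0)
		return 0;		/* tag disabled */
	return rand() % factor == 0;	/* 1 => always, 2 => half the time */
}

int main(void)
{
	int hits = 0;

	srand(1);
	for (int i = 0; i < 1000; i++)
		hits += should_inject(2);
	printf("injected %d/1000 (expect ~500)\n", hits);
	return 0;
}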
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 0d968e8143aa..a8f6db735d5d 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1838,23 +1838,24 @@ out_error:
*/
STATIC void
xfs_difree_inode_chunk(
- struct xfs_mount *mp,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
- struct xfs_inobt_rec_incore *rec,
- struct xfs_defer_ops *dfops)
+ struct xfs_inobt_rec_incore *rec)
{
- xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, rec->ir_startino);
- int startidx, endidx;
- int nextbit;
- xfs_agblock_t agbno;
- int contigblk;
- struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp,
+ rec->ir_startino);
+ int startidx, endidx;
+ int nextbit;
+ xfs_agblock_t agbno;
+ int contigblk;
+ struct xfs_owner_info oinfo;
DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
if (!xfs_inobt_issparse(rec->ir_holemask)) {
/* not sparse, calculate extent info directly */
- xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, sagbno),
+ xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
mp->m_ialloc_blks, &oinfo);
return;
}
@@ -1898,7 +1899,7 @@ xfs_difree_inode_chunk(
ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
- xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, agbno),
+ xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
contigblk, &oinfo);
/* reset range to current bit and carry on... */
@@ -1915,7 +1916,6 @@ xfs_difree_inobt(
struct xfs_trans *tp,
struct xfs_buf *agbp,
xfs_agino_t agino,
- struct xfs_defer_ops *dfops,
struct xfs_icluster *xic,
struct xfs_inobt_rec_incore *orec)
{
@@ -2003,7 +2003,7 @@ xfs_difree_inobt(
goto error0;
}
- xfs_difree_inode_chunk(mp, agno, &rec, dfops);
+ xfs_difree_inode_chunk(tp, agno, &rec);
} else {
xic->deleted = false;
@@ -2148,7 +2148,6 @@ int
xfs_difree(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t inode, /* inode to be freed */
- struct xfs_defer_ops *dfops, /* extents to free */
struct xfs_icluster *xic) /* cluster info if deleted */
{
/* REFERENCED */
@@ -2200,7 +2199,7 @@ xfs_difree(
/*
* Fix up the inode allocation btree.
*/
- error = xfs_difree_inobt(mp, tp, agbp, agino, dfops, xic, &rec);
+ error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
if (error)
goto error0;
@@ -2260,7 +2259,7 @@ xfs_imap_lookup(
}
xfs_trans_brelse(tp, agbp);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
if (error)
return error;
@@ -2539,7 +2538,7 @@ xfs_agi_verify(
return __this_address;
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
- if (agi->agi_unlinked[i] == NULLAGINO)
+ if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
continue;
if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
return __this_address;
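
Most of the hunks in this file follow one refactoring pattern: helpers stop taking a struct xfs_mount plus a separate struct xfs_defer_ops and instead take only the transaction, deriving the mount via tp->t_mountp. A minimal sketch of that pattern with stand-in struct definitions (not the real xfs types):

/* Sketch of the "carry context in the transaction" refactor. */
#include <stdio.h>

struct mount { const char *name; };
struct trans { struct mount *t_mountp; };       /* like xfs_trans::t_mountp */

/* old style: every helper threaded both objects through */
static void free_extent_old(struct mount *mp, struct trans *tp, long fsbno)
{
        (void)tp;
        printf("%s: free block %ld\n", mp->name, fsbno);
}

/* new style: pass only the transaction, derive the mount from it */
static void free_extent_new(struct trans *tp, long fsbno)
{
        struct mount *mp = tp->t_mountp;

        printf("%s: free block %ld\n", mp->name, fsbno);
}

int main(void)
{
        struct mount m = { "xfs" };
        struct trans t = { &m };

        free_extent_old(&m, &t, 42);
        free_extent_new(&t, 42);
        return 0;
}

Shrinking every signature by two parameters is the visible payoff; the underlying change is that the deferred-ops state now lives inside the transaction itself.
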
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 90b09c5f163b..e936b7cc9389 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -82,7 +82,6 @@ int /* error */
xfs_difree(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t inode, /* inode to be freed */
- struct xfs_defer_ops *dfops, /* extents to free */
struct xfs_icluster *ifree); /* cluster info if deleted */
/*
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index a5237afec5ab..86c50208a143 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -552,6 +552,7 @@ xfs_inobt_max_size(
static int
xfs_inobt_count_blocks(
struct xfs_mount *mp,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_btnum_t btnum,
xfs_extlen_t *tree_blocks)
@@ -560,14 +561,14 @@ xfs_inobt_count_blocks(
struct xfs_btree_cur *cur;
int error;
- error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+ error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
if (error)
return error;
- cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
error = xfs_btree_count_blocks(cur, tree_blocks);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
- xfs_buf_relse(agbp);
+ xfs_btree_del_cursor(cur, error);
+ xfs_trans_brelse(tp, agbp);
return error;
}
@@ -578,6 +579,7 @@ xfs_inobt_count_blocks(
int
xfs_finobt_calc_reserves(
struct xfs_mount *mp,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_extlen_t *ask,
xfs_extlen_t *used)
@@ -588,7 +590,7 @@ xfs_finobt_calc_reserves(
if (!xfs_sb_version_hasfinobt(&mp->m_sb))
return 0;
- error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
+ error = xfs_inobt_count_blocks(mp, tp, agno, XFS_BTNUM_FINO, &tree_len);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index bf8f0c405e7d..ebdd0c6b8766 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -60,8 +60,8 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
#define xfs_inobt_rec_check_count(mp, rec) 0
#endif /* DEBUG */
-int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
- xfs_extlen_t *ask, xfs_extlen_t *used);
+int xfs_finobt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
extern xfs_extlen_t xfs_iallocbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c
index b80c63faace2..771dd072015d 100644
--- a/fs/xfs/libxfs/xfs_iext_tree.c
+++ b/fs/xfs/libxfs/xfs_iext_tree.c
@@ -14,6 +14,7 @@
#include "xfs_inode_fork.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_bmap.h"
#include "xfs_trace.h"
/*
@@ -612,6 +613,19 @@ xfs_iext_realloc_root(
cur->leaf = new;
}
+/*
+ * Increment the sequence counter if we are on a COW fork. This allows
+ * the writeback code to skip looking for a COW extent if the COW fork
+ * hasn't changed. We use WRITE_ONCE here to ensure the update to the
+ * sequence counter is seen before the modifications to the extent
+ * tree itself take effect.
+ */
+static inline void xfs_iext_inc_seq(struct xfs_ifork *ifp, int state)
+{
+ if (state & BMAP_COWFORK)
+ WRITE_ONCE(ifp->if_seq, READ_ONCE(ifp->if_seq) + 1);
+}
+
void
xfs_iext_insert(
struct xfs_inode *ip,
@@ -624,6 +638,8 @@ xfs_iext_insert(
struct xfs_iext_leaf *new = NULL;
int nr_entries, i;
+ xfs_iext_inc_seq(ifp, state);
+
if (ifp->if_height == 0)
xfs_iext_alloc_root(ifp, cur);
else if (ifp->if_height == 1)
@@ -864,6 +880,8 @@ xfs_iext_remove(
ASSERT(ifp->if_u1.if_root != NULL);
ASSERT(xfs_iext_valid(ifp, cur));
+ xfs_iext_inc_seq(ifp, state);
+
nr_entries = xfs_iext_leaf_nr_entries(ifp, leaf, cur->pos) - 1;
for (i = cur->pos; i < nr_entries; i++)
leaf->recs[i] = leaf->recs[i + 1];
@@ -970,6 +988,8 @@ xfs_iext_update_extent(
{
struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
+ xfs_iext_inc_seq(ifp, state);
+
if (cur->pos == 0) {
struct xfs_bmbt_irec old;
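
The new xfs_iext_inc_seq() gives writeback a cheap way to tell whether the COW fork changed: every insert, remove, or update on a COW fork bumps if_seq, and a caller that cached the old value can skip a fresh extent lookup while the counter is unchanged. A single-threaded userspace model of that pattern (the kernel pairs the update with WRITE_ONCE/READ_ONCE; that memory-ordering detail is elided here):

/* Single-threaded sketch of the if_seq "did the fork change?" counter. */
#include <stdbool.h>
#include <stdio.h>

struct ifork {
        unsigned int if_seq;    /* bumped on every COW fork modification */
};

static void iext_insert(struct ifork *ifp)
{
        ifp->if_seq++;          /* kernel uses WRITE_ONCE() for this store */
        /* ... the real code modifies the extent tree here ... */
}

/* Writeback-style caller: only rescan when the counter has moved. */
static bool need_cow_lookup(struct ifork *ifp, unsigned int *cached_seq)
{
        if (*cached_seq == ifp->if_seq)
                return false;   /* fork unchanged, skip the lookup */
        *cached_seq = ifp->if_seq;
        return true;
}

int main(void)
{
        struct ifork cow = { 0 };
        unsigned int seen = cow.if_seq;

        printf("lookup needed? %d\n", need_cow_lookup(&cow, &seen)); /* 0 */
        iext_insert(&cow);
        printf("lookup needed? %d\n", need_cow_lookup(&cow, &seen)); /* 1 */
        return 0;
}
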
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 183ec0cb8921..f9acf1d436f6 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -158,7 +158,6 @@ xfs_init_local_fork(
}
ifp->if_bytes = size;
- ifp->if_real_bytes = real_size;
ifp->if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
ifp->if_flags |= XFS_IFINLINE;
}
@@ -226,7 +225,6 @@ xfs_iformat_extents(
return -EFSCORRUPTED;
}
- ifp->if_real_bytes = 0;
ifp->if_bytes = 0;
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
@@ -271,7 +269,7 @@ xfs_iformat_btree(
{
struct xfs_mount *mp = ip->i_mount;
xfs_bmdr_block_t *dfp;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
/* REFERENCED */
int nrecs;
int size;
@@ -317,7 +315,6 @@ xfs_iformat_btree(
ifp->if_flags &= ~XFS_IFEXTENTS;
ifp->if_flags |= XFS_IFBROOT;
- ifp->if_real_bytes = 0;
ifp->if_bytes = 0;
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
@@ -350,7 +347,7 @@ xfs_iroot_realloc(
{
struct xfs_mount *mp = ip->i_mount;
int cur_max;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
struct xfs_btree_block *new_broot;
int new_max;
size_t new_size;
@@ -471,55 +468,34 @@ xfs_iroot_realloc(
*/
void
xfs_idata_realloc(
- xfs_inode_t *ip,
- int byte_diff,
- int whichfork)
+ struct xfs_inode *ip,
+ int byte_diff,
+ int whichfork)
{
- xfs_ifork_t *ifp;
- int new_size;
- int real_size;
-
- if (byte_diff == 0) {
- return;
- }
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ int new_size = (int)ifp->if_bytes + byte_diff;
- ifp = XFS_IFORK_PTR(ip, whichfork);
- new_size = (int)ifp->if_bytes + byte_diff;
ASSERT(new_size >= 0);
+ ASSERT(new_size <= XFS_IFORK_SIZE(ip, whichfork));
+
+ if (byte_diff == 0)
+ return;
if (new_size == 0) {
kmem_free(ifp->if_u1.if_data);
ifp->if_u1.if_data = NULL;
- real_size = 0;
- } else {
- /*
- * Stuck with malloc/realloc.
- * For inline data, the underlying buffer must be
- * a multiple of 4 bytes in size so that it can be
- * logged and stay on word boundaries. We enforce
- * that here.
- */
- real_size = roundup(new_size, 4);
- if (ifp->if_u1.if_data == NULL) {
- ASSERT(ifp->if_real_bytes == 0);
- ifp->if_u1.if_data = kmem_alloc(real_size,
- KM_SLEEP | KM_NOFS);
- } else {
- /*
- * Only do the realloc if the underlying size
- * is really changing.
- */
- if (ifp->if_real_bytes != real_size) {
- ifp->if_u1.if_data =
- kmem_realloc(ifp->if_u1.if_data,
- real_size,
- KM_SLEEP | KM_NOFS);
- }
- }
+ ifp->if_bytes = 0;
+ return;
}
- ifp->if_real_bytes = real_size;
+
+ /*
+ * For inline data, the underlying buffer must be a multiple of 4 bytes
+ * in size so that it can be logged and stay on word boundaries.
+ * We enforce that here.
+ */
+ ifp->if_u1.if_data = kmem_realloc(ifp->if_u1.if_data,
+ roundup(new_size, 4), KM_SLEEP | KM_NOFS);
ifp->if_bytes = new_size;
- ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
}
void
@@ -527,7 +503,7 @@ xfs_idestroy_fork(
xfs_inode_t *ip,
int whichfork)
{
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
ifp = XFS_IFORK_PTR(ip, whichfork);
if (ifp->if_broot != NULL) {
@@ -543,17 +519,13 @@ xfs_idestroy_fork(
*/
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
if (ifp->if_u1.if_data != NULL) {
- ASSERT(ifp->if_real_bytes != 0);
kmem_free(ifp->if_u1.if_data);
ifp->if_u1.if_data = NULL;
- ifp->if_real_bytes = 0;
}
} else if ((ifp->if_flags & XFS_IFEXTENTS) && ifp->if_height) {
xfs_iext_destroy(ifp);
}
- ASSERT(ifp->if_real_bytes == 0);
-
if (whichfork == XFS_ATTR_FORK) {
kmem_zone_free(xfs_ifork_zone, ip->i_afp);
ip->i_afp = NULL;
@@ -620,7 +592,7 @@ xfs_iflush_fork(
int whichfork)
{
char *cp;
- xfs_ifork_t *ifp;
+ struct xfs_ifork *ifp;
xfs_mount_t *mp;
static const short brootflag[2] =
{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
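
With if_real_bytes gone, xfs_idata_realloc() leans on the allocator itself: kmem_realloc() of a NULL pointer behaves like a plain allocation, and the one sizing rule left is rounding inline buffers up to a 4-byte multiple so they can be logged on word boundaries. A hedged sketch of that rounding plus realloc-from-NULL behavior, using libc realloc() as a stand-in for kmem_realloc():

/* Sketch: round inline-data sizes to 4 bytes; realloc(NULL, n) == malloc(n). */
#include <stdio.h>
#include <stdlib.h>

#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        char *buf = NULL;
        int bytes = 0;
        int diff = 7;                   /* grow the fork by 7 bytes */
        int new_size = bytes + diff;

        /* one call covers both "first allocation" and "resize" */
        buf = realloc(buf, ROUNDUP(new_size, 4));       /* allocates 8 */
        if (!buf)
                return 1;
        bytes = new_size;
        printf("logical %d bytes, buffer %d bytes\n", bytes, ROUNDUP(bytes, 4));
        free(buf);
        return 0;
}

Tracking the rounded size separately (the old if_real_bytes) bought nothing the allocator couldn't handle, which is why the field and all its assertions could be deleted.
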
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 781b1603df5e..60361d2d74a1 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -12,9 +12,9 @@ struct xfs_dinode;
/*
* File incore extent information, present for each of data & attr forks.
*/
-typedef struct xfs_ifork {
+struct xfs_ifork {
int if_bytes; /* bytes in if_u1 */
- int if_real_bytes; /* bytes allocated in if_u1 */
+ unsigned int if_seq; /* cow fork mod counter */
struct xfs_btree_block *if_broot; /* file's incore btree root */
short if_broot_bytes; /* bytes allocated for root */
unsigned char if_flags; /* per-fork flags */
@@ -23,7 +23,7 @@ typedef struct xfs_ifork {
void *if_root; /* extent tree root */
char *if_data; /* inline file data */
} if_u1;
-} xfs_ifork_t;
+};
/*
* Per-fork incore inode flags.
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 79bb79853c9f..e5f97c69b320 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -77,6 +77,19 @@ static inline uint xlog_get_cycle(char *ptr)
#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */
+/*
+ * Log item for unmount records.
+ *
+ * The unmount record used to have a string "Unmount filesystem--" in the
+ * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
+ * We just write the magic number now; see xfs_log_unmount_write.
+ */
+struct xfs_unmount_log_format {
+ uint16_t magic; /* XLOG_UNMOUNT_TYPE */
+ uint16_t pad1;
+ uint32_t pad2; /* may as well make it 64 bits */
+};
+
/* Region types for iovec's i_type */
#define XLOG_REG_TYPE_BFORMAT 1
#define XLOG_REG_TYPE_BCHUNK 2
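
The new struct makes the unmount record's payload explicit: a 16-bit magic padded out to 64 bits, replacing the old convention where the leading "Un" of a string doubled as the magic. A small illustrative model of building and checking such a record (plain userspace types, not the log's on-disk encoding):

/* Sketch of an unmount-record payload: magic + pad out to 8 bytes. */
#include <stdint.h>
#include <stdio.h>

#define XLOG_UNMOUNT_TYPE 0x556e        /* "Un" */

struct unmount_log_format {
        uint16_t magic;         /* XLOG_UNMOUNT_TYPE */
        uint16_t pad1;
        uint32_t pad2;          /* may as well make it 64 bits */
};

int main(void)
{
        struct unmount_log_format ulf = { .magic = XLOG_UNMOUNT_TYPE };

        printf("payload is %zu bytes, magic ok: %d\n",
               sizeof(ulf), ulf.magic == XLOG_UNMOUNT_TYPE);
        return 0;
}
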
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 9dda6fd0bb13..542aa1475b5f 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -34,11 +34,9 @@ enum xfs_refc_adjust_op {
};
STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
- xfs_agblock_t agbno, xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops);
+ xfs_agblock_t agbno, xfs_extlen_t aglen);
STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
- xfs_agblock_t agbno, xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops);
+ xfs_agblock_t agbno, xfs_extlen_t aglen);
/*
* Look up the first record less than or equal to [bno, len] in the btree
@@ -870,7 +868,6 @@ xfs_refcount_adjust_extents(
xfs_agblock_t *agbno,
xfs_extlen_t *aglen,
enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops,
struct xfs_owner_info *oinfo)
{
struct xfs_refcount_irec ext, tmp;
@@ -925,8 +922,8 @@ xfs_refcount_adjust_extents(
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
cur->bc_private.a.agno,
tmp.rc_startblock);
- xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
- tmp.rc_blockcount, oinfo);
+ xfs_bmap_add_free(cur->bc_tp, fsbno,
+ tmp.rc_blockcount, oinfo);
}
(*agbno) += tmp.rc_blockcount;
@@ -968,8 +965,8 @@ xfs_refcount_adjust_extents(
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
cur->bc_private.a.agno,
ext.rc_startblock);
- xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
- ext.rc_blockcount, oinfo);
+ xfs_bmap_add_free(cur->bc_tp, fsbno, ext.rc_blockcount,
+ oinfo);
}
skip:
@@ -998,7 +995,6 @@ xfs_refcount_adjust(
xfs_agblock_t *new_agbno,
xfs_extlen_t *new_aglen,
enum xfs_refc_adjust_op adj,
- struct xfs_defer_ops *dfops,
struct xfs_owner_info *oinfo)
{
bool shape_changed;
@@ -1043,7 +1039,7 @@ xfs_refcount_adjust(
/* Now that we've taken care of the ends, adjust the middle extents */
error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen,
- adj, dfops, oinfo);
+ adj, oinfo);
if (error)
goto out_error;
@@ -1067,7 +1063,7 @@ xfs_refcount_finish_one_cleanup(
if (rcur == NULL)
return;
agbp = rcur->bc_private.a.agbp;
- xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(rcur, error);
if (error)
xfs_trans_brelse(tp, agbp);
}
@@ -1082,7 +1078,6 @@ xfs_refcount_finish_one_cleanup(
int
xfs_refcount_finish_one(
struct xfs_trans *tp,
- struct xfs_defer_ops *dfops,
enum xfs_refcount_intent_type type,
xfs_fsblock_t startblock,
xfs_extlen_t blockcount,
@@ -1132,7 +1127,7 @@ xfs_refcount_finish_one(
if (!agbp)
return -EFSCORRUPTED;
- rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, dfops);
+ rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
if (!rcur) {
error = -ENOMEM;
goto out_cur;
@@ -1145,23 +1140,23 @@ xfs_refcount_finish_one(
switch (type) {
case XFS_REFCOUNT_INCREASE:
error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
- new_len, XFS_REFCOUNT_ADJUST_INCREASE, dfops, NULL);
+ new_len, XFS_REFCOUNT_ADJUST_INCREASE, NULL);
*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
break;
case XFS_REFCOUNT_DECREASE:
error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
- new_len, XFS_REFCOUNT_ADJUST_DECREASE, dfops, NULL);
+ new_len, XFS_REFCOUNT_ADJUST_DECREASE, NULL);
*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
break;
case XFS_REFCOUNT_ALLOC_COW:
*new_fsb = startblock + blockcount;
*new_len = 0;
- error = __xfs_refcount_cow_alloc(rcur, bno, blockcount, dfops);
+ error = __xfs_refcount_cow_alloc(rcur, bno, blockcount);
break;
case XFS_REFCOUNT_FREE_COW:
*new_fsb = startblock + blockcount;
*new_len = 0;
- error = __xfs_refcount_cow_free(rcur, bno, blockcount, dfops);
+ error = __xfs_refcount_cow_free(rcur, bno, blockcount);
break;
default:
ASSERT(0);
@@ -1183,16 +1178,16 @@ out_cur:
*/
static int
__xfs_refcount_add(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
enum xfs_refcount_intent_type type,
xfs_fsblock_t startblock,
xfs_extlen_t blockcount)
{
struct xfs_refcount_intent *ri;
- trace_xfs_refcount_defer(mp, XFS_FSB_TO_AGNO(mp, startblock),
- type, XFS_FSB_TO_AGBNO(mp, startblock),
+ trace_xfs_refcount_defer(tp->t_mountp,
+ XFS_FSB_TO_AGNO(tp->t_mountp, startblock),
+ type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
blockcount);
ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
@@ -1202,7 +1197,7 @@ __xfs_refcount_add(
ri->ri_startblock = startblock;
ri->ri_blockcount = blockcount;
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
+ xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
return 0;
}
@@ -1211,14 +1206,13 @@ __xfs_refcount_add(
*/
int
xfs_refcount_increase_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_bmbt_irec *PREV)
{
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_sb_version_hasreflink(&tp->t_mountp->m_sb))
return 0;
- return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_INCREASE,
+ return __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE,
PREV->br_startblock, PREV->br_blockcount);
}
@@ -1227,14 +1221,13 @@ xfs_refcount_increase_extent(
*/
int
xfs_refcount_decrease_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_bmbt_irec *PREV)
{
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ if (!xfs_sb_version_hasreflink(&tp->t_mountp->m_sb))
return 0;
- return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_DECREASE,
+ return __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE,
PREV->br_startblock, PREV->br_blockcount);
}
@@ -1522,8 +1515,7 @@ STATIC int
__xfs_refcount_cow_alloc(
struct xfs_btree_cur *rcur,
xfs_agblock_t agbno,
- xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops)
+ xfs_extlen_t aglen)
{
trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
agbno, aglen);
@@ -1540,8 +1532,7 @@ STATIC int
__xfs_refcount_cow_free(
struct xfs_btree_cur *rcur,
xfs_agblock_t agbno,
- xfs_extlen_t aglen,
- struct xfs_defer_ops *dfops)
+ xfs_extlen_t aglen)
{
trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
agbno, aglen);
@@ -1554,47 +1545,45 @@ __xfs_refcount_cow_free(
/* Record a CoW staging extent in the refcount btree. */
int
xfs_refcount_alloc_cow_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
+ struct xfs_mount *mp = tp->t_mountp;
int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
- error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
- fsb, len);
+ error = __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len);
if (error)
return error;
/* Add rmap entry */
- return xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+ return xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
}
/* Forget a CoW staging event in the refcount btree. */
int
xfs_refcount_free_cow_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_fsblock_t fsb,
xfs_extlen_t len)
{
+ struct xfs_mount *mp = tp->t_mountp;
int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;
/* Remove rmap entry */
- error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+ error = xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
if (error)
return error;
- return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
- fsb, len);
+ return __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len);
}
struct xfs_refcount_recovery {
@@ -1635,7 +1624,6 @@ xfs_refcount_recover_cow_leftovers(
struct list_head debris;
union xfs_btree_irec low;
union xfs_btree_irec high;
- struct xfs_defer_ops dfops;
xfs_fsblock_t fsb;
xfs_agblock_t agbno;
int error;
@@ -1666,7 +1654,7 @@ xfs_refcount_recover_cow_leftovers(
error = -ENOMEM;
goto out_trans;
}
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
/* Find all the leftover CoW staging extents. */
memset(&low, 0, sizeof(low));
@@ -1675,11 +1663,11 @@ xfs_refcount_recover_cow_leftovers(
high.rc.rc_startblock = -1U;
error = xfs_btree_query_range(cur, &low, &high,
xfs_refcount_recover_extent, &debris);
- if (error)
- goto out_cursor;
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
xfs_trans_brelse(tp, agbp);
xfs_trans_cancel(tp);
+ if (error)
+ goto out_free;
/* Now iterate the list to free the leftovers */
list_for_each_entry_safe(rr, n, &debris, rr_list) {
@@ -1691,21 +1679,15 @@ xfs_refcount_recover_cow_leftovers(
trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);
/* Free the orphan record */
- xfs_defer_init(&dfops, &fsb);
agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
- error = xfs_refcount_free_cow_extent(mp, &dfops, fsb,
+ error = xfs_refcount_free_cow_extent(tp, fsb,
rr->rr_rrec.rc_blockcount);
if (error)
- goto out_defer;
+ goto out_trans;
/* Free the block. */
- xfs_bmap_add_free(mp, &dfops, fsb,
- rr->rr_rrec.rc_blockcount, NULL);
-
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_defer;
+ xfs_bmap_add_free(tp, fsb, rr->rr_rrec.rc_blockcount, NULL);
error = xfs_trans_commit(tp);
if (error)
@@ -1716,8 +1698,6 @@ xfs_refcount_recover_cow_leftovers(
}
return error;
-out_defer:
- xfs_defer_cancel(&dfops);
out_trans:
xfs_trans_cancel(tp);
out_free:
@@ -1727,11 +1707,6 @@ out_free:
kmem_free(rr);
}
return error;
-
-out_cursor:
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- xfs_trans_brelse(tp, agbp);
- goto out_trans;
}
/* Is there a record covering a given extent? */
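
Several hunks above collapse the old two-label cleanup (XFS_BTREE_ERROR vs XFS_BTREE_NOERROR) into handing the error code straight to xfs_btree_del_cursor(), which can decide for itself between a clean release and a destructive teardown. A hedged sketch of that "cleanup takes the error" idiom with stand-in types:

/* Sketch: let the teardown helper branch on the error, not the caller. */
#include <stdio.h>

struct cursor { const char *name; };

static void del_cursor(struct cursor *cur, int error)
{
        /* nonzero error => destructive teardown, zero => normal release */
        printf("%s: %s\n", cur->name,
               error ? "error teardown" : "clean release");
}

static int query_range(struct cursor *cur)
{
        (void)cur;
        return -5;              /* pretend the btree walk failed (-EIO-ish) */
}

int main(void)
{
        struct cursor cur = { "refcountbt" };
        int error = query_range(&cur);

        del_cursor(&cur, error);        /* one call, no out_cursor label */
        return error ? 1 : 0;
}

That is what lets xfs_refcount_recover_cow_leftovers() drop its out_cursor label entirely: success and failure now share a single teardown path.
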
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index 5fef74412727..1d9c518575e7 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -29,29 +29,26 @@ struct xfs_refcount_intent {
xfs_extlen_t ri_blockcount;
};
-extern int xfs_refcount_increase_extent(struct xfs_mount *mp,
- struct xfs_defer_ops *dfops, struct xfs_bmbt_irec *irec);
-extern int xfs_refcount_decrease_extent(struct xfs_mount *mp,
- struct xfs_defer_ops *dfops, struct xfs_bmbt_irec *irec);
+extern int xfs_refcount_increase_extent(struct xfs_trans *tp,
+ struct xfs_bmbt_irec *irec);
+extern int xfs_refcount_decrease_extent(struct xfs_trans *tp,
+ struct xfs_bmbt_irec *irec);
extern void xfs_refcount_finish_one_cleanup(struct xfs_trans *tp,
struct xfs_btree_cur *rcur, int error);
extern int xfs_refcount_finish_one(struct xfs_trans *tp,
- struct xfs_defer_ops *dfops, enum xfs_refcount_intent_type type,
- xfs_fsblock_t startblock, xfs_extlen_t blockcount,
- xfs_fsblock_t *new_fsb, xfs_extlen_t *new_len,
- struct xfs_btree_cur **pcur);
+ enum xfs_refcount_intent_type type, xfs_fsblock_t startblock,
+ xfs_extlen_t blockcount, xfs_fsblock_t *new_fsb,
+ xfs_extlen_t *new_len, struct xfs_btree_cur **pcur);
extern int xfs_refcount_find_shared(struct xfs_btree_cur *cur,
xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno,
xfs_extlen_t *flen, bool find_end_of_shared);
-extern int xfs_refcount_alloc_cow_extent(struct xfs_mount *mp,
- struct xfs_defer_ops *dfops, xfs_fsblock_t fsb,
- xfs_extlen_t len);
-extern int xfs_refcount_free_cow_extent(struct xfs_mount *mp,
- struct xfs_defer_ops *dfops, xfs_fsblock_t fsb,
- xfs_extlen_t len);
+extern int xfs_refcount_alloc_cow_extent(struct xfs_trans *tp,
+ xfs_fsblock_t fsb, xfs_extlen_t len);
+extern int xfs_refcount_free_cow_extent(struct xfs_trans *tp,
+ xfs_fsblock_t fsb, xfs_extlen_t len);
extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
xfs_agnumber_t agno);
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index b71937982c5b..1aaa01c97517 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -27,8 +27,7 @@ xfs_refcountbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agbp, cur->bc_private.a.agno,
- cur->bc_private.a.dfops);
+ cur->bc_private.a.agbp, cur->bc_private.a.agno);
}
STATIC void
@@ -71,7 +70,6 @@ xfs_refcountbt_alloc_block(
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
xfs_refc_block(args.mp));
- args.firstblock = args.fsbno;
xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
args.minlen = args.maxlen = args.prod = 1;
args.resv = XFS_AG_RESV_METADATA;
@@ -323,8 +321,7 @@ xfs_refcountbt_init_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_buf *agbp,
- xfs_agnumber_t agno,
- struct xfs_defer_ops *dfops)
+ xfs_agnumber_t agno)
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
struct xfs_btree_cur *cur;
@@ -344,7 +341,6 @@ xfs_refcountbt_init_cursor(
cur->bc_private.a.agbp = agbp;
cur->bc_private.a.agno = agno;
- cur->bc_private.a.dfops = dfops;
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
cur->bc_private.a.priv.refc.nr_ops = 0;
@@ -408,6 +404,7 @@ xfs_refcountbt_max_size(
int
xfs_refcountbt_calc_reserves(
struct xfs_mount *mp,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_extlen_t *ask,
xfs_extlen_t *used)
@@ -422,14 +419,14 @@ xfs_refcountbt_calc_reserves(
return 0;
- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
if (error)
return error;
agf = XFS_BUF_TO_AGF(agbp);
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_refcount_blocks);
- xfs_buf_relse(agbp);
+ xfs_trans_brelse(tp, agbp);
*ask += xfs_refcountbt_max_size(mp, agblocks);
*used += tree_len;
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h
index d2852b6e1fa8..ba416f71c824 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.h
+++ b/fs/xfs/libxfs/xfs_refcount_btree.h
@@ -44,8 +44,8 @@ struct xfs_mount;
((index) - 1) * sizeof(xfs_refcount_ptr_t)))
extern struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
- struct xfs_trans *tp, struct xfs_buf *agbp, xfs_agnumber_t agno,
- struct xfs_defer_ops *dfops);
+ struct xfs_trans *tp, struct xfs_buf *agbp,
+ xfs_agnumber_t agno);
extern int xfs_refcountbt_maxrecs(int blocklen, bool leaf);
extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
@@ -55,6 +55,7 @@ extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);
extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
- xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
+ struct xfs_trans *tp, xfs_agnumber_t agno, xfs_extlen_t *ask,
+ xfs_extlen_t *used);
#endif /* __XFS_REFCOUNT_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index d4460b0d2d81..245af452840e 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -670,14 +670,8 @@ xfs_rmap_free(
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
- if (error)
- goto out_error;
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
- return 0;
-
-out_error:
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ xfs_btree_del_cursor(cur, error);
return error;
}
@@ -753,19 +747,19 @@ xfs_rmap_map(
&have_lt);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
-
- error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
- if (error)
- goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
- trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
- cur->bc_private.a.agno, ltrec.rm_startblock,
- ltrec.rm_blockcount, ltrec.rm_owner,
- ltrec.rm_offset, ltrec.rm_flags);
+ if (have_lt) {
+ error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
+ trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
+ cur->bc_private.a.agno, ltrec.rm_startblock,
+ ltrec.rm_blockcount, ltrec.rm_owner,
+ ltrec.rm_offset, ltrec.rm_flags);
- if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
- have_lt = 0;
+ if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
+ have_lt = 0;
+ }
XFS_WANT_CORRUPTED_GOTO(mp,
have_lt == 0 ||
@@ -912,14 +906,8 @@ xfs_rmap_alloc(
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
error = xfs_rmap_map(cur, bno, len, false, oinfo);
- if (error)
- goto out_error;
-
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
- return 0;
-out_error:
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ xfs_btree_del_cursor(cur, error);
return error;
}
@@ -2156,7 +2144,7 @@ xfs_rmap_finish_one_cleanup(
if (rcur == NULL)
return;
agbp = rcur->bc_private.a.agbp;
- xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(rcur, error);
if (error)
xfs_trans_brelse(tp, agbp);
}
@@ -2289,18 +2277,18 @@ xfs_rmap_update_is_needed(
*/
static int
__xfs_rmap_add(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
enum xfs_rmap_intent_type type,
uint64_t owner,
int whichfork,
struct xfs_bmbt_irec *bmap)
{
- struct xfs_rmap_intent *ri;
+ struct xfs_rmap_intent *ri;
- trace_xfs_rmap_defer(mp, XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
+ trace_xfs_rmap_defer(tp->t_mountp,
+ XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
type,
- XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
+ XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
owner, whichfork,
bmap->br_startoff,
bmap->br_blockcount,
@@ -2313,23 +2301,22 @@ __xfs_rmap_add(
ri->ri_whichfork = whichfork;
ri->ri_bmap = *bmap;
- xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
+ xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
return 0;
}
/* Map an extent into a file. */
int
xfs_rmap_map_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *PREV)
{
- if (!xfs_rmap_update_is_needed(mp, whichfork))
+ if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
return 0;
- return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
+ return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_MAP_SHARED : XFS_RMAP_MAP, ip->i_ino,
whichfork, PREV);
}
@@ -2337,25 +2324,29 @@ xfs_rmap_map_extent(
/* Unmap an extent out of a file. */
int
xfs_rmap_unmap_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *PREV)
{
- if (!xfs_rmap_update_is_needed(mp, whichfork))
+ if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
return 0;
- return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
+ return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_UNMAP_SHARED : XFS_RMAP_UNMAP, ip->i_ino,
whichfork, PREV);
}
-/* Convert a data fork extent from unwritten to real or vice versa. */
+/*
+ * Convert a data fork extent from unwritten to real or vice versa.
+ *
+ * Note that tp can be NULL here as no transaction is used for COW fork
+ * unwritten conversion.
+ */
int
xfs_rmap_convert_extent(
struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
struct xfs_bmbt_irec *PREV)
@@ -2363,7 +2354,7 @@ xfs_rmap_convert_extent(
if (!xfs_rmap_update_is_needed(mp, whichfork))
return 0;
- return __xfs_rmap_add(mp, dfops, xfs_is_reflink_inode(ip) ?
+ return __xfs_rmap_add(tp, xfs_is_reflink_inode(ip) ?
XFS_RMAP_CONVERT_SHARED : XFS_RMAP_CONVERT, ip->i_ino,
whichfork, PREV);
}
@@ -2371,8 +2362,7 @@ xfs_rmap_convert_extent(
/* Schedule the creation of an rmap for non-file data. */
int
xfs_rmap_alloc_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len,
@@ -2380,23 +2370,21 @@ xfs_rmap_alloc_extent(
{
struct xfs_bmbt_irec bmap;
- if (!xfs_rmap_update_is_needed(mp, XFS_DATA_FORK))
+ if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
return 0;
- bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
+ bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
bmap.br_blockcount = len;
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
- return __xfs_rmap_add(mp, dfops, XFS_RMAP_ALLOC, owner,
- XFS_DATA_FORK, &bmap);
+ return __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap);
}
/* Schedule the deletion of an rmap for non-file data. */
int
xfs_rmap_free_extent(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len,
@@ -2404,16 +2392,15 @@ xfs_rmap_free_extent(
{
struct xfs_bmbt_irec bmap;
- if (!xfs_rmap_update_is_needed(mp, XFS_DATA_FORK))
+ if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
return 0;
- bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
+ bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
bmap.br_blockcount = len;
bmap.br_startoff = 0;
bmap.br_state = XFS_EXT_NORM;
- return __xfs_rmap_add(mp, dfops, XFS_RMAP_FREE, owner,
- XFS_DATA_FORK, &bmap);
+ return __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap);
}
/* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. */
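
The xfs_rmap_map() hunk above stops treating a failed less-than-or-equal lookup as corruption: have_lt may legitimately be zero when the new mapping sits below every existing rmap, so the record fetch and mergeability test now run only when a record was actually found. A small sketch of that guard with a toy lookup table:

/* Sketch: only examine the left neighbor if the <= lookup found one. */
#include <stdbool.h>
#include <stdio.h>

struct rec { long start; long len; };

/* toy lookup_le: find the last record starting at or before bno */
static bool lookup_le(const struct rec *tbl, int n, long bno, struct rec *out)
{
        bool found = false;

        for (int i = 0; i < n; i++) {
                if (tbl[i].start <= bno) {
                        *out = tbl[i];
                        found = true;
                }
        }
        return found;
}

int main(void)
{
        struct rec table[] = { { 100, 10 }, { 200, 5 } };
        struct rec lt;
        bool have_lt = lookup_le(table, 2, 50, &lt);    /* below all records */

        if (have_lt) {
                /* only here may we read lt or test mergeability */
                printf("left neighbor at %ld\n", lt.start);
        } else {
                printf("no left neighbor - fine, not corruption\n");
        }
        return 0;
}
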
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 9f19454768b2..157dc722ad35 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -185,21 +185,17 @@ struct xfs_rmap_intent {
};
/* functions for updating the rmapbt based on bmbt map/unmap operations */
-int xfs_rmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+int xfs_rmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ int whichfork, struct xfs_bmbt_irec *imap);
+int xfs_rmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+ int whichfork, struct xfs_bmbt_irec *imap);
+int xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *imap);
-int xfs_rmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- struct xfs_inode *ip, int whichfork,
- struct xfs_bmbt_irec *imap);
-int xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- struct xfs_inode *ip, int whichfork,
- struct xfs_bmbt_irec *imap);
-int xfs_rmap_alloc_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
- uint64_t owner);
-int xfs_rmap_free_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
- xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
- uint64_t owner);
+int xfs_rmap_alloc_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
+ xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
+int xfs_rmap_free_extent(struct xfs_trans *tp, xfs_agnumber_t agno,
+ xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner);
void xfs_rmap_finish_one_cleanup(struct xfs_trans *tp,
struct xfs_btree_cur *rcur, int error);
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 221a88ea60bb..f79cf040d745 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -554,6 +554,7 @@ xfs_rmapbt_max_size(
int
xfs_rmapbt_calc_reserves(
struct xfs_mount *mp,
+ struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_extlen_t *ask,
xfs_extlen_t *used)
@@ -567,14 +568,14 @@ xfs_rmapbt_calc_reserves(
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
- error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+ error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
if (error)
return error;
agf = XFS_BUF_TO_AGF(agbp);
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_rmap_blocks);
- xfs_buf_relse(agbp);
+ xfs_trans_brelse(tp, agbp);
/* Reserve 1% of the AG or enough for 1 block per record. */
*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
index 50198b6c3bb2..820d668b063d 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.h
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -51,7 +51,7 @@ extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);
-extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp,
+extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
#endif /* __XFS_RMAP_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 350119eeaecb..081f46e30556 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -96,80 +96,146 @@ xfs_perag_put(
trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}
-/*
- * Check the validity of the SB found.
- */
+/* Check all the superblock fields we care about when reading one in. */
STATIC int
-xfs_mount_validate_sb(
- xfs_mount_t *mp,
- xfs_sb_t *sbp,
- bool check_inprogress,
- bool check_version)
+xfs_validate_sb_read(
+ struct xfs_mount *mp,
+ struct xfs_sb *sbp)
{
- uint32_t agcount = 0;
- uint32_t rem;
-
- if (sbp->sb_magicnum != XFS_SB_MAGIC) {
- xfs_warn(mp, "bad magic number");
- return -EWRONGFS;
- }
-
-
- if (!xfs_sb_good_version(sbp)) {
- xfs_warn(mp, "bad version");
- return -EWRONGFS;
- }
+ if (XFS_SB_VERSION_NUM(sbp) != XFS_SB_VERSION_5)
+ return 0;
/*
- * Version 5 superblock feature mask validation. Reject combinations the
- * kernel cannot support up front before checking anything else. For
- * write validation, we don't need to check feature masks.
+ * Version 5 superblock feature mask validation. Reject combinations
+ * the kernel cannot support up front before checking anything else.
*/
- if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
- if (xfs_sb_has_compat_feature(sbp,
- XFS_SB_FEAT_COMPAT_UNKNOWN)) {
- xfs_warn(mp,
+ if (xfs_sb_has_compat_feature(sbp, XFS_SB_FEAT_COMPAT_UNKNOWN)) {
+ xfs_warn(mp,
"Superblock has unknown compatible features (0x%x) enabled.",
- (sbp->sb_features_compat &
- XFS_SB_FEAT_COMPAT_UNKNOWN));
- xfs_warn(mp,
+ (sbp->sb_features_compat & XFS_SB_FEAT_COMPAT_UNKNOWN));
+ xfs_warn(mp,
"Using a more recent kernel is recommended.");
- }
+ }
- if (xfs_sb_has_ro_compat_feature(sbp,
- XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
- xfs_alert(mp,
+ if (xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ xfs_alert(mp,
"Superblock has unknown read-only compatible features (0x%x) enabled.",
- (sbp->sb_features_ro_compat &
- XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
- if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
- xfs_warn(mp,
+ (sbp->sb_features_ro_compat &
+ XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+ if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ xfs_warn(mp,
"Attempted to mount read-only compatible filesystem read-write.");
- xfs_warn(mp,
+ xfs_warn(mp,
"Filesystem can only be safely mounted read only.");
- return -EINVAL;
- }
- }
- if (xfs_sb_has_incompat_feature(sbp,
- XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
- xfs_warn(mp,
-"Superblock has unknown incompatible features (0x%x) enabled.",
- (sbp->sb_features_incompat &
- XFS_SB_FEAT_INCOMPAT_UNKNOWN));
- xfs_warn(mp,
-"Filesystem can not be safely mounted by this kernel.");
return -EINVAL;
}
- } else if (xfs_sb_version_hascrc(sbp)) {
- /*
- * We can't read verify the sb LSN because the read verifier is
- * called before the log is allocated and processed. We know the
- * log is set up before write verifier (!check_version) calls,
- * so just check it here.
- */
- if (!xfs_log_check_lsn(mp, sbp->sb_lsn))
- return -EFSCORRUPTED;
+ }
+ if (xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
+ xfs_warn(mp,
+"Superblock has unknown incompatible features (0x%x) enabled.",
+ (sbp->sb_features_incompat &
+ XFS_SB_FEAT_INCOMPAT_UNKNOWN));
+ xfs_warn(mp,
+"Filesystem cannot be safely mounted by this kernel.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Check all the superblock fields we care about when writing one out. */
+STATIC int
+xfs_validate_sb_write(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ struct xfs_sb *sbp)
+{
+ /*
+ * Carry out additional sb summary counter sanity checks when we write
+ * the superblock. We skip this in the read validator because there
+ * could be newer superblocks in the log and if the values are garbage
+ * even after replay we'll recalculate them at the end of log mount.
+ *
+ * mkfs has traditionally written zeroed counters to inprogress and
+ * secondary superblocks, so allow this usage to continue because
+ * we never read counters from such superblocks.
+ */
+ if (XFS_BUF_ADDR(bp) == XFS_SB_DADDR && !sbp->sb_inprogress &&
+ (sbp->sb_fdblocks > sbp->sb_dblocks ||
+ !xfs_verify_icount(mp, sbp->sb_icount) ||
+ sbp->sb_ifree > sbp->sb_icount)) {
+ xfs_warn(mp, "SB summary counter sanity check failed");
+ return -EFSCORRUPTED;
+ }
+
+ if (XFS_SB_VERSION_NUM(sbp) != XFS_SB_VERSION_5)
+ return 0;
+
+ /*
+ * Version 5 superblock feature mask validation. Reject combinations
+ * the kernel cannot support since we checked for unsupported bits in
+ * the read verifier, which means that memory is corrupt.
+ */
+ if (xfs_sb_has_compat_feature(sbp, XFS_SB_FEAT_COMPAT_UNKNOWN)) {
+ xfs_warn(mp,
+"Corruption detected in superblock compatible features (0x%x)!",
+ (sbp->sb_features_compat & XFS_SB_FEAT_COMPAT_UNKNOWN));
+ return -EFSCORRUPTED;
+ }
+
+ if (xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ xfs_alert(mp,
+"Corruption detected in superblock read-only compatible features (0x%x)!",
+ (sbp->sb_features_ro_compat &
+ XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+ return -EFSCORRUPTED;
+ }
+ if (xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
+ xfs_warn(mp,
+"Corruption detected in superblock incompatible features (0x%x)!",
+ (sbp->sb_features_incompat &
+ XFS_SB_FEAT_INCOMPAT_UNKNOWN));
+ return -EFSCORRUPTED;
+ }
+ if (xfs_sb_has_incompat_log_feature(sbp,
+ XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
+ xfs_warn(mp,
+"Corruption detected in superblock incompatible log features (0x%x)!",
+ (sbp->sb_features_log_incompat &
+ XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
+ return -EFSCORRUPTED;
+ }
+
+ /*
+ * We can't read verify the sb LSN because the read verifier is called
+ * before the log is allocated and processed. We know the log is set up
+ * before write verifier calls, so check it here.
+ */
+ if (!xfs_log_check_lsn(mp, sbp->sb_lsn))
+ return -EFSCORRUPTED;
+
+ return 0;
+}
+
+/* Check the validity of the SB. */
+STATIC int
+xfs_validate_sb_common(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ struct xfs_sb *sbp)
+{
+ uint32_t agcount = 0;
+ uint32_t rem;
+
+ if (sbp->sb_magicnum != XFS_SB_MAGIC) {
+ xfs_warn(mp, "bad magic number");
+ return -EWRONGFS;
+ }
+
+ if (!xfs_sb_good_version(sbp)) {
+ xfs_warn(mp, "bad version");
+ return -EWRONGFS;
}
if (xfs_sb_version_has_pquotino(sbp)) {
@@ -321,7 +387,12 @@ xfs_mount_validate_sb(
return -EFBIG;
}
- if (check_inprogress && sbp->sb_inprogress) {
+ /*
+ * Don't touch the filesystem if a user tool thinks it owns the primary
+ * superblock. mkfs doesn't clear the flag from secondary supers, so
+ * we don't check them at all.
+ */
+ if (XFS_BUF_ADDR(bp) == XFS_SB_DADDR && sbp->sb_inprogress) {
xfs_warn(mp, "Offline file system operation in progress!");
return -EFSCORRUPTED;
}
@@ -596,29 +667,6 @@ xfs_sb_to_disk(
}
}
-static int
-xfs_sb_verify(
- struct xfs_buf *bp,
- bool check_version)
-{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_sb sb;
-
- /*
- * Use call variant which doesn't convert quota flags from disk
- * format, because xfs_mount_validate_sb checks the on-disk flags.
- */
- __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
-
- /*
- * Only check the in progress field for the primary superblock as
- * mkfs.xfs doesn't clear it from secondary superblocks.
- */
- return xfs_mount_validate_sb(mp, &sb,
- bp->b_maps[0].bm_bn == XFS_SB_DADDR,
- check_version);
-}
-
/*
* If the superblock has the CRC feature bit set or the CRC field is non-null,
* check that the CRC is valid. We check the CRC field is non-null because a
@@ -633,11 +681,12 @@ xfs_sb_verify(
*/
static void
xfs_sb_read_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
- int error;
+ struct xfs_sb sb;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
+ int error;
/*
* open code the version check to avoid needing to convert the entire
@@ -657,7 +706,16 @@ xfs_sb_read_verify(
}
}
}
- error = xfs_sb_verify(bp, true);
+
+ /*
+ * Check all the superblock fields. Don't byteswap the xquota flags
+ * because _verify_common checks the on-disk values.
+ */
+ __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
+ error = xfs_validate_sb_common(mp, bp, &sb);
+ if (error)
+ goto out_error;
+ error = xfs_validate_sb_read(mp, &sb);
out_error:
if (error == -EFSCORRUPTED || error == -EFSBADCRC)
@@ -691,15 +749,22 @@ static void
xfs_sb_write_verify(
struct xfs_buf *bp)
{
+ struct xfs_sb sb;
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_buf_log_item *bip = bp->b_log_item;
int error;
- error = xfs_sb_verify(bp, false);
- if (error) {
- xfs_verifier_error(bp, error, __this_address);
- return;
- }
+ /*
+ * Check all the superblock fields. Don't byteswap the xquota flags
+ * because _verify_common checks the on-disk values.
+ */
+ __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
+ error = xfs_validate_sb_common(mp, bp, &sb);
+ if (error)
+ goto out_error;
+ error = xfs_validate_sb_write(mp, bp, &sb);
+ if (error)
+ goto out_error;
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
@@ -708,6 +773,10 @@ xfs_sb_write_verify(
XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF);
+ return;
+
+out_error:
+ xfs_verifier_error(bp, error, __this_address);
}
const struct xfs_buf_ops xfs_sb_buf_ops = {
@@ -804,6 +873,7 @@ xfs_initialize_perag_data(
uint64_t bfree = 0;
uint64_t bfreelst = 0;
uint64_t btree = 0;
+ uint64_t fdblocks;
int error;
for (index = 0; index < agcount; index++) {
@@ -827,17 +897,31 @@ xfs_initialize_perag_data(
btree += pag->pagf_btreeblks;
xfs_perag_put(pag);
}
+ fdblocks = bfree + bfreelst + btree;
+
+ /*
+ * If the new summary counts are obviously incorrect, fail the
+ * mount operation because that implies the AGFs are also corrupt.
+ * Clear BAD_SUMMARY so that we don't unmount with a dirty log, which
+ * will prevent xfs_repair from fixing anything.
+ */
+ if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
+ xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
+ error = -EFSCORRUPTED;
+ goto out;
+ }
/* Overwrite incore superblock counters with just-read data */
spin_lock(&mp->m_sb_lock);
sbp->sb_ifree = ifree;
sbp->sb_icount = ialloc;
- sbp->sb_fdblocks = bfree + bfreelst + btree;
+ sbp->sb_fdblocks = fdblocks;
spin_unlock(&mp->m_sb_lock);
xfs_reinit_percpu_counters(mp);
-
- return 0;
+out:
+ mp->m_flags &= ~XFS_MOUNT_BAD_SUMMARY;
+ return error;
}
/*
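
The new xfs_validate_sb_write() enforces invariants that must hold for a primary, non-inprogress superblock: free blocks cannot exceed total blocks, free inodes cannot exceed allocated inodes, and icount must fall in its computed range. A hedged sketch of the first two checks in isolation (field names borrowed, struct heavily simplified):

/* Sketch of the superblock summary-counter sanity checks (illustrative). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sb {
        uint64_t dblocks;       /* total data blocks */
        uint64_t fdblocks;      /* free data blocks */
        uint64_t icount;        /* allocated inodes */
        uint64_t ifree;         /* free inodes */
};

static bool sb_counters_sane(const struct sb *sbp)
{
        if (sbp->fdblocks > sbp->dblocks)
                return false;   /* more free blocks than blocks exist */
        if (sbp->ifree > sbp->icount)
                return false;   /* more free inodes than allocated inodes */
        return true;
}

int main(void)
{
        struct sb good = { 1000, 100, 64, 10 };
        struct sb bad  = { 1000, 2000, 64, 10 };

        printf("good: %d, bad: %d\n",
               sb_counters_sane(&good), sb_counters_sane(&bad));
        return 0;
}

Doing this only in the write verifier, as the patch comment explains, avoids rejecting stale on-disk counters that log replay is about to fix anyway.
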
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 22089f1c880a..1c5debe748f0 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -64,6 +64,18 @@ void xfs_log_get_max_trans_res(struct xfs_mount *mp,
#define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */
#define XFS_TRANS_NO_WRITECOUNT 0x40 /* do not elevate SB writecount */
#define XFS_TRANS_NOFS 0x80 /* pass KM_NOFS to kmem_alloc */
+/*
+ * LOWMODE is used by the allocator to activate the lowspace algorithm - when
+ * free space is running low the extent allocator may choose to allocate an
+ * extent from an AG without leaving sufficient space for a btree split when
+ * inserting the new extent. In this case the allocator will enable the
+ * lowspace algorithm which is supposed to allow further allocations (such as
+ * btree splits and newroots) to allocate from sequential AGs. In order to
+ * avoid locking AGs out of order the lowspace algorithm will start searching
+ * for free space from AG 0. If the correct transaction reservations have been
+ * made then this algorithm will eventually find all the space it needs.
+ */
+#define XFS_TRANS_LOWMODE 0x100 /* allocate in low space mode */
/*
* Field values for xfs_trans_mod_sb.
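
XFS_TRANS_LOWMODE joins the other XFS_TRANS_* flag bits, so "we are allocating in low space mode" now travels with the transaction rather than in the separate defer-ops structure it used to live in. A minimal sketch of that flag plumbing with stand-in names:

/* Sketch: low-space mode carried as a transaction flag bit. */
#include <stdio.h>

#define TRANS_LOWMODE 0x100     /* allocate in low space mode */

struct trans { unsigned int t_flags; };

static void alloc_extent(struct trans *tp)
{
        if (tp->t_flags & TRANS_LOWMODE) {
                /* search sequentially from AG 0 to keep AG locking ordered */
                printf("lowspace allocation path\n");
        } else {
                printf("normal allocation path\n");
        }
}

int main(void)
{
        struct trans tp = { 0 };

        alloc_extent(&tp);
        tp.t_flags |= TRANS_LOWMODE;    /* allocator ran low on free space */
        alloc_extent(&tp);
        return 0;
}
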
diff --git a/fs/xfs/libxfs/xfs_types.c b/fs/xfs/libxfs/xfs_types.c
index 2e2a243cef2e..33a5ca346baf 100644
--- a/fs/xfs/libxfs/xfs_types.c
+++ b/fs/xfs/libxfs/xfs_types.c
@@ -171,3 +171,37 @@ xfs_verify_rtbno(
{
return rtbno < mp->m_sb.sb_rblocks;
}
+
+/* Calculate the range of valid icount values. */
+static void
+xfs_icount_range(
+ struct xfs_mount *mp,
+ unsigned long long *min,
+ unsigned long long *max)
+{
+ unsigned long long nr_inos = 0;
+ xfs_agnumber_t agno;
+
+ /* root, rtbitmap, rtsum all live in the first chunk */
+ *min = XFS_INODES_PER_CHUNK;
+
+ for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ xfs_agino_t first, last;
+
+ xfs_agino_range(mp, agno, &first, &last);
+ nr_inos += last - first + 1;
+ }
+ *max = nr_inos;
+}
+
+/* Sanity-checking of inode counts. */
+bool
+xfs_verify_icount(
+ struct xfs_mount *mp,
+ unsigned long long icount)
+{
+ unsigned long long min, max;
+
+ xfs_icount_range(mp, &min, &max);
+ return icount >= min && icount <= max;
+}
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 4055d62f690c..b9e6c89284c3 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -165,5 +165,6 @@ bool xfs_verify_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_internal_inum(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_dir_ino(struct xfs_mount *mp, xfs_ino_t ino);
bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
+bool xfs_verify_icount(struct xfs_mount *mp, unsigned long long icount);
#endif /* __XFS_TYPES_H__ */
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 9bb0745f1ad2..3068a9382feb 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -28,30 +28,30 @@
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_superblock_xref(
- struct xfs_scrub_context *sc,
- struct xfs_buf *bp)
+xchk_superblock_xref(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp)
{
- struct xfs_owner_info oinfo;
- struct xfs_mount *mp = sc->mp;
- xfs_agnumber_t agno = sc->sm->sm_agno;
- xfs_agblock_t agbno;
- int error;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agnumber_t agno = sc->sm->sm_agno;
+ xfs_agblock_t agbno;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
agbno = XFS_SB_BLOCK(mp);
- error = xfs_scrub_ag_init(sc, agno, &sc->sa);
- if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
+ error = xchk_ag_init(sc, agno, &sc->sa);
+ if (!xchk_xref_process_error(sc, agno, agbno, &error))
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_not_shared(sc, agbno, 1);
/* scrub teardown will take care of sc->sa for us */
}
@@ -65,17 +65,17 @@ xfs_scrub_superblock_xref(
* sb 0 is ok and we can use its information to check everything else.
*/
int
-xfs_scrub_superblock(
- struct xfs_scrub_context *sc)
+xchk_superblock(
+ struct xfs_scrub *sc)
{
- struct xfs_mount *mp = sc->mp;
- struct xfs_buf *bp;
- struct xfs_dsb *sb;
- xfs_agnumber_t agno;
- uint32_t v2_ok;
- __be32 features_mask;
- int error;
- __be16 vernum_mask;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *bp;
+ struct xfs_dsb *sb;
+ xfs_agnumber_t agno;
+ uint32_t v2_ok;
+ __be32 features_mask;
+ int error;
+ __be16 vernum_mask;
agno = sc->sm->sm_agno;
if (agno == 0)
@@ -98,7 +98,7 @@ xfs_scrub_superblock(
default:
break;
}
- if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
+ if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
return error;
sb = XFS_BUF_TO_SBP(bp);
@@ -110,46 +110,46 @@ xfs_scrub_superblock(
* checked.
*/
if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check sb_versionnum bits that are set at mkfs time. */
vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
@@ -163,7 +163,7 @@ xfs_scrub_superblock(
XFS_SB_VERSION_DIRV2BIT);
if ((sb->sb_versionnum & vernum_mask) !=
(cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check sb_versionnum bits that can be set after mkfs time. */
vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
@@ -171,40 +171,40 @@ xfs_scrub_superblock(
XFS_SB_VERSION_QUOTABIT);
if ((sb->sb_versionnum & vernum_mask) !=
(cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
/*
* Skip the summary counters since we track them in memory anyway.
@@ -212,10 +212,10 @@ xfs_scrub_superblock(
*/
if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
/*
* Skip the quota flags since repair will force quotacheck.
@@ -223,46 +223,46 @@ xfs_scrub_superblock(
*/
if (sb->sb_flags != mp->m_sb.sb_flags)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Do we see any invalid bits in sb_features2? */
if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
if (sb->sb_features2 != 0)
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
} else {
v2_ok = XFS_SB_VERSION2_OKBITS;
if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
v2_ok |= XFS_SB_VERSION2_CRCBIT;
if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_features2 != sb->sb_bad_features2)
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
}
/* Check sb_features2 flags that are set at mkfs time. */
@@ -272,26 +272,26 @@ xfs_scrub_superblock(
XFS_SB_VERSION2_FTYPE);
if ((sb->sb_features2 & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check sb_features2 flags that can be set after mkfs time. */
features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
if ((sb->sb_features2 & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (!xfs_sb_version_hascrc(&mp->m_sb)) {
/* all v5 fields must be zero */
if (memchr_inv(&sb->sb_features_compat, 0,
sizeof(struct xfs_dsb) -
offsetof(struct xfs_dsb, sb_features_compat)))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
} else {
/* Check compat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
if ((sb->sb_features_compat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check ro compat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
@@ -301,7 +301,7 @@ xfs_scrub_superblock(
if ((sb->sb_features_ro_compat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check incompat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
@@ -311,22 +311,22 @@ xfs_scrub_superblock(
if ((sb->sb_features_incompat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_incompat) &
features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Check log incompat flags; all are set at mkfs time. */
features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
if ((sb->sb_features_log_incompat & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
features_mask))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
/* Don't care about sb_crc */
if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
- xfs_scrub_block_set_preen(sc, bp);
+ xchk_block_set_preen(sc, bp);
/* Don't care about sb_lsn */
}
@@ -334,15 +334,15 @@ xfs_scrub_superblock(
if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
/* The metadata UUID must be the same for all supers */
if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
}
/* Everything else must be zero. */
if (memchr_inv(sb + 1, 0,
BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
- xfs_scrub_superblock_xref(sc, bp);
+ xchk_superblock_xref(sc, bp);
return error;
}
@@ -351,7 +351,7 @@ xfs_scrub_superblock(
/* Tally freespace record lengths. */
STATIC int
-xfs_scrub_agf_record_bno_lengths(
+xchk_agf_record_bno_lengths(
struct xfs_btree_cur *cur,
struct xfs_alloc_rec_incore *rec,
void *priv)
@@ -364,75 +364,75 @@ xfs_scrub_agf_record_bno_lengths(
/* Check agf_freeblks */
static inline void
-xfs_scrub_agf_xref_freeblks(
- struct xfs_scrub_context *sc)
+xchk_agf_xref_freeblks(
+ struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
- xfs_extlen_t blocks = 0;
- int error;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_extlen_t blocks = 0;
+ int error;
if (!sc->sa.bno_cur)
return;
error = xfs_alloc_query_all(sc->sa.bno_cur,
- xfs_scrub_agf_record_bno_lengths, &blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ xchk_agf_record_bno_lengths, &blocks);
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
return;
if (blocks != be32_to_cpu(agf->agf_freeblks))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
-xfs_scrub_agf_xref_cntbt(
- struct xfs_scrub_context *sc)
+xchk_agf_xref_cntbt(
+ struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
- xfs_agblock_t agbno;
- xfs_extlen_t blocks;
- int have;
- int error;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_agblock_t agbno;
+ xfs_extlen_t blocks;
+ int have;
+ int error;
if (!sc->sa.cnt_cur)
return;
/* Any freespace at all? */
error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
return;
if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
return;
}
/* Check agf_longest */
error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
return;
if (!have || blocks != be32_to_cpu(agf->agf_longest))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Check the btree block counts in the AGF against the btrees. */
STATIC void
-xfs_scrub_agf_xref_btreeblks(
- struct xfs_scrub_context *sc)
+xchk_agf_xref_btreeblks(
+ struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
- struct xfs_mount *mp = sc->mp;
- xfs_agblock_t blocks;
- xfs_agblock_t btreeblks;
- int error;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t blocks;
+ xfs_agblock_t btreeblks;
+ int error;
/* Check agf_rmap_blocks; set up for agf_btreeblks check */
if (sc->sa.rmap_cur) {
error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
btreeblks = blocks - 1;
if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
} else {
btreeblks = 0;
}
@@ -447,136 +447,136 @@ xfs_scrub_agf_xref_btreeblks(
/* Check agf_btreeblks */
error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
return;
btreeblks += blocks - 1;
error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
return;
btreeblks += blocks - 1;
if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Check agf_refcount_blocks against tree size */
static inline void
-xfs_scrub_agf_xref_refcblks(
- struct xfs_scrub_context *sc)
+xchk_agf_xref_refcblks(
+ struct xfs_scrub *sc)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
- xfs_agblock_t blocks;
- int error;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_agblock_t blocks;
+ int error;
if (!sc->sa.refc_cur)
return;
error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_agf_xref(
- struct xfs_scrub_context *sc)
+xchk_agf_xref(
+ struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
- struct xfs_mount *mp = sc->mp;
- xfs_agblock_t agbno;
- int error;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
agbno = XFS_AGF_BLOCK(mp);
- error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ error = xchk_ag_btcur_init(sc, &sc->sa);
if (error)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_agf_xref_freeblks(sc);
- xfs_scrub_agf_xref_cntbt(sc);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_agf_xref_freeblks(sc);
+ xchk_agf_xref_cntbt(sc);
+ xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
- xfs_scrub_agf_xref_btreeblks(sc);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
- xfs_scrub_agf_xref_refcblks(sc);
+ xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_agf_xref_btreeblks(sc);
+ xchk_xref_is_not_shared(sc, agbno, 1);
+ xchk_agf_xref_refcblks(sc);
/* scrub teardown will take care of sc->sa for us */
}
/* Scrub the AGF. */
int
-xfs_scrub_agf(
- struct xfs_scrub_context *sc)
+xchk_agf(
+ struct xfs_scrub *sc)
{
- struct xfs_mount *mp = sc->mp;
- struct xfs_agf *agf;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_agblock_t eoag;
- xfs_agblock_t agfl_first;
- xfs_agblock_t agfl_last;
- xfs_agblock_t agfl_count;
- xfs_agblock_t fl_count;
- int level;
- int error = 0;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_agf *agf;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_agblock_t eoag;
+ xfs_agblock_t agfl_first;
+ xfs_agblock_t agfl_last;
+ xfs_agblock_t agfl_count;
+ xfs_agblock_t fl_count;
+ int level;
+ int error = 0;
agno = sc->sa.agno = sc->sm->sm_agno;
- error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
+ error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
&sc->sa.agf_bp, &sc->sa.agfl_bp);
- if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
+ if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
goto out;
- xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp);
+ xchk_buffer_recheck(sc, sc->sa.agf_bp);
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
/* Check the AG length */
eoag = be32_to_cpu(agf->agf_length);
if (eoag != xfs_ag_block_count(mp, agno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
/* Check the AGF btree roots and levels */
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
}
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
agbno = be32_to_cpu(agf->agf_refcount_root);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
level = be32_to_cpu(agf->agf_refcount_level);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
}
/* Check the AGFL counters */
@@ -588,57 +588,57 @@ xfs_scrub_agf(
else
fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
if (agfl_count != 0 && fl_count != agfl_count)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
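	/*
	 * Wrap-around example for the fl_count computation above: say
	 * xfs_agfl_size(mp) == 119, agfl_first == 117 and agfl_last == 1;
	 * the free list then occupies slots 117, 118, 0 and 1, so
	 * fl_count = 119 - 117 + 1 + 1 == 4.  (Illustrative numbers only.)
	 */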
- xfs_scrub_agf_xref(sc);
+ xchk_agf_xref(sc);
out:
return error;
}
/* AGFL */
-struct xfs_scrub_agfl_info {
- struct xfs_owner_info oinfo;
- unsigned int sz_entries;
- unsigned int nr_entries;
- xfs_agblock_t *entries;
- struct xfs_scrub_context *sc;
+struct xchk_agfl_info {
+ struct xfs_owner_info oinfo;
+ unsigned int sz_entries;
+ unsigned int nr_entries;
+ xfs_agblock_t *entries;
+ struct xfs_scrub *sc;
};
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_agfl_block_xref(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- struct xfs_owner_info *oinfo)
+xchk_agfl_block_xref(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ struct xfs_owner_info *oinfo)
{
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_xref_is_not_inode_chunk(sc, agbno, 1);
+ xchk_xref_is_owned_by(sc, agbno, 1, oinfo);
+ xchk_xref_is_not_shared(sc, agbno, 1);
}
/* Scrub an AGFL block. */
STATIC int
-xfs_scrub_agfl_block(
- struct xfs_mount *mp,
- xfs_agblock_t agbno,
- void *priv)
+xchk_agfl_block(
+ struct xfs_mount *mp,
+ xfs_agblock_t agbno,
+ void *priv)
{
- struct xfs_scrub_agfl_info *sai = priv;
- struct xfs_scrub_context *sc = sai->sc;
- xfs_agnumber_t agno = sc->sa.agno;
+ struct xchk_agfl_info *sai = priv;
+ struct xfs_scrub *sc = sai->sc;
+ xfs_agnumber_t agno = sc->sa.agno;
if (xfs_verify_agbno(mp, agno, agbno) &&
sai->nr_entries < sai->sz_entries)
sai->entries[sai->nr_entries++] = agbno;
else
- xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agfl_bp);
- xfs_scrub_agfl_block_xref(sc, agbno, priv);
+ xchk_agfl_block_xref(sc, agbno, priv);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return XFS_BTREE_QUERY_RANGE_ABORT;
@@ -647,7 +647,7 @@ xfs_scrub_agfl_block(
}
static int
-xfs_scrub_agblock_cmp(
+xchk_agblock_cmp(
const void *pa,
const void *pb)
{
@@ -659,28 +659,28 @@ xfs_scrub_agblock_cmp(
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_agfl_xref(
- struct xfs_scrub_context *sc)
+xchk_agfl_xref(
+ struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
- struct xfs_mount *mp = sc->mp;
- xfs_agblock_t agbno;
- int error;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
agbno = XFS_AGFL_BLOCK(mp);
- error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ error = xchk_ag_btcur_init(sc, &sc->sa);
if (error)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_not_shared(sc, agbno, 1);
/*
* Scrub teardown will take care of sc->sa for us. Leave sc->sa
@@ -690,26 +690,26 @@ xfs_scrub_agfl_xref(
/* Scrub the AGFL. */
int
-xfs_scrub_agfl(
- struct xfs_scrub_context *sc)
+xchk_agfl(
+ struct xfs_scrub *sc)
{
- struct xfs_scrub_agfl_info sai;
- struct xfs_agf *agf;
- xfs_agnumber_t agno;
- unsigned int agflcount;
- unsigned int i;
- int error;
+ struct xchk_agfl_info sai;
+ struct xfs_agf *agf;
+ xfs_agnumber_t agno;
+ unsigned int agflcount;
+ unsigned int i;
+ int error;
agno = sc->sa.agno = sc->sm->sm_agno;
- error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
+ error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
&sc->sa.agf_bp, &sc->sa.agfl_bp);
- if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
+ if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
goto out;
if (!sc->sa.agf_bp)
return -EFSCORRUPTED;
- xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp);
+ xchk_buffer_recheck(sc, sc->sa.agfl_bp);
- xfs_scrub_agfl_xref(sc);
+ xchk_agfl_xref(sc);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
@@ -718,7 +718,7 @@ xfs_scrub_agfl(
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
agflcount = be32_to_cpu(agf->agf_flcount);
if (agflcount > xfs_agfl_size(sc->mp)) {
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
goto out;
}
memset(&sai, 0, sizeof(sai));
@@ -734,7 +734,7 @@ xfs_scrub_agfl(
/* Check the blocks in the AGFL. */
xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
- sc->sa.agfl_bp, xfs_scrub_agfl_block, &sai);
+ sc->sa.agfl_bp, xchk_agfl_block, &sai);
if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
error = 0;
goto out_free;
@@ -743,16 +743,16 @@ xfs_scrub_agfl(
goto out_free;
if (agflcount != sai.nr_entries) {
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
goto out_free;
}
/* Sort entries, check for duplicates. */
sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
- xfs_scrub_agblock_cmp, NULL);
+ xchk_agblock_cmp, NULL);
for (i = 1; i < sai.nr_entries; i++) {
if (sai.entries[i] == sai.entries[i - 1]) {
- xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agf_bp);
break;
}
}
@@ -767,103 +767,103 @@ out:
/* Check agi_count/agi_freecount */
static inline void
-xfs_scrub_agi_xref_icounts(
- struct xfs_scrub_context *sc)
+xchk_agi_xref_icounts(
+ struct xfs_scrub *sc)
{
- struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
- xfs_agino_t icount;
- xfs_agino_t freecount;
- int error;
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
+ xfs_agino_t icount;
+ xfs_agino_t freecount;
+ int error;
if (!sc->sa.ino_cur)
return;
error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
return;
if (be32_to_cpu(agi->agi_count) != icount ||
be32_to_cpu(agi->agi_freecount) != freecount)
- xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_agi_xref(
- struct xfs_scrub_context *sc)
+xchk_agi_xref(
+ struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
- struct xfs_mount *mp = sc->mp;
- xfs_agblock_t agbno;
- int error;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
agbno = XFS_AGI_BLOCK(mp);
- error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ error = xchk_ag_btcur_init(sc, &sc->sa);
if (error)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
- xfs_scrub_agi_xref_icounts(sc);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_xref_is_not_inode_chunk(sc, agbno, 1);
+ xchk_agi_xref_icounts(sc);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_not_shared(sc, agbno, 1);
/* scrub teardown will take care of sc->sa for us */
}
/* Scrub the AGI. */
int
-xfs_scrub_agi(
- struct xfs_scrub_context *sc)
+xchk_agi(
+ struct xfs_scrub *sc)
{
- struct xfs_mount *mp = sc->mp;
- struct xfs_agi *agi;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_agblock_t eoag;
- xfs_agino_t agino;
- xfs_agino_t first_agino;
- xfs_agino_t last_agino;
- xfs_agino_t icount;
- int i;
- int level;
- int error = 0;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_agi *agi;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_agblock_t eoag;
+ xfs_agino_t agino;
+ xfs_agino_t first_agino;
+ xfs_agino_t last_agino;
+ xfs_agino_t icount;
+ int i;
+ int level;
+ int error = 0;
agno = sc->sa.agno = sc->sm->sm_agno;
- error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
+ error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
&sc->sa.agf_bp, &sc->sa.agfl_bp);
- if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
+ if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
goto out;
- xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp);
+ xchk_buffer_recheck(sc, sc->sa.agi_bp);
agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
/* Check the AG length */
eoag = be32_to_cpu(agi->agi_length);
if (eoag != xfs_ag_block_count(mp, agno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check btree roots and levels */
agbno = be32_to_cpu(agi->agi_root);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
level = be32_to_cpu(agi->agi_level);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
agbno = be32_to_cpu(agi->agi_free_root);
if (!xfs_verify_agbno(mp, agno, agbno))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
level = be32_to_cpu(agi->agi_free_level);
if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
}
/* Check inode counters */
@@ -871,16 +871,16 @@ xfs_scrub_agi(
icount = be32_to_cpu(agi->agi_count);
if (icount > last_agino - first_agino + 1 ||
icount < be32_to_cpu(agi->agi_freecount))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check inode pointers */
agino = be32_to_cpu(agi->agi_newino);
if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
agino = be32_to_cpu(agi->agi_dirino);
if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
/* Check unlinked inode buckets */
for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
@@ -888,13 +888,13 @@ xfs_scrub_agi(
if (agino == NULLAGINO)
continue;
if (!xfs_verify_agino(mp, agno, agino))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
}
if (agi->agi_pad32 != cpu_to_be32(0))
- xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xchk_block_set_corrupt(sc, sc->sa.agi_bp);
- xfs_scrub_agi_xref(sc);
+ xchk_agi_xref(sc);
out:
return error;
}
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 117eedac53df..f7568a4b5fe5 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -17,24 +17,31 @@
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
+#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_refcount.h"
+#include "xfs_refcount_btree.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
+#include "scrub/repair.h"
+#include "scrub/bitmap.h"
/* Superblock */
/* Repair the superblock. */
int
-xfs_repair_superblock(
- struct xfs_scrub_context *sc)
+xrep_superblock(
+ struct xfs_scrub *sc)
{
- struct xfs_mount *mp = sc->mp;
- struct xfs_buf *bp;
- xfs_agnumber_t agno;
- int error;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *bp;
+ xfs_agnumber_t agno;
+ int error;
/* Don't try to repair AG 0's sb; let xfs_repair deal with it. */
agno = sc->sm->sm_agno;
@@ -54,3 +61,873 @@ xfs_repair_superblock(
xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
return error;
}
+
+/* AGF */
+
+struct xrep_agf_allocbt {
+ struct xfs_scrub *sc;
+ xfs_agblock_t freeblks;
+ xfs_agblock_t longest;
+};
+
+/* Record free space shape information. */
+STATIC int
+xrep_agf_walk_allocbt(
+ struct xfs_btree_cur *cur,
+ struct xfs_alloc_rec_incore *rec,
+ void *priv)
+{
+ struct xrep_agf_allocbt *raa = priv;
+ int error = 0;
+
+ if (xchk_should_terminate(raa->sc, &error))
+ return error;
+
+ raa->freeblks += rec->ar_blockcount;
+ if (rec->ar_blockcount > raa->longest)
+ raa->longest = rec->ar_blockcount;
+ return error;
+}
+
+/* Does this AGFL block look sane? */
+STATIC int
+xrep_agf_check_agfl_block(
+ struct xfs_mount *mp,
+ xfs_agblock_t agbno,
+ void *priv)
+{
+ struct xfs_scrub *sc = priv;
+
+ if (!xfs_verify_agbno(mp, sc->sa.agno, agbno))
+ return -EFSCORRUPTED;
+ return 0;
+}
+
+/*
+ * Offset within the xrep_find_ag_btree array for each btree type. Avoid the
+ * XFS_BTNUM_ names here to avoid creating a sparse array.
+ */
+enum {
+ XREP_AGF_BNOBT = 0,
+ XREP_AGF_CNTBT,
+ XREP_AGF_RMAPBT,
+ XREP_AGF_REFCOUNTBT,
+ XREP_AGF_END,
+ XREP_AGF_MAX
+};
+
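+/*
+ * (Indexing fab[] with the global XFS_BTNUM_ values -- XFS_BTNUM_REFCi,
+ * for instance -- would leave unused holes for btree types that have no
+ * root in the AGF; the local indices above keep the array dense.)
+ */
+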
+/* Check a btree root candidate. */
+static inline bool
+xrep_check_btree_root(
+ struct xfs_scrub *sc,
+ struct xrep_find_ag_btree *fab)
+{
+ struct xfs_mount *mp = sc->mp;
+ xfs_agnumber_t agno = sc->sm->sm_agno;
+
+ return xfs_verify_agbno(mp, agno, fab->root) &&
+ fab->height <= XFS_BTREE_MAXLEVELS;
+}
+
+/*
+ * Given the btree roots described by *fab, find the roots, check them for
+ * sanity, and pass the root data back out via *fab.
+ *
+ * This is /also/ a chicken and egg problem because we have to use the rmapbt
+ * (rooted in the AGF) to find the btrees rooted in the AGF. We also have no
+ * idea if the btrees make any sense. If we hit obvious corruptions in those
+ * btrees we'll bail out.
+ */
+STATIC int
+xrep_agf_find_btrees(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp,
+ struct xrep_find_ag_btree *fab,
+ struct xfs_buf *agfl_bp)
+{
+ struct xfs_agf *old_agf = XFS_BUF_TO_AGF(agf_bp);
+ int error;
+
+ /* Go find the root data. */
+ error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
+ if (error)
+ return error;
+
+ /* We must find the bnobt, cntbt, and rmapbt roots. */
+ if (!xrep_check_btree_root(sc, &fab[XREP_AGF_BNOBT]) ||
+ !xrep_check_btree_root(sc, &fab[XREP_AGF_CNTBT]) ||
+ !xrep_check_btree_root(sc, &fab[XREP_AGF_RMAPBT]))
+ return -EFSCORRUPTED;
+
+ /*
+ * We relied on the rmapbt to reconstruct the AGF. If we get a
+ * different root then something's seriously wrong.
+ */
+ if (fab[XREP_AGF_RMAPBT].root !=
+ be32_to_cpu(old_agf->agf_roots[XFS_BTNUM_RMAPi]))
+ return -EFSCORRUPTED;
+
+ /* We must find the refcountbt root if that feature is enabled. */
+ if (xfs_sb_version_hasreflink(&sc->mp->m_sb) &&
+ !xrep_check_btree_root(sc, &fab[XREP_AGF_REFCOUNTBT]))
+ return -EFSCORRUPTED;
+
+ return 0;
+}
+
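+/*
+ * Sketch (hypothetical, not part of this patch) of the per-block matching
+ * that xrep_find_ag_btree_roots() performs while walking the OWN_AG blocks
+ * it finds via the rmapbt: compare each block's magic against the fab[]
+ * table and keep the highest-level match as the tentative root.  The
+ * helper name and exact signature here are illustrative only.
+ */
+static bool
+xrep_fab_record_candidate(
+	struct xrep_find_ag_btree	*fab,
+	uint32_t			magic,	/* cpu-endian block magic */
+	unsigned int		level,	/* btree level of this block */
+	xfs_agblock_t		agbno)
+{
+	if (magic != fab->magic)
+		return false;
+	/* A btree of height h has its root at level h - 1. */
+	if (level + 1 > fab->height) {
+		fab->root = agbno;
+		fab->height = level + 1;
+	}
+	return true;
+}
+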
+/*
+ * Reinitialize the AGF header, making an in-core copy of the old contents so
+ * that we know which in-core state needs to be reinitialized.
+ */
+STATIC void
+xrep_agf_init_header(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp,
+ struct xfs_agf *old_agf)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+
+ memcpy(old_agf, agf, sizeof(*old_agf));
+ memset(agf, 0, BBTOB(agf_bp->b_length));
+ agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
+ agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
+ agf->agf_seqno = cpu_to_be32(sc->sa.agno);
+ agf->agf_length = cpu_to_be32(xfs_ag_block_count(mp, sc->sa.agno));
+ agf->agf_flfirst = old_agf->agf_flfirst;
+ agf->agf_fllast = old_agf->agf_fllast;
+ agf->agf_flcount = old_agf->agf_flcount;
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
+
+ /* Mark the incore AGF data stale until we're done fixing things. */
+ ASSERT(sc->sa.pag->pagf_init);
+ sc->sa.pag->pagf_init = 0;
+}
+
+/* Set btree root information in an AGF. */
+STATIC void
+xrep_agf_set_roots(
+ struct xfs_scrub *sc,
+ struct xfs_agf *agf,
+ struct xrep_find_ag_btree *fab)
+{
+ agf->agf_roots[XFS_BTNUM_BNOi] =
+ cpu_to_be32(fab[XREP_AGF_BNOBT].root);
+ agf->agf_levels[XFS_BTNUM_BNOi] =
+ cpu_to_be32(fab[XREP_AGF_BNOBT].height);
+
+ agf->agf_roots[XFS_BTNUM_CNTi] =
+ cpu_to_be32(fab[XREP_AGF_CNTBT].root);
+ agf->agf_levels[XFS_BTNUM_CNTi] =
+ cpu_to_be32(fab[XREP_AGF_CNTBT].height);
+
+ agf->agf_roots[XFS_BTNUM_RMAPi] =
+ cpu_to_be32(fab[XREP_AGF_RMAPBT].root);
+ agf->agf_levels[XFS_BTNUM_RMAPi] =
+ cpu_to_be32(fab[XREP_AGF_RMAPBT].height);
+
+ if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
+ agf->agf_refcount_root =
+ cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].root);
+ agf->agf_refcount_level =
+ cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].height);
+ }
+}
+
+/* Update all AGF fields which derive from btree contents. */
+STATIC int
+xrep_agf_calc_from_btrees(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp)
+{
+ struct xrep_agf_allocbt raa = { .sc = sc };
+ struct xfs_btree_cur *cur = NULL;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t btreeblks;
+ xfs_agblock_t blocks;
+ int error;
+
+ /* Update the AGF counters from the bnobt. */
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
+ XFS_BTNUM_BNO);
+ error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
+ if (error)
+ goto err;
+ error = xfs_btree_count_blocks(cur, &blocks);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+ btreeblks = blocks - 1;
+ agf->agf_freeblks = cpu_to_be32(raa.freeblks);
+ agf->agf_longest = cpu_to_be32(raa.longest);
+
+ /* Update the AGF counters from the cntbt. */
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
+ XFS_BTNUM_CNT);
+ error = xfs_btree_count_blocks(cur, &blocks);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+ btreeblks += blocks - 1;
+
+ /* Update the AGF counters from the rmapbt. */
+ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
+ error = xfs_btree_count_blocks(cur, &blocks);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+ agf->agf_rmap_blocks = cpu_to_be32(blocks);
+ btreeblks += blocks - 1;
+
+ agf->agf_btreeblks = cpu_to_be32(btreeblks);
+
+ /* Update the AGF counters from the refcountbt. */
+ if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
+ sc->sa.agno);
+ error = xfs_btree_count_blocks(cur, &blocks);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+ agf->agf_refcount_blocks = cpu_to_be32(blocks);
+ }
+
+ return 0;
+err:
+ xfs_btree_del_cursor(cur, error);
+ return error;
+}
+
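+/*
+ * Worked example for the accounting above: a 3-block bnobt, a 2-block
+ * cntbt and a 5-block rmapbt yield agf_btreeblks = (3-1) + (2-1) + (5-1)
+ * = 7, since per-tree root blocks are not counted in agf_btreeblks;
+ * agf_rmap_blocks records the whole rmapbt size, 5.  (Illustrative
+ * numbers only.)
+ */
+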
+/* Commit the new AGF and reinitialize the incore state. */
+STATIC int
+xrep_agf_commit_new(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp)
+{
+ struct xfs_perag *pag;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+
+ /* Trigger fdblocks recalculation */
+ xfs_force_summary_recalc(sc->mp);
+
+ /* Write this to disk. */
+ xfs_trans_buf_set_type(sc->tp, agf_bp, XFS_BLFT_AGF_BUF);
+ xfs_trans_log_buf(sc->tp, agf_bp, 0, BBTOB(agf_bp->b_length) - 1);
+
+ /* Now reinitialize the in-core counters we changed. */
+ pag = sc->sa.pag;
+ pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
+ pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
+ pag->pagf_longest = be32_to_cpu(agf->agf_longest);
+ pag->pagf_levels[XFS_BTNUM_BNOi] =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
+ pag->pagf_levels[XFS_BTNUM_CNTi] =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
+ pag->pagf_levels[XFS_BTNUM_RMAPi] =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
+ pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
+ pag->pagf_init = 1;
+
+ return 0;
+}
+
+/* Repair the AGF. v5 filesystems only. */
+int
+xrep_agf(
+ struct xfs_scrub *sc)
+{
+ struct xrep_find_ag_btree fab[XREP_AGF_MAX] = {
+ [XREP_AGF_BNOBT] = {
+ .rmap_owner = XFS_RMAP_OWN_AG,
+ .buf_ops = &xfs_allocbt_buf_ops,
+ .magic = XFS_ABTB_CRC_MAGIC,
+ },
+ [XREP_AGF_CNTBT] = {
+ .rmap_owner = XFS_RMAP_OWN_AG,
+ .buf_ops = &xfs_allocbt_buf_ops,
+ .magic = XFS_ABTC_CRC_MAGIC,
+ },
+ [XREP_AGF_RMAPBT] = {
+ .rmap_owner = XFS_RMAP_OWN_AG,
+ .buf_ops = &xfs_rmapbt_buf_ops,
+ .magic = XFS_RMAP_CRC_MAGIC,
+ },
+ [XREP_AGF_REFCOUNTBT] = {
+ .rmap_owner = XFS_RMAP_OWN_REFC,
+ .buf_ops = &xfs_refcountbt_buf_ops,
+ .magic = XFS_REFC_CRC_MAGIC,
+ },
+ [XREP_AGF_END] = {
+ .buf_ops = NULL,
+ },
+ };
+ struct xfs_agf old_agf;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *agf_bp;
+ struct xfs_buf *agfl_bp;
+ struct xfs_agf *agf;
+ int error;
+
+ /* We require the rmapbt to rebuild anything. */
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ xchk_perag_get(sc->mp, &sc->sa);
+ /*
+ * Make sure we have the AGF buffer, as scrub might have decided it
+ * was corrupt after xfs_alloc_read_agf failed with -EFSCORRUPTED.
+ */
+ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
+ XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGF_DADDR(mp)),
+ XFS_FSS_TO_BB(mp, 1), 0, &agf_bp, NULL);
+ if (error)
+ return error;
+ agf_bp->b_ops = &xfs_agf_buf_ops;
+ agf = XFS_BUF_TO_AGF(agf_bp);
+
+ /*
+ * Load the AGFL so that we can screen out OWN_AG blocks that are on
+ * the AGFL now; these blocks might have once been part of the
+ * bno/cnt/rmap btrees but are not now. This is a chicken and egg
+ * problem: the AGF is corrupt, so we have to trust the AGFL contents
+ * because we can't do any serious cross-referencing with any of the
+ * btrees rooted in the AGF. If the AGFL contents are obviously bad
+ * then we'll bail out.
+ */
+ error = xfs_alloc_read_agfl(mp, sc->tp, sc->sa.agno, &agfl_bp);
+ if (error)
+ return error;
+
+ /*
+ * Spot-check the AGFL blocks; if they're obviously corrupt then
+ * there's nothing we can do but bail out.
+ */
+ error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(agf_bp), agfl_bp,
+ xrep_agf_check_agfl_block, sc);
+ if (error)
+ return error;
+
+ /*
+ * Find the AGF btree roots. This is also a chicken-and-egg situation;
+ * see the function for more details.
+ */
+ error = xrep_agf_find_btrees(sc, agf_bp, fab, agfl_bp);
+ if (error)
+ return error;
+
+ /* Start rewriting the header and implant the btrees we found. */
+ xrep_agf_init_header(sc, agf_bp, &old_agf);
+ xrep_agf_set_roots(sc, agf, fab);
+ error = xrep_agf_calc_from_btrees(sc, agf_bp);
+ if (error)
+ goto out_revert;
+
+ /* Commit the changes and reinitialize incore state. */
+ return xrep_agf_commit_new(sc, agf_bp);
+
+out_revert:
+ /* Mark the incore AGF state stale and revert the AGF. */
+ sc->sa.pag->pagf_init = 0;
+ memcpy(agf, &old_agf, sizeof(old_agf));
+ return error;
+}
+
+/* AGFL */
+
+struct xrep_agfl {
+ /* Bitmap of other OWN_AG metadata blocks. */
+ struct xfs_bitmap agmetablocks;
+
+ /* Bitmap of free space. */
+ struct xfs_bitmap *freesp;
+
+ struct xfs_scrub *sc;
+};
+
+/* Record all OWN_AG (free space btree) information from the rmap data. */
+STATIC int
+xrep_agfl_walk_rmap(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xrep_agfl *ra = priv;
+ xfs_fsblock_t fsb;
+ int error = 0;
+
+ if (xchk_should_terminate(ra->sc, &error))
+ return error;
+
+ /* Record all the OWN_AG blocks. */
+ if (rec->rm_owner == XFS_RMAP_OWN_AG) {
+ fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
+ rec->rm_startblock);
+ error = xfs_bitmap_set(ra->freesp, fsb, rec->rm_blockcount);
+ if (error)
+ return error;
+ }
+
+ return xfs_bitmap_set_btcur_path(&ra->agmetablocks, cur);
+}
+
+/*
+ * Map out all the non-AGFL OWN_AG space in this AG so that we can deduce
+ * which blocks belong to the AGFL.
+ *
+ * Compute the set of old AGFL blocks by subtracting from the list of OWN_AG
+ * blocks the list of blocks owned by all other OWN_AG metadata (bnobt, cntbt,
+ * rmapbt). These are the old AGFL blocks, so return that list and the number
+ * of blocks we're actually going to put back on the AGFL.
+ */
+STATIC int
+xrep_agfl_collect_blocks(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp,
+ struct xfs_bitmap *agfl_extents,
+ xfs_agblock_t *flcount)
+{
+ struct xrep_agfl ra;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_btree_cur *cur;
+ struct xfs_bitmap_range *br;
+ struct xfs_bitmap_range *n;
+ int error;
+
+ ra.sc = sc;
+ ra.freesp = agfl_extents;
+ xfs_bitmap_init(&ra.agmetablocks);
+
+ /* Find all space used by the free space btrees & rmapbt. */
+ cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
+ error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+
+ /* Find all blocks currently being used by the bnobt. */
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
+ XFS_BTNUM_BNO);
+ error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+
+ /* Find all blocks currently being used by the cntbt. */
+ cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno,
+ XFS_BTNUM_CNT);
+ error = xfs_bitmap_set_btblocks(&ra.agmetablocks, cur);
+ if (error)
+ goto err;
+
+ xfs_btree_del_cursor(cur, error);
+
+ /*
+ * Drop the freesp meta blocks that are in use by btrees.
+ * The remaining blocks /should/ be AGFL blocks.
+ */
+ error = xfs_bitmap_disunion(agfl_extents, &ra.agmetablocks);
+ xfs_bitmap_destroy(&ra.agmetablocks);
+ if (error)
+ return error;
+
+ /*
+ * Calculate the new AGFL size. If we found more blocks than fit in
+ * the AGFL we'll free them later.
+ */
+ *flcount = 0;
+ for_each_xfs_bitmap_extent(br, n, agfl_extents) {
+ *flcount += br->len;
+ if (*flcount > xfs_agfl_size(mp))
+ break;
+ }
+ if (*flcount > xfs_agfl_size(mp))
+ *flcount = xfs_agfl_size(mp);
+ return 0;
+
+err:
+ xfs_bitmap_destroy(&ra.agmetablocks);
+ xfs_btree_del_cursor(cur, error);
+ return error;
+}
+
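+/*
+ * Worked example for the disunion above: if the rmapbt reports OWN_AG
+ * extents covering agbnos 10-19 while the bnobt/cntbt/rmapbt blocks
+ * themselves occupy agbnos 10-13, subtracting the btree blocks leaves
+ * 14-19 -- which can only be (former) AGFL blocks.  (Illustrative
+ * numbers only.)
+ */
+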
+/* Update the AGF and reset the in-core state. */
+STATIC void
+xrep_agfl_update_agf(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agf_bp,
+ xfs_agblock_t flcount)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agf_bp);
+
+ ASSERT(flcount <= xfs_agfl_size(sc->mp));
+
+ /* Trigger fdblocks recalculation */
+ xfs_force_summary_recalc(sc->mp);
+
+ /* Update the AGF counters. */
+ if (sc->sa.pag->pagf_init)
+ sc->sa.pag->pagf_flcount = flcount;
+ agf->agf_flfirst = cpu_to_be32(0);
+ agf->agf_flcount = cpu_to_be32(flcount);
+ agf->agf_fllast = cpu_to_be32(flcount - 1);
+
+ xfs_alloc_log_agf(sc->tp, agf_bp,
+ XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
+}
+
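+/*
+ * For example, with flcount == 3 the rebuilt free list occupies AGFL
+ * slots 0, 1 and 2: agf_flfirst == 0, agf_fllast == 2, and no
+ * wrap-around, because the repair packs entries from slot 0.
+ */
+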
+/* Write out a totally new AGFL. */
+STATIC void
+xrep_agfl_init_header(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agfl_bp,
+ struct xfs_bitmap *agfl_extents,
+ xfs_agblock_t flcount)
+{
+ struct xfs_mount *mp = sc->mp;
+ __be32 *agfl_bno;
+ struct xfs_bitmap_range *br;
+ struct xfs_bitmap_range *n;
+ struct xfs_agfl *agfl;
+ xfs_agblock_t agbno;
+ unsigned int fl_off;
+
+ ASSERT(flcount <= xfs_agfl_size(mp));
+
+ /*
+ * Start rewriting the header by setting the bno[] array to
+ * NULLAGBLOCK, then setting AGFL header fields.
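+	 * The memset to 0xFF below does this in one step: NULLAGBLOCK is
+	 * (xfs_agblock_t)-1, so every byte of every __be32 bno[] entry
+	 * becomes 0xFF.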
+ */
+ agfl = XFS_BUF_TO_AGFL(agfl_bp);
+ memset(agfl, 0xFF, BBTOB(agfl_bp->b_length));
+ agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
+ agfl->agfl_seqno = cpu_to_be32(sc->sa.agno);
+ uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
+
+ /*
+ * Fill the AGFL with the remaining blocks. If agfl_extents has more
+ * blocks than fit in the AGFL, they will be freed in a subsequent
+ * step.
+ */
+ fl_off = 0;
+ agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp);
+ for_each_xfs_bitmap_extent(br, n, agfl_extents) {
+ agbno = XFS_FSB_TO_AGBNO(mp, br->start);
+
+ trace_xrep_agfl_insert(mp, sc->sa.agno, agbno, br->len);
+
+ while (br->len > 0 && fl_off < flcount) {
+ agfl_bno[fl_off] = cpu_to_be32(agbno);
+ fl_off++;
+ agbno++;
+
+ /*
+ * We've now used br->start by putting it in the AGFL,
+ * so bump br so that we don't reap the block later.
+ */
+ br->start++;
+ br->len--;
+ }
+
+ if (br->len)
+ break;
+ list_del(&br->list);
+ kmem_free(br);
+ }
+
+ /* Write new AGFL to disk. */
+ xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF);
+ xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1);
+}
+
+/* Repair the AGFL. */
+int
+xrep_agfl(
+ struct xfs_scrub *sc)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_bitmap agfl_extents;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *agf_bp;
+ struct xfs_buf *agfl_bp;
+ xfs_agblock_t flcount;
+ int error;
+
+ /* We require the rmapbt to rebuild anything. */
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ xchk_perag_get(sc->mp, &sc->sa);
+ xfs_bitmap_init(&agfl_extents);
+
+ /*
+ * Read the AGF so that we can query the rmapbt. We hope that there's
+ * nothing wrong with the AGF, but all the AG header repair functions
+ * have this chicken-and-egg problem.
+ */
+ error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
+ if (error)
+ return error;
+ if (!agf_bp)
+ return -ENOMEM;
+
+ /*
+ * Make sure we have the AGFL buffer, as scrub might have decided it
+ * was corrupt after xfs_alloc_read_agfl failed with -EFSCORRUPTED.
+ */
+ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
+ XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGFL_DADDR(mp)),
+ XFS_FSS_TO_BB(mp, 1), 0, &agfl_bp, NULL);
+ if (error)
+ return error;
+ agfl_bp->b_ops = &xfs_agfl_buf_ops;
+
+ /* Gather all the extents we're going to put on the new AGFL. */
+ error = xrep_agfl_collect_blocks(sc, agf_bp, &agfl_extents, &flcount);
+ if (error)
+ goto err;
+
+ /*
+ * Update AGF and AGFL. We reset the global free block counter when
+ * we adjust the AGF flcount (which can fail) so avoid updating any
+ * buffers until we know that part works.
+ */
+ xrep_agfl_update_agf(sc, agf_bp, flcount);
+ xrep_agfl_init_header(sc, agfl_bp, &agfl_extents, flcount);
+
+ /*
+ * Ok, the AGFL should be ready to go now. Roll the transaction to
+ * make the new AGFL permanent before we start using it to return
+ * freespace overflow to the freespace btrees.
+ */
+ sc->sa.agf_bp = agf_bp;
+ sc->sa.agfl_bp = agfl_bp;
+ error = xrep_roll_ag_trans(sc);
+ if (error)
+ goto err;
+
+ /* Dump any AGFL overflow. */
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
+ return xrep_reap_extents(sc, &agfl_extents, &oinfo, XFS_AG_RESV_AGFL);
+err:
+ xfs_bitmap_destroy(&agfl_extents);
+ return error;
+}
+
+/* AGI */
+
+/*
+ * Offset within the xrep_find_ag_btree array for each btree type. Avoid the
+ * XFS_BTNUM_ names here to avoid creating a sparse array.
+ */
+enum {
+ XREP_AGI_INOBT = 0,
+ XREP_AGI_FINOBT,
+ XREP_AGI_END,
+ XREP_AGI_MAX
+};
+
+/*
+ * Given the inode btree roots described by *fab, find the roots, check them
+ * for sanity, and pass the root data back out via *fab.
+ */
+STATIC int
+xrep_agi_find_btrees(
+ struct xfs_scrub *sc,
+ struct xrep_find_ag_btree *fab)
+{
+ struct xfs_buf *agf_bp;
+ struct xfs_mount *mp = sc->mp;
+ int error;
+
+ /* Read the AGF. */
+ error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
+ if (error)
+ return error;
+ if (!agf_bp)
+ return -ENOMEM;
+
+ /* Find the btree roots. */
+ error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
+ if (error)
+ return error;
+
+ /* We must find the inobt root. */
+ if (!xrep_check_btree_root(sc, &fab[XREP_AGI_INOBT]))
+ return -EFSCORRUPTED;
+
+ /* We must find the finobt root if that feature is enabled. */
+ if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
+ !xrep_check_btree_root(sc, &fab[XREP_AGI_FINOBT]))
+ return -EFSCORRUPTED;
+
+ return 0;
+}
+
+/*
+ * Reinitialize the AGI header, making an in-core copy of the old contents so
+ * that we know which in-core state needs to be reinitialized.
+ */
+STATIC void
+xrep_agi_init_header(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agi_bp,
+ struct xfs_agi *old_agi)
+{
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
+ struct xfs_mount *mp = sc->mp;
+
+ memcpy(old_agi, agi, sizeof(*old_agi));
+ memset(agi, 0, BBTOB(agi_bp->b_length));
+ agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
+ agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
+ agi->agi_seqno = cpu_to_be32(sc->sa.agno);
+ agi->agi_length = cpu_to_be32(xfs_ag_block_count(mp, sc->sa.agno));
+ agi->agi_newino = cpu_to_be32(NULLAGINO);
+ agi->agi_dirino = cpu_to_be32(NULLAGINO);
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
+
+ /* We don't know how to fix the unlinked list yet. */
+ memcpy(&agi->agi_unlinked, &old_agi->agi_unlinked,
+ sizeof(agi->agi_unlinked));
+
+	/* Mark the incore AGI data stale until we're done fixing things. */
+ ASSERT(sc->sa.pag->pagi_init);
+ sc->sa.pag->pagi_init = 0;
+}
+
+/* Set btree root information in an AGI. */
+STATIC void
+xrep_agi_set_roots(
+ struct xfs_scrub *sc,
+ struct xfs_agi *agi,
+ struct xrep_find_ag_btree *fab)
+{
+ agi->agi_root = cpu_to_be32(fab[XREP_AGI_INOBT].root);
+ agi->agi_level = cpu_to_be32(fab[XREP_AGI_INOBT].height);
+
+ if (xfs_sb_version_hasfinobt(&sc->mp->m_sb)) {
+ agi->agi_free_root = cpu_to_be32(fab[XREP_AGI_FINOBT].root);
+ agi->agi_free_level = cpu_to_be32(fab[XREP_AGI_FINOBT].height);
+ }
+}
+
+/* Update the AGI counters. */
+STATIC int
+xrep_agi_calc_from_btrees(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agi_bp)
+{
+ struct xfs_btree_cur *cur;
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
+ struct xfs_mount *mp = sc->mp;
+ xfs_agino_t count;
+ xfs_agino_t freecount;
+ int error;
+
+ cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp, sc->sa.agno,
+ XFS_BTNUM_INO);
+ error = xfs_ialloc_count_inodes(cur, &count, &freecount);
+ if (error)
+ goto err;
+ xfs_btree_del_cursor(cur, error);
+
+ agi->agi_count = cpu_to_be32(count);
+ agi->agi_freecount = cpu_to_be32(freecount);
+ return 0;
+err:
+ xfs_btree_del_cursor(cur, error);
+ return error;
+}
+
+/* Trigger reinitialization of the in-core data. */
+STATIC int
+xrep_agi_commit_new(
+ struct xfs_scrub *sc,
+ struct xfs_buf *agi_bp)
+{
+ struct xfs_perag *pag;
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(agi_bp);
+
+ /* Trigger inode count recalculation */
+ xfs_force_summary_recalc(sc->mp);
+
+ /* Write this to disk. */
+ xfs_trans_buf_set_type(sc->tp, agi_bp, XFS_BLFT_AGI_BUF);
+ xfs_trans_log_buf(sc->tp, agi_bp, 0, BBTOB(agi_bp->b_length) - 1);
+
+ /* Now reinitialize the in-core counters if necessary. */
+ pag = sc->sa.pag;
+ pag->pagi_count = be32_to_cpu(agi->agi_count);
+ pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
+ pag->pagi_init = 1;
+
+ return 0;
+}
+
+/* Repair the AGI. */
+int
+xrep_agi(
+ struct xfs_scrub *sc)
+{
+ struct xrep_find_ag_btree fab[XREP_AGI_MAX] = {
+ [XREP_AGI_INOBT] = {
+ .rmap_owner = XFS_RMAP_OWN_INOBT,
+ .buf_ops = &xfs_inobt_buf_ops,
+ .magic = XFS_IBT_CRC_MAGIC,
+ },
+ [XREP_AGI_FINOBT] = {
+ .rmap_owner = XFS_RMAP_OWN_INOBT,
+ .buf_ops = &xfs_inobt_buf_ops,
+ .magic = XFS_FIBT_CRC_MAGIC,
+ },
+ [XREP_AGI_END] = {
+ .buf_ops = NULL
+ },
+ };
+ struct xfs_agi old_agi;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *agi_bp;
+ struct xfs_agi *agi;
+ int error;
+
+ /* We require the rmapbt to rebuild anything. */
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ xchk_perag_get(sc->mp, &sc->sa);
+ /*
+ * Make sure we have the AGI buffer, as scrub might have decided it
+ * was corrupt after xfs_ialloc_read_agi failed with -EFSCORRUPTED.
+ */
+ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
+ XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGI_DADDR(mp)),
+ XFS_FSS_TO_BB(mp, 1), 0, &agi_bp, NULL);
+ if (error)
+ return error;
+ agi_bp->b_ops = &xfs_agi_buf_ops;
+ agi = XFS_BUF_TO_AGI(agi_bp);
+
+ /* Find the AGI btree roots. */
+ error = xrep_agi_find_btrees(sc, fab);
+ if (error)
+ return error;
+
+ /* Start rewriting the header and implant the btrees we found. */
+ xrep_agi_init_header(sc, agi_bp, &old_agi);
+ xrep_agi_set_roots(sc, agi, fab);
+ error = xrep_agi_calc_from_btrees(sc, agi_bp);
+ if (error)
+ goto out_revert;
+
+ /* Reinitialize in-core state. */
+ return xrep_agi_commit_new(sc, agi_bp);
+
+out_revert:
+ /* Mark the incore AGI state stale and revert the AGI. */
+ sc->sa.pag->pagi_init = 0;
+ memcpy(agi, &old_agi, sizeof(old_agi));
+ return error;
+}
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 50e4f7fa06f0..036b5c7021eb 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -28,11 +28,11 @@
* Set us up to scrub free space btrees.
*/
int
-xfs_scrub_setup_ag_allocbt(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_ag_allocbt(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_ag_btree(sc, ip, false);
+ return xchk_setup_ag_btree(sc, ip, false);
}
/* Free space btree scrubber. */
@@ -41,71 +41,71 @@ xfs_scrub_setup_ag_allocbt(
* bnobt/cntbt record, respectively.
*/
STATIC void
-xfs_scrub_allocbt_xref_other(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_allocbt_xref_other(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
- struct xfs_btree_cur **pcur;
- xfs_agblock_t fbno;
- xfs_extlen_t flen;
- int has_otherrec;
- int error;
+ struct xfs_btree_cur **pcur;
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ int has_otherrec;
+ int error;
if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT)
pcur = &sc->sa.cnt_cur;
else
pcur = &sc->sa.bno_cur;
- if (!*pcur || xfs_scrub_skip_xref(sc->sm))
+ if (!*pcur || xchk_skip_xref(sc->sm))
return;
error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec);
- if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ if (!xchk_should_check_xref(sc, &error, pcur))
return;
if (!has_otherrec) {
- xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ xchk_btree_xref_set_corrupt(sc, *pcur, 0);
return;
}
error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec);
- if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ if (!xchk_should_check_xref(sc, &error, pcur))
return;
if (!has_otherrec) {
- xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ xchk_btree_xref_set_corrupt(sc, *pcur, 0);
return;
}
if (fbno != agbno || flen != len)
- xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_allocbt_xref(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_allocbt_xref(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_allocbt_xref_other(sc, agbno, len);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
- xfs_scrub_xref_has_no_owner(sc, agbno, len);
- xfs_scrub_xref_is_not_shared(sc, agbno, len);
+ xchk_allocbt_xref_other(sc, agbno, len);
+ xchk_xref_is_not_inode_chunk(sc, agbno, len);
+ xchk_xref_has_no_owner(sc, agbno, len);
+ xchk_xref_is_not_shared(sc, agbno, len);
}
/* Scrub a bnobt/cntbt record. */
STATIC int
-xfs_scrub_allocbt_rec(
- struct xfs_scrub_btree *bs,
- union xfs_btree_rec *rec)
+xchk_allocbt_rec(
+ struct xchk_btree *bs,
+ union xfs_btree_rec *rec)
{
- struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
- xfs_agblock_t bno;
- xfs_extlen_t len;
- int error = 0;
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ int error = 0;
bno = be32_to_cpu(rec->alloc.ar_startblock);
len = be32_to_cpu(rec->alloc.ar_blockcount);
@@ -113,57 +113,57 @@ xfs_scrub_allocbt_rec(
if (bno + len <= bno ||
!xfs_verify_agbno(mp, agno, bno) ||
!xfs_verify_agbno(mp, agno, bno + len - 1))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- xfs_scrub_allocbt_xref(bs->sc, bno, len);
+ xchk_allocbt_xref(bs->sc, bno, len);
return error;
}
/* Scrub the freespace btrees for some AG. */
STATIC int
-xfs_scrub_allocbt(
- struct xfs_scrub_context *sc,
- xfs_btnum_t which)
+xchk_allocbt(
+ struct xfs_scrub *sc,
+ xfs_btnum_t which)
{
- struct xfs_owner_info oinfo;
- struct xfs_btree_cur *cur;
+ struct xfs_owner_info oinfo;
+ struct xfs_btree_cur *cur;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur;
- return xfs_scrub_btree(sc, cur, xfs_scrub_allocbt_rec, &oinfo, NULL);
+ return xchk_btree(sc, cur, xchk_allocbt_rec, &oinfo, NULL);
}
int
-xfs_scrub_bnobt(
- struct xfs_scrub_context *sc)
+xchk_bnobt(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_allocbt(sc, XFS_BTNUM_BNO);
+ return xchk_allocbt(sc, XFS_BTNUM_BNO);
}
int
-xfs_scrub_cntbt(
- struct xfs_scrub_context *sc)
+xchk_cntbt(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_allocbt(sc, XFS_BTNUM_CNT);
+ return xchk_allocbt(sc, XFS_BTNUM_CNT);
}
/* xref check that the extent is not free */
void
-xfs_scrub_xref_is_used_space(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_xref_is_used_space(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
- bool is_freesp;
- int error;
+ bool is_freesp;
+ int error;
- if (!sc->sa.bno_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
return;
error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
return;
if (is_freesp)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
}
diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
index de51cf8a8516..81d5e90547a1 100644
--- a/fs/xfs/scrub/attr.c
+++ b/fs/xfs/scrub/attr.c
@@ -32,11 +32,11 @@
/* Set us up to scrub an inode's extended attributes. */
int
-xfs_scrub_setup_xattr(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_xattr(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- size_t sz;
+ size_t sz;
/*
* Allocate the buffer without the inode lock held. We need enough
@@ -50,14 +50,14 @@ xfs_scrub_setup_xattr(
if (!sc->buf)
return -ENOMEM;
- return xfs_scrub_setup_inode_contents(sc, ip, 0);
+ return xchk_setup_inode_contents(sc, ip, 0);
}
/* Extended Attributes */
-struct xfs_scrub_xattr {
+struct xchk_xattr {
struct xfs_attr_list_context context;
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
};
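/*
 * Note: the attr list walker hands its callback only the embedded
 * xfs_attr_list_context, so xchk_xattr_listent() below uses container_of()
 * to recover this xchk_xattr wrapper and reach the scrub state in ->sc.
 */
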
/*
@@ -69,22 +69,22 @@ struct xfs_scrub_xattr {
* or if we get more or less data than we expected.
*/
static void
-xfs_scrub_xattr_listent(
+xchk_xattr_listent(
struct xfs_attr_list_context *context,
int flags,
unsigned char *name,
int namelen,
int valuelen)
{
- struct xfs_scrub_xattr *sx;
+ struct xchk_xattr *sx;
struct xfs_da_args args = { NULL };
int error = 0;
- sx = container_of(context, struct xfs_scrub_xattr, context);
+ sx = container_of(context, struct xchk_xattr, context);
if (flags & XFS_ATTR_INCOMPLETE) {
/* Incomplete attr key, just mark the inode for preening. */
- xfs_scrub_ino_set_preen(sx->sc, context->dp->i_ino);
+ xchk_ino_set_preen(sx->sc, context->dp->i_ino);
return;
}
@@ -106,11 +106,11 @@ xfs_scrub_xattr_listent(
error = xfs_attr_get_ilocked(context->dp, &args);
if (error == -EEXIST)
error = 0;
- if (!xfs_scrub_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
+ if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
&error))
goto fail_xref;
if (args.valuelen != valuelen)
- xfs_scrub_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
+ xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
args.blkno);
fail_xref:
if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@@ -126,14 +126,14 @@ fail_xref:
* the smallest address
*/
STATIC bool
-xfs_scrub_xattr_set_map(
- struct xfs_scrub_context *sc,
- unsigned long *map,
- unsigned int start,
- unsigned int len)
+xchk_xattr_set_map(
+ struct xfs_scrub *sc,
+ unsigned long *map,
+ unsigned int start,
+ unsigned int len)
{
- unsigned int mapsize = sc->mp->m_attr_geo->blksize;
- bool ret = true;
+ unsigned int mapsize = sc->mp->m_attr_geo->blksize;
+ bool ret = true;
if (start >= mapsize)
return false;
@@ -154,8 +154,8 @@ xfs_scrub_xattr_set_map(
* attr freemap has problems or points to used space.
*/
STATIC bool
-xfs_scrub_xattr_check_freemap(
- struct xfs_scrub_context *sc,
+xchk_xattr_check_freemap(
+ struct xfs_scrub *sc,
unsigned long *map,
struct xfs_attr3_icleaf_hdr *leafhdr)
{
@@ -168,7 +168,7 @@ xfs_scrub_xattr_check_freemap(
freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize);
bitmap_zero(freemap, mapsize);
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
- if (!xfs_scrub_xattr_set_map(sc, freemap,
+ if (!xchk_xattr_set_map(sc, freemap,
leafhdr->freemap[i].base,
leafhdr->freemap[i].size))
return false;
@@ -184,8 +184,8 @@ xfs_scrub_xattr_check_freemap(
* Returns the number of bytes used for the name/value data.
*/
STATIC void
-xfs_scrub_xattr_entry(
- struct xfs_scrub_da_btree *ds,
+xchk_xattr_entry(
+ struct xchk_da_btree *ds,
int level,
char *buf_end,
struct xfs_attr_leafblock *leaf,
@@ -204,17 +204,17 @@ xfs_scrub_xattr_entry(
unsigned int namesize;
if (ent->pad2 != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
/* Hash values in order? */
if (be32_to_cpu(ent->hashval) < *last_hashval)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
*last_hashval = be32_to_cpu(ent->hashval);
nameidx = be16_to_cpu(ent->nameidx);
if (nameidx < leafhdr->firstused ||
nameidx >= mp->m_attr_geo->blksize) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
return;
}
@@ -225,27 +225,27 @@ xfs_scrub_xattr_entry(
be16_to_cpu(lentry->valuelen));
name_end = (char *)lentry + namesize;
if (lentry->namelen == 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
} else {
rentry = xfs_attr3_leaf_name_remote(leaf, idx);
namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
name_end = (char *)rentry + namesize;
if (rentry->namelen == 0 || rentry->valueblk == 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
}
if (name_end > buf_end)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
- if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
- xfs_scrub_da_set_corrupt(ds, level);
+ if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
+ xchk_da_set_corrupt(ds, level);
if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
*usedbytes += namesize;
}
/* Scrub an attribute leaf. */
STATIC int
-xfs_scrub_xattr_block(
- struct xfs_scrub_da_btree *ds,
+xchk_xattr_block(
+ struct xchk_da_btree *ds,
int level)
{
struct xfs_attr3_icleaf_hdr leafhdr;
@@ -275,10 +275,10 @@ xfs_scrub_xattr_block(
if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
leaf->hdr.info.hdr.pad != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
} else {
if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
}
/* Check the leaf header */
@@ -286,44 +286,44 @@ xfs_scrub_xattr_block(
hdrsize = xfs_attr3_leaf_hdr_size(leaf);
if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
if (leafhdr.firstused > mp->m_attr_geo->blksize)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
if (leafhdr.firstused < hdrsize)
- xfs_scrub_da_set_corrupt(ds, level);
- if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
+ if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
+ xchk_da_set_corrupt(ds, level);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
entries = xfs_attr3_leaf_entryp(leaf);
if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
/* Mark the leaf entry itself. */
off = (char *)ent - (char *)leaf;
- if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, off,
+ if (!xchk_xattr_set_map(ds->sc, usedmap, off,
sizeof(xfs_attr_leaf_entry_t))) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out;
}
/* Check the entry and nameval. */
- xfs_scrub_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
+ xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
usedmap, ent, i, &usedbytes, &last_hashval);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
}
- if (!xfs_scrub_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
- xfs_scrub_da_set_corrupt(ds, level);
+ if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
+ xchk_da_set_corrupt(ds, level);
if (leafhdr.usedbytes != usedbytes)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
out:
return 0;
@@ -331,8 +331,8 @@ out:
/* Scrub an attribute btree record. */
STATIC int
-xfs_scrub_xattr_rec(
- struct xfs_scrub_da_btree *ds,
+xchk_xattr_rec(
+ struct xchk_da_btree *ds,
int level,
void *rec)
{
@@ -352,14 +352,14 @@ xfs_scrub_xattr_rec(
blk = &ds->state->path.blk[level];
/* Check the whole block, if necessary. */
- error = xfs_scrub_xattr_block(ds, level);
+ error = xchk_xattr_block(ds, level);
if (error)
goto out;
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
/* Check the hash of the entry. */
- error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval);
+ error = xchk_da_btree_hash(ds, level, &ent->hashval);
if (error)
goto out;
@@ -368,7 +368,7 @@ xfs_scrub_xattr_rec(
hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
nameidx = be16_to_cpu(ent->nameidx);
if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out;
}
@@ -377,12 +377,12 @@ xfs_scrub_xattr_rec(
badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
XFS_ATTR_INCOMPLETE);
if ((ent->flags & badflags) != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
if (ent->flags & XFS_ATTR_LOCAL) {
lentry = (struct xfs_attr_leaf_name_local *)
(((char *)bp->b_addr) + nameidx);
if (lentry->namelen <= 0) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out;
}
calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
@@ -390,13 +390,13 @@ xfs_scrub_xattr_rec(
rentry = (struct xfs_attr_leaf_name_remote *)
(((char *)bp->b_addr) + nameidx);
if (rentry->namelen <= 0) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out;
}
calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
}
if (calc_hash != hash)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
out:
return error;
@@ -404,10 +404,10 @@ out:
/* Scrub the extended attribute metadata. */
int
-xfs_scrub_xattr(
- struct xfs_scrub_context *sc)
+xchk_xattr(
+ struct xfs_scrub *sc)
{
- struct xfs_scrub_xattr sx;
+ struct xchk_xattr sx;
struct attrlist_cursor_kern cursor = { 0 };
xfs_dablk_t last_checked = -1U;
int error = 0;
@@ -417,7 +417,7 @@ xfs_scrub_xattr(
memset(&sx, 0, sizeof(sx));
/* Check attribute tree structure */
- error = xfs_scrub_da_btree(sc, XFS_ATTR_FORK, xfs_scrub_xattr_rec,
+ error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
&last_checked);
if (error)
goto out;
@@ -429,7 +429,7 @@ xfs_scrub_xattr(
sx.context.dp = sc->ip;
sx.context.cursor = &cursor;
sx.context.resynch = 1;
- sx.context.put_listent = xfs_scrub_xattr_listent;
+ sx.context.put_listent = xchk_xattr_listent;
sx.context.tp = sc->tp;
sx.context.flags = ATTR_INCOMPLETE;
sx.sc = sc;
@@ -438,7 +438,7 @@ xfs_scrub_xattr(
* Look up every xattr in this file by name.
*
* Use the backend implementation of xfs_attr_list to call
- * xfs_scrub_xattr_listent on every attribute key in this inode.
+ * xchk_xattr_listent on every attribute key in this inode.
* In other words, we use the same iterator/callback mechanism
* that listattr uses to scrub extended attributes, though in our
* _listent function, we check the value of the attribute.
@@ -451,7 +451,7 @@ xfs_scrub_xattr(
* locking order.
*/
error = xfs_attr_list_int_ilocked(&sx.context);
- if (!xfs_scrub_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
+ if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
goto out;
out:
return error;
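
The put_listent hook is the whole trick here: the existing attr-list walker owns the iteration, and scrub only supplies a per-key callback plus a context. A minimal userspace sketch of that callback shape — not part of this patch, with all demo_* names invented for illustration:

#include <stdio.h>
#include <string.h>

struct demo_list_context {
	void (*put_listent)(struct demo_list_context *ctx,
			    const char *name, int namelen);
	int nchecked;			/* scratch state for the callback */
};

/* Analogue of xchk_xattr_listent: validate one attribute key. */
static void demo_check_xattr(struct demo_list_context *ctx,
			     const char *name, int namelen)
{
	if (namelen == 0)
		printf("corrupt: empty xattr name\n");
	ctx->nchecked++;
}

/* Stand-in for xfs_attr_list_int_ilocked() walking the real keys. */
static void demo_list_xattrs(struct demo_list_context *ctx)
{
	static const char *keys[] = { "user.a", "user.b" };
	size_t i;

	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
		ctx->put_listent(ctx, keys[i], (int)strlen(keys[i]));
}

int main(void)
{
	struct demo_list_context ctx = { .put_listent = demo_check_xattr };

	demo_list_xattrs(&ctx);
	printf("checked %d keys\n", ctx.nchecked);
	return 0;
}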
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
new file mode 100644
index 000000000000..fdadc9e1dc49
--- /dev/null
+++ b/fs/xfs/scrub/bitmap.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_btree.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+#include "scrub/repair.h"
+#include "scrub/bitmap.h"
+
+/*
+ * Set a range of this bitmap. Caller must ensure the range is not set.
+ *
+ * This is the logical equivalent of bitmap |= mask(start, len).
+ */
+int
+xfs_bitmap_set(
+ struct xfs_bitmap *bitmap,
+ uint64_t start,
+ uint64_t len)
+{
+ struct xfs_bitmap_range *bmr;
+
+ bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
+ if (!bmr)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&bmr->list);
+ bmr->start = start;
+ bmr->len = len;
+ list_add_tail(&bmr->list, &bitmap->list);
+
+ return 0;
+}
+
+/* Free everything related to this bitmap. */
+void
+xfs_bitmap_destroy(
+ struct xfs_bitmap *bitmap)
+{
+ struct xfs_bitmap_range *bmr;
+ struct xfs_bitmap_range *n;
+
+ for_each_xfs_bitmap_extent(bmr, n, bitmap) {
+ list_del(&bmr->list);
+ kmem_free(bmr);
+ }
+}
+
+/* Set up a per-AG block bitmap. */
+void
+xfs_bitmap_init(
+ struct xfs_bitmap *bitmap)
+{
+ INIT_LIST_HEAD(&bitmap->list);
+}
+
+/* Compare two bitmap ranges. */
+static int
+xfs_bitmap_range_cmp(
+ void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ struct xfs_bitmap_range *ap;
+ struct xfs_bitmap_range *bp;
+
+ ap = container_of(a, struct xfs_bitmap_range, list);
+ bp = container_of(b, struct xfs_bitmap_range, list);
+
+ if (ap->start > bp->start)
+ return 1;
+ if (ap->start < bp->start)
+ return -1;
+ return 0;
+}
+
+/*
+ * Remove all the blocks mentioned in @sub from the extents in @bitmap.
+ *
+ * The intent is that callers will iterate the rmapbt for all of its records
+ * for a given owner to generate @bitmap; and iterate all the blocks of the
+ * metadata structures that are not being rebuilt and have the same rmapbt
+ * owner to generate @sub. This routine subtracts all the extents
+ * mentioned in sub from all the extents linked in @bitmap, which leaves
+ * @bitmap as the list of blocks that are not accounted for, which we assume
+ * are the dead blocks of the old metadata structure. The blocks mentioned in
+ * @bitmap can be reaped.
+ *
+ * This is the logical equivalent of bitmap &= ~sub.
+ */
+#define LEFT_ALIGNED (1 << 0)
+#define RIGHT_ALIGNED (1 << 1)
+int
+xfs_bitmap_disunion(
+ struct xfs_bitmap *bitmap,
+ struct xfs_bitmap *sub)
+{
+ struct list_head *lp;
+ struct xfs_bitmap_range *br;
+ struct xfs_bitmap_range *new_br;
+ struct xfs_bitmap_range *sub_br;
+ uint64_t sub_start;
+ uint64_t sub_len;
+ int state;
+ int error = 0;
+
+ if (list_empty(&bitmap->list) || list_empty(&sub->list))
+ return 0;
+ ASSERT(!list_empty(&sub->list));
+
+ list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
+ list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);
+
+ /*
+ * Now that we've sorted both lists, we iterate bitmap once, rolling
+ * forward through sub and/or bitmap as necessary until we find an
+ * overlap or reach the end of either list. We do not reset lp to the
+ * head of bitmap nor do we reset sub_br to the head of sub. The
+ * list traversal is similar to merge sort, but we're deleting
+ * instead. In this manner we avoid O(n^2) operations.
+ */
+ sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
+ list);
+ lp = bitmap->list.next;
+ while (lp != &bitmap->list) {
+ br = list_entry(lp, struct xfs_bitmap_range, list);
+
+ /*
+ * Advance sub_br and/or br until we find a pair that
+ * intersect or we run out of extents.
+ */
+ while (sub_br->start + sub_br->len <= br->start) {
+ if (list_is_last(&sub_br->list, &sub->list))
+ goto out;
+ sub_br = list_next_entry(sub_br, list);
+ }
+ if (sub_br->start >= br->start + br->len) {
+ lp = lp->next;
+ continue;
+ }
+
+ /* trim sub_br to fit the extent we have */
+ sub_start = sub_br->start;
+ sub_len = sub_br->len;
+ if (sub_br->start < br->start) {
+ sub_len -= br->start - sub_br->start;
+ sub_start = br->start;
+ }
+ if (sub_len > br->len)
+ sub_len = br->len;
+
+ state = 0;
+ if (sub_start == br->start)
+ state |= LEFT_ALIGNED;
+ if (sub_start + sub_len == br->start + br->len)
+ state |= RIGHT_ALIGNED;
+ switch (state) {
+ case LEFT_ALIGNED:
+ /* Coincides with only the left. */
+ br->start += sub_len;
+ br->len -= sub_len;
+ break;
+ case RIGHT_ALIGNED:
+ /* Coincides with only the right. */
+ br->len -= sub_len;
+ lp = lp->next;
+ break;
+ case LEFT_ALIGNED | RIGHT_ALIGNED:
+ /* Total overlap, just delete br. */
+ lp = lp->next;
+ list_del(&br->list);
+ kmem_free(br);
+ break;
+ case 0:
+ /*
+ * Deleting from the middle: add the new right extent
+ * and then shrink the left extent.
+ */
+ new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
+ KM_MAYFAIL);
+ if (!new_br) {
+ error = -ENOMEM;
+ goto out;
+ }
+ INIT_LIST_HEAD(&new_br->list);
+ new_br->start = sub_start + sub_len;
+ new_br->len = br->start + br->len - new_br->start;
+ list_add(&new_br->list, &br->list);
+ br->len = sub_start - br->start;
+ lp = lp->next;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+out:
+ return error;
+}
+#undef LEFT_ALIGNED
+#undef RIGHT_ALIGNED
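
To make the four alignment cases concrete: subtracting sub from br = [10, 20) gives [14, 20) when sub = [10, 14) (LEFT_ALIGNED), [10, 16) when sub = [16, 20) (RIGHT_ALIGNED), nothing when sub = [10, 20) (both), and the split [10, 13) plus [17, 20) when sub = [13, 17) (case 0). A standalone sketch of that arithmetic on one pair of half-open ranges — not kernel code; the kernel version above edits linked lists in place:

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, len; };

/*
 * Subtract sub from br, assuming they intersect (the kernel loop
 * guarantees this before reaching the switch).  Returns how many
 * ranges survive: 0, 1, or 2.
 */
static int range_disunion(struct range br, struct range sub,
			  struct range out[2])
{
	uint64_t br_end = br.start + br.len;
	uint64_t sub_end = sub.start + sub.len;
	int n = 0;

	/* Trim sub to fit br, as the kernel code does. */
	if (sub.start < br.start)
		sub.start = br.start;
	if (sub_end > br_end)
		sub_end = br_end;

	if (sub.start > br.start)	/* left remainder survives */
		out[n++] = (struct range){ br.start, sub.start - br.start };
	if (sub_end < br_end)		/* right remainder survives */
		out[n++] = (struct range){ sub_end, br_end - sub_end };
	return n;
}

int main(void)
{
	struct range out[2];
	int i, n;

	/* The middle-deletion case: [10, 20) minus [13, 17). */
	n = range_disunion((struct range){ 10, 10 },
			   (struct range){ 13, 4 }, out);
	for (i = 0; i < n; i++)		/* prints [10, 13) and [17, 20) */
		printf("[%llu, %llu)\n",
		       (unsigned long long)out[i].start,
		       (unsigned long long)(out[i].start + out[i].len));
	return 0;
}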
+
+/*
+ * Record all btree blocks seen while iterating all records of a btree.
+ *
+ * We know that the btree query_all function starts at the left edge and walks
+ * towards the right edge of the tree. Therefore, we know that we can walk up
+ * the btree cursor towards the root; if the pointer for a given level points
+ * to the first record/key in that block, we haven't seen this block before;
+ * and therefore we need to remember that we saw this block in the btree.
+ *
+ * So if our btree is:
+ *
+ * 4
+ * / | \
+ * 1 2 3
+ *
+ * Pretend for this example that each leaf block has 100 btree records. For
+ * the first btree record, we'll observe that bc_ptrs[0] == 1, so we record
+ * that we saw block 1. Then we observe that bc_ptrs[1] == 1, so we record
+ * block 4. The list is [1, 4].
+ *
+ * For the second btree record, we see that bc_ptrs[0] == 2, so we exit the
+ * loop. The list remains [1, 4].
+ *
+ * For the 101st btree record, we've moved onto leaf block 2. Now
+ * bc_ptrs[0] == 1 again, so we record that we saw block 2. We see that
+ * bc_ptrs[1] == 2, so we exit the loop. The list is now [1, 4, 2].
+ *
+ * For the 102nd record, bc_ptrs[0] == 2, so we continue.
+ *
+ * For the 201st record, we've moved on to leaf block 3. bc_ptrs[0] == 1, so
+ * we add 3 to the list. Now it is [1, 4, 2, 3].
+ *
+ * For the 300th record we just exit, with the list being [1, 4, 2, 3].
+ */
+
+/*
+ * Record all the buffers pointed to by the btree cursor. Callers already
+ * engaged in a btree walk should call this function to capture the list of
+ * blocks going from the leaf towards the root.
+ */
+int
+xfs_bitmap_set_btcur_path(
+ struct xfs_bitmap *bitmap,
+ struct xfs_btree_cur *cur)
+{
+ struct xfs_buf *bp;
+ xfs_fsblock_t fsb;
+ int i;
+ int error;
+
+ for (i = 0; i < cur->bc_nlevels && cur->bc_ptrs[i] == 1; i++) {
+ xfs_btree_get_block(cur, i, &bp);
+ if (!bp)
+ continue;
+ fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
+ error = xfs_bitmap_set(bitmap, fsb, 1);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/* Collect a btree block in the bitmap. */
+STATIC int
+xfs_bitmap_collect_btblock(
+ struct xfs_btree_cur *cur,
+ int level,
+ void *priv)
+{
+ struct xfs_bitmap *bitmap = priv;
+ struct xfs_buf *bp;
+ xfs_fsblock_t fsbno;
+
+ xfs_btree_get_block(cur, level, &bp);
+ if (!bp)
+ return 0;
+
+ fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
+ return xfs_bitmap_set(bitmap, fsbno, 1);
+}
+
+/* Walk the btree and mark the bitmap wherever a btree block is found. */
+int
+xfs_bitmap_set_btblocks(
+ struct xfs_bitmap *bitmap,
+ struct xfs_btree_cur *cur)
+{
+ return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
+}
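
Putting the file's pieces together, the reap flow described in the disunion comment is: record every block the rmapbt attributes to the owner, knock out the blocks that live metadata still references, and whatever survives belongs to the dead structure. A standalone sketch of that set arithmetic, with boolean arrays standing in for struct xfs_bitmap; nothing below is kernel API:

#include <stdio.h>
#include <stdbool.h>

#define NBLOCKS 16

int main(void)
{
	bool owned[NBLOCKS] = { false };
	bool live[NBLOCKS] = { false };
	int b;

	/* "rmapbt" says this owner maps blocks 2-9. */
	for (b = 2; b < 10; b++)
		owned[b] = true;
	/* Walking the surviving metadata finds blocks 2-5 still in use. */
	for (b = 2; b < 6; b++)
		live[b] = true;

	/* owned &= ~live, the analogue of xfs_bitmap_disunion(). */
	for (b = 0; b < NBLOCKS; b++)
		if (live[b])
			owned[b] = false;

	for (b = 0; b < NBLOCKS; b++)
		if (owned[b])
			printf("reap block %d\n", b);	/* prints 6..9 */
	return 0;
}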
diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
new file mode 100644
index 000000000000..ae8ecbce6fa6
--- /dev/null
+++ b/fs/xfs/scrub/bitmap.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ */
+#ifndef __XFS_SCRUB_BITMAP_H__
+#define __XFS_SCRUB_BITMAP_H__
+
+struct xfs_bitmap_range {
+ struct list_head list;
+ uint64_t start;
+ uint64_t len;
+};
+
+struct xfs_bitmap {
+ struct list_head list;
+};
+
+void xfs_bitmap_init(struct xfs_bitmap *bitmap);
+void xfs_bitmap_destroy(struct xfs_bitmap *bitmap);
+
+#define for_each_xfs_bitmap_extent(bex, n, bitmap) \
+ list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
+
+#define for_each_xfs_bitmap_block(b, bex, n, bitmap) \
+ list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \
+ for ((b) = bex->start; (b) < bex->start + bex->len; (b)++)
+
+int xfs_bitmap_set(struct xfs_bitmap *bitmap, uint64_t start, uint64_t len);
+int xfs_bitmap_disunion(struct xfs_bitmap *bitmap, struct xfs_bitmap *sub);
+int xfs_bitmap_set_btcur_path(struct xfs_bitmap *bitmap,
+ struct xfs_btree_cur *cur);
+int xfs_bitmap_set_btblocks(struct xfs_bitmap *bitmap,
+ struct xfs_btree_cur *cur);
+
+#endif /* __XFS_SCRUB_BITMAP_H__ */
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 3d08589f5c60..e1d11f3223e3 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -33,13 +33,13 @@
/* Set us up with an inode's bmap. */
int
-xfs_scrub_setup_inode_bmap(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_inode_bmap(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- int error;
+ int error;
- error = xfs_scrub_get_inode(sc, ip);
+ error = xchk_get_inode(sc, ip);
if (error)
goto out;
@@ -60,7 +60,7 @@ xfs_scrub_setup_inode_bmap(
}
/* Got the inode, lock it and we're ready to go. */
- error = xfs_scrub_trans_alloc(sc, 0);
+ error = xchk_trans_alloc(sc, 0);
if (error)
goto out;
sc->ilock_flags |= XFS_ILOCK_EXCL;
@@ -78,27 +78,27 @@ out:
* is in btree format.
*/
-struct xfs_scrub_bmap_info {
- struct xfs_scrub_context *sc;
- xfs_fileoff_t lastoff;
- bool is_rt;
- bool is_shared;
- int whichfork;
+struct xchk_bmap_info {
+ struct xfs_scrub *sc;
+ xfs_fileoff_t lastoff;
+ bool is_rt;
+ bool is_shared;
+ int whichfork;
};
/* Look for a corresponding rmap for this irec. */
static inline bool
-xfs_scrub_bmap_get_rmap(
- struct xfs_scrub_bmap_info *info,
- struct xfs_bmbt_irec *irec,
- xfs_agblock_t agbno,
- uint64_t owner,
- struct xfs_rmap_irec *rmap)
+xchk_bmap_get_rmap(
+ struct xchk_bmap_info *info,
+ struct xfs_bmbt_irec *irec,
+ xfs_agblock_t agbno,
+ uint64_t owner,
+ struct xfs_rmap_irec *rmap)
{
- xfs_fileoff_t offset;
- unsigned int rflags = 0;
- int has_rmap;
- int error;
+ xfs_fileoff_t offset;
+ unsigned int rflags = 0;
+ int has_rmap;
+ int error;
if (info->whichfork == XFS_ATTR_FORK)
rflags |= XFS_RMAP_ATTR_FORK;
@@ -120,7 +120,7 @@ xfs_scrub_bmap_get_rmap(
if (info->is_shared) {
error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
owner, offset, rflags, rmap, &has_rmap);
- if (!xfs_scrub_should_check_xref(info->sc, &error,
+ if (!xchk_should_check_xref(info->sc, &error,
&info->sc->sa.rmap_cur))
return false;
goto out;
@@ -131,36 +131,36 @@ xfs_scrub_bmap_get_rmap(
*/
error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
offset, rflags, &has_rmap);
- if (!xfs_scrub_should_check_xref(info->sc, &error,
+ if (!xchk_should_check_xref(info->sc, &error,
&info->sc->sa.rmap_cur))
return false;
if (!has_rmap)
goto out;
error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
- if (!xfs_scrub_should_check_xref(info->sc, &error,
+ if (!xchk_should_check_xref(info->sc, &error,
&info->sc->sa.rmap_cur))
return false;
out:
if (!has_rmap)
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
return has_rmap;
}
/* Make sure that we have rmapbt records for this extent. */
STATIC void
-xfs_scrub_bmap_xref_rmap(
- struct xfs_scrub_bmap_info *info,
- struct xfs_bmbt_irec *irec,
- xfs_agblock_t agbno)
+xchk_bmap_xref_rmap(
+ struct xchk_bmap_info *info,
+ struct xfs_bmbt_irec *irec,
+ xfs_agblock_t agbno)
{
- struct xfs_rmap_irec rmap;
- unsigned long long rmap_end;
- uint64_t owner;
+ struct xfs_rmap_irec rmap;
+ unsigned long long rmap_end;
+ uint64_t owner;
- if (!info->sc->sa.rmap_cur || xfs_scrub_skip_xref(info->sc->sm))
+ if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm))
return;
if (info->whichfork == XFS_COW_FORK)
@@ -169,14 +169,14 @@ xfs_scrub_bmap_xref_rmap(
owner = info->sc->ip->i_ino;
/* Find the rmap record for this irec. */
- if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap))
+ if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap))
return;
/* Check the rmap. */
rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
if (rmap.rm_startblock > agbno ||
agbno + irec->br_blockcount > rmap_end)
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/*
@@ -189,12 +189,12 @@ xfs_scrub_bmap_xref_rmap(
rmap.rm_blockcount;
if (rmap.rm_offset > irec->br_startoff ||
irec->br_startoff + irec->br_blockcount > rmap_end)
- xfs_scrub_fblock_xref_set_corrupt(info->sc,
+ xchk_fblock_xref_set_corrupt(info->sc,
info->whichfork, irec->br_startoff);
}
if (rmap.rm_owner != owner)
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/*
@@ -207,46 +207,46 @@ xfs_scrub_bmap_xref_rmap(
if (owner != XFS_RMAP_OWN_COW &&
irec->br_state == XFS_EXT_UNWRITTEN &&
!(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (info->whichfork == XFS_ATTR_FORK &&
!(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
- xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_xref_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
}
/* Cross-reference a single rtdev extent record. */
STATIC void
-xfs_scrub_bmap_rt_extent_xref(
- struct xfs_scrub_bmap_info *info,
- struct xfs_inode *ip,
- struct xfs_btree_cur *cur,
- struct xfs_bmbt_irec *irec)
+xchk_bmap_rt_extent_xref(
+ struct xchk_bmap_info *info,
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xfs_bmbt_irec *irec)
{
if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock,
+ xchk_xref_is_used_rt_space(info->sc, irec->br_startblock,
irec->br_blockcount);
}
/* Cross-reference a single datadev extent record. */
STATIC void
-xfs_scrub_bmap_extent_xref(
- struct xfs_scrub_bmap_info *info,
- struct xfs_inode *ip,
- struct xfs_btree_cur *cur,
- struct xfs_bmbt_irec *irec)
+xchk_bmap_extent_xref(
+ struct xchk_bmap_info *info,
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xfs_bmbt_irec *irec)
{
- struct xfs_mount *mp = info->sc->mp;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_extlen_t len;
- int error;
+ struct xfs_mount *mp = info->sc->mp;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ int error;
if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
@@ -255,44 +255,44 @@ xfs_scrub_bmap_extent_xref(
agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
len = irec->br_blockcount;
- error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
- if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
+ error = xchk_ag_init(info->sc, agno, &info->sc->sa);
+ if (!xchk_fblock_process_error(info->sc, info->whichfork,
irec->br_startoff, &error))
return;
- xfs_scrub_xref_is_used_space(info->sc, agbno, len);
- xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len);
- xfs_scrub_bmap_xref_rmap(info, irec, agbno);
+ xchk_xref_is_used_space(info->sc, agbno, len);
+ xchk_xref_is_not_inode_chunk(info->sc, agbno, len);
+ xchk_bmap_xref_rmap(info, irec, agbno);
switch (info->whichfork) {
case XFS_DATA_FORK:
if (xfs_is_reflink_inode(info->sc->ip))
break;
/* fall through */
case XFS_ATTR_FORK:
- xfs_scrub_xref_is_not_shared(info->sc, agbno,
+ xchk_xref_is_not_shared(info->sc, agbno,
irec->br_blockcount);
break;
case XFS_COW_FORK:
- xfs_scrub_xref_is_cow_staging(info->sc, agbno,
+ xchk_xref_is_cow_staging(info->sc, agbno,
irec->br_blockcount);
break;
}
- xfs_scrub_ag_free(info->sc, &info->sc->sa);
+ xchk_ag_free(info->sc, &info->sc->sa);
}
/* Scrub a single extent record. */
STATIC int
-xfs_scrub_bmap_extent(
- struct xfs_inode *ip,
- struct xfs_btree_cur *cur,
- struct xfs_scrub_bmap_info *info,
- struct xfs_bmbt_irec *irec)
+xchk_bmap_extent(
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xchk_bmap_info *info,
+ struct xfs_bmbt_irec *irec)
{
- struct xfs_mount *mp = info->sc->mp;
- struct xfs_buf *bp = NULL;
- xfs_filblks_t end;
- int error = 0;
+ struct xfs_mount *mp = info->sc->mp;
+ struct xfs_buf *bp = NULL;
+ xfs_filblks_t end;
+ int error = 0;
if (cur)
xfs_btree_get_block(cur, 0, &bp);
@@ -302,12 +302,12 @@ xfs_scrub_bmap_extent(
* from the incore list, for which there is no ordering check.
*/
if (irec->br_startoff < info->lastoff)
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/* There should never be a "hole" extent in either extent list. */
if (irec->br_startblock == HOLESTARTBLOCK)
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/*
@@ -315,40 +315,40 @@ xfs_scrub_bmap_extent(
* in-core extent scan, and we should never see these in the bmbt.
*/
if (isnullstartblock(irec->br_startblock))
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/* Make sure the extent points to a valid place. */
if (irec->br_blockcount > MAXEXTLEN)
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
end = irec->br_startblock + irec->br_blockcount - 1;
if (info->is_rt &&
(!xfs_verify_rtbno(mp, irec->br_startblock) ||
!xfs_verify_rtbno(mp, end)))
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (!info->is_rt &&
(!xfs_verify_fsbno(mp, irec->br_startblock) ||
!xfs_verify_fsbno(mp, end) ||
XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
XFS_FSB_TO_AGNO(mp, end)))
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
/* We don't allow unwritten extents on attr forks. */
if (irec->br_state == XFS_EXT_UNWRITTEN &&
info->whichfork == XFS_ATTR_FORK)
- xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ xchk_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (info->is_rt)
- xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec);
+ xchk_bmap_rt_extent_xref(info, ip, cur, irec);
else
- xfs_scrub_bmap_extent_xref(info, ip, cur, irec);
+ xchk_bmap_extent_xref(info, ip, cur, irec);
info->lastoff = irec->br_startoff + irec->br_blockcount;
return error;
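
A standalone sketch of a few of the per-extent sanity rules applied above — ordering against the previous extent, length and overflow limits, and the rule that a datadev extent must not cross an AG boundary. The DEMO_* geometry is invented for illustration and is not the real XFS limits API:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DEMO_MAXEXTLEN	(1ULL << 21)	/* mirrors MAXEXTLEN: 2^21 blocks */
#define DEMO_AGBLOCKS	(1ULL << 30)	/* invented AG size, in blocks */

struct demo_irec {
	uint64_t startoff;	/* file offset, in blocks */
	uint64_t startblock;	/* fs block number */
	uint64_t blockcount;
};

static bool demo_extent_ok(const struct demo_irec *ir, uint64_t lastoff)
{
	uint64_t end = ir->startblock + ir->blockcount;

	if (ir->startoff < lastoff)		/* out of order */
		return false;
	if (ir->blockcount > DEMO_MAXEXTLEN)	/* too long */
		return false;
	if (end <= ir->startblock)		/* overflow (or zero length) */
		return false;
	/* A datadev extent must stay within one AG. */
	if (ir->startblock / DEMO_AGBLOCKS != (end - 1) / DEMO_AGBLOCKS)
		return false;
	return true;
}

int main(void)
{
	struct demo_irec ir = { 0, DEMO_AGBLOCKS - 4, 8 };

	/* Crosses from AG 0 into AG 1, so the check fails. */
	printf("%s\n", demo_extent_ok(&ir, 0) ? "ok" : "corrupt");
	return 0;
}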
@@ -356,17 +356,17 @@ xfs_scrub_bmap_extent(
/* Scrub a bmbt record. */
STATIC int
-xfs_scrub_bmapbt_rec(
- struct xfs_scrub_btree *bs,
- union xfs_btree_rec *rec)
+xchk_bmapbt_rec(
+ struct xchk_btree *bs,
+ union xfs_btree_rec *rec)
{
- struct xfs_bmbt_irec irec;
- struct xfs_scrub_bmap_info *info = bs->private;
- struct xfs_inode *ip = bs->cur->bc_private.b.ip;
- struct xfs_buf *bp = NULL;
- struct xfs_btree_block *block;
- uint64_t owner;
- int i;
+ struct xfs_bmbt_irec irec;
+ struct xchk_bmap_info *info = bs->private;
+ struct xfs_inode *ip = bs->cur->bc_private.b.ip;
+ struct xfs_buf *bp = NULL;
+ struct xfs_btree_block *block;
+ uint64_t owner;
+ int i;
/*
* Check the owners of the btree blocks up to the level below
@@ -378,54 +378,53 @@ xfs_scrub_bmapbt_rec(
block = xfs_btree_get_block(bs->cur, i, &bp);
owner = be64_to_cpu(block->bb_u.l.bb_owner);
if (owner != ip->i_ino)
- xfs_scrub_fblock_set_corrupt(bs->sc,
+ xchk_fblock_set_corrupt(bs->sc,
info->whichfork, 0);
}
}
/* Set up the in-core record and scrub it. */
xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
- return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
+ return xchk_bmap_extent(ip, bs->cur, info, &irec);
}
/* Scan the btree records. */
STATIC int
-xfs_scrub_bmap_btree(
- struct xfs_scrub_context *sc,
- int whichfork,
- struct xfs_scrub_bmap_info *info)
+xchk_bmap_btree(
+ struct xfs_scrub *sc,
+ int whichfork,
+ struct xchk_bmap_info *info)
{
- struct xfs_owner_info oinfo;
- struct xfs_mount *mp = sc->mp;
- struct xfs_inode *ip = sc->ip;
- struct xfs_btree_cur *cur;
- int error;
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip = sc->ip;
+ struct xfs_btree_cur *cur;
+ int error;
cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
- error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR :
- XFS_BTREE_NOERROR);
+ error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
+ xfs_btree_del_cursor(cur, error);
return error;
}
-struct xfs_scrub_bmap_check_rmap_info {
- struct xfs_scrub_context *sc;
- int whichfork;
- struct xfs_iext_cursor icur;
+struct xchk_bmap_check_rmap_info {
+ struct xfs_scrub *sc;
+ int whichfork;
+ struct xfs_iext_cursor icur;
};
/* Can we find bmaps that fit this rmap? */
STATIC int
-xfs_scrub_bmap_check_rmap(
+xchk_bmap_check_rmap(
struct xfs_btree_cur *cur,
struct xfs_rmap_irec *rec,
void *priv)
{
struct xfs_bmbt_irec irec;
- struct xfs_scrub_bmap_check_rmap_info *sbcri = priv;
+ struct xchk_bmap_check_rmap_info *sbcri = priv;
struct xfs_ifork *ifp;
- struct xfs_scrub_context *sc = sbcri->sc;
+ struct xfs_scrub *sc = sbcri->sc;
bool have_map;
/* Is this even the right fork? */
@@ -440,14 +439,14 @@ xfs_scrub_bmap_check_rmap(
/* Now look up the bmbt record. */
ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
if (!ifp) {
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
goto out;
}
have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
&sbcri->icur, &irec);
if (!have_map)
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
/*
* bmap extent records are constrained to 2^21 blocks in length
@@ -458,14 +457,14 @@ xfs_scrub_bmap_check_rmap(
*/
while (have_map) {
if (irec.br_startoff != rec->rm_offset)
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
cur->bc_private.a.agno, rec->rm_startblock))
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (irec.br_blockcount > rec->rm_blockcount)
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
break;
@@ -476,7 +475,7 @@ xfs_scrub_bmap_check_rmap(
break;
have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
if (!have_map)
- xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
+ xchk_fblock_set_corrupt(sc, sbcri->whichfork,
rec->rm_offset);
}
@@ -488,12 +487,12 @@ out:
/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
-xfs_scrub_bmap_check_ag_rmaps(
- struct xfs_scrub_context *sc,
+xchk_bmap_check_ag_rmaps(
+ struct xfs_scrub *sc,
int whichfork,
xfs_agnumber_t agno)
{
- struct xfs_scrub_bmap_check_rmap_info sbcri;
+ struct xchk_bmap_check_rmap_info sbcri;
struct xfs_btree_cur *cur;
struct xfs_buf *agf;
int error;
@@ -510,11 +509,11 @@ xfs_scrub_bmap_check_ag_rmaps(
sbcri.sc = sc;
sbcri.whichfork = whichfork;
- error = xfs_rmap_query_all(cur, xfs_scrub_bmap_check_rmap, &sbcri);
+ error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
if (error == XFS_BTREE_QUERY_RANGE_ABORT)
error = 0;
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
out_agf:
xfs_trans_brelse(sc->tp, agf);
return error;
@@ -522,13 +521,13 @@ out_agf:
/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
-xfs_scrub_bmap_check_rmaps(
- struct xfs_scrub_context *sc,
- int whichfork)
+xchk_bmap_check_rmaps(
+ struct xfs_scrub *sc,
+ int whichfork)
{
- loff_t size;
- xfs_agnumber_t agno;
- int error;
+ loff_t size;
+ xfs_agnumber_t agno;
+ int error;
if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
whichfork == XFS_COW_FORK ||
@@ -562,7 +561,7 @@ xfs_scrub_bmap_check_rmaps(
return 0;
for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
- error = xfs_scrub_bmap_check_ag_rmaps(sc, whichfork, agno);
+ error = xchk_bmap_check_ag_rmaps(sc, whichfork, agno);
if (error)
return error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@@ -579,18 +578,18 @@ xfs_scrub_bmap_check_rmaps(
* Then we unconditionally scan the incore extent cache.
*/
STATIC int
-xfs_scrub_bmap(
- struct xfs_scrub_context *sc,
- int whichfork)
+xchk_bmap(
+ struct xfs_scrub *sc,
+ int whichfork)
{
- struct xfs_bmbt_irec irec;
- struct xfs_scrub_bmap_info info = { NULL };
- struct xfs_mount *mp = sc->mp;
- struct xfs_inode *ip = sc->ip;
- struct xfs_ifork *ifp;
- xfs_fileoff_t endoff;
- struct xfs_iext_cursor icur;
- int error = 0;
+ struct xfs_bmbt_irec irec;
+ struct xchk_bmap_info info = { NULL };
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip = sc->ip;
+ struct xfs_ifork *ifp;
+ xfs_fileoff_t endoff;
+ struct xfs_iext_cursor icur;
+ int error = 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -606,7 +605,7 @@ xfs_scrub_bmap(
goto out;
/* No CoW forks on non-reflink inodes/filesystems. */
if (!xfs_is_reflink_inode(ip)) {
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
goto out;
}
break;
@@ -615,7 +614,7 @@ xfs_scrub_bmap(
goto out_check_rmap;
if (!xfs_sb_version_hasattr(&mp->m_sb) &&
!xfs_sb_version_hasattr2(&mp->m_sb))
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
break;
default:
ASSERT(whichfork == XFS_DATA_FORK);
@@ -631,22 +630,22 @@ xfs_scrub_bmap(
goto out;
case XFS_DINODE_FMT_EXTENTS:
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
- xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+ xchk_fblock_set_corrupt(sc, whichfork, 0);
goto out;
}
break;
case XFS_DINODE_FMT_BTREE:
if (whichfork == XFS_COW_FORK) {
- xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+ xchk_fblock_set_corrupt(sc, whichfork, 0);
goto out;
}
- error = xfs_scrub_bmap_btree(sc, whichfork, &info);
+ error = xchk_bmap_btree(sc, whichfork, &info);
if (error)
goto out;
break;
default:
- xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+ xchk_fblock_set_corrupt(sc, whichfork, 0);
goto out;
}
@@ -656,37 +655,37 @@ xfs_scrub_bmap(
/* Now try to scrub the in-memory extent list. */
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(sc->tp, ip, whichfork);
- if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
+ if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
goto out;
}
/* Find the offset of the last extent in the mapping. */
error = xfs_bmap_last_offset(ip, &endoff, whichfork);
- if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
+ if (!xchk_fblock_process_error(sc, whichfork, 0, &error))
goto out;
/* Scrub extent records. */
info.lastoff = 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
for_each_xfs_iext(ifp, &icur, &irec) {
- if (xfs_scrub_should_terminate(sc, &error) ||
+ if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break;
if (isnullstartblock(irec.br_startblock))
continue;
if (irec.br_startoff >= endoff) {
- xfs_scrub_fblock_set_corrupt(sc, whichfork,
+ xchk_fblock_set_corrupt(sc, whichfork,
irec.br_startoff);
goto out;
}
- error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec);
+ error = xchk_bmap_extent(ip, NULL, &info, &irec);
if (error)
goto out;
}
out_check_rmap:
- error = xfs_scrub_bmap_check_rmaps(sc, whichfork);
- if (!xfs_scrub_fblock_xref_process_error(sc, whichfork, 0, &error))
+ error = xchk_bmap_check_rmaps(sc, whichfork);
+ if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
goto out;
out:
return error;
@@ -694,27 +693,27 @@ out:
/* Scrub an inode's data fork. */
int
-xfs_scrub_bmap_data(
- struct xfs_scrub_context *sc)
+xchk_bmap_data(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_bmap(sc, XFS_DATA_FORK);
+ return xchk_bmap(sc, XFS_DATA_FORK);
}
/* Scrub an inode's attr fork. */
int
-xfs_scrub_bmap_attr(
- struct xfs_scrub_context *sc)
+xchk_bmap_attr(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_bmap(sc, XFS_ATTR_FORK);
+ return xchk_bmap(sc, XFS_ATTR_FORK);
}
/* Scrub an inode's CoW fork. */
int
-xfs_scrub_bmap_cow(
- struct xfs_scrub_context *sc)
+xchk_bmap_cow(
+ struct xfs_scrub *sc)
{
if (!xfs_is_reflink_inode(sc->ip))
return -ENOENT;
- return xfs_scrub_bmap(sc, XFS_COW_FORK);
+ return xchk_bmap(sc, XFS_COW_FORK);
}
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 5b472045f036..4ae959f7ad2c 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -29,13 +29,13 @@
* operational errors in common.c.
*/
static bool
-__xfs_scrub_btree_process_error(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level,
- int *error,
- __u32 errflag,
- void *ret_ip)
+__xchk_btree_process_error(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
if (*error == 0)
return true;
@@ -43,7 +43,7 @@ __xfs_scrub_btree_process_error(
switch (*error) {
case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */
- trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break;
case -EFSBADCRC:
case -EFSCORRUPTED:
@@ -53,10 +53,10 @@ __xfs_scrub_btree_process_error(
/* fall through */
default:
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
- trace_xfs_scrub_ifork_btree_op_error(sc, cur, level,
+ trace_xchk_ifork_btree_op_error(sc, cur, level,
*error, ret_ip);
else
- trace_xfs_scrub_btree_op_error(sc, cur, level,
+ trace_xchk_btree_op_error(sc, cur, level,
*error, ret_ip);
break;
}
@@ -64,63 +64,63 @@ __xfs_scrub_btree_process_error(
}
bool
-xfs_scrub_btree_process_error(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level,
- int *error)
+xchk_btree_process_error(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error)
{
- return __xfs_scrub_btree_process_error(sc, cur, level, error,
+ return __xchk_btree_process_error(sc, cur, level, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}
bool
-xfs_scrub_btree_xref_process_error(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level,
- int *error)
+xchk_btree_xref_process_error(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error)
{
- return __xfs_scrub_btree_process_error(sc, cur, level, error,
+ return __xchk_btree_process_error(sc, cur, level, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
/* Record btree block corruption. */
static void
-__xfs_scrub_btree_set_corrupt(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level,
- __u32 errflag,
- void *ret_ip)
+__xchk_btree_set_corrupt(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ __u32 errflag,
+ void *ret_ip)
{
sc->sm->sm_flags |= errflag;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
- trace_xfs_scrub_ifork_btree_error(sc, cur, level,
+ trace_xchk_ifork_btree_error(sc, cur, level,
ret_ip);
else
- trace_xfs_scrub_btree_error(sc, cur, level,
+ trace_xchk_btree_error(sc, cur, level,
ret_ip);
}
void
-xfs_scrub_btree_set_corrupt(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level)
+xchk_btree_set_corrupt(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level)
{
- __xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
+ __xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
__return_address);
}
void
-xfs_scrub_btree_xref_set_corrupt(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- int level)
+xchk_btree_xref_set_corrupt(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ int level)
{
- __xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
+ __xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
__return_address);
}
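
These helpers fix the calling convention used by every scrubber: a *_process_error() variant absorbs an errno and answers whether the walk may continue, while a *_set_corrupt() variant records a finding without any errno at all. A userspace sketch of that split, with demo_* names invented and EUCLEAN standing in for what the kernel spells EFSCORRUPTED:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

#define DEMO_OFLAG_CORRUPT	(1U << 0)

struct demo_sc { unsigned int sm_flags; };

/* Absorb *error; return true if scrubbing can continue. */
static bool demo_process_error(struct demo_sc *sc, int *error)
{
	switch (*error) {
	case 0:
		return true;
	case -EUCLEAN:			/* kernel: -EFSCORRUPTED */
		sc->sm_flags |= DEMO_OFLAG_CORRUPT;
		*error = 0;		/* a finding, not a failure */
		return false;
	default:
		return false;		/* real operational error */
	}
}

/* Record a finding directly; no errno involved. */
static void demo_set_corrupt(struct demo_sc *sc)
{
	sc->sm_flags |= DEMO_OFLAG_CORRUPT;
}

int main(void)
{
	struct demo_sc sc = { 0 };
	int error = -EUCLEAN;

	if (!demo_process_error(&sc, &error))
		printf("stop: error=%d flags=%u\n", error, sc.sm_flags);
	demo_set_corrupt(&sc);
	return error;	/* 0: corruption was recorded, not returned */
}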
@@ -129,8 +129,8 @@ xfs_scrub_btree_xref_set_corrupt(
* keys.
*/
STATIC void
-xfs_scrub_btree_rec(
- struct xfs_scrub_btree *bs)
+xchk_btree_rec(
+ struct xchk_btree *bs)
{
struct xfs_btree_cur *cur = bs->cur;
union xfs_btree_rec *rec;
@@ -144,11 +144,11 @@ xfs_scrub_btree_rec(
block = xfs_btree_get_block(cur, 0, &bp);
rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
- trace_xfs_scrub_btree_rec(bs->sc, cur, 0);
+ trace_xchk_btree_rec(bs->sc, cur, 0);
/* If this isn't the first record, are they in order? */
if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
- xfs_scrub_btree_set_corrupt(bs->sc, cur, 0);
+ xchk_btree_set_corrupt(bs->sc, cur, 0);
bs->firstrec = false;
memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);
@@ -160,7 +160,7 @@ xfs_scrub_btree_rec(
keyblock = xfs_btree_get_block(cur, 1, &bp);
keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+ xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return;
@@ -169,7 +169,7 @@ xfs_scrub_btree_rec(
cur->bc_ops->init_high_key_from_rec(&hkey, rec);
keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+ xchk_btree_set_corrupt(bs->sc, cur, 1);
}
/*
@@ -177,8 +177,8 @@ xfs_scrub_btree_rec(
* keys.
*/
STATIC void
-xfs_scrub_btree_key(
- struct xfs_scrub_btree *bs,
+xchk_btree_key(
+ struct xchk_btree *bs,
int level)
{
struct xfs_btree_cur *cur = bs->cur;
@@ -191,12 +191,12 @@ xfs_scrub_btree_key(
block = xfs_btree_get_block(cur, level, &bp);
key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
- trace_xfs_scrub_btree_key(bs->sc, cur, level);
+ trace_xchk_btree_key(bs->sc, cur, level);
/* If this isn't the first key, are they in order? */
if (!bs->firstkey[level] &&
!cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
bs->firstkey[level] = false;
memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);
@@ -207,7 +207,7 @@ xfs_scrub_btree_key(
keyblock = xfs_btree_get_block(cur, level + 1, &bp);
keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return;
@@ -216,7 +216,7 @@ xfs_scrub_btree_key(
key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
}
/*
@@ -224,12 +224,12 @@ xfs_scrub_btree_key(
* Callers do not need to set the corrupt flag.
*/
static bool
-xfs_scrub_btree_ptr_ok(
- struct xfs_scrub_btree *bs,
- int level,
- union xfs_btree_ptr *ptr)
+xchk_btree_ptr_ok(
+ struct xchk_btree *bs,
+ int level,
+ union xfs_btree_ptr *ptr)
{
- bool res;
+ bool res;
/* A btree rooted in an inode has no block pointer to the root. */
if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
@@ -242,29 +242,29 @@ xfs_scrub_btree_ptr_ok(
else
res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
if (!res)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return res;
}
/* Check that a btree block's sibling matches what we expect. */
STATIC int
-xfs_scrub_btree_block_check_sibling(
- struct xfs_scrub_btree *bs,
- int level,
- int direction,
- union xfs_btree_ptr *sibling)
+xchk_btree_block_check_sibling(
+ struct xchk_btree *bs,
+ int level,
+ int direction,
+ union xfs_btree_ptr *sibling)
{
- struct xfs_btree_cur *cur = bs->cur;
- struct xfs_btree_block *pblock;
- struct xfs_buf *pbp;
- struct xfs_btree_cur *ncur = NULL;
- union xfs_btree_ptr *pp;
- int success;
- int error;
+ struct xfs_btree_cur *cur = bs->cur;
+ struct xfs_btree_block *pblock;
+ struct xfs_buf *pbp;
+ struct xfs_btree_cur *ncur = NULL;
+ union xfs_btree_ptr *pp;
+ int success;
+ int error;
error = xfs_btree_dup_cursor(cur, &ncur);
- if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error) ||
+ if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error) ||
!ncur)
return error;
@@ -278,7 +278,7 @@ xfs_scrub_btree_block_check_sibling(
else
error = xfs_btree_decrement(ncur, level + 1, &success);
if (error == 0 && success)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
error = 0;
goto out;
}
@@ -288,23 +288,23 @@ xfs_scrub_btree_block_check_sibling(
error = xfs_btree_increment(ncur, level + 1, &success);
else
error = xfs_btree_decrement(ncur, level + 1, &success);
- if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error))
+ if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error))
goto out;
if (!success) {
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level + 1);
+ xchk_btree_set_corrupt(bs->sc, cur, level + 1);
goto out;
}
/* Compare upper level pointer to sibling pointer. */
pblock = xfs_btree_get_block(ncur, level + 1, &pbp);
pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
- if (!xfs_scrub_btree_ptr_ok(bs, level + 1, pp))
+ if (!xchk_btree_ptr_ok(bs, level + 1, pp))
goto out;
if (pbp)
- xfs_scrub_buffer_recheck(bs->sc, pbp);
+ xchk_buffer_recheck(bs->sc, pbp);
if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
out:
xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR);
return error;
@@ -312,15 +312,15 @@ out:
/* Check the siblings of a btree block. */
STATIC int
-xfs_scrub_btree_block_check_siblings(
- struct xfs_scrub_btree *bs,
- struct xfs_btree_block *block)
+xchk_btree_block_check_siblings(
+ struct xchk_btree *bs,
+ struct xfs_btree_block *block)
{
- struct xfs_btree_cur *cur = bs->cur;
- union xfs_btree_ptr leftsib;
- union xfs_btree_ptr rightsib;
- int level;
- int error = 0;
+ struct xfs_btree_cur *cur = bs->cur;
+ union xfs_btree_ptr leftsib;
+ union xfs_btree_ptr rightsib;
+ int level;
+ int error = 0;
xfs_btree_get_sibling(cur, block, &leftsib, XFS_BB_LEFTSIB);
xfs_btree_get_sibling(cur, block, &rightsib, XFS_BB_RIGHTSIB);
@@ -330,7 +330,7 @@ xfs_scrub_btree_block_check_siblings(
if (level == cur->bc_nlevels - 1) {
if (!xfs_btree_ptr_is_null(cur, &leftsib) ||
!xfs_btree_ptr_is_null(cur, &rightsib))
- xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ xchk_btree_set_corrupt(bs->sc, cur, level);
goto out;
}
@@ -339,10 +339,10 @@ xfs_scrub_btree_block_check_siblings(
* parent level pointers?
* (These functions absorb error codes for us.)
*/
- error = xfs_scrub_btree_block_check_sibling(bs, level, -1, &leftsib);
+ error = xchk_btree_block_check_sibling(bs, level, -1, &leftsib);
if (error)
return error;
- error = xfs_scrub_btree_block_check_sibling(bs, level, 1, &rightsib);
+ error = xchk_btree_block_check_sibling(bs, level, 1, &rightsib);
if (error)
return error;
out:
@@ -360,16 +360,16 @@ struct check_owner {
* an rmap record for it.
*/
STATIC int
-xfs_scrub_btree_check_block_owner(
- struct xfs_scrub_btree *bs,
- int level,
- xfs_daddr_t daddr)
+xchk_btree_check_block_owner(
+ struct xchk_btree *bs,
+ int level,
+ xfs_daddr_t daddr)
{
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_btnum_t btnum;
- bool init_sa;
- int error = 0;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_btnum_t btnum;
+ bool init_sa;
+ int error = 0;
if (!bs->cur)
return 0;
@@ -380,13 +380,13 @@ xfs_scrub_btree_check_block_owner(
init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
if (init_sa) {
- error = xfs_scrub_ag_init(bs->sc, agno, &bs->sc->sa);
- if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur,
+ error = xchk_ag_init(bs->sc, agno, &bs->sc->sa);
+ if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
level, &error))
return error;
}
- xfs_scrub_xref_is_used_space(bs->sc, agbno, 1);
+ xchk_xref_is_used_space(bs->sc, agbno, 1);
/*
* The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
* have to nullify it (to shut down further block owner checks) if
@@ -395,25 +395,25 @@ xfs_scrub_btree_check_block_owner(
if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
bs->cur = NULL;
- xfs_scrub_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
+ xchk_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
bs->cur = NULL;
if (init_sa)
- xfs_scrub_ag_free(bs->sc, &bs->sc->sa);
+ xchk_ag_free(bs->sc, &bs->sc->sa);
return error;
}
/* Check the owner of a btree block. */
STATIC int
-xfs_scrub_btree_check_owner(
- struct xfs_scrub_btree *bs,
- int level,
- struct xfs_buf *bp)
+xchk_btree_check_owner(
+ struct xchk_btree *bs,
+ int level,
+ struct xfs_buf *bp)
{
- struct xfs_btree_cur *cur = bs->cur;
- struct check_owner *co;
+ struct xfs_btree_cur *cur = bs->cur;
+ struct check_owner *co;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
return 0;
@@ -437,7 +437,7 @@ xfs_scrub_btree_check_owner(
return 0;
}
- return xfs_scrub_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
+ return xchk_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
}
/*
@@ -445,8 +445,8 @@ xfs_scrub_btree_check_owner(
* special blocks that don't require that.
*/
STATIC void
-xfs_scrub_btree_check_minrecs(
- struct xfs_scrub_btree *bs,
+xchk_btree_check_minrecs(
+ struct xchk_btree *bs,
int level,
struct xfs_btree_block *block)
{
@@ -475,7 +475,7 @@ xfs_scrub_btree_check_minrecs(
if (level >= ok_level)
return;
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, level);
}
/*
@@ -483,21 +483,21 @@ xfs_scrub_btree_check_minrecs(
* and buffer pointers (if applicable) if they're ok to use.
*/
STATIC int
-xfs_scrub_btree_get_block(
- struct xfs_scrub_btree *bs,
- int level,
- union xfs_btree_ptr *pp,
- struct xfs_btree_block **pblock,
- struct xfs_buf **pbp)
+xchk_btree_get_block(
+ struct xchk_btree *bs,
+ int level,
+ union xfs_btree_ptr *pp,
+ struct xfs_btree_block **pblock,
+ struct xfs_buf **pbp)
{
- void *failed_at;
- int error;
+ xfs_failaddr_t failed_at;
+ int error;
*pblock = NULL;
*pbp = NULL;
error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock);
- if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, level, &error) ||
+ if (!xchk_btree_process_error(bs->sc, bs->cur, level, &error) ||
!*pblock)
return error;
@@ -509,19 +509,19 @@ xfs_scrub_btree_get_block(
failed_at = __xfs_btree_check_sblock(bs->cur, *pblock,
level, *pbp);
if (failed_at) {
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return 0;
}
if (*pbp)
- xfs_scrub_buffer_recheck(bs->sc, *pbp);
+ xchk_buffer_recheck(bs->sc, *pbp);
- xfs_scrub_btree_check_minrecs(bs, level, *pblock);
+ xchk_btree_check_minrecs(bs, level, *pblock);
/*
* Check the block's owner; this function absorbs error codes
* for us.
*/
- error = xfs_scrub_btree_check_owner(bs, level, *pbp);
+ error = xchk_btree_check_owner(bs, level, *pbp);
if (error)
return error;
@@ -529,7 +529,7 @@ xfs_scrub_btree_get_block(
* Check the block's siblings; this function absorbs error codes
* for us.
*/
- return xfs_scrub_btree_block_check_siblings(bs, *pblock);
+ return xchk_btree_block_check_siblings(bs, *pblock);
}
/*
@@ -537,18 +537,18 @@ xfs_scrub_btree_get_block(
* in the parent block.
*/
STATIC void
-xfs_scrub_btree_block_keys(
- struct xfs_scrub_btree *bs,
- int level,
- struct xfs_btree_block *block)
+xchk_btree_block_keys(
+ struct xchk_btree *bs,
+ int level,
+ struct xfs_btree_block *block)
{
- union xfs_btree_key block_keys;
- struct xfs_btree_cur *cur = bs->cur;
- union xfs_btree_key *high_bk;
- union xfs_btree_key *parent_keys;
- union xfs_btree_key *high_pk;
- struct xfs_btree_block *parent_block;
- struct xfs_buf *bp;
+ union xfs_btree_key block_keys;
+ struct xfs_btree_cur *cur = bs->cur;
+ union xfs_btree_key *high_bk;
+ union xfs_btree_key *parent_keys;
+ union xfs_btree_key *high_pk;
+ struct xfs_btree_block *parent_block;
+ struct xfs_buf *bp;
if (level >= cur->bc_nlevels - 1)
return;
@@ -562,7 +562,7 @@ xfs_scrub_btree_block_keys(
parent_block);
if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+ xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return;
@@ -573,7 +573,7 @@ xfs_scrub_btree_block_keys(
parent_block);
if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0)
- xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+ xchk_btree_set_corrupt(bs->sc, cur, 1);
}
/*
@@ -582,24 +582,24 @@ xfs_scrub_btree_block_keys(
* so that the caller can verify individual records.
*/
int
-xfs_scrub_btree(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- xfs_scrub_btree_rec_fn scrub_fn,
- struct xfs_owner_info *oinfo,
- void *private)
+xchk_btree(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ xchk_btree_rec_fn scrub_fn,
+ struct xfs_owner_info *oinfo,
+ void *private)
{
- struct xfs_scrub_btree bs = { NULL };
- union xfs_btree_ptr ptr;
- union xfs_btree_ptr *pp;
- union xfs_btree_rec *recp;
- struct xfs_btree_block *block;
- int level;
- struct xfs_buf *bp;
- struct check_owner *co;
- struct check_owner *n;
- int i;
- int error = 0;
+ struct xchk_btree bs = { NULL };
+ union xfs_btree_ptr ptr;
+ union xfs_btree_ptr *pp;
+ union xfs_btree_rec *recp;
+ struct xfs_btree_block *block;
+ int level;
+ struct xfs_buf *bp;
+ struct check_owner *co;
+ struct check_owner *n;
+ int i;
+ int error = 0;
/* Initialize scrub state */
bs.cur = cur;
@@ -614,7 +614,7 @@ xfs_scrub_btree(
/* Don't try to check a tree with a height we can't handle. */
if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
- xfs_scrub_btree_set_corrupt(sc, cur, 0);
+ xchk_btree_set_corrupt(sc, cur, 0);
goto out;
}
@@ -624,9 +624,9 @@ xfs_scrub_btree(
*/
level = cur->bc_nlevels - 1;
cur->bc_ops->init_ptr_from_cur(cur, &ptr);
- if (!xfs_scrub_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
+ if (!xchk_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
goto out;
- error = xfs_scrub_btree_get_block(&bs, level, &ptr, &block, &bp);
+ error = xchk_btree_get_block(&bs, level, &ptr, &block, &bp);
if (error || !block)
goto out;
@@ -639,7 +639,7 @@ xfs_scrub_btree(
/* End of leaf, pop back towards the root. */
if (cur->bc_ptrs[level] >
be16_to_cpu(block->bb_numrecs)) {
- xfs_scrub_btree_block_keys(&bs, level, block);
+ xchk_btree_block_keys(&bs, level, block);
if (level < cur->bc_nlevels - 1)
cur->bc_ptrs[level + 1]++;
level++;
@@ -647,14 +647,14 @@ xfs_scrub_btree(
}
/* Records in order for scrub? */
- xfs_scrub_btree_rec(&bs);
+ xchk_btree_rec(&bs);
/* Call out to the record checker. */
recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
error = bs.scrub_rec(&bs, recp);
if (error)
break;
- if (xfs_scrub_should_terminate(sc, &error) ||
+ if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break;
@@ -664,7 +664,7 @@ xfs_scrub_btree(
/* End of node, pop back towards the root. */
if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
- xfs_scrub_btree_block_keys(&bs, level, block);
+ xchk_btree_block_keys(&bs, level, block);
if (level < cur->bc_nlevels - 1)
cur->bc_ptrs[level + 1]++;
level++;
@@ -672,16 +672,16 @@ xfs_scrub_btree(
}
/* Keys in order for scrub? */
- xfs_scrub_btree_key(&bs, level);
+ xchk_btree_key(&bs, level);
/* Drill another level deeper. */
pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
- if (!xfs_scrub_btree_ptr_ok(&bs, level, pp)) {
+ if (!xchk_btree_ptr_ok(&bs, level, pp)) {
cur->bc_ptrs[level]++;
continue;
}
level--;
- error = xfs_scrub_btree_get_block(&bs, level, pp, &block, &bp);
+ error = xchk_btree_get_block(&bs, level, pp, &block, &bp);
if (error || !block)
goto out;
@@ -692,7 +692,7 @@ out:
/* Process deferred owner checks on btree blocks. */
list_for_each_entry_safe(co, n, &bs.to_check, list) {
if (!error && bs.cur)
- error = xfs_scrub_btree_check_block_owner(&bs,
+ error = xchk_btree_check_block_owner(&bs,
co->level, co->daddr);
list_del(&co->list);
kmem_free(co);
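
The main loop above is an iterative depth-first walk: the per-level bc_ptrs[] positions serve as an explicit stack, so no recursion is needed and the cursor can be inspected at any level mid-walk. A standalone sketch of just the visiting order on a toy two-level tree (array shapes invented for illustration):

#include <stdio.h>

#define DEMO_LEVELS	2

/* A toy tree: one root with 3 children, each leaf holding 2 records. */
static const int demo_numrecs[DEMO_LEVELS] = { 2, 3 };

int main(void)
{
	int ptrs[DEMO_LEVELS] = { 1, 1 };	/* 1-based, like bc_ptrs */
	int level = 0;

	while (level < DEMO_LEVELS) {
		if (level == 0) {
			if (ptrs[0] > demo_numrecs[0]) {
				/* End of leaf, pop back towards the root. */
				ptrs[1]++;
				level = 1;
				continue;
			}
			/* "Call out to the record checker." */
			printf("record %d of leaf %d\n", ptrs[0], ptrs[1]);
			ptrs[0]++;
			continue;
		}
		if (ptrs[1] > demo_numrecs[1]) {
			level++;	/* walked past the root: done */
			continue;
		}
		/* Drill another level deeper into the next leaf. */
		level = 0;
		ptrs[0] = 1;
	}
	return 0;
}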
diff --git a/fs/xfs/scrub/btree.h b/fs/xfs/scrub/btree.h
index 956627500f2c..aada763cd006 100644
--- a/fs/xfs/scrub/btree.h
+++ b/fs/xfs/scrub/btree.h
@@ -9,44 +9,43 @@
/* btree scrub */
/* Check for btree operation errors. */
-bool xfs_scrub_btree_process_error(struct xfs_scrub_context *sc,
+bool xchk_btree_process_error(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level, int *error);
/* Check for btree xref operation errors. */
-bool xfs_scrub_btree_xref_process_error(struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur, int level,
- int *error);
+bool xchk_btree_xref_process_error(struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur, int level, int *error);
/* Check for btree corruption. */
-void xfs_scrub_btree_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_btree_set_corrupt(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level);
/* Check for btree xref discrepancies. */
-void xfs_scrub_btree_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_btree_xref_set_corrupt(struct xfs_scrub *sc,
struct xfs_btree_cur *cur, int level);
-struct xfs_scrub_btree;
-typedef int (*xfs_scrub_btree_rec_fn)(
- struct xfs_scrub_btree *bs,
+struct xchk_btree;
+typedef int (*xchk_btree_rec_fn)(
+ struct xchk_btree *bs,
union xfs_btree_rec *rec);
-struct xfs_scrub_btree {
+struct xchk_btree {
/* caller-provided scrub state */
- struct xfs_scrub_context *sc;
- struct xfs_btree_cur *cur;
- xfs_scrub_btree_rec_fn scrub_rec;
- struct xfs_owner_info *oinfo;
- void *private;
+ struct xfs_scrub *sc;
+ struct xfs_btree_cur *cur;
+ xchk_btree_rec_fn scrub_rec;
+ struct xfs_owner_info *oinfo;
+ void *private;
/* internal scrub state */
- union xfs_btree_rec lastrec;
- bool firstrec;
- union xfs_btree_key lastkey[XFS_BTREE_MAXLEVELS];
- bool firstkey[XFS_BTREE_MAXLEVELS];
- struct list_head to_check;
+ union xfs_btree_rec lastrec;
+ bool firstrec;
+ union xfs_btree_key lastkey[XFS_BTREE_MAXLEVELS];
+ bool firstkey[XFS_BTREE_MAXLEVELS];
+ struct list_head to_check;
};
-int xfs_scrub_btree(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
- xfs_scrub_btree_rec_fn scrub_fn,
- struct xfs_owner_info *oinfo, void *private);
+int xchk_btree(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
+ xchk_btree_rec_fn scrub_fn, struct xfs_owner_info *oinfo,
+ void *private);
#endif /* __XFS_SCRUB_BTREE_H__ */
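For context, a caller of the renamed entry point supplies an xchk_btree_rec_fn and lets xchk_btree() walk every record. A minimal sketch under the new names (the callback and wrapper here are hypothetical, not part of this patch):

STATIC int
xchk_example_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	/* A real checker decodes *rec and flags problems like so: */
	if (0 /* record violates some invariant */)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	return 0;
}

STATIC int
xchk_example_btree(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	struct xfs_owner_info	*oinfo)
{
	return xchk_btree(sc, cur, xchk_example_rec, oinfo, NULL);
}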
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 70e70c69f83f..346b02abccf7 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -68,20 +68,20 @@
/* Check for operational errors. */
static bool
-__xfs_scrub_process_error(
- struct xfs_scrub_context *sc,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- int *error,
- __u32 errflag,
- void *ret_ip)
+__xchk_process_error(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
switch (*error) {
case 0:
return true;
case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */
- trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break;
case -EFSBADCRC:
case -EFSCORRUPTED:
@@ -90,7 +90,7 @@ __xfs_scrub_process_error(
*error = 0;
/* fall through */
default:
- trace_xfs_scrub_op_error(sc, agno, bno, *error,
+ trace_xchk_op_error(sc, agno, bno, *error,
ret_ip);
break;
}
@@ -98,43 +98,43 @@ __xfs_scrub_process_error(
}
bool
-xfs_scrub_process_error(
- struct xfs_scrub_context *sc,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- int *error)
+xchk_process_error(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error)
{
- return __xfs_scrub_process_error(sc, agno, bno, error,
+ return __xchk_process_error(sc, agno, bno, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}
bool
-xfs_scrub_xref_process_error(
- struct xfs_scrub_context *sc,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- int *error)
+xchk_xref_process_error(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error)
{
- return __xfs_scrub_process_error(sc, agno, bno, error,
+ return __xchk_process_error(sc, agno, bno, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
/* Check for operational errors for a file offset. */
static bool
-__xfs_scrub_fblock_process_error(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset,
- int *error,
- __u32 errflag,
- void *ret_ip)
+__xchk_fblock_process_error(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
switch (*error) {
case 0:
return true;
case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */
- trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break;
case -EFSBADCRC:
case -EFSCORRUPTED:
@@ -143,7 +143,7 @@ __xfs_scrub_fblock_process_error(
*error = 0;
/* fall through */
default:
- trace_xfs_scrub_file_op_error(sc, whichfork, offset, *error,
+ trace_xchk_file_op_error(sc, whichfork, offset, *error,
ret_ip);
break;
}
@@ -151,24 +151,24 @@ __xfs_scrub_fblock_process_error(
}
bool
-xfs_scrub_fblock_process_error(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset,
- int *error)
+xchk_fblock_process_error(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error)
{
- return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
+ return __xchk_fblock_process_error(sc, whichfork, offset, error,
XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}
bool
-xfs_scrub_fblock_xref_process_error(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset,
- int *error)
+xchk_fblock_xref_process_error(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error)
{
- return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
+ return __xchk_fblock_process_error(sc, whichfork, offset, error,
XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
@@ -186,12 +186,12 @@ xfs_scrub_fblock_xref_process_error(
/* Record a block which could be optimized. */
void
-xfs_scrub_block_set_preen(
- struct xfs_scrub_context *sc,
- struct xfs_buf *bp)
+xchk_block_set_preen(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
- trace_xfs_scrub_block_preen(sc, bp->b_bn, __return_address);
+ trace_xchk_block_preen(sc, bp->b_bn, __return_address);
}
/*
@@ -200,32 +200,32 @@ xfs_scrub_block_set_preen(
* the block location of the inode record itself.
*/
void
-xfs_scrub_ino_set_preen(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino)
+xchk_ino_set_preen(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
- trace_xfs_scrub_ino_preen(sc, ino, __return_address);
+ trace_xchk_ino_preen(sc, ino, __return_address);
}
/* Record a corrupt block. */
void
-xfs_scrub_block_set_corrupt(
- struct xfs_scrub_context *sc,
- struct xfs_buf *bp)
+xchk_block_set_corrupt(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
- trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
+ trace_xchk_block_error(sc, bp->b_bn, __return_address);
}
/* Record a corruption while cross-referencing. */
void
-xfs_scrub_block_xref_set_corrupt(
- struct xfs_scrub_context *sc,
- struct xfs_buf *bp)
+xchk_block_xref_set_corrupt(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
- trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
+ trace_xchk_block_error(sc, bp->b_bn, __return_address);
}
/*
@@ -234,44 +234,44 @@ xfs_scrub_block_xref_set_corrupt(
* inode record itself.
*/
void
-xfs_scrub_ino_set_corrupt(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino)
+xchk_ino_set_corrupt(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
- trace_xfs_scrub_ino_error(sc, ino, __return_address);
+ trace_xchk_ino_error(sc, ino, __return_address);
}
/* Record a corruption while cross-referencing with an inode. */
void
-xfs_scrub_ino_xref_set_corrupt(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino)
+xchk_ino_xref_set_corrupt(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
- trace_xfs_scrub_ino_error(sc, ino, __return_address);
+ trace_xchk_ino_error(sc, ino, __return_address);
}
/* Record corruption in a block indexed by a file fork. */
void
-xfs_scrub_fblock_set_corrupt(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset)
+xchk_fblock_set_corrupt(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
- trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
+ trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}
/* Record a corruption while cross-referencing a fork block. */
void
-xfs_scrub_fblock_xref_set_corrupt(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset)
+xchk_fblock_xref_set_corrupt(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
- trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
+ trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}
/*
@@ -279,32 +279,32 @@ xfs_scrub_fblock_xref_set_corrupt(
* incorrect.
*/
void
-xfs_scrub_ino_set_warning(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino)
+xchk_ino_set_warning(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
- trace_xfs_scrub_ino_warning(sc, ino, __return_address);
+ trace_xchk_ino_warning(sc, ino, __return_address);
}
/* Warn about a block indexed by a file fork that needs review. */
void
-xfs_scrub_fblock_set_warning(
- struct xfs_scrub_context *sc,
- int whichfork,
- xfs_fileoff_t offset)
+xchk_fblock_set_warning(
+ struct xfs_scrub *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
- trace_xfs_scrub_fblock_warning(sc, whichfork, offset, __return_address);
+ trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}
/* Signal an incomplete scrub. */
void
-xfs_scrub_set_incomplete(
- struct xfs_scrub_context *sc)
+xchk_set_incomplete(
+ struct xfs_scrub *sc)
{
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
- trace_xfs_scrub_incomplete(sc, __return_address);
+ trace_xchk_incomplete(sc, __return_address);
}
/*
@@ -312,20 +312,20 @@ xfs_scrub_set_incomplete(
* at least according to the reverse mapping data.
*/
-struct xfs_scrub_rmap_ownedby_info {
+struct xchk_rmap_ownedby_info {
struct xfs_owner_info *oinfo;
xfs_filblks_t *blocks;
};
STATIC int
-xfs_scrub_count_rmap_ownedby_irec(
- struct xfs_btree_cur *cur,
- struct xfs_rmap_irec *rec,
- void *priv)
+xchk_count_rmap_ownedby_irec(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv)
{
- struct xfs_scrub_rmap_ownedby_info *sroi = priv;
- bool irec_attr;
- bool oinfo_attr;
+ struct xchk_rmap_ownedby_info *sroi = priv;
+ bool irec_attr;
+ bool oinfo_attr;
irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;
@@ -344,19 +344,19 @@ xfs_scrub_count_rmap_ownedby_irec(
* The caller should pass us an rmapbt cursor.
*/
int
-xfs_scrub_count_rmap_ownedby_ag(
- struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- struct xfs_owner_info *oinfo,
- xfs_filblks_t *blocks)
+xchk_count_rmap_ownedby_ag(
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ struct xfs_owner_info *oinfo,
+ xfs_filblks_t *blocks)
{
- struct xfs_scrub_rmap_ownedby_info sroi;
+ struct xchk_rmap_ownedby_info sroi;
sroi.oinfo = oinfo;
*blocks = 0;
sroi.blocks = blocks;
- return xfs_rmap_query_all(cur, xfs_scrub_count_rmap_ownedby_irec,
+ return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
&sroi);
}
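xfs_rmap_query_all() visits every record in the rmapbt and forwards the priv pointer to the callback above, which is how the block count accumulates. A sketch of a caller, assuming the AG cursors have already been set up in sc->sa:

STATIC void
xchk_example_xref_rmap(
	struct xfs_scrub	*sc)
{
	struct xfs_owner_info	oinfo;
	xfs_filblks_t		blocks;
	int			error;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
			&blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	/* ...compare blocks against the expected count... */
}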
@@ -371,8 +371,8 @@ xfs_scrub_count_rmap_ownedby_ag(
/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
- struct xfs_scrub_context *sc,
- unsigned int type)
+ struct xfs_scrub *sc,
+ unsigned int type)
{
/* Return all AG header read failures when scanning btrees. */
if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
@@ -392,20 +392,20 @@ want_ag_read_header_failure(
/*
* Grab all the headers for an AG.
*
- * The headers should be released by xfs_scrub_ag_free, but as a fail
+ * The headers should be released by xchk_ag_free, but as a fail
* safe we attach all the buffers we grab to the scrub transaction so
* they'll all be freed when we cancel it.
*/
int
-xfs_scrub_ag_read_headers(
- struct xfs_scrub_context *sc,
- xfs_agnumber_t agno,
- struct xfs_buf **agi,
- struct xfs_buf **agf,
- struct xfs_buf **agfl)
-{
- struct xfs_mount *mp = sc->mp;
- int error;
+xchk_ag_read_headers(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ struct xfs_buf **agi,
+ struct xfs_buf **agf,
+ struct xfs_buf **agfl)
+{
+ struct xfs_mount *mp = sc->mp;
+ int error;
error = xfs_ialloc_read_agi(mp, sc->tp, agno, agi);
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
@@ -425,8 +425,8 @@ out:
/* Release all the AG btree cursors. */
void
-xfs_scrub_ag_btcur_free(
- struct xfs_scrub_ag *sa)
+xchk_ag_btcur_free(
+ struct xchk_ag *sa)
{
if (sa->refc_cur)
xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
@@ -451,12 +451,12 @@ xfs_scrub_ag_btcur_free(
/* Initialize all the btree cursors for an AG. */
int
-xfs_scrub_ag_btcur_init(
- struct xfs_scrub_context *sc,
- struct xfs_scrub_ag *sa)
+xchk_ag_btcur_init(
+ struct xfs_scrub *sc,
+ struct xchk_ag *sa)
{
- struct xfs_mount *mp = sc->mp;
- xfs_agnumber_t agno = sa->agno;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agnumber_t agno = sa->agno;
if (sa->agf_bp) {
/* Set up a bnobt cursor for cross-referencing. */
@@ -499,7 +499,7 @@ xfs_scrub_ag_btcur_init(
/* Set up a refcountbt cursor for cross-referencing. */
if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb)) {
sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
- sa->agf_bp, agno, NULL);
+ sa->agf_bp, agno);
if (!sa->refc_cur)
goto err;
}
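One functional change rides along with the rename: the trailing NULL passed to xfs_refcountbt_init_cursor() is gone, which presumably matches the constructor losing its deferred-ops argument elsewhere in this series (inferred from the hunk; the prototype change itself is not shown here). Sketched post-series prototype:

struct xfs_btree_cur *xfs_refcountbt_init_cursor(struct xfs_mount *mp,
		struct xfs_trans *tp, struct xfs_buf *agbp,
		xfs_agnumber_t agno);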
@@ -511,11 +511,11 @@ err:
/* Release the AG header context and btree cursors. */
void
-xfs_scrub_ag_free(
- struct xfs_scrub_context *sc,
- struct xfs_scrub_ag *sa)
+xchk_ag_free(
+ struct xfs_scrub *sc,
+ struct xchk_ag *sa)
{
- xfs_scrub_ag_btcur_free(sa);
+ xchk_ag_btcur_free(sa);
if (sa->agfl_bp) {
xfs_trans_brelse(sc->tp, sa->agfl_bp);
sa->agfl_bp = NULL;
@@ -543,30 +543,30 @@ xfs_scrub_ag_free(
* transaction ourselves.
*/
int
-xfs_scrub_ag_init(
- struct xfs_scrub_context *sc,
- xfs_agnumber_t agno,
- struct xfs_scrub_ag *sa)
+xchk_ag_init(
+ struct xfs_scrub *sc,
+ xfs_agnumber_t agno,
+ struct xchk_ag *sa)
{
- int error;
+ int error;
sa->agno = agno;
- error = xfs_scrub_ag_read_headers(sc, agno, &sa->agi_bp,
+ error = xchk_ag_read_headers(sc, agno, &sa->agi_bp,
&sa->agf_bp, &sa->agfl_bp);
if (error)
return error;
- return xfs_scrub_ag_btcur_init(sc, sa);
+ return xchk_ag_btcur_init(sc, sa);
}
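Taken together, the helpers above define the AG scrub lifecycle: grab headers and cursors, run the checks, then tear everything down. A minimal sketch of a hypothetical AG checker under the new names:

STATIC int
xchk_example_ag(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno)
{
	int			error;

	error = xchk_ag_init(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, 0, &error))
		return error;

	/* ...cross-reference against sc->sa.bno_cur, sc->sa.rmap_cur... */

	xchk_ag_free(sc, &sc->sa);
	return 0;
}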
/*
* Grab the per-ag structure if we haven't already gotten it. Teardown of the
- * xfs_scrub_ag will release it for us.
+ * xchk_ag will release it for us.
*/
void
-xfs_scrub_perag_get(
+xchk_perag_get(
struct xfs_mount *mp,
- struct xfs_scrub_ag *sa)
+ struct xchk_ag *sa)
{
if (!sa->pag)
sa->pag = xfs_perag_get(mp, sa->agno);
@@ -585,9 +585,9 @@ xfs_scrub_perag_get(
* the metadata object.
*/
int
-xfs_scrub_trans_alloc(
- struct xfs_scrub_context *sc,
- uint resblks)
+xchk_trans_alloc(
+ struct xfs_scrub *sc,
+ uint resblks)
{
if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
@@ -598,25 +598,25 @@ xfs_scrub_trans_alloc(
/* Set us up with a transaction and an empty context. */
int
-xfs_scrub_setup_fs(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_fs(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- uint resblks;
+ uint resblks;
- resblks = xfs_repair_calc_ag_resblks(sc);
- return xfs_scrub_trans_alloc(sc, resblks);
+ resblks = xrep_calc_ag_resblks(sc);
+ return xchk_trans_alloc(sc, resblks);
}
/* Set us up with AG headers and btree cursors. */
int
-xfs_scrub_setup_ag_btree(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip,
- bool force_log)
+xchk_setup_ag_btree(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip,
+ bool force_log)
{
- struct xfs_mount *mp = sc->mp;
- int error;
+ struct xfs_mount *mp = sc->mp;
+ int error;
/*
* If the caller asks us to checkpoint the log, do so. This
@@ -625,21 +625,21 @@ xfs_scrub_setup_ag_btree(
* document why they need to do so.
*/
if (force_log) {
- error = xfs_scrub_checkpoint_log(mp);
+ error = xchk_checkpoint_log(mp);
if (error)
return error;
}
- error = xfs_scrub_setup_fs(sc, ip);
+ error = xchk_setup_fs(sc, ip);
if (error)
return error;
- return xfs_scrub_ag_init(sc, sc->sm->sm_agno, &sc->sa);
+ return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}
/* Push everything out of the log onto disk. */
int
-xfs_scrub_checkpoint_log(
+xchk_checkpoint_log(
struct xfs_mount *mp)
{
int error;
@@ -657,14 +657,14 @@ xfs_scrub_checkpoint_log(
* The inode is not locked.
*/
int
-xfs_scrub_get_inode(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip_in)
+xchk_get_inode(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip_in)
{
- struct xfs_imap imap;
- struct xfs_mount *mp = sc->mp;
- struct xfs_inode *ip = NULL;
- int error;
+ struct xfs_imap imap;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip = NULL;
+ int error;
/* We want to scan the inode we already had opened. */
if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
@@ -704,14 +704,14 @@ xfs_scrub_get_inode(
error = -EFSCORRUPTED;
/* fall through */
default:
- trace_xfs_scrub_op_error(sc,
+ trace_xchk_op_error(sc,
XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
error, __return_address);
return error;
}
if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
- iput(VFS_I(ip));
+ xfs_irele(ip);
return -ENOENT;
}
@@ -721,21 +721,21 @@ xfs_scrub_get_inode(
/* Set us up to scrub a file's contents. */
int
-xfs_scrub_setup_inode_contents(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip,
- unsigned int resblks)
+xchk_setup_inode_contents(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip,
+ unsigned int resblks)
{
- int error;
+ int error;
- error = xfs_scrub_get_inode(sc, ip);
+ error = xchk_get_inode(sc, ip);
if (error)
return error;
/* Got the inode, lock it and we're ready to go. */
sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags);
- error = xfs_scrub_trans_alloc(sc, resblks);
+ error = xchk_trans_alloc(sc, resblks);
if (error)
goto out;
sc->ilock_flags |= XFS_ILOCK_EXCL;
@@ -752,13 +752,13 @@ out:
* the cursor and skip the check.
*/
bool
-xfs_scrub_should_check_xref(
- struct xfs_scrub_context *sc,
- int *error,
- struct xfs_btree_cur **curpp)
+xchk_should_check_xref(
+ struct xfs_scrub *sc,
+ int *error,
+ struct xfs_btree_cur **curpp)
{
/* No point in xref if we already know we're corrupt. */
- if (xfs_scrub_skip_xref(sc->sm))
+ if (xchk_skip_xref(sc->sm))
return false;
if (*error == 0)
@@ -775,7 +775,7 @@ xfs_scrub_should_check_xref(
}
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
- trace_xfs_scrub_xref_error(sc, *error, __return_address);
+ trace_xchk_xref_error(sc, *error, __return_address);
/*
* Errors encountered during cross-referencing with another
@@ -787,25 +787,25 @@ xfs_scrub_should_check_xref(
/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
-xfs_scrub_buffer_recheck(
- struct xfs_scrub_context *sc,
- struct xfs_buf *bp)
+xchk_buffer_recheck(
+ struct xfs_scrub *sc,
+ struct xfs_buf *bp)
{
- xfs_failaddr_t fa;
+ xfs_failaddr_t fa;
if (bp->b_ops == NULL) {
- xfs_scrub_block_set_corrupt(sc, bp);
+ xchk_block_set_corrupt(sc, bp);
return;
}
if (bp->b_ops->verify_struct == NULL) {
- xfs_scrub_set_incomplete(sc);
+ xchk_set_incomplete(sc);
return;
}
fa = bp->b_ops->verify_struct(bp);
if (!fa)
return;
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
- trace_xfs_scrub_block_error(sc, bp->b_bn, fa);
+ trace_xchk_block_error(sc, bp->b_bn, fa);
}
/*
@@ -813,38 +813,38 @@ xfs_scrub_buffer_recheck(
* pointed to by sc->ip and the ILOCK must be held.
*/
int
-xfs_scrub_metadata_inode_forks(
- struct xfs_scrub_context *sc)
+xchk_metadata_inode_forks(
+ struct xfs_scrub *sc)
{
- __u32 smtype;
- bool shared;
- int error;
+ __u32 smtype;
+ bool shared;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return 0;
/* Metadata inodes don't live on the rt device. */
if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0;
}
/* They should never participate in reflink. */
if (xfs_is_reflink_inode(sc->ip)) {
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0;
}
/* They also should never have extended attributes. */
if (xfs_inode_hasattr(sc->ip)) {
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
return 0;
}
/* Invoke the data fork scrubber. */
smtype = sc->sm->sm_type;
sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
- error = xfs_scrub_bmap_data(sc);
+ error = xchk_bmap_data(sc);
sc->sm->sm_type = smtype;
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error;
@@ -853,11 +853,11 @@ xfs_scrub_metadata_inode_forks(
if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
&shared);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0,
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
&error))
return error;
if (shared)
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
}
return error;
@@ -871,7 +871,7 @@ xfs_scrub_metadata_inode_forks(
* we can't.
*/
int
-xfs_scrub_ilock_inverted(
+xchk_ilock_inverted(
struct xfs_inode *ip,
uint lock_mode)
{
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 2172bd5361e2..2d4324d12f9a 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -12,8 +12,8 @@
* Note that we're careful not to make any judgements about *error.
*/
static inline bool
-xfs_scrub_should_terminate(
- struct xfs_scrub_context *sc,
+xchk_should_terminate(
+ struct xfs_scrub *sc,
int *error)
{
if (fatal_signal_pending(current)) {
@@ -24,121 +24,118 @@ xfs_scrub_should_terminate(
return false;
}
-int xfs_scrub_trans_alloc(struct xfs_scrub_context *sc, uint resblks);
-bool xfs_scrub_process_error(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
+bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
xfs_agblock_t bno, int *error);
-bool xfs_scrub_fblock_process_error(struct xfs_scrub_context *sc, int whichfork,
+bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset, int *error);
-bool xfs_scrub_xref_process_error(struct xfs_scrub_context *sc,
+bool xchk_xref_process_error(struct xfs_scrub *sc,
xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
-bool xfs_scrub_fblock_xref_process_error(struct xfs_scrub_context *sc,
+bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
int whichfork, xfs_fileoff_t offset, int *error);
-void xfs_scrub_block_set_preen(struct xfs_scrub_context *sc,
+void xchk_block_set_preen(struct xfs_scrub *sc,
struct xfs_buf *bp);
-void xfs_scrub_ino_set_preen(struct xfs_scrub_context *sc, xfs_ino_t ino);
+void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);
-void xfs_scrub_block_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_block_set_corrupt(struct xfs_scrub *sc,
struct xfs_buf *bp);
-void xfs_scrub_ino_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino);
-void xfs_scrub_fblock_set_corrupt(struct xfs_scrub_context *sc, int whichfork,
+void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
+void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset);
-void xfs_scrub_block_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
struct xfs_buf *bp);
-void xfs_scrub_ino_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
xfs_ino_t ino);
-void xfs_scrub_fblock_xref_set_corrupt(struct xfs_scrub_context *sc,
+void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
int whichfork, xfs_fileoff_t offset);
-void xfs_scrub_ino_set_warning(struct xfs_scrub_context *sc, xfs_ino_t ino);
-void xfs_scrub_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork,
+void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
+void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset);
-void xfs_scrub_set_incomplete(struct xfs_scrub_context *sc);
-int xfs_scrub_checkpoint_log(struct xfs_mount *mp);
+void xchk_set_incomplete(struct xfs_scrub *sc);
+int xchk_checkpoint_log(struct xfs_mount *mp);
/* Are we set up for a cross-referencing check? */
-bool xfs_scrub_should_check_xref(struct xfs_scrub_context *sc, int *error,
+bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
struct xfs_btree_cur **curpp);
/* Setup functions */
-int xfs_scrub_setup_fs(struct xfs_scrub_context *sc, struct xfs_inode *ip);
-int xfs_scrub_setup_ag_allocbt(struct xfs_scrub_context *sc,
+int xchk_setup_fs(struct xfs_scrub *sc, struct xfs_inode *ip);
+int xchk_setup_ag_allocbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_ag_iallocbt(struct xfs_scrub_context *sc,
+int xchk_setup_ag_iallocbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_ag_rmapbt(struct xfs_scrub_context *sc,
+int xchk_setup_ag_rmapbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_ag_refcountbt(struct xfs_scrub_context *sc,
+int xchk_setup_ag_refcountbt(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_inode(struct xfs_scrub_context *sc,
+int xchk_setup_inode(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_inode_bmap(struct xfs_scrub_context *sc,
+int xchk_setup_inode_bmap(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_inode_bmap_data(struct xfs_scrub_context *sc,
+int xchk_setup_inode_bmap_data(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_directory(struct xfs_scrub_context *sc,
+int xchk_setup_directory(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_xattr(struct xfs_scrub_context *sc,
+int xchk_setup_xattr(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_symlink(struct xfs_scrub_context *sc,
+int xchk_setup_symlink(struct xfs_scrub *sc,
struct xfs_inode *ip);
-int xfs_scrub_setup_parent(struct xfs_scrub_context *sc,
+int xchk_setup_parent(struct xfs_scrub *sc,
struct xfs_inode *ip);
#ifdef CONFIG_XFS_RT
-int xfs_scrub_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip);
+int xchk_setup_rt(struct xfs_scrub *sc, struct xfs_inode *ip);
#else
static inline int
-xfs_scrub_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip)
+xchk_setup_rt(struct xfs_scrub *sc, struct xfs_inode *ip)
{
return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
-int xfs_scrub_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip);
+int xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip);
#else
static inline int
-xfs_scrub_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip)
+xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip)
{
return -ENOENT;
}
#endif
-void xfs_scrub_ag_free(struct xfs_scrub_context *sc, struct xfs_scrub_ag *sa);
-int xfs_scrub_ag_init(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
- struct xfs_scrub_ag *sa);
-void xfs_scrub_perag_get(struct xfs_mount *mp, struct xfs_scrub_ag *sa);
-int xfs_scrub_ag_read_headers(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
- struct xfs_buf **agi, struct xfs_buf **agf,
- struct xfs_buf **agfl);
-void xfs_scrub_ag_btcur_free(struct xfs_scrub_ag *sa);
-int xfs_scrub_ag_btcur_init(struct xfs_scrub_context *sc,
- struct xfs_scrub_ag *sa);
-int xfs_scrub_count_rmap_ownedby_ag(struct xfs_scrub_context *sc,
- struct xfs_btree_cur *cur,
- struct xfs_owner_info *oinfo,
- xfs_filblks_t *blocks);
+void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
+int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
+ struct xchk_ag *sa);
+void xchk_perag_get(struct xfs_mount *mp, struct xchk_ag *sa);
+int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
+ struct xfs_buf **agi, struct xfs_buf **agf,
+ struct xfs_buf **agfl);
+void xchk_ag_btcur_free(struct xchk_ag *sa);
+int xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
+int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
+ struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);
-int xfs_scrub_setup_ag_btree(struct xfs_scrub_context *sc,
- struct xfs_inode *ip, bool force_log);
-int xfs_scrub_get_inode(struct xfs_scrub_context *sc, struct xfs_inode *ip_in);
-int xfs_scrub_setup_inode_contents(struct xfs_scrub_context *sc,
- struct xfs_inode *ip, unsigned int resblks);
-void xfs_scrub_buffer_recheck(struct xfs_scrub_context *sc, struct xfs_buf *bp);
+int xchk_setup_ag_btree(struct xfs_scrub *sc, struct xfs_inode *ip,
+ bool force_log);
+int xchk_get_inode(struct xfs_scrub *sc, struct xfs_inode *ip_in);
+int xchk_setup_inode_contents(struct xfs_scrub *sc, struct xfs_inode *ip,
+ unsigned int resblks);
+void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);
/*
* Don't bother cross-referencing if we already found corruption or
* cross-referencing discrepancies.
*/
-static inline bool xfs_scrub_skip_xref(struct xfs_scrub_metadata *sm)
+static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
{
return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
XFS_SCRUB_OFLAG_XCORRUPT);
}
-int xfs_scrub_metadata_inode_forks(struct xfs_scrub_context *sc);
-int xfs_scrub_ilock_inverted(struct xfs_inode *ip, uint lock_mode);
+int xchk_metadata_inode_forks(struct xfs_scrub *sc);
+int xchk_ilock_inverted(struct xfs_inode *ip, uint lock_mode);
#endif /* __XFS_SCRUB_COMMON_H__ */
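xchk_should_terminate() is the piece every scan loop shares: the inline above (truncated by the hunk) returns true when a fatal signal is pending, recording -EAGAIN in *error if nothing else is set, so long-running scans bail out promptly. The resulting loop shape, with a hypothetical per-object check standing in for the real work:

	while (!done) {
		error = xchk_example_check_next(sc);	/* hypothetical */
		if (error)
			break;
		if (xchk_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			break;
	}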
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index d700c4d4d4ef..f1260b4bfdee 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -35,12 +35,12 @@
* operational errors in common.c.
*/
bool
-xfs_scrub_da_process_error(
- struct xfs_scrub_da_btree *ds,
- int level,
- int *error)
+xchk_da_process_error(
+ struct xchk_da_btree *ds,
+ int level,
+ int *error)
{
- struct xfs_scrub_context *sc = ds->sc;
+ struct xfs_scrub *sc = ds->sc;
if (*error == 0)
return true;
@@ -48,7 +48,7 @@ xfs_scrub_da_process_error(
switch (*error) {
case -EDEADLOCK:
/* Used to restart an op with deadlock avoidance. */
- trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
break;
case -EFSBADCRC:
case -EFSCORRUPTED:
@@ -57,7 +57,7 @@ xfs_scrub_da_process_error(
*error = 0;
/* fall through */
default:
- trace_xfs_scrub_file_op_error(sc, ds->dargs.whichfork,
+ trace_xchk_file_op_error(sc, ds->dargs.whichfork,
xfs_dir2_da_to_db(ds->dargs.geo,
ds->state->path.blk[level].blkno),
*error, __return_address);
@@ -71,15 +71,15 @@ xfs_scrub_da_process_error(
* operational errors in common.c.
*/
void
-xfs_scrub_da_set_corrupt(
- struct xfs_scrub_da_btree *ds,
- int level)
+xchk_da_set_corrupt(
+ struct xchk_da_btree *ds,
+ int level)
{
- struct xfs_scrub_context *sc = ds->sc;
+ struct xfs_scrub *sc = ds->sc;
sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
- trace_xfs_scrub_fblock_error(sc, ds->dargs.whichfork,
+ trace_xchk_fblock_error(sc, ds->dargs.whichfork,
xfs_dir2_da_to_db(ds->dargs.geo,
ds->state->path.blk[level].blkno),
__return_address);
@@ -87,14 +87,14 @@ xfs_scrub_da_set_corrupt(
/* Find an entry at a certain level in a da btree. */
STATIC void *
-xfs_scrub_da_btree_entry(
- struct xfs_scrub_da_btree *ds,
- int level,
- int rec)
+xchk_da_btree_entry(
+ struct xchk_da_btree *ds,
+ int level,
+ int rec)
{
- char *ents;
- struct xfs_da_state_blk *blk;
- void *baddr;
+ char *ents;
+ struct xfs_da_state_blk *blk;
+ void *baddr;
/* Dispatch the entry finding function. */
blk = &ds->state->path.blk[level];
@@ -123,8 +123,8 @@ xfs_scrub_da_btree_entry(
/* Scrub a da btree hash (key). */
int
-xfs_scrub_da_btree_hash(
- struct xfs_scrub_da_btree *ds,
+xchk_da_btree_hash(
+ struct xchk_da_btree *ds,
int level,
__be32 *hashp)
{
@@ -136,7 +136,7 @@ xfs_scrub_da_btree_hash(
/* Is this hash in order? */
hash = be32_to_cpu(*hashp);
if (hash < ds->hashes[level])
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
ds->hashes[level] = hash;
if (level == 0)
@@ -144,10 +144,10 @@ xfs_scrub_da_btree_hash(
/* Is this hash no larger than the parent hash? */
blks = ds->state->path.blk;
- entry = xfs_scrub_da_btree_entry(ds, level - 1, blks[level - 1].index);
+ entry = xchk_da_btree_entry(ds, level - 1, blks[level - 1].index);
parent_hash = be32_to_cpu(entry->hashval);
if (parent_hash < hash)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
return 0;
}
@@ -157,13 +157,13 @@ xfs_scrub_da_btree_hash(
* pointer.
*/
STATIC bool
-xfs_scrub_da_btree_ptr_ok(
- struct xfs_scrub_da_btree *ds,
- int level,
- xfs_dablk_t blkno)
+xchk_da_btree_ptr_ok(
+ struct xchk_da_btree *ds,
+ int level,
+ xfs_dablk_t blkno)
{
if (blkno < ds->lowest || (ds->highest != 0 && blkno >= ds->highest)) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
return false;
}
@@ -176,7 +176,7 @@ xfs_scrub_da_btree_ptr_ok(
* leaf1, we must multiplex the verifiers.
*/
static void
-xfs_scrub_da_btree_read_verify(
+xchk_da_btree_read_verify(
struct xfs_buf *bp)
{
struct xfs_da_blkinfo *info = bp->b_addr;
@@ -198,7 +198,7 @@ xfs_scrub_da_btree_read_verify(
}
}
static void
-xfs_scrub_da_btree_write_verify(
+xchk_da_btree_write_verify(
struct xfs_buf *bp)
{
struct xfs_da_blkinfo *info = bp->b_addr;
@@ -220,7 +220,7 @@ xfs_scrub_da_btree_write_verify(
}
}
static void *
-xfs_scrub_da_btree_verify(
+xchk_da_btree_verify(
struct xfs_buf *bp)
{
struct xfs_da_blkinfo *info = bp->b_addr;
@@ -236,23 +236,23 @@ xfs_scrub_da_btree_verify(
}
}
-static const struct xfs_buf_ops xfs_scrub_da_btree_buf_ops = {
- .name = "xfs_scrub_da_btree",
- .verify_read = xfs_scrub_da_btree_read_verify,
- .verify_write = xfs_scrub_da_btree_write_verify,
- .verify_struct = xfs_scrub_da_btree_verify,
+static const struct xfs_buf_ops xchk_da_btree_buf_ops = {
+ .name = "xchk_da_btree",
+ .verify_read = xchk_da_btree_read_verify,
+ .verify_write = xchk_da_btree_write_verify,
+ .verify_struct = xchk_da_btree_verify,
};
/* Check a block's sibling. */
STATIC int
-xfs_scrub_da_btree_block_check_sibling(
- struct xfs_scrub_da_btree *ds,
- int level,
- int direction,
- xfs_dablk_t sibling)
+xchk_da_btree_block_check_sibling(
+ struct xchk_da_btree *ds,
+ int level,
+ int direction,
+ xfs_dablk_t sibling)
{
- int retval;
- int error;
+ int retval;
+ int error;
memcpy(&ds->state->altpath, &ds->state->path,
sizeof(ds->state->altpath));
@@ -265,7 +265,7 @@ xfs_scrub_da_btree_block_check_sibling(
error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
direction, false, &retval);
if (error == 0 && retval == 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
error = 0;
goto out;
}
@@ -273,19 +273,19 @@ xfs_scrub_da_btree_block_check_sibling(
/* Move the alternate cursor one block in the direction given. */
error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
direction, false, &retval);
- if (!xfs_scrub_da_process_error(ds, level, &error))
+ if (!xchk_da_process_error(ds, level, &error))
return error;
if (retval) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
return error;
}
if (ds->state->altpath.blk[level].bp)
- xfs_scrub_buffer_recheck(ds->sc,
+ xchk_buffer_recheck(ds->sc,
ds->state->altpath.blk[level].bp);
/* Compare upper level pointer to sibling pointer. */
if (ds->state->altpath.blk[level].blkno != sibling)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp);
out:
return error;
@@ -293,14 +293,14 @@ out:
/* Check a block's sibling pointers. */
STATIC int
-xfs_scrub_da_btree_block_check_siblings(
- struct xfs_scrub_da_btree *ds,
- int level,
- struct xfs_da_blkinfo *hdr)
+xchk_da_btree_block_check_siblings(
+ struct xchk_da_btree *ds,
+ int level,
+ struct xfs_da_blkinfo *hdr)
{
- xfs_dablk_t forw;
- xfs_dablk_t back;
- int error = 0;
+ xfs_dablk_t forw;
+ xfs_dablk_t back;
+ int error = 0;
forw = be32_to_cpu(hdr->forw);
back = be32_to_cpu(hdr->back);
@@ -308,7 +308,7 @@ xfs_scrub_da_btree_block_check_siblings(
/* Top level blocks should not have sibling pointers. */
if (level == 0) {
if (forw != 0 || back != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
return 0;
}
@@ -316,10 +316,10 @@ xfs_scrub_da_btree_block_check_siblings(
* Check back (left) and forw (right) pointers. These functions
* absorb error codes for us.
*/
- error = xfs_scrub_da_btree_block_check_sibling(ds, level, 0, back);
+ error = xchk_da_btree_block_check_sibling(ds, level, 0, back);
if (error)
goto out;
- error = xfs_scrub_da_btree_block_check_sibling(ds, level, 1, forw);
+ error = xchk_da_btree_block_check_sibling(ds, level, 1, forw);
out:
memset(&ds->state->altpath, 0, sizeof(ds->state->altpath));
@@ -328,8 +328,8 @@ out:
/* Load a dir/attribute block from a btree. */
STATIC int
-xfs_scrub_da_btree_block(
- struct xfs_scrub_da_btree *ds,
+xchk_da_btree_block(
+ struct xchk_da_btree *ds,
int level,
xfs_dablk_t blkno)
{
@@ -355,17 +355,17 @@ xfs_scrub_da_btree_block(
/* Check the pointer. */
blk->blkno = blkno;
- if (!xfs_scrub_da_btree_ptr_ok(ds, level, blkno))
+ if (!xchk_da_btree_ptr_ok(ds, level, blkno))
goto out_nobuf;
/* Read the buffer. */
error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno, -2,
&blk->bp, dargs->whichfork,
- &xfs_scrub_da_btree_buf_ops);
- if (!xfs_scrub_da_process_error(ds, level, &error))
+ &xchk_da_btree_buf_ops);
+ if (!xchk_da_process_error(ds, level, &error))
goto out_nobuf;
if (blk->bp)
- xfs_scrub_buffer_recheck(ds->sc, blk->bp);
+ xchk_buffer_recheck(ds->sc, blk->bp);
/*
* We didn't find a dir btree root block, which means that
@@ -378,7 +378,7 @@ xfs_scrub_da_btree_block(
/* It's /not/ ok for attr trees not to have a da btree. */
if (blk->bp == NULL) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out_nobuf;
}
@@ -388,17 +388,17 @@ xfs_scrub_da_btree_block(
/* We only started zeroing the header on v5 filesystems. */
if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb) && hdr3->hdr.pad)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
/* Check the owner. */
if (xfs_sb_version_hascrc(&ip->i_mount->m_sb)) {
owner = be64_to_cpu(hdr3->owner);
if (owner != ip->i_ino)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
}
/* Check the siblings. */
- error = xfs_scrub_da_btree_block_check_siblings(ds, level, &hdr3->hdr);
+ error = xchk_da_btree_block_check_siblings(ds, level, &hdr3->hdr);
if (error)
goto out;
@@ -411,7 +411,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_ATTR_LEAF_MAGIC;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, pmaxrecs);
if (ds->tree_level != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
break;
case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
@@ -420,7 +420,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_DIR2_LEAFN_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs);
if (ds->tree_level != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
break;
case XFS_DIR2_LEAF1_MAGIC:
case XFS_DIR3_LEAF1_MAGIC:
@@ -429,7 +429,7 @@ xfs_scrub_da_btree_block(
blk->magic = XFS_DIR2_LEAF1_MAGIC;
blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs);
if (ds->tree_level != 0)
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
break;
case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC:
@@ -443,13 +443,13 @@ xfs_scrub_da_btree_block(
blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval);
if (level == 0) {
if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out_freebp;
}
ds->tree_level = nodehdr.level;
} else {
if (ds->tree_level != nodehdr.level) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out_freebp;
}
}
@@ -457,7 +457,7 @@ xfs_scrub_da_btree_block(
/* XXX: Check hdr3.pad32 once we know how to fix it. */
break;
default:
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out_freebp;
}
@@ -473,13 +473,13 @@ out_nobuf:
/* Visit all nodes and leaves of a da btree. */
int
-xfs_scrub_da_btree(
- struct xfs_scrub_context *sc,
+xchk_da_btree(
+ struct xfs_scrub *sc,
int whichfork,
- xfs_scrub_da_btree_rec_fn scrub_fn,
+ xchk_da_btree_rec_fn scrub_fn,
void *private)
{
- struct xfs_scrub_da_btree ds = {};
+ struct xchk_da_btree ds = {};
struct xfs_mount *mp = sc->mp;
struct xfs_da_state_blk *blks;
struct xfs_da_node_entry *key;
@@ -517,7 +517,7 @@ xfs_scrub_da_btree(
/* Find the root of the da tree, if present. */
blks = ds.state->path.blk;
- error = xfs_scrub_da_btree_block(&ds, level, blkno);
+ error = xchk_da_btree_block(&ds, level, blkno);
if (error)
goto out_state;
/*
@@ -542,12 +542,12 @@ xfs_scrub_da_btree(
}
/* Dispatch record scrubbing. */
- rec = xfs_scrub_da_btree_entry(&ds, level,
+ rec = xchk_da_btree_entry(&ds, level,
blks[level].index);
error = scrub_fn(&ds, level, rec);
if (error)
break;
- if (xfs_scrub_should_terminate(sc, &error) ||
+ if (xchk_should_terminate(sc, &error) ||
(sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
break;
@@ -566,8 +566,8 @@ xfs_scrub_da_btree(
}
/* Hashes in order for scrub? */
- key = xfs_scrub_da_btree_entry(&ds, level, blks[level].index);
- error = xfs_scrub_da_btree_hash(&ds, level, &key->hashval);
+ key = xchk_da_btree_entry(&ds, level, blks[level].index);
+ error = xchk_da_btree_hash(&ds, level, &key->hashval);
if (error)
goto out;
@@ -575,7 +575,7 @@ xfs_scrub_da_btree(
blkno = be32_to_cpu(key->before);
level++;
ds.tree_level--;
- error = xfs_scrub_da_btree_block(&ds, level, blkno);
+ error = xchk_da_btree_block(&ds, level, blkno);
if (error)
goto out;
if (blks[level].bp == NULL)
diff --git a/fs/xfs/scrub/dabtree.h b/fs/xfs/scrub/dabtree.h
index 365f9f0019e6..cb3f0003245b 100644
--- a/fs/xfs/scrub/dabtree.h
+++ b/fs/xfs/scrub/dabtree.h
@@ -8,13 +8,13 @@
/* dir/attr btree */
-struct xfs_scrub_da_btree {
- struct xfs_da_args dargs;
- xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH];
- int maxrecs[XFS_DA_NODE_MAXDEPTH];
- struct xfs_da_state *state;
- struct xfs_scrub_context *sc;
- void *private;
+struct xchk_da_btree {
+ struct xfs_da_args dargs;
+ xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH];
+ int maxrecs[XFS_DA_NODE_MAXDEPTH];
+ struct xfs_da_state *state;
+ struct xfs_scrub *sc;
+ void *private;
/*
* Lowest and highest directory block address in which we expect
@@ -22,24 +22,23 @@ struct xfs_scrub_da_btree {
* (presumably) means between LEAF_OFFSET and FREE_OFFSET; for
* attributes there is no limit.
*/
- xfs_dablk_t lowest;
- xfs_dablk_t highest;
+ xfs_dablk_t lowest;
+ xfs_dablk_t highest;
- int tree_level;
+ int tree_level;
};
-typedef int (*xfs_scrub_da_btree_rec_fn)(struct xfs_scrub_da_btree *ds,
+typedef int (*xchk_da_btree_rec_fn)(struct xchk_da_btree *ds,
int level, void *rec);
/* Check for da btree operation errors. */
-bool xfs_scrub_da_process_error(struct xfs_scrub_da_btree *ds, int level, int *error);
+bool xchk_da_process_error(struct xchk_da_btree *ds, int level, int *error);
/* Check for da btree corruption. */
-void xfs_scrub_da_set_corrupt(struct xfs_scrub_da_btree *ds, int level);
+void xchk_da_set_corrupt(struct xchk_da_btree *ds, int level);
-int xfs_scrub_da_btree_hash(struct xfs_scrub_da_btree *ds, int level,
- __be32 *hashp);
-int xfs_scrub_da_btree(struct xfs_scrub_context *sc, int whichfork,
- xfs_scrub_da_btree_rec_fn scrub_fn, void *private);
+int xchk_da_btree_hash(struct xchk_da_btree *ds, int level, __be32 *hashp);
+int xchk_da_btree(struct xfs_scrub *sc, int whichfork,
+ xchk_da_btree_rec_fn scrub_fn, void *private);
#endif /* __XFS_SCRUB_DABTREE_H__ */
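Directory and attribute scrubbers drive xchk_da_btree() with a per-record callback of the renamed xchk_da_btree_rec_fn type. A sketch of a caller (the callback name is illustrative; the hash check mirrors the dir scrubber below):

STATIC int
xchk_example_da_rec(
	struct xchk_da_btree	*ds,
	int			level,
	void			*rec)
{
	struct xfs_dir2_leaf_entry	*ent = rec;

	/* Hash order is the invariant every da btree record must obey. */
	return xchk_da_btree_hash(ds, level, &ent->hashval);
}

/* ...and the walk itself, from a scrubber's entry point: */
	error = xchk_da_btree(sc, XFS_DATA_FORK, xchk_example_da_rec, NULL);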
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index 86324775fc9b..cd3e4d768a18 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -31,40 +31,40 @@
/* Set us up to scrub directories. */
int
-xfs_scrub_setup_directory(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_directory(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_inode_contents(sc, ip, 0);
+ return xchk_setup_inode_contents(sc, ip, 0);
}
/* Directories */
/* Scrub a directory entry. */
-struct xfs_scrub_dir_ctx {
+struct xchk_dir_ctx {
/* VFS fill-directory iterator */
- struct dir_context dir_iter;
+ struct dir_context dir_iter;
- struct xfs_scrub_context *sc;
+ struct xfs_scrub *sc;
};
/* Check that an inode's mode matches a given DT_ type. */
STATIC int
-xfs_scrub_dir_check_ftype(
- struct xfs_scrub_dir_ctx *sdc,
- xfs_fileoff_t offset,
- xfs_ino_t inum,
- int dtype)
+xchk_dir_check_ftype(
+ struct xchk_dir_ctx *sdc,
+ xfs_fileoff_t offset,
+ xfs_ino_t inum,
+ int dtype)
{
- struct xfs_mount *mp = sdc->sc->mp;
- struct xfs_inode *ip;
- int ino_dtype;
- int error = 0;
+ struct xfs_mount *mp = sdc->sc->mp;
+ struct xfs_inode *ip;
+ int ino_dtype;
+ int error = 0;
if (!xfs_sb_version_hasftype(&mp->m_sb)) {
if (dtype != DT_UNKNOWN && dtype != DT_DIR)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
goto out;
}
@@ -78,7 +78,7 @@ xfs_scrub_dir_check_ftype(
* inodes can trigger immediate inactive cleanup of the inode.
*/
error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip);
- if (!xfs_scrub_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ if (!xchk_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
&error))
goto out;
@@ -86,8 +86,8 @@ xfs_scrub_dir_check_ftype(
ino_dtype = xfs_dir3_get_dtype(mp,
xfs_mode_to_ftype(VFS_I(ip)->i_mode));
if (ino_dtype != dtype)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
- iput(VFS_I(ip));
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ xfs_irele(ip);
out:
return error;
}
@@ -101,23 +101,23 @@ out:
* we can look up this filename. Finally, we check the ftype.
*/
STATIC int
-xfs_scrub_dir_actor(
- struct dir_context *dir_iter,
- const char *name,
- int namelen,
- loff_t pos,
- u64 ino,
- unsigned type)
+xchk_dir_actor(
+ struct dir_context *dir_iter,
+ const char *name,
+ int namelen,
+ loff_t pos,
+ u64 ino,
+ unsigned type)
{
- struct xfs_mount *mp;
- struct xfs_inode *ip;
- struct xfs_scrub_dir_ctx *sdc;
- struct xfs_name xname;
- xfs_ino_t lookup_ino;
- xfs_dablk_t offset;
- int error = 0;
-
- sdc = container_of(dir_iter, struct xfs_scrub_dir_ctx, dir_iter);
+ struct xfs_mount *mp;
+ struct xfs_inode *ip;
+ struct xchk_dir_ctx *sdc;
+ struct xfs_name xname;
+ xfs_ino_t lookup_ino;
+ xfs_dablk_t offset;
+ int error = 0;
+
+ sdc = container_of(dir_iter, struct xchk_dir_ctx, dir_iter);
ip = sdc->sc->ip;
mp = ip->i_mount;
offset = xfs_dir2_db_to_da(mp->m_dir_geo,
@@ -125,17 +125,17 @@ xfs_scrub_dir_actor(
/* Does this inode number make sense? */
if (!xfs_verify_dir_ino(mp, ino)) {
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
goto out;
}
if (!strncmp(".", name, namelen)) {
/* If this is "." then check that the inum matches the dir. */
if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
if (ino != ip->i_ino)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
} else if (!strncmp("..", name, namelen)) {
/*
@@ -143,10 +143,10 @@ xfs_scrub_dir_actor(
* matches this dir.
*/
if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
if (ip->i_ino == mp->m_sb.sb_rootino && ino != ip->i_ino)
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
offset);
}
@@ -156,23 +156,23 @@ xfs_scrub_dir_actor(
xname.type = XFS_DIR3_FT_UNKNOWN;
error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
- if (!xfs_scrub_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
&error))
goto out;
if (lookup_ino != ino) {
- xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
goto out;
}
/* Verify the file type. This function absorbs error codes. */
- error = xfs_scrub_dir_check_ftype(sdc, offset, lookup_ino, type);
+ error = xchk_dir_check_ftype(sdc, offset, lookup_ino, type);
if (error)
goto out;
out:
/*
* A negative error code returned here is supposed to cause the
* dir_emit caller (xfs_readdir) to abort the directory iteration
- * and return zero to xfs_scrub_directory.
+ * and return zero to xchk_directory.
*/
if (error == 0 && sdc->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return -EFSCORRUPTED;
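For context, the actor above plugs into the VFS readdir machinery through the dir_context embedded in xchk_dir_ctx. A sketch of the wiring from the directory scrubber's side (buffer size and locals are illustrative):

	struct xchk_dir_ctx	sdc = {
		.dir_iter.actor = xchk_dir_actor,
		.dir_iter.pos	= 0,
		.sc		= sc,
	};
	size_t			bufsize = 32768;

	error = xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter, bufsize);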
@@ -181,8 +181,8 @@ out:
/* Scrub a directory btree record. */
STATIC int
-xfs_scrub_dir_rec(
- struct xfs_scrub_da_btree *ds,
+xchk_dir_rec(
+ struct xchk_da_btree *ds,
int level,
void *rec)
{
@@ -203,7 +203,7 @@ xfs_scrub_dir_rec(
int error;
/* Check the hash of the entry. */
- error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval);
+ error = xchk_da_btree_hash(ds, level, &ent->hashval);
if (error)
goto out;
@@ -218,18 +218,18 @@ xfs_scrub_dir_rec(
rec_bno = xfs_dir2_db_to_da(mp->m_dir_geo, db);
if (rec_bno >= mp->m_dir_geo->leafblk) {
- xfs_scrub_da_set_corrupt(ds, level);
+ xchk_da_set_corrupt(ds, level);
goto out;
}
error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno, -2, &bp);
- if (!xfs_scrub_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
+ if (!xchk_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
&error))
goto out;
if (!bp) {
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out;
}
- xfs_scrub_buffer_recheck(ds->sc, bp);
+ xchk_buffer_recheck(ds->sc, bp);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out_relse;
@@ -240,7 +240,7 @@ xfs_scrub_dir_rec(
p = (char *)mp->m_dir_inode_ops->data_entry_p(bp->b_addr);
endp = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
if (!endp) {
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse;
}
while (p < endp) {
@@ -258,7 +258,7 @@ xfs_scrub_dir_rec(
p += mp->m_dir_inode_ops->data_entsize(dep->namelen);
}
if (p >= endp) {
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse;
}
@@ -267,14 +267,14 @@ xfs_scrub_dir_rec(
hash = be32_to_cpu(ent->hashval);
tag = be16_to_cpup(dp->d_ops->data_entry_tag_p(dent));
if (!xfs_verify_dir_ino(mp, ino) || tag != off)
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
if (dent->namelen == 0) {
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse;
}
calc_hash = xfs_da_hashname(dent->name, dent->namelen);
if (calc_hash != hash)
- xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
out_relse:
xfs_trans_brelse(ds->dargs.trans, bp);
@@ -288,8 +288,8 @@ out:
* shortest, and that there aren't any bogus entries.
*/
STATIC void
-xfs_scrub_directory_check_free_entry(
- struct xfs_scrub_context *sc,
+xchk_directory_check_free_entry(
+ struct xfs_scrub *sc,
xfs_dablk_t lblk,
struct xfs_dir2_data_free *bf,
struct xfs_dir2_data_unused *dup)
@@ -308,13 +308,13 @@ xfs_scrub_directory_check_free_entry(
return;
/* Unused entry should be in the bestfrees but wasn't found. */
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
}
/* Check free space info in a directory data block. */
STATIC int
-xfs_scrub_directory_data_bestfree(
- struct xfs_scrub_context *sc,
+xchk_directory_data_bestfree(
+ struct xfs_scrub *sc,
xfs_dablk_t lblk,
bool is_block)
{
@@ -339,15 +339,15 @@ xfs_scrub_directory_data_bestfree(
if (is_block) {
/* dir block format */
if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
error = xfs_dir3_block_read(sc->tp, sc->ip, &bp);
} else {
/* dir data format */
error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, -1, &bp);
}
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
- xfs_scrub_buffer_recheck(sc, bp);
+ xchk_buffer_recheck(sc, bp);
/* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */
@@ -362,7 +362,7 @@ xfs_scrub_directory_data_bestfree(
if (offset == 0)
continue;
if (offset >= mp->m_dir_geo->blksize) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
dup = (struct xfs_dir2_data_unused *)(bp->b_addr + offset);
@@ -372,13 +372,13 @@ xfs_scrub_directory_data_bestfree(
if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG) ||
be16_to_cpu(dup->length) != be16_to_cpu(dfp->length) ||
tag != ((char *)dup - (char *)bp->b_addr)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
/* bestfree records should be ordered largest to smallest */
if (smallest_bestfree < be16_to_cpu(dfp->length)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
@@ -400,7 +400,7 @@ xfs_scrub_directory_data_bestfree(
dep = (struct xfs_dir2_data_entry *)ptr;
newlen = d_ops->data_entsize(dep->namelen);
if (newlen <= 0) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
lblk);
goto out_buf;
}
@@ -411,7 +411,7 @@ xfs_scrub_directory_data_bestfree(
/* Spot check this free entry */
tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
if (tag != ((char *)dup - (char *)bp->b_addr)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
@@ -419,14 +419,14 @@ xfs_scrub_directory_data_bestfree(
* Either this entry is a bestfree or it's smaller than
* any of the bestfrees.
*/
- xfs_scrub_directory_check_free_entry(sc, lblk, bf, dup);
+ xchk_directory_check_free_entry(sc, lblk, bf, dup);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out_buf;
/* Move on. */
newlen = be16_to_cpu(dup->length);
if (newlen <= 0) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
ptr += newlen;
@@ -436,11 +436,11 @@ xfs_scrub_directory_data_bestfree(
/* We're required to fill all the space. */
if (ptr != endptr)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
/* Did we see at least as many free slots as there are bestfrees? */
if (nr_frees < nr_bestfrees)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
out_buf:
xfs_trans_brelse(sc->tp, bp);
out:
@@ -454,8 +454,8 @@ out:
* array is in order.
*/
STATIC void
-xfs_scrub_directory_check_freesp(
- struct xfs_scrub_context *sc,
+xchk_directory_check_freesp(
+ struct xfs_scrub *sc,
xfs_dablk_t lblk,
struct xfs_buf *dbp,
unsigned int len)
@@ -465,16 +465,16 @@ xfs_scrub_directory_check_freesp(
dfp = sc->ip->d_ops->data_bestfree_p(dbp->b_addr);
if (len != be16_to_cpu(dfp->length))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
if (len > 0 && be16_to_cpu(dfp->offset) == 0)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
}
/* Check free space info in a directory leaf1 block. */
STATIC int
-xfs_scrub_directory_leaf1_bestfree(
- struct xfs_scrub_context *sc,
+xchk_directory_leaf1_bestfree(
+ struct xfs_scrub *sc,
struct xfs_da_args *args,
xfs_dablk_t lblk)
{
@@ -497,9 +497,9 @@ xfs_scrub_directory_leaf1_bestfree(
/* Read the free space block. */
error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, -1, &bp);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
- xfs_scrub_buffer_recheck(sc, bp);
+ xchk_buffer_recheck(sc, bp);
leaf = bp->b_addr;
d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
@@ -512,7 +512,7 @@ xfs_scrub_directory_leaf1_bestfree(
struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr;
if (hdr3->pad != cpu_to_be32(0))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
}
/*
@@ -520,19 +520,19 @@ xfs_scrub_directory_leaf1_bestfree(
* blocks that can fit under i_size.
*/
if (bestcount != xfs_dir2_byte_to_db(geo, sc->ip->i_d.di_size)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
/* Is the leaf count even remotely sane? */
if (leafhdr.count > d_ops->leaf_max_ents(geo)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
/* Leaves and bests don't overlap in leaf format. */
if ((char *)&ents[leafhdr.count] > (char *)bestp) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
@@ -540,13 +540,13 @@ xfs_scrub_directory_leaf1_bestfree(
for (i = 0; i < leafhdr.count; i++) {
hash = be32_to_cpu(ents[i].hashval);
if (i > 0 && lasthash > hash)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
lasthash = hash;
if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
if (leafhdr.stale != stale)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
@@ -557,10 +557,10 @@ xfs_scrub_directory_leaf1_bestfree(
continue;
error = xfs_dir3_data_read(sc->tp, sc->ip,
i * args->geo->fsbcount, -1, &dbp);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk,
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error))
break;
- xfs_scrub_directory_check_freesp(sc, lblk, dbp, best);
+ xchk_directory_check_freesp(sc, lblk, dbp, best);
xfs_trans_brelse(sc->tp, dbp);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
@@ -571,8 +571,8 @@ out:
/* Check free space info in a directory freespace block. */
STATIC int
-xfs_scrub_directory_free_bestfree(
- struct xfs_scrub_context *sc,
+xchk_directory_free_bestfree(
+ struct xfs_scrub *sc,
struct xfs_da_args *args,
xfs_dablk_t lblk)
{
@@ -587,15 +587,15 @@ xfs_scrub_directory_free_bestfree(
/* Read the free space block */
error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
- xfs_scrub_buffer_recheck(sc, bp);
+ xchk_buffer_recheck(sc, bp);
if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
if (hdr3->pad != cpu_to_be32(0))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
}
/* Check all the entries. */
@@ -610,36 +610,36 @@ xfs_scrub_directory_free_bestfree(
error = xfs_dir3_data_read(sc->tp, sc->ip,
(freehdr.firstdb + i) * args->geo->fsbcount,
-1, &dbp);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk,
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error))
break;
- xfs_scrub_directory_check_freesp(sc, lblk, dbp, best);
+ xchk_directory_check_freesp(sc, lblk, dbp, best);
xfs_trans_brelse(sc->tp, dbp);
}
if (freehdr.nused + stale != freehdr.nvalid)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
out:
return error;
}
/* Check free space information in directories. */
STATIC int
-xfs_scrub_directory_blocks(
- struct xfs_scrub_context *sc)
+xchk_directory_blocks(
+ struct xfs_scrub *sc)
{
- struct xfs_bmbt_irec got;
- struct xfs_da_args args;
- struct xfs_ifork *ifp;
- struct xfs_mount *mp = sc->mp;
- xfs_fileoff_t leaf_lblk;
- xfs_fileoff_t free_lblk;
- xfs_fileoff_t lblk;
- struct xfs_iext_cursor icur;
- xfs_dablk_t dabno;
- bool found;
- int is_block = 0;
- int error;
+ struct xfs_bmbt_irec got;
+ struct xfs_da_args args;
+ struct xfs_ifork *ifp;
+ struct xfs_mount *mp = sc->mp;
+ xfs_fileoff_t leaf_lblk;
+ xfs_fileoff_t free_lblk;
+ xfs_fileoff_t lblk;
+ struct xfs_iext_cursor icur;
+ xfs_dablk_t dabno;
+ bool found;
+ int is_block = 0;
+ int error;
/* Ignore local format directories. */
if (sc->ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
@@ -656,7 +656,7 @@ xfs_scrub_directory_blocks(
args.geo = mp->m_dir_geo;
args.trans = sc->tp;
error = xfs_dir2_isblock(&args, &is_block);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
/* Iterate all the data extents in the directory... */
@@ -666,7 +666,7 @@ xfs_scrub_directory_blocks(
if (is_block &&
(got.br_startoff > 0 ||
got.br_blockcount != args.geo->fsbcount)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
got.br_startoff);
break;
}
@@ -690,7 +690,7 @@ xfs_scrub_directory_blocks(
args.geo->fsbcount);
lblk < got.br_startoff + got.br_blockcount;
lblk += args.geo->fsbcount) {
- error = xfs_scrub_directory_data_bestfree(sc, lblk,
+ error = xchk_directory_data_bestfree(sc, lblk,
is_block);
if (error)
goto out;
@@ -709,10 +709,10 @@ xfs_scrub_directory_blocks(
got.br_blockcount == args.geo->fsbcount &&
!xfs_iext_next_extent(ifp, &icur, &got)) {
if (is_block) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
- error = xfs_scrub_directory_leaf1_bestfree(sc, &args,
+ error = xchk_directory_leaf1_bestfree(sc, &args,
leaf_lblk);
if (error)
goto out;
@@ -731,11 +731,11 @@ xfs_scrub_directory_blocks(
*/
lblk = got.br_startoff;
if (lblk & ~0xFFFFFFFFULL) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
if (is_block) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
@@ -754,7 +754,7 @@ xfs_scrub_directory_blocks(
args.geo->fsbcount);
lblk < got.br_startoff + got.br_blockcount;
lblk += args.geo->fsbcount) {
- error = xfs_scrub_directory_free_bestfree(sc, &args,
+ error = xchk_directory_free_bestfree(sc, &args,
lblk);
if (error)
goto out;
@@ -769,29 +769,29 @@ out:
/* Scrub a whole directory. */
int
-xfs_scrub_directory(
- struct xfs_scrub_context *sc)
+xchk_directory(
+ struct xfs_scrub *sc)
{
- struct xfs_scrub_dir_ctx sdc = {
- .dir_iter.actor = xfs_scrub_dir_actor,
+ struct xchk_dir_ctx sdc = {
+ .dir_iter.actor = xchk_dir_actor,
.dir_iter.pos = 0,
.sc = sc,
};
- size_t bufsize;
- loff_t oldpos;
- int error = 0;
+ size_t bufsize;
+ loff_t oldpos;
+ int error = 0;
if (!S_ISDIR(VFS_I(sc->ip)->i_mode))
return -ENOENT;
/* Plausible size? */
if (sc->ip->i_d.di_size < xfs_dir2_sf_hdr_size(0)) {
- xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_set_corrupt(sc, sc->ip->i_ino);
goto out;
}
/* Check directory tree structure */
- error = xfs_scrub_da_btree(sc, XFS_DATA_FORK, xfs_scrub_dir_rec, NULL);
+ error = xchk_da_btree(sc, XFS_DATA_FORK, xchk_dir_rec, NULL);
if (error)
return error;
@@ -799,7 +799,7 @@ xfs_scrub_directory(
return error;
/* Check the freespace. */
- error = xfs_scrub_directory_blocks(sc);
+ error = xchk_directory_blocks(sc);
if (error)
return error;
@@ -816,7 +816,7 @@ xfs_scrub_directory(
/*
* Look up every name in this directory by hash.
*
- * Use the xfs_readdir function to call xfs_scrub_dir_actor on
+ * Use the xfs_readdir function to call xchk_dir_actor on
* every directory entry in this directory. In _actor, we check
* the name, inode number, and ftype (if applicable) of the
* entry. xfs_readdir uses the VFS filldir functions to provide
@@ -834,7 +834,7 @@ xfs_scrub_directory(
xfs_iunlock(sc->ip, XFS_ILOCK_EXCL);
while (true) {
error = xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter, bufsize);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0,
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
&error))
goto out;
if (oldpos == sdc.dir_iter.pos)
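
The readdir loop above terminates when the iterator position stops advancing rather than on a sentinel return value. A self-contained sketch of that termination pattern, with a fake iterator and actor standing in for xfs_readdir and xchk_dir_actor (all names here are illustrative):

#include <stdio.h>

typedef int (*actor_fn)(const char *name, void *priv);

/* Hypothetical iterator: emits one entry per call and advances *pos;
 * a stalled position signals end-of-directory, mirroring the
 * oldpos == sdc.dir_iter.pos test above. */
static int fake_readdir(long *pos, actor_fn actor, void *priv)
{
	static const char *names[] = { ".", "..", "a", "b" };

	if (*pos >= 4)
		return 0;	/* nothing emitted; position stalls */
	actor(names[*pos], priv);
	(*pos)++;
	return 0;
}

static int count_actor(const char *name, void *priv)
{
	(*(int *)priv)++;
	printf("saw %s\n", name);
	return 0;
}

int main(void)
{
	long pos = 0, oldpos;
	int seen = 0;

	do {
		oldpos = pos;
		fake_readdir(&pos, count_actor, &seen);
	} while (oldpos != pos);	/* stop once the cursor stops moving */

	printf("%d entries\n", seen);
	return 0;
}
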
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 13d43d108574..224dba937492 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -35,11 +35,11 @@
* try again after forcing logged inode cores out to disk.
*/
int
-xfs_scrub_setup_ag_iallocbt(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_ag_iallocbt(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_ag_btree(sc, ip, sc->try_harder);
+ return xchk_setup_ag_btree(sc, ip, sc->try_harder);
}
/* Inode btree scrubber. */
@@ -50,8 +50,8 @@ xfs_scrub_setup_ag_iallocbt(
* we have a record or not depending on freecount.
*/
static inline void
-xfs_scrub_iallocbt_chunk_xref_other(
- struct xfs_scrub_context *sc,
+xchk_iallocbt_chunk_xref_other(
+ struct xfs_scrub *sc,
struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino)
{
@@ -66,17 +66,17 @@ xfs_scrub_iallocbt_chunk_xref_other(
if (!(*pcur))
return;
error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
- if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ if (!xchk_should_check_xref(sc, &error, pcur))
return;
if (((irec->ir_freecount > 0 && !has_irec) ||
(irec->ir_freecount == 0 && has_irec)))
- xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_iallocbt_chunk_xref(
- struct xfs_scrub_context *sc,
+xchk_iallocbt_chunk_xref(
+ struct xfs_scrub *sc,
struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino,
xfs_agblock_t agbno,
@@ -87,17 +87,17 @@ xfs_scrub_iallocbt_chunk_xref(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, len);
- xfs_scrub_iallocbt_chunk_xref_other(sc, irec, agino);
+ xchk_xref_is_used_space(sc, agbno, len);
+ xchk_iallocbt_chunk_xref_other(sc, irec, agino);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
- xfs_scrub_xref_is_owned_by(sc, agbno, len, &oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, len);
+ xchk_xref_is_owned_by(sc, agbno, len, &oinfo);
+ xchk_xref_is_not_shared(sc, agbno, len);
}
/* Is this chunk worth checking? */
STATIC bool
-xfs_scrub_iallocbt_chunk(
- struct xfs_scrub_btree *bs,
+xchk_iallocbt_chunk(
+ struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino,
xfs_extlen_t len)
@@ -110,16 +110,16 @@ xfs_scrub_iallocbt_chunk(
if (bno + len <= bno ||
!xfs_verify_agbno(mp, agno, bno) ||
!xfs_verify_agbno(mp, agno, bno + len - 1))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- xfs_scrub_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
+ xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
return true;
}
/* Count the number of free inodes. */
static unsigned int
-xfs_scrub_iallocbt_freecount(
+xchk_iallocbt_freecount(
xfs_inofree_t freemask)
{
BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
@@ -128,8 +128,8 @@ xfs_scrub_iallocbt_freecount(
/* Check a particular inode with ir_free. */
STATIC int
-xfs_scrub_iallocbt_check_cluster_freemask(
- struct xfs_scrub_btree *bs,
+xchk_iallocbt_check_cluster_freemask(
+ struct xchk_btree *bs,
xfs_ino_t fsino,
xfs_agino_t chunkino,
xfs_agino_t clusterino,
@@ -143,14 +143,14 @@ xfs_scrub_iallocbt_check_cluster_freemask(
bool inuse;
int error = 0;
- if (xfs_scrub_should_terminate(bs->sc, &error))
+ if (xchk_should_terminate(bs->sc, &error))
return error;
dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize);
if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
(dip->di_version >= 3 &&
be64_to_cpu(dip->di_ino) != fsino + clusterino)) {
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
goto out;
}
@@ -175,15 +175,15 @@ xfs_scrub_iallocbt_check_cluster_freemask(
freemask_ok = inode_is_free ^ inuse;
}
if (!freemask_ok)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
return 0;
}
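
The per-inode freemask test above (freemask_ok = inode_is_free ^ inuse) boils down to: the inobt's free bit and the on-disk in-use state must disagree, which XOR captures exactly. A tiny sketch of that agreement check:

#include <stdbool.h>
#include <stdio.h>

/* Consistent only when exactly one of the two views says "in use". */
static bool freemask_consistent(bool inobt_says_free, bool inode_in_use)
{
	return inobt_says_free ^ inode_in_use;
}

int main(void)
{
	printf("%d\n", freemask_consistent(true, false));	/* 1: consistent */
	printf("%d\n", freemask_consistent(true, true));	/* 0: corrupt */
	printf("%d\n", freemask_consistent(false, false));	/* 0: corrupt */
	return 0;
}
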
/* Make sure the free mask is consistent with what the inodes think. */
STATIC int
-xfs_scrub_iallocbt_check_freemask(
- struct xfs_scrub_btree *bs,
+xchk_iallocbt_check_freemask(
+ struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec)
{
struct xfs_owner_info oinfo;
@@ -223,18 +223,18 @@ xfs_scrub_iallocbt_check_freemask(
/* The whole cluster must be a hole or not a hole. */
ir_holemask = (irec->ir_holemask & holemask);
if (ir_holemask != holemask && ir_holemask != 0) {
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
continue;
}
/* If any part of this is a hole, skip it. */
if (ir_holemask) {
- xfs_scrub_xref_is_not_owned_by(bs->sc, agbno,
+ xchk_xref_is_not_owned_by(bs->sc, agbno,
blks_per_cluster, &oinfo);
continue;
}
- xfs_scrub_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
+ xchk_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
&oinfo);
/* Grab the inode cluster buffer. */
@@ -245,13 +245,13 @@ xfs_scrub_iallocbt_check_freemask(
error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
&dip, &bp, 0, 0);
- if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur, 0,
+ if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0,
&error))
continue;
/* Which inodes are free? */
for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
- error = xfs_scrub_iallocbt_check_cluster_freemask(bs,
+ error = xchk_iallocbt_check_cluster_freemask(bs,
fsino, chunkino, clusterino, irec, bp);
if (error) {
xfs_trans_brelse(bs->cur->bc_tp, bp);
@@ -267,8 +267,8 @@ xfs_scrub_iallocbt_check_freemask(
/* Scrub an inobt/finobt record. */
STATIC int
-xfs_scrub_iallocbt_rec(
- struct xfs_scrub_btree *bs,
+xchk_iallocbt_rec(
+ struct xchk_btree *bs,
union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
@@ -289,18 +289,18 @@ xfs_scrub_iallocbt_rec(
if (irec.ir_count > XFS_INODES_PER_CHUNK ||
irec.ir_freecount > XFS_INODES_PER_CHUNK)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
real_freecount = irec.ir_freecount +
(XFS_INODES_PER_CHUNK - irec.ir_count);
- if (real_freecount != xfs_scrub_iallocbt_freecount(irec.ir_free))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
agino = irec.ir_startino;
/* Record has to be properly aligned within the AG. */
if (!xfs_verify_agino(mp, agno, agino) ||
!xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
goto out;
}
@@ -308,7 +308,7 @@ xfs_scrub_iallocbt_rec(
agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) ||
(agbno & (xfs_icluster_size_fsb(mp) - 1)))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
*inode_blocks += XFS_B_TO_FSB(mp,
irec.ir_count * mp->m_sb.sb_inodesize);
@@ -318,9 +318,9 @@ xfs_scrub_iallocbt_rec(
len = XFS_B_TO_FSB(mp,
XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
if (irec.ir_count != XFS_INODES_PER_CHUNK)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len))
+ if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
goto out;
goto check_freemask;
}
@@ -333,12 +333,12 @@ xfs_scrub_iallocbt_rec(
holes = ~xfs_inobt_irec_to_allocmask(&irec);
if ((holes & irec.ir_free) != holes ||
irec.ir_freecount > irec.ir_count)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
if (holemask & 1)
holecount += XFS_INODES_PER_HOLEMASK_BIT;
- else if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len))
+ else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
break;
holemask >>= 1;
agino += XFS_INODES_PER_HOLEMASK_BIT;
@@ -346,10 +346,10 @@ xfs_scrub_iallocbt_rec(
if (holecount > XFS_INODES_PER_CHUNK ||
holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
check_freemask:
- error = xfs_scrub_iallocbt_check_freemask(bs, &irec);
+ error = xchk_iallocbt_check_freemask(bs, &irec);
if (error)
goto out;
@@ -362,39 +362,39 @@ out:
* Don't bother if we're missing btree cursors, as we're already corrupt.
*/
STATIC void
-xfs_scrub_iallocbt_xref_rmap_btreeblks(
- struct xfs_scrub_context *sc,
- int which)
+xchk_iallocbt_xref_rmap_btreeblks(
+ struct xfs_scrub *sc,
+ int which)
{
- struct xfs_owner_info oinfo;
- xfs_filblks_t blocks;
- xfs_extlen_t inobt_blocks = 0;
- xfs_extlen_t finobt_blocks = 0;
- int error;
+ struct xfs_owner_info oinfo;
+ xfs_filblks_t blocks;
+ xfs_extlen_t inobt_blocks = 0;
+ xfs_extlen_t finobt_blocks = 0;
+ int error;
if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
(xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
- xfs_scrub_skip_xref(sc->sm))
+ xchk_skip_xref(sc->sm))
return;
/* Check that we saw as many inobt blocks as the rmap says. */
error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
- if (!xfs_scrub_process_error(sc, 0, 0, &error))
+ if (!xchk_process_error(sc, 0, 0, &error))
return;
if (sc->sa.fino_cur) {
error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
- if (!xfs_scrub_process_error(sc, 0, 0, &error))
+ if (!xchk_process_error(sc, 0, 0, &error))
return;
}
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
- error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
&blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != inobt_blocks + finobt_blocks)
- xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
+ xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}
/*
@@ -402,47 +402,47 @@ xfs_scrub_iallocbt_xref_rmap_btreeblks(
* the rmap says are owned by inodes.
*/
STATIC void
-xfs_scrub_iallocbt_xref_rmap_inodes(
- struct xfs_scrub_context *sc,
- int which,
- xfs_filblks_t inode_blocks)
+xchk_iallocbt_xref_rmap_inodes(
+ struct xfs_scrub *sc,
+ int which,
+ xfs_filblks_t inode_blocks)
{
- struct xfs_owner_info oinfo;
- xfs_filblks_t blocks;
- int error;
+ struct xfs_owner_info oinfo;
+ xfs_filblks_t blocks;
+ int error;
- if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
/* Check that we saw as many inode blocks as the rmap knows about. */
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
- error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
&blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != inode_blocks)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/* Scrub the inode btrees for some AG. */
STATIC int
-xfs_scrub_iallocbt(
- struct xfs_scrub_context *sc,
- xfs_btnum_t which)
+xchk_iallocbt(
+ struct xfs_scrub *sc,
+ xfs_btnum_t which)
{
- struct xfs_btree_cur *cur;
- struct xfs_owner_info oinfo;
- xfs_filblks_t inode_blocks = 0;
- int error;
+ struct xfs_btree_cur *cur;
+ struct xfs_owner_info oinfo;
+ xfs_filblks_t inode_blocks = 0;
+ int error;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
- error = xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo,
+ error = xchk_btree(sc, cur, xchk_iallocbt_rec, &oinfo,
&inode_blocks);
if (error)
return error;
- xfs_scrub_iallocbt_xref_rmap_btreeblks(sc, which);
+ xchk_iallocbt_xref_rmap_btreeblks(sc, which);
/*
* If we're scrubbing the inode btree, inode_blocks is the number of
@@ -452,64 +452,64 @@ xfs_scrub_iallocbt(
* to inode chunks with free inodes.
*/
if (which == XFS_BTNUM_INO)
- xfs_scrub_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);
+ xchk_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);
return error;
}
int
-xfs_scrub_inobt(
- struct xfs_scrub_context *sc)
+xchk_inobt(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_iallocbt(sc, XFS_BTNUM_INO);
+ return xchk_iallocbt(sc, XFS_BTNUM_INO);
}
int
-xfs_scrub_finobt(
- struct xfs_scrub_context *sc)
+xchk_finobt(
+ struct xfs_scrub *sc)
{
- return xfs_scrub_iallocbt(sc, XFS_BTNUM_FINO);
+ return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}
/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
-xfs_scrub_xref_inode_check(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len,
- struct xfs_btree_cur **icur,
- bool should_have_inodes)
+xchk_xref_inode_check(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len,
+ struct xfs_btree_cur **icur,
+ bool should_have_inodes)
{
- bool has_inodes;
- int error;
+ bool has_inodes;
+ int error;
- if (!(*icur) || xfs_scrub_skip_xref(sc->sm))
+ if (!(*icur) || xchk_skip_xref(sc->sm))
return;
error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
- if (!xfs_scrub_should_check_xref(sc, &error, icur))
+ if (!xchk_should_check_xref(sc, &error, icur))
return;
if (has_inodes != should_have_inodes)
- xfs_scrub_btree_xref_set_corrupt(sc, *icur, 0);
+ xchk_btree_xref_set_corrupt(sc, *icur, 0);
}
/* xref check that the extent is not covered by inodes */
void
-xfs_scrub_xref_is_not_inode_chunk(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_xref_is_not_inode_chunk(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
- xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
- xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
+ xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
+ xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}
/* xref check that the extent is covered by inodes */
void
-xfs_scrub_xref_is_inode_chunk(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_xref_is_inode_chunk(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
- xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
+ xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}
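
The xchk_iallocbt_freecount helper above has its body elided by the hunk; upstream it is a 64-bit population count (hweight64) over the chunk's freemask, which the BUILD_BUG_ON pins to a __u64. A userspace equivalent using the GCC/Clang builtin:

#include <stdint.h>
#include <stdio.h>

/* __builtin_popcountll is the userspace analogue of hweight64. */
static unsigned int freecount(uint64_t freemask)
{
	return (unsigned int)__builtin_popcountll(freemask);
}

int main(void)
{
	/* A chunk with inodes 0-3 and 63 free: five set bits. */
	uint64_t mask = 0x800000000000000FULL;

	printf("free inodes: %u\n", freecount(mask));	/* 5 */
	return 0;
}
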
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 7a6208505980..5b3b177c0fc9 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -37,23 +37,23 @@
* the goal.
*/
int
-xfs_scrub_setup_inode(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_inode(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- int error;
+ int error;
/*
* Try to get the inode. If the verifiers fail, we try again
* in raw mode.
*/
- error = xfs_scrub_get_inode(sc, ip);
+ error = xchk_get_inode(sc, ip);
switch (error) {
case 0:
break;
case -EFSCORRUPTED:
case -EFSBADCRC:
- return xfs_scrub_trans_alloc(sc, 0);
+ return xchk_trans_alloc(sc, 0);
default:
return error;
}
@@ -61,7 +61,7 @@ xfs_scrub_setup_inode(
/* Got the inode, lock it and we're ready to go. */
sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags);
- error = xfs_scrub_trans_alloc(sc, 0);
+ error = xchk_trans_alloc(sc, 0);
if (error)
goto out;
sc->ilock_flags |= XFS_ILOCK_EXCL;
@@ -76,19 +76,19 @@ out:
/* Validate di_extsize hint. */
STATIC void
-xfs_scrub_inode_extsize(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip,
- xfs_ino_t ino,
- uint16_t mode,
- uint16_t flags)
+xchk_inode_extsize(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags)
{
- xfs_failaddr_t fa;
+ xfs_failaddr_t fa;
fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize),
mode, flags);
if (fa)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
/*
@@ -98,33 +98,33 @@ xfs_scrub_inode_extsize(
* These functions must be kept in sync with each other.
*/
STATIC void
-xfs_scrub_inode_cowextsize(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip,
- xfs_ino_t ino,
- uint16_t mode,
- uint16_t flags,
- uint64_t flags2)
+xchk_inode_cowextsize(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags,
+ uint64_t flags2)
{
- xfs_failaddr_t fa;
+ xfs_failaddr_t fa;
fa = xfs_inode_validate_cowextsize(sc->mp,
be32_to_cpu(dip->di_cowextsize), mode, flags,
flags2);
if (fa)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
/* Make sure the di_flags make sense for the inode. */
STATIC void
-xfs_scrub_inode_flags(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip,
- xfs_ino_t ino,
- uint16_t mode,
- uint16_t flags)
+xchk_inode_flags(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags)
{
- struct xfs_mount *mp = sc->mp;
+ struct xfs_mount *mp = sc->mp;
if (flags & ~XFS_DIFLAG_ANY)
goto bad;
@@ -157,20 +157,20 @@ xfs_scrub_inode_flags(
return;
bad:
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
/* Make sure the di_flags2 make sense for the inode. */
STATIC void
-xfs_scrub_inode_flags2(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip,
- xfs_ino_t ino,
- uint16_t mode,
- uint16_t flags,
- uint64_t flags2)
+xchk_inode_flags2(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags,
+ uint64_t flags2)
{
- struct xfs_mount *mp = sc->mp;
+ struct xfs_mount *mp = sc->mp;
if (flags2 & ~XFS_DIFLAG2_ANY)
goto bad;
@@ -200,23 +200,23 @@ xfs_scrub_inode_flags2(
return;
bad:
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
/* Scrub all the ondisk inode fields. */
STATIC void
-xfs_scrub_dinode(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip,
- xfs_ino_t ino)
+xchk_dinode(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino)
{
- struct xfs_mount *mp = sc->mp;
- size_t fork_recs;
- unsigned long long isize;
- uint64_t flags2;
- uint32_t nextents;
- uint16_t flags;
- uint16_t mode;
+ struct xfs_mount *mp = sc->mp;
+ size_t fork_recs;
+ unsigned long long isize;
+ uint64_t flags2;
+ uint32_t nextents;
+ uint16_t flags;
+ uint16_t mode;
flags = be16_to_cpu(dip->di_flags);
if (dip->di_version >= 3)
@@ -237,7 +237,7 @@ xfs_scrub_dinode(
/* mode is recognized */
break;
default:
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
}
@@ -248,22 +248,22 @@ xfs_scrub_dinode(
* We autoconvert v1 inodes into v2 inodes on writeout,
* so just mark this inode for preening.
*/
- xfs_scrub_ino_set_preen(sc, ino);
+ xchk_ino_set_preen(sc, ino);
break;
case 2:
case 3:
if (dip->di_onlink != 0)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (dip->di_mode == 0 && sc->ip)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (dip->di_projid_hi != 0 &&
!xfs_sb_version_hasprojid32bit(&mp->m_sb))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
default:
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
return;
}
@@ -273,40 +273,40 @@ xfs_scrub_dinode(
*/
if (dip->di_uid == cpu_to_be32(-1U) ||
dip->di_gid == cpu_to_be32(-1U))
- xfs_scrub_ino_set_warning(sc, ino);
+ xchk_ino_set_warning(sc, ino);
/* di_format */
switch (dip->di_format) {
case XFS_DINODE_FMT_DEV:
if (!S_ISCHR(mode) && !S_ISBLK(mode) &&
!S_ISFIFO(mode) && !S_ISSOCK(mode))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_LOCAL:
if (!S_ISDIR(mode) && !S_ISLNK(mode))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_EXTENTS:
if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_BTREE:
if (!S_ISREG(mode) && !S_ISDIR(mode))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_UUID:
default:
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
}
/* di_[amc]time.nsec */
if (be32_to_cpu(dip->di_atime.t_nsec) >= NSEC_PER_SEC)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (be32_to_cpu(dip->di_mtime.t_nsec) >= NSEC_PER_SEC)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (be32_to_cpu(dip->di_ctime.t_nsec) >= NSEC_PER_SEC)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/*
* di_size. xfs_dinode_verify checks for things that screw up
@@ -315,19 +315,19 @@ xfs_scrub_dinode(
*/
isize = be64_to_cpu(dip->di_size);
if (isize & (1ULL << 63))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/* Devices, fifos, and sockets must have zero size */
if (!S_ISDIR(mode) && !S_ISREG(mode) && !S_ISLNK(mode) && isize != 0)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/* Directories can't be larger than the data section size (32G) */
if (S_ISDIR(mode) && (isize == 0 || isize >= XFS_DIR2_SPACE_SIZE))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/* Symlinks can't be larger than SYMLINK_MAXLEN */
if (S_ISLNK(mode) && (isize == 0 || isize >= XFS_SYMLINK_MAXLEN))
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/*
* Warn if the running kernel can't handle the kinds of offsets
@@ -336,7 +336,7 @@ xfs_scrub_dinode(
* overly large offsets, flag the inode for admin review.
*/
if (isize >= mp->m_super->s_maxbytes)
- xfs_scrub_ino_set_warning(sc, ino);
+ xchk_ino_set_warning(sc, ino);
/* di_nblocks */
if (flags2 & XFS_DIFLAG2_REFLINK) {
@@ -351,15 +351,15 @@ xfs_scrub_dinode(
*/
if (be64_to_cpu(dip->di_nblocks) >=
mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
} else {
if (be64_to_cpu(dip->di_nblocks) >= mp->m_sb.sb_dblocks)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
- xfs_scrub_inode_flags(sc, dip, ino, mode, flags);
+ xchk_inode_flags(sc, dip, ino, mode, flags);
- xfs_scrub_inode_extsize(sc, dip, ino, mode, flags);
+ xchk_inode_extsize(sc, dip, ino, mode, flags);
/* di_nextents */
nextents = be32_to_cpu(dip->di_nextents);
@@ -367,31 +367,31 @@ xfs_scrub_dinode(
switch (dip->di_format) {
case XFS_DINODE_FMT_EXTENTS:
if (nextents > fork_recs)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_BTREE:
if (nextents <= fork_recs)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
default:
if (nextents != 0)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
}
/* di_forkoff */
if (XFS_DFORK_APTR(dip) >= (char *)dip + mp->m_sb.sb_inodesize)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (dip->di_anextents != 0 && dip->di_forkoff == 0)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
if (dip->di_forkoff == 0 && dip->di_aformat != XFS_DINODE_FMT_EXTENTS)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/* di_aformat */
if (dip->di_aformat != XFS_DINODE_FMT_LOCAL &&
dip->di_aformat != XFS_DINODE_FMT_EXTENTS &&
dip->di_aformat != XFS_DINODE_FMT_BTREE)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
/* di_anextents */
nextents = be16_to_cpu(dip->di_anextents);
@@ -399,22 +399,22 @@ xfs_scrub_dinode(
switch (dip->di_aformat) {
case XFS_DINODE_FMT_EXTENTS:
if (nextents > fork_recs)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
case XFS_DINODE_FMT_BTREE:
if (nextents <= fork_recs)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
break;
default:
if (nextents != 0)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
if (dip->di_version >= 3) {
if (be32_to_cpu(dip->di_crtime.t_nsec) >= NSEC_PER_SEC)
- xfs_scrub_ino_set_corrupt(sc, ino);
- xfs_scrub_inode_flags2(sc, dip, ino, mode, flags, flags2);
- xfs_scrub_inode_cowextsize(sc, dip, ino, mode, flags,
+ xchk_ino_set_corrupt(sc, ino);
+ xchk_inode_flags2(sc, dip, ino, mode, flags, flags2);
+ xchk_inode_cowextsize(sc, dip, ino, mode, flags,
flags2);
}
}
@@ -425,8 +425,8 @@ xfs_scrub_dinode(
* IGET_UNTRUSTED, which checks the inobt for us.
*/
static void
-xfs_scrub_inode_xref_finobt(
- struct xfs_scrub_context *sc,
+xchk_inode_xref_finobt(
+ struct xfs_scrub *sc,
xfs_ino_t ino)
{
struct xfs_inobt_rec_incore rec;
@@ -434,7 +434,7 @@ xfs_scrub_inode_xref_finobt(
int has_record;
int error;
- if (!sc->sa.fino_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.fino_cur || xchk_skip_xref(sc->sm))
return;
agino = XFS_INO_TO_AGINO(sc->mp, ino);
@@ -445,12 +445,12 @@ xfs_scrub_inode_xref_finobt(
*/
error = xfs_inobt_lookup(sc->sa.fino_cur, agino, XFS_LOOKUP_LE,
&has_record);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
!has_record)
return;
error = xfs_inobt_get_rec(sc->sa.fino_cur, &rec, &has_record);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
!has_record)
return;
@@ -463,54 +463,54 @@ xfs_scrub_inode_xref_finobt(
return;
if (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.fino_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.fino_cur, 0);
}
/* Cross reference the inode fields with the forks. */
STATIC void
-xfs_scrub_inode_xref_bmap(
- struct xfs_scrub_context *sc,
- struct xfs_dinode *dip)
+xchk_inode_xref_bmap(
+ struct xfs_scrub *sc,
+ struct xfs_dinode *dip)
{
- xfs_extnum_t nextents;
- xfs_filblks_t count;
- xfs_filblks_t acount;
- int error;
+ xfs_extnum_t nextents;
+ xfs_filblks_t count;
+ xfs_filblks_t acount;
+ int error;
- if (xfs_scrub_skip_xref(sc->sm))
+ if (xchk_skip_xref(sc->sm))
return;
/* Walk all the extents to check nextents/naextents/nblocks. */
error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK,
&nextents, &count);
- if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ if (!xchk_should_check_xref(sc, &error, NULL))
return;
if (nextents < be32_to_cpu(dip->di_nextents))
- xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK,
&nextents, &acount);
- if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ if (!xchk_should_check_xref(sc, &error, NULL))
return;
if (nextents != be16_to_cpu(dip->di_anextents))
- xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
/* Check nblocks against the inode. */
if (count + acount != be64_to_cpu(dip->di_nblocks))
- xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino);
+ xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_inode_xref(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino,
- struct xfs_dinode *dip)
+xchk_inode_xref(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino,
+ struct xfs_dinode *dip)
{
- struct xfs_owner_info oinfo;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- int error;
+ struct xfs_owner_info oinfo;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ int error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
@@ -518,18 +518,18 @@ xfs_scrub_inode_xref(
agno = XFS_INO_TO_AGNO(sc->mp, ino);
agbno = XFS_INO_TO_AGBNO(sc->mp, ino);
- error = xfs_scrub_ag_init(sc, agno, &sc->sa);
- if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
+ error = xchk_ag_init(sc, agno, &sc->sa);
+ if (!xchk_xref_process_error(sc, agno, agbno, &error))
return;
- xfs_scrub_xref_is_used_space(sc, agbno, 1);
- xfs_scrub_inode_xref_finobt(sc, ino);
+ xchk_xref_is_used_space(sc, agbno, 1);
+ xchk_inode_xref_finobt(sc, ino);
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
- xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
- xfs_scrub_xref_is_not_shared(sc, agbno, 1);
- xfs_scrub_inode_xref_bmap(sc, dip);
+ xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_not_shared(sc, agbno, 1);
+ xchk_inode_xref_bmap(sc, dip);
- xfs_scrub_ag_free(sc, &sc->sa);
+ xchk_ag_free(sc, &sc->sa);
}
/*
@@ -539,35 +539,35 @@ xfs_scrub_inode_xref(
* reflink filesystem.
*/
static void
-xfs_scrub_inode_check_reflink_iflag(
- struct xfs_scrub_context *sc,
- xfs_ino_t ino)
+xchk_inode_check_reflink_iflag(
+ struct xfs_scrub *sc,
+ xfs_ino_t ino)
{
- struct xfs_mount *mp = sc->mp;
- bool has_shared;
- int error;
+ struct xfs_mount *mp = sc->mp;
+ bool has_shared;
+ int error;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return;
error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
&has_shared);
- if (!xfs_scrub_xref_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
+ if (!xchk_xref_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
XFS_INO_TO_AGBNO(mp, ino), &error))
return;
if (xfs_is_reflink_inode(sc->ip) && !has_shared)
- xfs_scrub_ino_set_preen(sc, ino);
+ xchk_ino_set_preen(sc, ino);
else if (!xfs_is_reflink_inode(sc->ip) && has_shared)
- xfs_scrub_ino_set_corrupt(sc, ino);
+ xchk_ino_set_corrupt(sc, ino);
}
/* Scrub an inode. */
int
-xfs_scrub_inode(
- struct xfs_scrub_context *sc)
+xchk_inode(
+ struct xfs_scrub *sc)
{
- struct xfs_dinode di;
- int error = 0;
+ struct xfs_dinode di;
+ int error = 0;
/*
* If sc->ip is NULL, that means that the setup function called
@@ -575,13 +575,13 @@ xfs_scrub_inode(
* and a NULL inode, so flag the corruption error and return.
*/
if (!sc->ip) {
- xfs_scrub_ino_set_corrupt(sc, sc->sm->sm_ino);
+ xchk_ino_set_corrupt(sc, sc->sm->sm_ino);
return 0;
}
/* Scrub the inode core. */
xfs_inode_to_disk(sc->ip, &di, 0);
- xfs_scrub_dinode(sc, &di, sc->ip->i_ino);
+ xchk_dinode(sc, &di, sc->ip->i_ino);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
@@ -591,9 +591,9 @@ xfs_scrub_inode(
* we scrubbed the dinode.
*/
if (S_ISREG(VFS_I(sc->ip)->i_mode))
- xfs_scrub_inode_check_reflink_iflag(sc, sc->ip->i_ino);
+ xchk_inode_check_reflink_iflag(sc, sc->ip->i_ino);
- xfs_scrub_inode_xref(sc, sc->ip->i_ino, &di);
+ xchk_inode_xref(sc, sc->ip->i_ino, &di);
out:
return error;
}
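
Several of the dinode checks above follow one pattern: pull a big-endian field out of the raw inode, then test it against a range or mode-dependent rule (nanoseconds below NSEC_PER_SEC, di_size with the sign bit clear, zero size for anything that is not a file, directory, or symlink). A simplified, host-endian sketch of that pattern; struct toy_dinode is hypothetical and the real code byte-swaps with be32/be64 conversions first:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct toy_dinode {
	uint32_t atime_nsec;
	uint64_t size;
	unsigned is_reg : 1, is_dir : 1, is_lnk : 1;
};

static int dinode_corrupt(const struct toy_dinode *d)
{
	if (d->atime_nsec >= NSEC_PER_SEC)
		return 1;	/* nanoseconds out of range */
	if (d->size & (1ULL << 63))
		return 1;	/* di_size must not look negative */
	if (!d->is_reg && !d->is_dir && !d->is_lnk && d->size != 0)
		return 1;	/* devices/fifos/sockets must be size 0 */
	return 0;
}

int main(void)
{
	struct toy_dinode dev = { .atime_nsec = 5, .size = 4096 };

	printf("corrupt: %d\n", dinode_corrupt(&dev));	/* 1: device with size */
	return 0;
}
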
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
index e2bda58c32f0..1c9d7c7f64f5 100644
--- a/fs/xfs/scrub/parent.c
+++ b/fs/xfs/scrub/parent.c
@@ -27,36 +27,36 @@
/* Set us up to scrub parents. */
int
-xfs_scrub_setup_parent(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_parent(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_inode_contents(sc, ip, 0);
+ return xchk_setup_inode_contents(sc, ip, 0);
}
/* Parent pointers */
/* Look for an entry in a parent pointing to this inode. */
-struct xfs_scrub_parent_ctx {
- struct dir_context dc;
- xfs_ino_t ino;
- xfs_nlink_t nlink;
+struct xchk_parent_ctx {
+ struct dir_context dc;
+ xfs_ino_t ino;
+ xfs_nlink_t nlink;
};
/* Look for a single entry in a directory pointing to an inode. */
STATIC int
-xfs_scrub_parent_actor(
- struct dir_context *dc,
- const char *name,
- int namelen,
- loff_t pos,
- u64 ino,
- unsigned type)
+xchk_parent_actor(
+ struct dir_context *dc,
+ const char *name,
+ int namelen,
+ loff_t pos,
+ u64 ino,
+ unsigned type)
{
- struct xfs_scrub_parent_ctx *spc;
+ struct xchk_parent_ctx *spc;
- spc = container_of(dc, struct xfs_scrub_parent_ctx, dc);
+ spc = container_of(dc, struct xchk_parent_ctx, dc);
if (spc->ino == ino)
spc->nlink++;
return 0;
@@ -64,21 +64,21 @@ xfs_scrub_parent_actor(
/* Count the number of dentries in the parent dir that point to this inode. */
STATIC int
-xfs_scrub_parent_count_parent_dentries(
- struct xfs_scrub_context *sc,
- struct xfs_inode *parent,
- xfs_nlink_t *nlink)
+xchk_parent_count_parent_dentries(
+ struct xfs_scrub *sc,
+ struct xfs_inode *parent,
+ xfs_nlink_t *nlink)
{
- struct xfs_scrub_parent_ctx spc = {
- .dc.actor = xfs_scrub_parent_actor,
+ struct xchk_parent_ctx spc = {
+ .dc.actor = xchk_parent_actor,
.dc.pos = 0,
.ino = sc->ip->i_ino,
.nlink = 0,
};
- size_t bufsize;
- loff_t oldpos;
- uint lock_mode;
- int error = 0;
+ size_t bufsize;
+ loff_t oldpos;
+ uint lock_mode;
+ int error = 0;
/*
* If there are any blocks, read-ahead block 0 as we're almost
@@ -120,16 +120,16 @@ out:
* entry pointing back to the inode being scrubbed.
*/
STATIC int
-xfs_scrub_parent_validate(
- struct xfs_scrub_context *sc,
- xfs_ino_t dnum,
- bool *try_again)
+xchk_parent_validate(
+ struct xfs_scrub *sc,
+ xfs_ino_t dnum,
+ bool *try_again)
{
- struct xfs_mount *mp = sc->mp;
- struct xfs_inode *dp = NULL;
- xfs_nlink_t expected_nlink;
- xfs_nlink_t nlink;
- int error = 0;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *dp = NULL;
+ xfs_nlink_t expected_nlink;
+ xfs_nlink_t nlink;
+ int error = 0;
*try_again = false;
@@ -138,7 +138,7 @@ xfs_scrub_parent_validate(
/* '..' must not point to ourselves. */
if (sc->ip->i_ino == dnum) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
@@ -165,13 +165,13 @@ xfs_scrub_parent_validate(
error = xfs_iget(mp, sc->tp, dnum, XFS_IGET_UNTRUSTED, 0, &dp);
if (error == -EINVAL) {
error = -EFSCORRUPTED;
- xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error);
+ xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error);
goto out;
}
- if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out_rele;
}
@@ -183,12 +183,12 @@ xfs_scrub_parent_validate(
* the child inodes.
*/
if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) {
- error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink);
- if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
+ error = xchk_parent_count_parent_dentries(sc, dp, &nlink);
+ if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
&error))
goto out_unlock;
if (nlink != expected_nlink)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out_unlock;
}
@@ -200,18 +200,18 @@ xfs_scrub_parent_validate(
*/
xfs_iunlock(sc->ip, sc->ilock_flags);
sc->ilock_flags = 0;
- error = xfs_scrub_ilock_inverted(dp, XFS_IOLOCK_SHARED);
+ error = xchk_ilock_inverted(dp, XFS_IOLOCK_SHARED);
if (error)
goto out_rele;
/* Go looking for our dentry. */
- error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink);
- if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
+ error = xchk_parent_count_parent_dentries(sc, dp, &nlink);
+ if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out_unlock;
/* Drop the parent lock, relock this inode. */
xfs_iunlock(dp, XFS_IOLOCK_SHARED);
- error = xfs_scrub_ilock_inverted(sc->ip, XFS_IOLOCK_EXCL);
+ error = xchk_ilock_inverted(sc->ip, XFS_IOLOCK_EXCL);
if (error)
goto out_rele;
sc->ilock_flags = XFS_IOLOCK_EXCL;
@@ -225,43 +225,43 @@ xfs_scrub_parent_validate(
/* Look up '..' to see if the inode changed. */
error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out_rele;
/* Drat, parent changed. Try again! */
if (dnum != dp->i_ino) {
- iput(VFS_I(dp));
+ xfs_irele(dp);
*try_again = true;
return 0;
}
- iput(VFS_I(dp));
+ xfs_irele(dp);
/*
* '..' didn't change, so check that there was only one entry
* for us in the parent.
*/
if (nlink != expected_nlink)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
return error;
out_unlock:
xfs_iunlock(dp, XFS_IOLOCK_SHARED);
out_rele:
- iput(VFS_I(dp));
+ xfs_irele(dp);
out:
return error;
}
/* Scrub a parent pointer. */
int
-xfs_scrub_parent(
- struct xfs_scrub_context *sc)
+xchk_parent(
+ struct xfs_scrub *sc)
{
- struct xfs_mount *mp = sc->mp;
- xfs_ino_t dnum;
- bool try_again;
- int tries = 0;
- int error = 0;
+ struct xfs_mount *mp = sc->mp;
+ xfs_ino_t dnum;
+ bool try_again;
+ int tries = 0;
+ int error = 0;
/*
* If we're a directory, check that the '..' link points up to
@@ -272,7 +272,7 @@ xfs_scrub_parent(
/* We're not a special inode, are we? */
if (!xfs_verify_dir_ino(mp, sc->ip->i_ino)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
@@ -288,10 +288,10 @@ xfs_scrub_parent(
/* Look up '..' */
error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
if (!xfs_verify_dir_ino(mp, dnum)) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
@@ -299,12 +299,12 @@ xfs_scrub_parent(
if (sc->ip == mp->m_rootip) {
if (sc->ip->i_ino != mp->m_sb.sb_rootino ||
sc->ip->i_ino != dnum)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
do {
- error = xfs_scrub_parent_validate(sc, dnum, &try_again);
+ error = xchk_parent_validate(sc, dnum, &try_again);
if (error)
goto out;
} while (try_again && ++tries < 20);
@@ -314,7 +314,7 @@ xfs_scrub_parent(
* incomplete. Userspace can decide if it wants to try again.
*/
if (try_again && tries == 20)
- xfs_scrub_set_incomplete(sc);
+ xchk_set_incomplete(sc);
out:
/*
* If we failed to lock the parent inode even after a retry, just mark
@@ -322,7 +322,7 @@ out:
*/
if (sc->try_harder && error == -EDEADLOCK) {
error = 0;
- xfs_scrub_set_incomplete(sc);
+ xchk_set_incomplete(sc);
}
return error;
}
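
The retry logic above is a bounded loop: revalidate while the parent keeps changing underneath the lock dance, and mark the scrub incomplete rather than spin forever once the cap is hit. A standalone sketch of that shape, with a toy validator standing in for xchk_parent_validate:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRIES 20

/* Hypothetical validator: reports "try again" until the environment
 * stops shifting, like '..' changing between lock acquisitions. */
static int validate(int attempt, bool *try_again)
{
	*try_again = (attempt < 3);	/* pretend the parent moved twice */
	return 0;
}

int main(void)
{
	bool try_again;
	int tries = 0;
	int error;

	do {
		error = validate(tries, &try_again);
		if (error)
			return error;
	} while (try_again && ++tries < MAX_TRIES);

	if (try_again && tries == MAX_TRIES)
		printf("giving up: result incomplete\n");
	else
		printf("settled after %d retries\n", tries);
	return 0;
}
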
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 6ff906aa0a3b..782d582d3edd 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -30,8 +30,8 @@
/* Convert a scrub type code to a DQ flag, or return 0 if error. */
static inline uint
-xfs_scrub_quota_to_dqtype(
- struct xfs_scrub_context *sc)
+xchk_quota_to_dqtype(
+ struct xfs_scrub *sc)
{
switch (sc->sm->sm_type) {
case XFS_SCRUB_TYPE_UQUOTA:
@@ -47,24 +47,24 @@ xfs_scrub_quota_to_dqtype(
/* Set us up to scrub a quota. */
int
-xfs_scrub_setup_quota(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_quota(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- uint dqtype;
- int error;
+ uint dqtype;
+ int error;
if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
return -ENOENT;
- dqtype = xfs_scrub_quota_to_dqtype(sc);
+ dqtype = xchk_quota_to_dqtype(sc);
if (dqtype == 0)
return -EINVAL;
sc->has_quotaofflock = true;
mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
if (!xfs_this_quota_on(sc->mp, dqtype))
return -ENOENT;
- error = xfs_scrub_setup_fs(sc, ip);
+ error = xchk_setup_fs(sc, ip);
if (error)
return error;
sc->ip = xfs_quota_inode(sc->mp, dqtype);
@@ -75,35 +75,35 @@ xfs_scrub_setup_quota(
/* Quotas. */
-struct xfs_scrub_quota_info {
- struct xfs_scrub_context *sc;
- xfs_dqid_t last_id;
+struct xchk_quota_info {
+ struct xfs_scrub *sc;
+ xfs_dqid_t last_id;
};
/* Scrub the fields in an individual quota item. */
STATIC int
-xfs_scrub_quota_item(
- struct xfs_dquot *dq,
- uint dqtype,
- void *priv)
+xchk_quota_item(
+ struct xfs_dquot *dq,
+ uint dqtype,
+ void *priv)
{
- struct xfs_scrub_quota_info *sqi = priv;
- struct xfs_scrub_context *sc = sqi->sc;
- struct xfs_mount *mp = sc->mp;
- struct xfs_disk_dquot *d = &dq->q_core;
- struct xfs_quotainfo *qi = mp->m_quotainfo;
- xfs_fileoff_t offset;
- unsigned long long bsoft;
- unsigned long long isoft;
- unsigned long long rsoft;
- unsigned long long bhard;
- unsigned long long ihard;
- unsigned long long rhard;
- unsigned long long bcount;
- unsigned long long icount;
- unsigned long long rcount;
- xfs_ino_t fs_icount;
- xfs_dqid_t id = be32_to_cpu(d->d_id);
+ struct xchk_quota_info *sqi = priv;
+ struct xfs_scrub *sc = sqi->sc;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_disk_dquot *d = &dq->q_core;
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+ xfs_fileoff_t offset;
+ unsigned long long bsoft;
+ unsigned long long isoft;
+ unsigned long long rsoft;
+ unsigned long long bhard;
+ unsigned long long ihard;
+ unsigned long long rhard;
+ unsigned long long bcount;
+ unsigned long long icount;
+ unsigned long long rcount;
+ xfs_ino_t fs_icount;
+ xfs_dqid_t id = be32_to_cpu(d->d_id);
/*
* Except for the root dquot, the actual dquot we got must either have
@@ -111,16 +111,16 @@ xfs_scrub_quota_item(
*/
offset = id / qi->qi_dqperchunk;
if (id && id <= sqi->last_id)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
sqi->last_id = id;
/* Did we get the dquot type we wanted? */
if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* Check the limits. */
bhard = be64_to_cpu(d->d_blk_hardlimit);
@@ -140,19 +140,19 @@ xfs_scrub_quota_item(
* the hard limit.
*/
if (bhard > mp->m_sb.sb_dblocks)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (bsoft > bhard)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (ihard > mp->m_maxicount)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (isoft > ihard)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
if (rhard > mp->m_sb.sb_rblocks)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (rsoft > rhard)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* Check the resource counts. */
bcount = be64_to_cpu(d->d_bcount);
@@ -167,15 +167,15 @@ xfs_scrub_quota_item(
*/
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
if (mp->m_sb.sb_dblocks < bcount)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK,
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK,
offset);
} else {
if (mp->m_sb.sb_dblocks < bcount)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
offset);
}
if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/*
* We can violate the hard limits if the admin suddenly sets a
@@ -183,29 +183,29 @@ xfs_scrub_quota_item(
* admin review.
*/
if (id != 0 && bhard != 0 && bcount > bhard)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (id != 0 && ihard != 0 && icount > ihard)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
if (id != 0 && rhard != 0 && rcount > rhard)
- xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
return 0;
}
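
The limit checks in xchk_quota_item distinguish two severities: a soft limit above its hard limit is outright corruption, while a hard limit beyond what the filesystem can hold, or usage above a nonzero hard limit (which an admin can cause just by lowering the limit), only warrants a warning. A distilled sketch of that three-way decision (the enum values are illustrative, not the kernel's flags):

#include <stdint.h>
#include <stdio.h>

enum { QCHK_OK, QCHK_WARN, QCHK_CORRUPT };

static int check_limits(uint64_t soft, uint64_t hard, uint64_t count,
			uint64_t fs_max)
{
	if (soft > hard)
		return QCHK_CORRUPT;	/* internally inconsistent limits */
	if (hard > fs_max)
		return QCHK_WARN;	/* limit exceeds what the fs can hold */
	if (hard != 0 && count > hard)
		return QCHK_WARN;	/* admin may have just lowered the limit */
	return QCHK_OK;
}

int main(void)
{
	printf("%d\n", check_limits(100, 50, 10, 1000));	/* 2: soft > hard */
	printf("%d\n", check_limits(50, 100, 150, 1000));	/* 1: over hard */
	printf("%d\n", check_limits(50, 100, 80, 1000));	/* 0 */
	return 0;
}
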
/* Check the quota's data fork. */
STATIC int
-xfs_scrub_quota_data_fork(
- struct xfs_scrub_context *sc)
+xchk_quota_data_fork(
+ struct xfs_scrub *sc)
{
- struct xfs_bmbt_irec irec = { 0 };
- struct xfs_iext_cursor icur;
- struct xfs_quotainfo *qi = sc->mp->m_quotainfo;
- struct xfs_ifork *ifp;
- xfs_fileoff_t max_dqid_off;
- int error = 0;
+ struct xfs_bmbt_irec irec = { 0 };
+ struct xfs_iext_cursor icur;
+ struct xfs_quotainfo *qi = sc->mp->m_quotainfo;
+ struct xfs_ifork *ifp;
+ xfs_fileoff_t max_dqid_off;
+ int error = 0;
/* Invoke the fork scrubber. */
- error = xfs_scrub_metadata_inode_forks(sc);
+ error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error;
@@ -213,7 +213,7 @@ xfs_scrub_quota_data_fork(
max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
for_each_xfs_iext(ifp, &icur, &irec) {
- if (xfs_scrub_should_terminate(sc, &error))
+ if (xchk_should_terminate(sc, &error))
break;
/*
* delalloc extents or blocks mapped above the highest
@@ -222,7 +222,7 @@ xfs_scrub_quota_data_fork(
if (isnullstartblock(irec.br_startblock) ||
irec.br_startoff > max_dqid_off ||
irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
irec.br_startoff);
break;
}
@@ -233,19 +233,19 @@ xfs_scrub_quota_data_fork(
/* Scrub all of a quota type's items. */
int
-xfs_scrub_quota(
- struct xfs_scrub_context *sc)
+xchk_quota(
+ struct xfs_scrub *sc)
{
- struct xfs_scrub_quota_info sqi;
- struct xfs_mount *mp = sc->mp;
- struct xfs_quotainfo *qi = mp->m_quotainfo;
- uint dqtype;
- int error = 0;
+ struct xchk_quota_info sqi;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+ uint dqtype;
+ int error = 0;
- dqtype = xfs_scrub_quota_to_dqtype(sc);
+ dqtype = xchk_quota_to_dqtype(sc);
/* Look for problem extents. */
- error = xfs_scrub_quota_data_fork(sc);
+ error = xchk_quota_data_fork(sc);
if (error)
goto out;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
@@ -260,10 +260,10 @@ xfs_scrub_quota(
sc->ilock_flags = 0;
sqi.sc = sc;
sqi.last_id = 0;
- error = xfs_qm_dqiterate(mp, dqtype, xfs_scrub_quota_item, &sqi);
+ error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
sc->ilock_flags = XFS_ILOCK_EXCL;
xfs_ilock(sc->ip, sc->ilock_flags);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK,
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
sqi.last_id * qi->qi_dqperchunk, &error))
goto out;
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 607a9faa8ecc..e8c82b026083 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -28,11 +28,11 @@
* Set us up to scrub reference count btrees.
*/
int
-xfs_scrub_setup_ag_refcountbt(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_ag_refcountbt(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_ag_btree(sc, ip, false);
+ return xchk_setup_ag_btree(sc, ip, false);
}
/* Reference count btree scrubber. */
@@ -73,22 +73,22 @@ xfs_scrub_setup_ag_refcountbt(
* If the refcount is correct, all the check conditions in the algorithm
* should always hold true. If not, the refcount is incorrect.
*/
-struct xfs_scrub_refcnt_frag {
- struct list_head list;
- struct xfs_rmap_irec rm;
+struct xchk_refcnt_frag {
+ struct list_head list;
+ struct xfs_rmap_irec rm;
};
-struct xfs_scrub_refcnt_check {
- struct xfs_scrub_context *sc;
- struct list_head fragments;
+struct xchk_refcnt_check {
+ struct xfs_scrub *sc;
+ struct list_head fragments;
/* refcount extent we're examining */
- xfs_agblock_t bno;
- xfs_extlen_t len;
- xfs_nlink_t refcount;
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ xfs_nlink_t refcount;
/* number of owners seen */
- xfs_nlink_t seen;
+ xfs_nlink_t seen;
};
/*
@@ -99,18 +99,18 @@ struct xfs_scrub_refcnt_check {
* fragments as the refcountbt says we should have.
*/
STATIC int
-xfs_scrub_refcountbt_rmap_check(
+xchk_refcountbt_rmap_check(
struct xfs_btree_cur *cur,
struct xfs_rmap_irec *rec,
void *priv)
{
- struct xfs_scrub_refcnt_check *refchk = priv;
- struct xfs_scrub_refcnt_frag *frag;
+ struct xchk_refcnt_check *refchk = priv;
+ struct xchk_refcnt_frag *frag;
xfs_agblock_t rm_last;
xfs_agblock_t rc_last;
int error = 0;
- if (xfs_scrub_should_terminate(refchk->sc, &error))
+ if (xchk_should_terminate(refchk->sc, &error))
return error;
rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
@@ -118,7 +118,7 @@ xfs_scrub_refcountbt_rmap_check(
/* Confirm that a single-owner refc extent is a CoW stage. */
if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
- xfs_scrub_btree_xref_set_corrupt(refchk->sc, cur, 0);
+ xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
return 0;
}
@@ -135,7 +135,7 @@ xfs_scrub_refcountbt_rmap_check(
* is healthy each rmap_irec we see will be in agbno order
* so we don't need insertion sort here.
*/
- frag = kmem_alloc(sizeof(struct xfs_scrub_refcnt_frag),
+ frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
KM_MAYFAIL);
if (!frag)
return -ENOMEM;
@@ -154,12 +154,12 @@ xfs_scrub_refcountbt_rmap_check(
* we have a refcountbt error.
*/
STATIC void
-xfs_scrub_refcountbt_process_rmap_fragments(
- struct xfs_scrub_refcnt_check *refchk)
+xchk_refcountbt_process_rmap_fragments(
+ struct xchk_refcnt_check *refchk)
{
struct list_head worklist;
- struct xfs_scrub_refcnt_frag *frag;
- struct xfs_scrub_refcnt_frag *n;
+ struct xchk_refcnt_frag *frag;
+ struct xchk_refcnt_frag *n;
xfs_agblock_t bno;
xfs_agblock_t rbno;
xfs_agblock_t next_rbno;
@@ -277,13 +277,13 @@ done:
/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
-xfs_scrub_refcountbt_xref_rmap(
- struct xfs_scrub_context *sc,
+xchk_refcountbt_xref_rmap(
+ struct xfs_scrub *sc,
xfs_agblock_t bno,
xfs_extlen_t len,
xfs_nlink_t refcount)
{
- struct xfs_scrub_refcnt_check refchk = {
+ struct xchk_refcnt_check refchk = {
.sc = sc,
.bno = bno,
.len = len,
@@ -292,11 +292,11 @@ xfs_scrub_refcountbt_xref_rmap(
};
struct xfs_rmap_irec low;
struct xfs_rmap_irec high;
- struct xfs_scrub_refcnt_frag *frag;
- struct xfs_scrub_refcnt_frag *n;
+ struct xchk_refcnt_frag *frag;
+ struct xchk_refcnt_frag *n;
int error;
- if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
/* Cross-reference with the rmapbt to confirm the refcount. */
@@ -307,13 +307,13 @@ xfs_scrub_refcountbt_xref_rmap(
INIT_LIST_HEAD(&refchk.fragments);
error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
- &xfs_scrub_refcountbt_rmap_check, &refchk);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ &xchk_refcountbt_rmap_check, &refchk);
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
goto out_free;
- xfs_scrub_refcountbt_process_rmap_fragments(&refchk);
+ xchk_refcountbt_process_rmap_fragments(&refchk);
if (refcount != refchk.seen)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
out_free:
list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
@@ -324,34 +324,34 @@ out_free:
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_refcountbt_xref(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len,
- xfs_nlink_t refcount)
+xchk_refcountbt_xref(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len,
+ xfs_nlink_t refcount)
{
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, len);
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
- xfs_scrub_refcountbt_xref_rmap(sc, agbno, len, refcount);
+ xchk_xref_is_used_space(sc, agbno, len);
+ xchk_xref_is_not_inode_chunk(sc, agbno, len);
+ xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
}
/* Scrub a refcountbt record. */
STATIC int
-xfs_scrub_refcountbt_rec(
- struct xfs_scrub_btree *bs,
- union xfs_btree_rec *rec)
+xchk_refcountbt_rec(
+ struct xchk_btree *bs,
+ union xfs_btree_rec *rec)
{
- struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_agblock_t *cow_blocks = bs->private;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
- xfs_agblock_t bno;
- xfs_extlen_t len;
- xfs_nlink_t refcount;
- bool has_cowflag;
- int error = 0;
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_agblock_t *cow_blocks = bs->private;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ xfs_nlink_t refcount;
+ bool has_cowflag;
+ int error = 0;
bno = be32_to_cpu(rec->refc.rc_startblock);
len = be32_to_cpu(rec->refc.rc_blockcount);
@@ -360,7 +360,7 @@ xfs_scrub_refcountbt_rec(
/* Only CoW records can have refcount == 1. */
has_cowflag = (bno & XFS_REFC_COW_START);
if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (has_cowflag)
(*cow_blocks) += len;
@@ -369,75 +369,75 @@ xfs_scrub_refcountbt_rec(
if (bno + len <= bno ||
!xfs_verify_agbno(mp, agno, bno) ||
!xfs_verify_agbno(mp, agno, bno + len - 1))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (refcount == 0)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- xfs_scrub_refcountbt_xref(bs->sc, bno, len, refcount);
+ xchk_refcountbt_xref(bs->sc, bno, len, refcount);
return error;
}
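The "bno + len <= bno" test in xchk_refcountbt_rec is the stock unsigned-wraparound guard (agblock numbers are 32 bits wide here), and it also rejects zero-length records. A brief hedged illustration:

#include <stdint.h>
#include <stdio.h>

/* True if [bno, bno+len) wraps past the top of the block space or len == 0. */
static int extent_is_bogus(uint32_t bno, uint32_t len)
{
	return bno + len <= bno;
}

int main(void)
{
	printf("%d\n", extent_is_bogus(100, 50));		/* 0: sane */
	printf("%d\n", extent_is_bogus(UINT32_MAX - 1, 4));	/* 1: wraps to 2 */
	printf("%d\n", extent_is_bogus(100, 0));		/* 1: empty */
	return 0;
}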
/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
-xfs_scrub_refcount_xref_rmap(
- struct xfs_scrub_context *sc,
- struct xfs_owner_info *oinfo,
- xfs_filblks_t cow_blocks)
+xchk_refcount_xref_rmap(
+ struct xfs_scrub *sc,
+ struct xfs_owner_info *oinfo,
+ xfs_filblks_t cow_blocks)
{
- xfs_extlen_t refcbt_blocks = 0;
- xfs_filblks_t blocks;
- int error;
+ xfs_extlen_t refcbt_blocks = 0;
+ xfs_filblks_t blocks;
+ int error;
- if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
/* Check that we saw as many refcbt blocks as the rmap knows about. */
error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
- if (!xfs_scrub_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
+ if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
return;
- error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
&blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != refcbt_blocks)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
/* Check that we saw as many cow blocks as the rmap knows about. */
xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
- error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
&blocks);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != cow_blocks)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/* Scrub the refcount btree for some AG. */
int
-xfs_scrub_refcountbt(
- struct xfs_scrub_context *sc)
+xchk_refcountbt(
+ struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
- xfs_agblock_t cow_blocks = 0;
- int error;
+ struct xfs_owner_info oinfo;
+ xfs_agblock_t cow_blocks = 0;
+ int error;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
- error = xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec,
+ error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
&oinfo, &cow_blocks);
if (error)
return error;
- xfs_scrub_refcount_xref_rmap(sc, &oinfo, cow_blocks);
+ xchk_refcount_xref_rmap(sc, &oinfo, cow_blocks);
return 0;
}
/* xref check that a cow staging extent is marked in the refcountbt. */
void
-xfs_scrub_xref_is_cow_staging(
- struct xfs_scrub_context *sc,
+xchk_xref_is_cow_staging(
+ struct xfs_scrub *sc,
xfs_agblock_t agbno,
xfs_extlen_t len)
{
@@ -446,35 +446,35 @@ xfs_scrub_xref_is_cow_staging(
int has_refcount;
int error;
- if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
return;
/* Find the CoW staging extent. */
error = xfs_refcount_lookup_le(sc->sa.refc_cur,
agbno + XFS_REFC_COW_START, &has_refcount);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
if (!has_refcount) {
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
return;
}
error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
if (!has_refcount) {
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
return;
}
/* CoW flag must be set, refcount must be 1. */
has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
if (!has_cowflag || rc.rc_refcount != 1)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
/* Must be at least as long as what was passed in */
if (rc.rc_blockcount < len)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
/*
@@ -482,20 +482,20 @@ xfs_scrub_xref_is_cow_staging(
* can have multiple owners.
*/
void
-xfs_scrub_xref_is_not_shared(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno,
- xfs_extlen_t len)
+xchk_xref_is_not_shared(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
{
- bool shared;
- int error;
+ bool shared;
+ int error;
- if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
return;
error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
if (shared)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 326be4e8b71e..9f08dd9bf1d5 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -34,6 +34,7 @@
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
+#include "scrub/bitmap.h"
/*
* Attempt to repair some metadata, if the metadata is corrupt and userspace
@@ -41,21 +42,21 @@
* and will set *fixed to true if it thinks it repaired anything.
*/
int
-xfs_repair_attempt(
- struct xfs_inode *ip,
- struct xfs_scrub_context *sc,
- bool *fixed)
+xrep_attempt(
+ struct xfs_inode *ip,
+ struct xfs_scrub *sc,
+ bool *fixed)
{
- int error = 0;
+ int error = 0;
- trace_xfs_repair_attempt(ip, sc->sm, error);
+ trace_xrep_attempt(ip, sc->sm, error);
- xfs_scrub_ag_btcur_free(&sc->sa);
+ xchk_ag_btcur_free(&sc->sa);
/* Repair whatever's broken. */
ASSERT(sc->ops->repair);
error = sc->ops->repair(sc);
- trace_xfs_repair_done(ip, sc->sm, error);
+ trace_xrep_done(ip, sc->sm, error);
switch (error) {
case 0:
/*
@@ -93,8 +94,8 @@ xfs_repair_attempt(
* structure to track rate limiting information.
*/
void
-xfs_repair_failure(
- struct xfs_mount *mp)
+xrep_failure(
+ struct xfs_mount *mp)
{
xfs_alert_ratelimited(mp,
"Corruption not fixed during online repair. Unmount and run xfs_repair.");
@@ -105,12 +106,12 @@ xfs_repair_failure(
* given mountpoint.
*/
int
-xfs_repair_probe(
- struct xfs_scrub_context *sc)
+xrep_probe(
+ struct xfs_scrub *sc)
{
- int error = 0;
+ int error = 0;
- if (xfs_scrub_should_terminate(sc, &error))
+ if (xchk_should_terminate(sc, &error))
return error;
return 0;
@@ -121,15 +122,18 @@ xfs_repair_probe(
* the btree cursors.
*/
int
-xfs_repair_roll_ag_trans(
- struct xfs_scrub_context *sc)
+xrep_roll_ag_trans(
+ struct xfs_scrub *sc)
{
- int error;
+ int error;
/* Keep the AG header buffers locked so we can keep going. */
- xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
- xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
- xfs_trans_bhold(sc->tp, sc->sa.agfl_bp);
+ if (sc->sa.agi_bp)
+ xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
+ if (sc->sa.agf_bp)
+ xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
+ if (sc->sa.agfl_bp)
+ xfs_trans_bhold(sc->tp, sc->sa.agfl_bp);
/* Roll the transaction. */
error = xfs_trans_roll(&sc->tp);
@@ -137,9 +141,12 @@ xfs_repair_roll_ag_trans(
goto out_release;
/* Join AG headers to the new transaction. */
- xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
- xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
- xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp);
+ if (sc->sa.agi_bp)
+ xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
+ if (sc->sa.agf_bp)
+ xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
+ if (sc->sa.agfl_bp)
+ xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp);
return 0;
@@ -149,9 +156,12 @@ out_release:
* buffers will be released during teardown on our way out
* of the kernel.
*/
- xfs_trans_bhold_release(sc->tp, sc->sa.agi_bp);
- xfs_trans_bhold_release(sc->tp, sc->sa.agf_bp);
- xfs_trans_bhold_release(sc->tp, sc->sa.agfl_bp);
+ if (sc->sa.agi_bp)
+ xfs_trans_bhold_release(sc->tp, sc->sa.agi_bp);
+ if (sc->sa.agf_bp)
+ xfs_trans_bhold_release(sc->tp, sc->sa.agf_bp);
+ if (sc->sa.agfl_bp)
+ xfs_trans_bhold_release(sc->tp, sc->sa.agfl_bp);
return error;
}
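The new NULL checks let this helper serve repairs that hold only some of the AG header buffers. Stripped down to a single buffer, the hold/roll/rejoin pattern it implements reads like the sketch below (kernel context assumed; roll_keep_buffer is a hypothetical name):

/* Keep @bp locked while rolling @tpp to a fresh transaction. */
static int
roll_keep_buffer(
	struct xfs_trans	**tpp,
	struct xfs_buf		*bp)
{
	int			error;

	if (bp)
		xfs_trans_bhold(*tpp, bp);	/* don't unlock at commit */
	error = xfs_trans_roll(tpp);
	if (error) {
		/* transaction teardown will unlock and release it */
		if (bp)
			xfs_trans_bhold_release(*tpp, bp);
		return error;
	}
	if (bp)
		xfs_trans_bjoin(*tpp, bp);	/* attach to the new tp */
	return 0;
}

The bhold keeps the buffer locked while the old transaction commits; without the bjoin afterwards, the new transaction would never unlock it.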
@@ -162,10 +172,10 @@ out_release:
* in AG reservations) to construct a whole btree.
*/
bool
-xfs_repair_ag_has_space(
- struct xfs_perag *pag,
- xfs_extlen_t nr_blocks,
- enum xfs_ag_resv_type type)
+xrep_ag_has_space(
+ struct xfs_perag *pag,
+ xfs_extlen_t nr_blocks,
+ enum xfs_ag_resv_type type)
{
return !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
!xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
@@ -178,15 +188,15 @@ xfs_repair_ag_has_space(
* any type of per-AG btree.
*/
xfs_extlen_t
-xfs_repair_calc_ag_resblks(
- struct xfs_scrub_context *sc)
+xrep_calc_ag_resblks(
+ struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
struct xfs_scrub_metadata *sm = sc->sm;
struct xfs_perag *pag;
struct xfs_buf *bp;
- xfs_agino_t icount = 0;
- xfs_extlen_t aglen = 0;
+ xfs_agino_t icount = NULLAGINO;
+ xfs_extlen_t aglen = NULLAGBLOCK;
xfs_extlen_t usedlen;
xfs_extlen_t freelen;
xfs_extlen_t bnobt_sz;
@@ -198,20 +208,14 @@ xfs_repair_calc_ag_resblks(
if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
return 0;
- /* Use in-core counters if possible. */
pag = xfs_perag_get(mp, sm->sm_agno);
- if (pag->pagi_init)
+ if (pag->pagi_init) {
+ /* Use in-core icount if possible. */
icount = pag->pagi_count;
-
- /*
- * Otherwise try to get the actual counters from disk; if not, make
- * some worst case assumptions.
- */
- if (icount == 0) {
+ } else {
+ /* Try to get the actual counters from disk. */
error = xfs_ialloc_read_agi(mp, NULL, sm->sm_agno, &bp);
- if (error) {
- icount = mp->m_sb.sb_agblocks / mp->m_sb.sb_inopblock;
- } else {
+ if (!error) {
icount = pag->pagi_count;
xfs_buf_relse(bp);
}
@@ -219,19 +223,33 @@ xfs_repair_calc_ag_resblks(
/* Now grab the block counters from the AGF. */
error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
- if (error) {
- aglen = mp->m_sb.sb_agblocks;
- freelen = aglen;
- usedlen = aglen;
- } else {
+ if (!error) {
aglen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_length);
- freelen = pag->pagf_freeblks;
+ freelen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_freeblks);
usedlen = aglen - freelen;
xfs_buf_relse(bp);
}
xfs_perag_put(pag);
- trace_xfs_repair_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
+ /* If the icount is impossible, make some worst-case assumptions. */
+ if (icount == NULLAGINO ||
+ !xfs_verify_agino(mp, sm->sm_agno, icount)) {
+ xfs_agino_t first, last;
+
+ xfs_agino_range(mp, sm->sm_agno, &first, &last);
+ icount = last - first + 1;
+ }
+
+ /* If the block counts are impossible, make worst-case assumptions. */
+ if (aglen == NULLAGBLOCK ||
+ aglen != xfs_ag_block_count(mp, sm->sm_agno) ||
+ freelen >= aglen) {
+ aglen = xfs_ag_block_count(mp, sm->sm_agno);
+ freelen = aglen;
+ usedlen = aglen;
+ }
+
+ trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
freelen, usedlen);
/*
@@ -270,7 +288,7 @@ xfs_repair_calc_ag_resblks(
rmapbt_sz = 0;
}
- trace_xfs_repair_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
+ trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
inobt_sz, rmapbt_sz, refcbt_sz);
return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
@@ -278,15 +296,15 @@ xfs_repair_calc_ag_resblks(
/* Allocate a block in an AG. */
int
-xfs_repair_alloc_ag_block(
- struct xfs_scrub_context *sc,
- struct xfs_owner_info *oinfo,
- xfs_fsblock_t *fsbno,
- enum xfs_ag_resv_type resv)
+xrep_alloc_ag_block(
+ struct xfs_scrub *sc,
+ struct xfs_owner_info *oinfo,
+ xfs_fsblock_t *fsbno,
+ enum xfs_ag_resv_type resv)
{
- struct xfs_alloc_arg args = {0};
- xfs_agblock_t bno;
- int error;
+ struct xfs_alloc_arg args = {0};
+ xfs_agblock_t bno;
+ int error;
switch (resv) {
case XFS_AG_RESV_AGFL:
@@ -329,8 +347,8 @@ xfs_repair_alloc_ag_block(
/* Initialize a new AG btree root block with zero entries. */
int
-xfs_repair_init_btblock(
- struct xfs_scrub_context *sc,
+xrep_init_btblock(
+ struct xfs_scrub *sc,
xfs_fsblock_t fsb,
struct xfs_buf **bpp,
xfs_btnum_t btnum,
@@ -340,7 +358,7 @@ xfs_repair_init_btblock(
struct xfs_mount *mp = sc->mp;
struct xfs_buf *bp;
- trace_xfs_repair_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
+ trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
XFS_FSB_TO_AGBNO(mp, fsb), btnum);
ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno);
@@ -367,222 +385,29 @@ xfs_repair_init_btblock(
*
* However, that leaves the matter of removing all the metadata describing the
* old broken structure. For primary metadata we use the rmap data to collect
- * every extent with a matching rmap owner (exlist); we then iterate all other
+ * every extent with a matching rmap owner (bitmap); we then iterate all other
* metadata structures with the same rmap owner to collect the extents that
- * cannot be removed (sublist). We then subtract sublist from exlist to
+ * cannot be removed (sublist). We then subtract sublist from bitmap to
* derive the blocks that were used by the old btree. These blocks can be
* reaped.
*
* For rmapbt reconstructions we must use different tactics for extent
* collection. First we iterate all primary metadata (this excludes the old
* rmapbt, obviously) to generate new rmap records. The gaps in the rmap
- * records are collected as exlist. The bnobt records are collected as
- * sublist. As with the other btrees we subtract sublist from exlist, and the
+ * records are collected as bitmap. The bnobt records are collected as
+ * sublist. As with the other btrees we subtract sublist from bitmap, and the
* result (since the rmapbt lives in the free space) are the blocks from the
* old rmapbt.
- */
-
-/* Collect a dead btree extent for later disposal. */
-int
-xfs_repair_collect_btree_extent(
- struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist,
- xfs_fsblock_t fsbno,
- xfs_extlen_t len)
-{
- struct xfs_repair_extent *rex;
-
- trace_xfs_repair_collect_btree_extent(sc->mp,
- XFS_FSB_TO_AGNO(sc->mp, fsbno),
- XFS_FSB_TO_AGBNO(sc->mp, fsbno), len);
-
- rex = kmem_alloc(sizeof(struct xfs_repair_extent), KM_MAYFAIL);
- if (!rex)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&rex->list);
- rex->fsbno = fsbno;
- rex->len = len;
- list_add_tail(&rex->list, &exlist->list);
-
- return 0;
-}
-
-/*
- * An error happened during the rebuild so the transaction will be cancelled.
- * The fs will shut down, and the administrator has to unmount and run repair.
- * Therefore, free all the memory associated with the list so we can die.
- */
-void
-xfs_repair_cancel_btree_extents(
- struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist)
-{
- struct xfs_repair_extent *rex;
- struct xfs_repair_extent *n;
-
- for_each_xfs_repair_extent_safe(rex, n, exlist) {
- list_del(&rex->list);
- kmem_free(rex);
- }
-}
-
-/* Compare two btree extents. */
-static int
-xfs_repair_btree_extent_cmp(
- void *priv,
- struct list_head *a,
- struct list_head *b)
-{
- struct xfs_repair_extent *ap;
- struct xfs_repair_extent *bp;
-
- ap = container_of(a, struct xfs_repair_extent, list);
- bp = container_of(b, struct xfs_repair_extent, list);
-
- if (ap->fsbno > bp->fsbno)
- return 1;
- if (ap->fsbno < bp->fsbno)
- return -1;
- return 0;
-}
-
-/*
- * Remove all the blocks mentioned in @sublist from the extents in @exlist.
*
- * The intent is that callers will iterate the rmapbt for all of its records
- * for a given owner to generate @exlist; and iterate all the blocks of the
- * metadata structures that are not being rebuilt and have the same rmapbt
- * owner to generate @sublist. This routine subtracts all the extents
- * mentioned in sublist from all the extents linked in @exlist, which leaves
- * @exlist as the list of blocks that are not accounted for, which we assume
- * are the dead blocks of the old metadata structure. The blocks mentioned in
- * @exlist can be reaped.
- */
-#define LEFT_ALIGNED (1 << 0)
-#define RIGHT_ALIGNED (1 << 1)
-int
-xfs_repair_subtract_extents(
- struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist,
- struct xfs_repair_extent_list *sublist)
-{
- struct list_head *lp;
- struct xfs_repair_extent *ex;
- struct xfs_repair_extent *newex;
- struct xfs_repair_extent *subex;
- xfs_fsblock_t sub_fsb;
- xfs_extlen_t sub_len;
- int state;
- int error = 0;
-
- if (list_empty(&exlist->list) || list_empty(&sublist->list))
- return 0;
- ASSERT(!list_empty(&sublist->list));
-
- list_sort(NULL, &exlist->list, xfs_repair_btree_extent_cmp);
- list_sort(NULL, &sublist->list, xfs_repair_btree_extent_cmp);
-
- /*
- * Now that we've sorted both lists, we iterate exlist once, rolling
- * forward through sublist and/or exlist as necessary until we find an
- * overlap or reach the end of either list. We do not reset lp to the
- * head of exlist nor do we reset subex to the head of sublist. The
- * list traversal is similar to merge sort, but we're deleting
- * instead. In this manner we avoid O(n^2) operations.
- */
- subex = list_first_entry(&sublist->list, struct xfs_repair_extent,
- list);
- lp = exlist->list.next;
- while (lp != &exlist->list) {
- ex = list_entry(lp, struct xfs_repair_extent, list);
-
- /*
- * Advance subex and/or ex until we find a pair that
- * intersect or we run out of extents.
- */
- while (subex->fsbno + subex->len <= ex->fsbno) {
- if (list_is_last(&subex->list, &sublist->list))
- goto out;
- subex = list_next_entry(subex, list);
- }
- if (subex->fsbno >= ex->fsbno + ex->len) {
- lp = lp->next;
- continue;
- }
-
- /* trim subex to fit the extent we have */
- sub_fsb = subex->fsbno;
- sub_len = subex->len;
- if (subex->fsbno < ex->fsbno) {
- sub_len -= ex->fsbno - subex->fsbno;
- sub_fsb = ex->fsbno;
- }
- if (sub_len > ex->len)
- sub_len = ex->len;
-
- state = 0;
- if (sub_fsb == ex->fsbno)
- state |= LEFT_ALIGNED;
- if (sub_fsb + sub_len == ex->fsbno + ex->len)
- state |= RIGHT_ALIGNED;
- switch (state) {
- case LEFT_ALIGNED:
- /* Coincides with only the left. */
- ex->fsbno += sub_len;
- ex->len -= sub_len;
- break;
- case RIGHT_ALIGNED:
- /* Coincides with only the right. */
- ex->len -= sub_len;
- lp = lp->next;
- break;
- case LEFT_ALIGNED | RIGHT_ALIGNED:
- /* Total overlap, just delete ex. */
- lp = lp->next;
- list_del(&ex->list);
- kmem_free(ex);
- break;
- case 0:
- /*
- * Deleting from the middle: add the new right extent
- * and then shrink the left extent.
- */
- newex = kmem_alloc(sizeof(struct xfs_repair_extent),
- KM_MAYFAIL);
- if (!newex) {
- error = -ENOMEM;
- goto out;
- }
- INIT_LIST_HEAD(&newex->list);
- newex->fsbno = sub_fsb + sub_len;
- newex->len = ex->fsbno + ex->len - newex->fsbno;
- list_add(&newex->list, &ex->list);
- ex->len = sub_fsb - ex->fsbno;
- lp = lp->next;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
-
-out:
- return error;
-}
-#undef LEFT_ALIGNED
-#undef RIGHT_ALIGNED
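The deleted subtraction helper handled four overlap cases (left-aligned, right-aligned, fully covered, split in the middle). The same interval arithmetic, as a hedged userspace toy with hypothetical names:

#include <stdint.h>
#include <stdio.h>

struct ext { uint64_t start, len; };

/* Subtract [s, s+slen) from @e, writing 0-2 remainders into @out. */
static int subtract(struct ext e, uint64_t s, uint64_t slen, struct ext *out)
{
	uint64_t e_end = e.start + e.len, s_end = s + slen;
	int n = 0;

	if (s_end <= e.start || s >= e_end) {	/* no overlap: keep e whole */
		out[n++] = e;
		return n;
	}
	if (e.start < s)			/* right-aligned or middle */
		out[n++] = (struct ext){ e.start, s - e.start };
	if (s_end < e_end)			/* left-aligned or middle */
		out[n++] = (struct ext){ s_end, e_end - s_end };
	return n;
}

int main(void)
{
	struct ext out[2];
	int n = subtract((struct ext){ 10, 10 }, 12, 3, out);

	for (int i = 0; i < n; i++)	/* prints [10, 12) and [15, 20) */
		printf("[%ju, %ju)\n", (uintmax_t)out[i].start,
				(uintmax_t)(out[i].start + out[i].len));
	return 0;
}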
-
-/*
* Disposal of Blocks from Old per-AG Btrees
*
* Now that we've constructed a new btree to replace the damaged one, we want
* to dispose of the blocks that (we think) the old btree was using.
- * Previously, we used the rmapbt to collect the extents (exlist) with the
+ * Previously, we used the rmapbt to collect the extents (bitmap) with the
* rmap owner corresponding to the tree we rebuilt, collected extents for any
* blocks with the same rmap owner that are owned by another data structure
- * (sublist), and subtracted sublist from exlist. In theory the extents
- * remaining in exlist are the old btree's blocks.
+ * (sublist), and subtracted sublist from bitmap. In theory the extents
+ * remaining in bitmap are the old btree's blocks.
*
* Unfortunately, it's possible that the btree was crosslinked with other
* blocks on disk. The rmap data can tell us if there are multiple owners, so
@@ -598,7 +423,7 @@ out:
* If there are no rmap records at all, we also free the block. If the btree
* being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't
 * supposed to be an rmap record and everything is ok. For other btrees there
- * had to have been an rmap entry for the block to have ended up on @exlist,
+ * had to have been an rmap entry for the block to have ended up on @bitmap,
* so if it's gone now there's something wrong and the fs will shut down.
*
* Note: If there are multiple rmap records with only the same rmap owner as
@@ -611,7 +436,7 @@ out:
* The caller is responsible for locking the AG headers for the entire rebuild
* operation so that nothing else can sneak in and change the AG state while
* we're not looking. We also assume that the caller already invalidated any
- * buffers associated with @exlist.
+ * buffers associated with @bitmap.
*/
/*
@@ -619,15 +444,14 @@ out:
* is not intended for use with file data repairs; we have bunmapi for that.
*/
int
-xfs_repair_invalidate_blocks(
- struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist)
+xrep_invalidate_blocks(
+ struct xfs_scrub *sc,
+ struct xfs_bitmap *bitmap)
{
- struct xfs_repair_extent *rex;
- struct xfs_repair_extent *n;
- struct xfs_buf *bp;
- xfs_fsblock_t fsbno;
- xfs_agblock_t i;
+ struct xfs_bitmap_range *bmr;
+ struct xfs_bitmap_range *n;
+ struct xfs_buf *bp;
+ xfs_fsblock_t fsbno;
/*
* For each block in each extent, see if there's an incore buffer for
@@ -637,18 +461,16 @@ xfs_repair_invalidate_blocks(
* because we never own those; and if we can't TRYLOCK the buffer we
* assume it's owned by someone else.
*/
- for_each_xfs_repair_extent_safe(rex, n, exlist) {
- for (fsbno = rex->fsbno, i = rex->len; i > 0; fsbno++, i--) {
- /* Skip AG headers and post-EOFS blocks */
- if (!xfs_verify_fsbno(sc->mp, fsbno))
- continue;
- bp = xfs_buf_incore(sc->mp->m_ddev_targp,
- XFS_FSB_TO_DADDR(sc->mp, fsbno),
- XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK);
- if (bp) {
- xfs_trans_bjoin(sc->tp, bp);
- xfs_trans_binval(sc->tp, bp);
- }
+ for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
+ /* Skip AG headers and post-EOFS blocks */
+ if (!xfs_verify_fsbno(sc->mp, fsbno))
+ continue;
+ bp = xfs_buf_incore(sc->mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(sc->mp, fsbno),
+ XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK);
+ if (bp) {
+ xfs_trans_bjoin(sc->tp, bp);
+ xfs_trans_binval(sc->tp, bp);
}
}
@@ -657,11 +479,11 @@ xfs_repair_invalidate_blocks(
/* Ensure the freelist is the correct size. */
int
-xfs_repair_fix_freelist(
- struct xfs_scrub_context *sc,
- bool can_shrink)
+xrep_fix_freelist(
+ struct xfs_scrub *sc,
+ bool can_shrink)
{
- struct xfs_alloc_arg args = {0};
+ struct xfs_alloc_arg args = {0};
args.mp = sc->mp;
args.tp = sc->tp;
@@ -677,15 +499,15 @@ xfs_repair_fix_freelist(
* Put a block back on the AGFL.
*/
STATIC int
-xfs_repair_put_freelist(
- struct xfs_scrub_context *sc,
- xfs_agblock_t agbno)
+xrep_put_freelist(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno)
{
- struct xfs_owner_info oinfo;
- int error;
+ struct xfs_owner_info oinfo;
+ int error;
/* Make sure there's space on the freelist. */
- error = xfs_repair_fix_freelist(sc, true);
+ error = xrep_fix_freelist(sc, true);
if (error)
return error;
@@ -711,20 +533,20 @@ xfs_repair_put_freelist(
return 0;
}
-/* Dispose of a single metadata block. */
+/* Dispose of a single block. */
STATIC int
-xfs_repair_dispose_btree_block(
- struct xfs_scrub_context *sc,
- xfs_fsblock_t fsbno,
- struct xfs_owner_info *oinfo,
- enum xfs_ag_resv_type resv)
+xrep_reap_block(
+ struct xfs_scrub *sc,
+ xfs_fsblock_t fsbno,
+ struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type resv)
{
- struct xfs_btree_cur *cur;
- struct xfs_buf *agf_bp = NULL;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- bool has_other_rmap;
- int error;
+ struct xfs_btree_cur *cur;
+ struct xfs_buf *agf_bp = NULL;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ bool has_other_rmap;
+ int error;
agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
@@ -747,9 +569,9 @@ xfs_repair_dispose_btree_block(
/* Can we find any other rmappings? */
error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
+ xfs_btree_del_cursor(cur, error);
if (error)
- goto out_cur;
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ goto out_free;
/*
* If there are other rmappings, this block is cross linked and must
@@ -767,7 +589,7 @@ xfs_repair_dispose_btree_block(
if (has_other_rmap)
error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
else if (resv == XFS_AG_RESV_AGFL)
- error = xfs_repair_put_freelist(sc, agbno);
+ error = xrep_put_freelist(sc, agbno);
else
error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv);
if (agf_bp != sc->sa.agf_bp)
@@ -777,50 +599,43 @@ xfs_repair_dispose_btree_block(
if (sc->ip)
return xfs_trans_roll_inode(&sc->tp, sc->ip);
- return xfs_repair_roll_ag_trans(sc);
+ return xrep_roll_ag_trans(sc);
-out_cur:
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+out_free:
if (agf_bp != sc->sa.agf_bp)
xfs_trans_brelse(sc->tp, agf_bp);
return error;
}
-/* Dispose of btree blocks from an old per-AG btree. */
+/* Dispose of every block of every extent in the bitmap. */
int
-xfs_repair_reap_btree_extents(
- struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist,
- struct xfs_owner_info *oinfo,
- enum xfs_ag_resv_type type)
+xrep_reap_extents(
+ struct xfs_scrub *sc,
+ struct xfs_bitmap *bitmap,
+ struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type type)
{
- struct xfs_repair_extent *rex;
- struct xfs_repair_extent *n;
- int error = 0;
+ struct xfs_bitmap_range *bmr;
+ struct xfs_bitmap_range *n;
+ xfs_fsblock_t fsbno;
+ int error = 0;
ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
- /* Dispose of every block from the old btree. */
- for_each_xfs_repair_extent_safe(rex, n, exlist) {
+ for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) {
ASSERT(sc->ip != NULL ||
- XFS_FSB_TO_AGNO(sc->mp, rex->fsbno) == sc->sa.agno);
-
- trace_xfs_repair_dispose_btree_extent(sc->mp,
- XFS_FSB_TO_AGNO(sc->mp, rex->fsbno),
- XFS_FSB_TO_AGBNO(sc->mp, rex->fsbno), rex->len);
+ XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
+ trace_xrep_dispose_btree_extent(sc->mp,
+ XFS_FSB_TO_AGNO(sc->mp, fsbno),
+ XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);
- for (; rex->len > 0; rex->len--, rex->fsbno++) {
- error = xfs_repair_dispose_btree_block(sc, rex->fsbno,
- oinfo, type);
- if (error)
- goto out;
- }
- list_del(&rex->list);
- kmem_free(rex);
+ error = xrep_reap_block(sc, fsbno, oinfo, type);
+ if (error)
+ goto out;
}
out:
- xfs_repair_cancel_btree_extents(sc, exlist);
+ xfs_bitmap_destroy(bitmap);
return error;
}
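A hedged caller-side sketch of the new flow: collect the doomed extents into a bitmap, invalidate their buffers, then reap them in one call. Note that xrep_reap_extents destroys the bitmap itself on the way out. xfs_bitmap_init and xfs_bitmap_set are assumed to come from the new scrub/bitmap.h (only the destructor and iterator are visible in this hunk), and reap_old_btree is a hypothetical name:

static int
reap_old_btree(
	struct xfs_scrub	*sc,
	xfs_fsblock_t		fsbno,
	xfs_extlen_t		len,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_bitmap	doomed;
	int			error;

	xfs_bitmap_init(&doomed);			/* assumed helper */
	error = xfs_bitmap_set(&doomed, fsbno, len);	/* assumed helper */
	if (error)
		goto out_destroy;

	error = xrep_invalidate_blocks(sc, &doomed);	/* drop stale bufs */
	if (error)
		goto out_destroy;

	/* disposes of every block, then destroys the bitmap */
	return xrep_reap_extents(sc, &doomed, oinfo, XFS_AG_RESV_NONE);

out_destroy:
	xfs_bitmap_destroy(&doomed);
	return error;
}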
@@ -832,12 +647,12 @@ out:
* btree roots. This is not guaranteed to work if the AG is heavily damaged
* or the rmap data are corrupt.
*
- * Callers of xfs_repair_find_ag_btree_roots must lock the AGF and AGFL
+ * Callers of xrep_find_ag_btree_roots must lock the AGF and AGFL
* buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the
* AGI is being rebuilt. It must maintain these locks until it's safe for
* other threads to change the btrees' shapes. The caller provides
* information about the btrees to look for by passing in an array of
- * xfs_repair_find_ag_btree with the (rmap owner, buf_ops, magic) fields set.
+ * xrep_find_ag_btree with the (rmap owner, buf_ops, magic) fields set.
* The (root, height) fields will be set on return if anything is found. The
* last element of the array should have a NULL buf_ops to mark the end of the
* array.
@@ -851,30 +666,30 @@ out:
* should be the roots.
*/
-struct xfs_repair_findroot {
- struct xfs_scrub_context *sc;
+struct xrep_findroot {
+ struct xfs_scrub *sc;
struct xfs_buf *agfl_bp;
struct xfs_agf *agf;
- struct xfs_repair_find_ag_btree *btree_info;
+ struct xrep_find_ag_btree *btree_info;
};
/* See if our block is in the AGFL. */
STATIC int
-xfs_repair_findroot_agfl_walk(
- struct xfs_mount *mp,
- xfs_agblock_t bno,
- void *priv)
+xrep_findroot_agfl_walk(
+ struct xfs_mount *mp,
+ xfs_agblock_t bno,
+ void *priv)
{
- xfs_agblock_t *agbno = priv;
+ xfs_agblock_t *agbno = priv;
return (*agbno == bno) ? XFS_BTREE_QUERY_RANGE_ABORT : 0;
}
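This callback leans on a common XFS iteration idiom: returning a sentinel "error" (XFS_BTREE_QUERY_RANGE_ABORT) stops the walk early, and the caller maps the sentinel back to a boolean instead of treating it as failure, as the later hunk in xrep_findroot_block does. A hedged distillation (find_bno and agfl_contains are hypothetical names; a real implementation would also propagate genuine errors rather than folding them into false):

/* Walk callback: stop as soon as we see the target block. */
static int
find_bno(
	struct xfs_mount	*mp,
	xfs_agblock_t		bno,
	void			*priv)
{
	xfs_agblock_t		*target = priv;

	return *target == bno ? XFS_BTREE_QUERY_RANGE_ABORT : 0;
}

/* The sentinel means "found it", not failure. */
static bool
agfl_contains(
	struct xfs_mount	*mp,
	struct xfs_agf		*agf,
	struct xfs_buf		*agfl_bp,
	xfs_agblock_t		agbno)
{
	int			error;

	error = xfs_agfl_walk(mp, agf, agfl_bp, find_bno, &agbno);
	return error == XFS_BTREE_QUERY_RANGE_ABORT;
}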
/* Does this block match the btree information passed in? */
STATIC int
-xfs_repair_findroot_block(
- struct xfs_repair_findroot *ri,
- struct xfs_repair_find_ag_btree *fab,
+xrep_findroot_block(
+ struct xrep_findroot *ri,
+ struct xrep_find_ag_btree *fab,
uint64_t owner,
xfs_agblock_t agbno,
bool *found_it)
@@ -895,7 +710,7 @@ xfs_repair_findroot_block(
*/
if (owner == XFS_RMAP_OWN_AG) {
error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
- xfs_repair_findroot_agfl_walk, &agbno);
+ xrep_findroot_agfl_walk, &agbno);
if (error == XFS_BTREE_QUERY_RANGE_ABORT)
return 0;
if (error)
@@ -933,7 +748,7 @@ xfs_repair_findroot_block(
fab->height = xfs_btree_get_level(btblock) + 1;
*found_it = true;
- trace_xfs_repair_findroot_block(mp, ri->sc->sa.agno, agbno,
+ trace_xrep_findroot_block(mp, ri->sc->sa.agno, agbno,
be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
xfs_trans_brelse(ri->sc->tp, bp);
@@ -945,13 +760,13 @@ out:
* looking for?
*/
STATIC int
-xfs_repair_findroot_rmap(
+xrep_findroot_rmap(
struct xfs_btree_cur *cur,
struct xfs_rmap_irec *rec,
void *priv)
{
- struct xfs_repair_findroot *ri = priv;
- struct xfs_repair_find_ag_btree *fab;
+ struct xrep_findroot *ri = priv;
+ struct xrep_find_ag_btree *fab;
xfs_agblock_t b;
bool found_it;
int error = 0;
@@ -966,7 +781,7 @@ xfs_repair_findroot_rmap(
for (fab = ri->btree_info; fab->buf_ops; fab++) {
if (rec->rm_owner != fab->rmap_owner)
continue;
- error = xfs_repair_findroot_block(ri, fab,
+ error = xrep_findroot_block(ri, fab,
rec->rm_owner, rec->rm_startblock + b,
&found_it);
if (error)
@@ -981,15 +796,15 @@ xfs_repair_findroot_rmap(
/* Find the roots of the per-AG btrees described in btree_info. */
int
-xfs_repair_find_ag_btree_roots(
- struct xfs_scrub_context *sc,
+xrep_find_ag_btree_roots(
+ struct xfs_scrub *sc,
struct xfs_buf *agf_bp,
- struct xfs_repair_find_ag_btree *btree_info,
+ struct xrep_find_ag_btree *btree_info,
struct xfs_buf *agfl_bp)
{
struct xfs_mount *mp = sc->mp;
- struct xfs_repair_findroot ri;
- struct xfs_repair_find_ag_btree *fab;
+ struct xrep_findroot ri;
+ struct xrep_find_ag_btree *fab;
struct xfs_btree_cur *cur;
int error;
@@ -1008,19 +823,19 @@ xfs_repair_find_ag_btree_roots(
}
cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
- error = xfs_rmap_query_all(cur, xfs_repair_findroot_rmap, &ri);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
+ xfs_btree_del_cursor(cur, error);
return error;
}
/* Force a quotacheck the next time we mount. */
void
-xfs_repair_force_quotacheck(
- struct xfs_scrub_context *sc,
- uint dqtype)
+xrep_force_quotacheck(
+ struct xfs_scrub *sc,
+ uint dqtype)
{
- uint flag;
+ uint flag;
flag = xfs_quota_chkd_flag(dqtype);
if (!(flag & sc->mp->m_qflags))
@@ -1044,10 +859,10 @@ xfs_repair_force_quotacheck(
* repair corruptions in the quota metadata.
*/
int
-xfs_repair_ino_dqattach(
- struct xfs_scrub_context *sc)
+xrep_ino_dqattach(
+ struct xfs_scrub *sc)
{
- int error;
+ int error;
error = xfs_qm_dqattach_locked(sc->ip, false);
switch (error) {
@@ -1058,11 +873,11 @@ xfs_repair_ino_dqattach(
"inode %llu repair encountered quota error %d, quotacheck forced.",
(unsigned long long)sc->ip->i_ino, error);
if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
- xfs_repair_force_quotacheck(sc, XFS_DQ_USER);
+ xrep_force_quotacheck(sc, XFS_DQ_USER);
if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
- xfs_repair_force_quotacheck(sc, XFS_DQ_GROUP);
+ xrep_force_quotacheck(sc, XFS_DQ_GROUP);
if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
- xfs_repair_force_quotacheck(sc, XFS_DQ_PROJ);
+ xrep_force_quotacheck(sc, XFS_DQ_PROJ);
/* fall through */
case -ESRCH:
error = 0;
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index ef47826b6725..9de321eee4ab 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -6,7 +6,7 @@
#ifndef __XFS_SCRUB_REPAIR_H__
#define __XFS_SCRUB_REPAIR_H__
-static inline int xfs_repair_notsupported(struct xfs_scrub_context *sc)
+static inline int xrep_notsupported(struct xfs_scrub *sc)
{
return -EOPNOTSUPP;
}
@@ -15,55 +15,26 @@ static inline int xfs_repair_notsupported(struct xfs_scrub_context *sc)
/* Repair helpers */
-int xfs_repair_attempt(struct xfs_inode *ip, struct xfs_scrub_context *sc,
- bool *fixed);
-void xfs_repair_failure(struct xfs_mount *mp);
-int xfs_repair_roll_ag_trans(struct xfs_scrub_context *sc);
-bool xfs_repair_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks,
+int xrep_attempt(struct xfs_inode *ip, struct xfs_scrub *sc, bool *fixed);
+void xrep_failure(struct xfs_mount *mp);
+int xrep_roll_ag_trans(struct xfs_scrub *sc);
+bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks,
enum xfs_ag_resv_type type);
-xfs_extlen_t xfs_repair_calc_ag_resblks(struct xfs_scrub_context *sc);
-int xfs_repair_alloc_ag_block(struct xfs_scrub_context *sc,
- struct xfs_owner_info *oinfo, xfs_fsblock_t *fsbno,
- enum xfs_ag_resv_type resv);
-int xfs_repair_init_btblock(struct xfs_scrub_context *sc, xfs_fsblock_t fsb,
+xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub *sc);
+int xrep_alloc_ag_block(struct xfs_scrub *sc, struct xfs_owner_info *oinfo,
+ xfs_fsblock_t *fsbno, enum xfs_ag_resv_type resv);
+int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb,
struct xfs_buf **bpp, xfs_btnum_t btnum,
const struct xfs_buf_ops *ops);
-struct xfs_repair_extent {
- struct list_head list;
- xfs_fsblock_t fsbno;
- xfs_extlen_t len;
-};
-
-struct xfs_repair_extent_list {
- struct list_head list;
-};
-
-static inline void
-xfs_repair_init_extent_list(
- struct xfs_repair_extent_list *exlist)
-{
- INIT_LIST_HEAD(&exlist->list);
-}
+struct xfs_bitmap;
-#define for_each_xfs_repair_extent_safe(rbe, n, exlist) \
- list_for_each_entry_safe((rbe), (n), &(exlist)->list, list)
-int xfs_repair_collect_btree_extent(struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *btlist, xfs_fsblock_t fsbno,
- xfs_extlen_t len);
-void xfs_repair_cancel_btree_extents(struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *btlist);
-int xfs_repair_subtract_extents(struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist,
- struct xfs_repair_extent_list *sublist);
-int xfs_repair_fix_freelist(struct xfs_scrub_context *sc, bool can_shrink);
-int xfs_repair_invalidate_blocks(struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *btlist);
-int xfs_repair_reap_btree_extents(struct xfs_scrub_context *sc,
- struct xfs_repair_extent_list *exlist,
+int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
+int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xfs_bitmap *btlist);
+int xrep_reap_extents(struct xfs_scrub *sc, struct xfs_bitmap *exlist,
struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
-struct xfs_repair_find_ag_btree {
+struct xrep_find_ag_btree {
/* in: rmap owner of the btree we're looking for */
uint64_t rmap_owner;
@@ -78,40 +49,44 @@ struct xfs_repair_find_ag_btree {
unsigned int height;
};
-int xfs_repair_find_ag_btree_roots(struct xfs_scrub_context *sc,
- struct xfs_buf *agf_bp,
- struct xfs_repair_find_ag_btree *btree_info,
- struct xfs_buf *agfl_bp);
-void xfs_repair_force_quotacheck(struct xfs_scrub_context *sc, uint dqtype);
-int xfs_repair_ino_dqattach(struct xfs_scrub_context *sc);
+int xrep_find_ag_btree_roots(struct xfs_scrub *sc, struct xfs_buf *agf_bp,
+ struct xrep_find_ag_btree *btree_info, struct xfs_buf *agfl_bp);
+void xrep_force_quotacheck(struct xfs_scrub *sc, uint dqtype);
+int xrep_ino_dqattach(struct xfs_scrub *sc);
/* Metadata repairers */
-int xfs_repair_probe(struct xfs_scrub_context *sc);
-int xfs_repair_superblock(struct xfs_scrub_context *sc);
+int xrep_probe(struct xfs_scrub *sc);
+int xrep_superblock(struct xfs_scrub *sc);
+int xrep_agf(struct xfs_scrub *sc);
+int xrep_agfl(struct xfs_scrub *sc);
+int xrep_agi(struct xfs_scrub *sc);
#else
-static inline int xfs_repair_attempt(
- struct xfs_inode *ip,
- struct xfs_scrub_context *sc,
- bool *fixed)
+static inline int xrep_attempt(
+ struct xfs_inode *ip,
+ struct xfs_scrub *sc,
+ bool *fixed)
{
return -EOPNOTSUPP;
}
-static inline void xfs_repair_failure(struct xfs_mount *mp) {}
+static inline void xrep_failure(struct xfs_mount *mp) {}
static inline xfs_extlen_t
-xfs_repair_calc_ag_resblks(
- struct xfs_scrub_context *sc)
+xrep_calc_ag_resblks(
+ struct xfs_scrub *sc)
{
ASSERT(!(sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR));
return 0;
}
-#define xfs_repair_probe xfs_repair_notsupported
-#define xfs_repair_superblock xfs_repair_notsupported
+#define xrep_probe xrep_notsupported
+#define xrep_superblock xrep_notsupported
+#define xrep_agf xrep_notsupported
+#define xrep_agfl xrep_notsupported
+#define xrep_agi xrep_notsupported
#endif /* CONFIG_XFS_ONLINE_REPAIR */
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
index c6d763236ba7..5e293c129813 100644
--- a/fs/xfs/scrub/rmap.c
+++ b/fs/xfs/scrub/rmap.c
@@ -29,30 +29,30 @@
* Set us up to scrub reverse mapping btrees.
*/
int
-xfs_scrub_setup_ag_rmapbt(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_ag_rmapbt(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- return xfs_scrub_setup_ag_btree(sc, ip, false);
+ return xchk_setup_ag_btree(sc, ip, false);
}
/* Reverse-mapping scrubber. */
/* Cross-reference a rmap against the refcount btree. */
STATIC void
-xfs_scrub_rmapbt_xref_refc(
- struct xfs_scrub_context *sc,
- struct xfs_rmap_irec *irec)
+xchk_rmapbt_xref_refc(
+ struct xfs_scrub *sc,
+ struct xfs_rmap_irec *irec)
{
- xfs_agblock_t fbno;
- xfs_extlen_t flen;
- bool non_inode;
- bool is_bmbt;
- bool is_attr;
- bool is_unwritten;
- int error;
-
- if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ bool non_inode;
+ bool is_bmbt;
+ bool is_attr;
+ bool is_unwritten;
+ int error;
+
+ if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
return;
non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
@@ -63,58 +63,58 @@ xfs_scrub_rmapbt_xref_refc(
/* If this is shared, must be a data fork extent. */
error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
irec->rm_blockcount, &fbno, &flen, false);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
/* Cross-reference with the other btrees. */
STATIC void
-xfs_scrub_rmapbt_xref(
- struct xfs_scrub_context *sc,
- struct xfs_rmap_irec *irec)
+xchk_rmapbt_xref(
+ struct xfs_scrub *sc,
+ struct xfs_rmap_irec *irec)
{
- xfs_agblock_t agbno = irec->rm_startblock;
- xfs_extlen_t len = irec->rm_blockcount;
+ xfs_agblock_t agbno = irec->rm_startblock;
+ xfs_extlen_t len = irec->rm_blockcount;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
- xfs_scrub_xref_is_used_space(sc, agbno, len);
+ xchk_xref_is_used_space(sc, agbno, len);
if (irec->rm_owner == XFS_RMAP_OWN_INODES)
- xfs_scrub_xref_is_inode_chunk(sc, agbno, len);
+ xchk_xref_is_inode_chunk(sc, agbno, len);
else
- xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
+ xchk_xref_is_not_inode_chunk(sc, agbno, len);
if (irec->rm_owner == XFS_RMAP_OWN_COW)
- xfs_scrub_xref_is_cow_staging(sc, irec->rm_startblock,
+ xchk_xref_is_cow_staging(sc, irec->rm_startblock,
irec->rm_blockcount);
else
- xfs_scrub_rmapbt_xref_refc(sc, irec);
+ xchk_rmapbt_xref_refc(sc, irec);
}
/* Scrub an rmapbt record. */
STATIC int
-xfs_scrub_rmapbt_rec(
- struct xfs_scrub_btree *bs,
- union xfs_btree_rec *rec)
+xchk_rmapbt_rec(
+ struct xchk_btree *bs,
+ union xfs_btree_rec *rec)
{
- struct xfs_mount *mp = bs->cur->bc_mp;
- struct xfs_rmap_irec irec;
- xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
- bool non_inode;
- bool is_unwritten;
- bool is_bmbt;
- bool is_attr;
- int error;
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ struct xfs_rmap_irec irec;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ bool non_inode;
+ bool is_unwritten;
+ bool is_bmbt;
+ bool is_attr;
+ int error;
error = xfs_rmap_btrec_to_irec(rec, &irec);
- if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, 0, &error))
+ if (!xchk_btree_process_error(bs->sc, bs->cur, 0, &error))
goto out;
/* Check extent. */
if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (irec.rm_owner == XFS_RMAP_OWN_FS) {
/*
@@ -124,7 +124,7 @@ xfs_scrub_rmapbt_rec(
*/
if (irec.rm_startblock != 0 ||
irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
} else {
/*
* Otherwise we must point somewhere past the static metadata
@@ -133,7 +133,7 @@ xfs_scrub_rmapbt_rec(
if (!xfs_verify_agbno(mp, agno, irec.rm_startblock) ||
!xfs_verify_agbno(mp, agno, irec.rm_startblock +
irec.rm_blockcount - 1))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
}
/* Check flags. */
@@ -143,105 +143,105 @@ xfs_scrub_rmapbt_rec(
is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN;
if (is_bmbt && irec.rm_offset != 0)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (non_inode && irec.rm_offset != 0)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (is_unwritten && (is_bmbt || non_inode || is_attr))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (non_inode && (is_bmbt || is_unwritten || is_attr))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
if (!non_inode) {
if (!xfs_verify_ino(mp, irec.rm_owner))
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
} else {
/* Non-inode owner within the magic values? */
if (irec.rm_owner <= XFS_RMAP_OWN_MIN ||
irec.rm_owner > XFS_RMAP_OWN_FS)
- xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
}
- xfs_scrub_rmapbt_xref(bs->sc, &irec);
+ xchk_rmapbt_xref(bs->sc, &irec);
out:
return error;
}
/* Scrub the rmap btree for some AG. */
int
-xfs_scrub_rmapbt(
- struct xfs_scrub_context *sc)
+xchk_rmapbt(
+ struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
+ struct xfs_owner_info oinfo;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
- return xfs_scrub_btree(sc, sc->sa.rmap_cur, xfs_scrub_rmapbt_rec,
+ return xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
&oinfo, NULL);
}
/* xref check that the extent is owned by a given owner */
static inline void
-xfs_scrub_xref_check_owner(
- struct xfs_scrub_context *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo,
- bool should_have_rmap)
+xchk_xref_check_owner(
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo,
+ bool should_have_rmap)
{
- bool has_rmap;
- int error;
+ bool has_rmap;
+ int error;
- if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
&has_rmap);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (has_rmap != should_have_rmap)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
/* xref check that the extent is owned by a given owner */
void
-xfs_scrub_xref_is_owned_by(
- struct xfs_scrub_context *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo)
+xchk_xref_is_owned_by(
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
{
- xfs_scrub_xref_check_owner(sc, bno, len, oinfo, true);
+ xchk_xref_check_owner(sc, bno, len, oinfo, true);
}
/* xref check that the extent is not owned by a given owner */
void
-xfs_scrub_xref_is_not_owned_by(
- struct xfs_scrub_context *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo)
+xchk_xref_is_not_owned_by(
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
{
- xfs_scrub_xref_check_owner(sc, bno, len, oinfo, false);
+ xchk_xref_check_owner(sc, bno, len, oinfo, false);
}
/* xref check that the extent has no reverse mapping at all */
void
-xfs_scrub_xref_has_no_owner(
- struct xfs_scrub_context *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len)
+xchk_xref_has_no_owner(
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len)
{
- bool has_rmap;
- int error;
+ bool has_rmap;
+ int error;
- if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
+ if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
- if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (has_rmap)
- xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
index 1f86e02a07ca..665d4bbb17cc 100644
--- a/fs/xfs/scrub/rtbitmap.c
+++ b/fs/xfs/scrub/rtbitmap.c
@@ -25,13 +25,13 @@
/* Set us up with the realtime metadata locked. */
int
-xfs_scrub_setup_rt(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_rt(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
- int error;
+ int error;
- error = xfs_scrub_setup_fs(sc, ip);
+ error = xchk_setup_fs(sc, ip);
if (error)
return error;
@@ -46,14 +46,14 @@ xfs_scrub_setup_rt(
/* Scrub a free extent record from the realtime bitmap. */
STATIC int
-xfs_scrub_rtbitmap_rec(
- struct xfs_trans *tp,
- struct xfs_rtalloc_rec *rec,
- void *priv)
+xchk_rtbitmap_rec(
+ struct xfs_trans *tp,
+ struct xfs_rtalloc_rec *rec,
+ void *priv)
{
- struct xfs_scrub_context *sc = priv;
- xfs_rtblock_t startblock;
- xfs_rtblock_t blockcount;
+ struct xfs_scrub *sc = priv;
+ xfs_rtblock_t startblock;
+ xfs_rtblock_t blockcount;
startblock = rec->ar_startext * tp->t_mountp->m_sb.sb_rextsize;
blockcount = rec->ar_extcount * tp->t_mountp->m_sb.sb_rextsize;
@@ -61,24 +61,24 @@ xfs_scrub_rtbitmap_rec(
if (startblock + blockcount <= startblock ||
!xfs_verify_rtbno(sc->mp, startblock) ||
!xfs_verify_rtbno(sc->mp, startblock + blockcount - 1))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
return 0;
}
/* Scrub the realtime bitmap. */
int
-xfs_scrub_rtbitmap(
- struct xfs_scrub_context *sc)
+xchk_rtbitmap(
+ struct xfs_scrub *sc)
{
- int error;
+ int error;
/* Invoke the fork scrubber. */
- error = xfs_scrub_metadata_inode_forks(sc);
+ error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error;
- error = xfs_rtalloc_query_all(sc->tp, xfs_scrub_rtbitmap_rec, sc);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ error = xfs_rtalloc_query_all(sc->tp, xchk_rtbitmap_rec, sc);
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
out:
@@ -87,13 +87,13 @@ out:
/* Scrub the realtime summary. */
int
-xfs_scrub_rtsummary(
- struct xfs_scrub_context *sc)
+xchk_rtsummary(
+ struct xfs_scrub *sc)
{
- struct xfs_inode *rsumip = sc->mp->m_rsumip;
- struct xfs_inode *old_ip = sc->ip;
- uint old_ilock_flags = sc->ilock_flags;
- int error = 0;
+ struct xfs_inode *rsumip = sc->mp->m_rsumip;
+ struct xfs_inode *old_ip = sc->ip;
+ uint old_ilock_flags = sc->ilock_flags;
+ int error = 0;
/*
 * We ILOCK'd the rt bitmap ip in the setup routine; now lock the
@@ -107,12 +107,12 @@ xfs_scrub_rtsummary(
xfs_ilock(sc->ip, sc->ilock_flags);
/* Invoke the fork scrubber. */
- error = xfs_scrub_metadata_inode_forks(sc);
+ error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
goto out;
/* XXX: implement this some day */
- xfs_scrub_set_incomplete(sc);
+ xchk_set_incomplete(sc);
out:
/* Switch back to the rtbitmap inode and lock flags. */
xfs_iunlock(sc->ip, sc->ilock_flags);
@@ -124,18 +124,18 @@ out:
/* xref check that the extent is not free in the rtbitmap */
void
-xfs_scrub_xref_is_used_rt_space(
- struct xfs_scrub_context *sc,
- xfs_rtblock_t fsbno,
- xfs_extlen_t len)
+xchk_xref_is_used_rt_space(
+ struct xfs_scrub *sc,
+ xfs_rtblock_t fsbno,
+ xfs_extlen_t len)
{
- xfs_rtblock_t startext;
- xfs_rtblock_t endext;
- xfs_rtblock_t extcount;
- bool is_free;
- int error;
+ xfs_rtblock_t startext;
+ xfs_rtblock_t endext;
+ xfs_rtblock_t extcount;
+ bool is_free;
+ int error;
- if (xfs_scrub_skip_xref(sc->sm))
+ if (xchk_skip_xref(sc->sm))
return;
startext = fsbno;
@@ -147,10 +147,10 @@ xfs_scrub_xref_is_used_rt_space(
xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext, extcount,
&is_free);
- if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ if (!xchk_should_check_xref(sc, &error, NULL))
goto out_unlock;
if (is_free)
- xfs_scrub_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino);
+ xchk_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino);
out_unlock:
xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
}
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 58ae76b3a421..4bfae1e61d30 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -131,6 +131,12 @@
* optimize the structure so that the rebuild knows what to do. The
* second check evaluates the completeness of the repair; that is what
* is reported to userspace.
+ *
+ * A quick note on symbol prefixes:
+ * - "xfs_" are general XFS symbols.
+ * - "xchk_" are symbols related to metadata checking.
+ * - "xrep_" are symbols related to metadata repair.
+ * - "xfs_scrub_" are symbols that tie online fsck to the rest of XFS.
*/
/*
@@ -144,12 +150,12 @@
* supported by the running kernel.
*/
static int
-xfs_scrub_probe(
- struct xfs_scrub_context *sc)
+xchk_probe(
+ struct xfs_scrub *sc)
{
- int error = 0;
+ int error = 0;
- if (xfs_scrub_should_terminate(sc, &error))
+ if (xchk_should_terminate(sc, &error))
return error;
return 0;
@@ -159,12 +165,12 @@ xfs_scrub_probe(
/* Free all the resources and finish the transactions. */
STATIC int
-xfs_scrub_teardown(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip_in,
- int error)
+xchk_teardown(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip_in,
+ int error)
{
- xfs_scrub_ag_free(sc, &sc->sa);
+ xchk_ag_free(sc, &sc->sa);
if (sc->tp) {
if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
error = xfs_trans_commit(sc->tp);
@@ -177,7 +183,7 @@ xfs_scrub_teardown(
xfs_iunlock(sc->ip, sc->ilock_flags);
if (sc->ip != ip_in &&
!xfs_internal_inum(sc->mp, sc->ip->i_ino))
- iput(VFS_I(sc->ip));
+ xfs_irele(sc->ip);
sc->ip = NULL;
}
if (sc->has_quotaofflock)
@@ -191,165 +197,165 @@ xfs_scrub_teardown(
/* Scrubbing dispatch. */
-static const struct xfs_scrub_meta_ops meta_scrub_ops[] = {
+static const struct xchk_meta_ops meta_scrub_ops[] = {
[XFS_SCRUB_TYPE_PROBE] = { /* ioctl presence test */
.type = ST_NONE,
- .setup = xfs_scrub_setup_fs,
- .scrub = xfs_scrub_probe,
- .repair = xfs_repair_probe,
+ .setup = xchk_setup_fs,
+ .scrub = xchk_probe,
+ .repair = xrep_probe,
},
[XFS_SCRUB_TYPE_SB] = { /* superblock */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_fs,
- .scrub = xfs_scrub_superblock,
- .repair = xfs_repair_superblock,
+ .setup = xchk_setup_fs,
+ .scrub = xchk_superblock,
+ .repair = xrep_superblock,
},
[XFS_SCRUB_TYPE_AGF] = { /* agf */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_fs,
- .scrub = xfs_scrub_agf,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_fs,
+ .scrub = xchk_agf,
+ .repair = xrep_agf,
},
[XFS_SCRUB_TYPE_AGFL]= { /* agfl */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_fs,
- .scrub = xfs_scrub_agfl,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_fs,
+ .scrub = xchk_agfl,
+ .repair = xrep_agfl,
},
[XFS_SCRUB_TYPE_AGI] = { /* agi */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_fs,
- .scrub = xfs_scrub_agi,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_fs,
+ .scrub = xchk_agi,
+ .repair = xrep_agi,
},
[XFS_SCRUB_TYPE_BNOBT] = { /* bnobt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_allocbt,
- .scrub = xfs_scrub_bnobt,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_ag_allocbt,
+ .scrub = xchk_bnobt,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_CNTBT] = { /* cntbt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_allocbt,
- .scrub = xfs_scrub_cntbt,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_ag_allocbt,
+ .scrub = xchk_cntbt,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_INOBT] = { /* inobt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_iallocbt,
- .scrub = xfs_scrub_inobt,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_ag_iallocbt,
+ .scrub = xchk_inobt,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_FINOBT] = { /* finobt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_iallocbt,
- .scrub = xfs_scrub_finobt,
+ .setup = xchk_setup_ag_iallocbt,
+ .scrub = xchk_finobt,
.has = xfs_sb_version_hasfinobt,
- .repair = xfs_repair_notsupported,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_RMAPBT] = { /* rmapbt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_rmapbt,
- .scrub = xfs_scrub_rmapbt,
+ .setup = xchk_setup_ag_rmapbt,
+ .scrub = xchk_rmapbt,
.has = xfs_sb_version_hasrmapbt,
- .repair = xfs_repair_notsupported,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_REFCNTBT] = { /* refcountbt */
.type = ST_PERAG,
- .setup = xfs_scrub_setup_ag_refcountbt,
- .scrub = xfs_scrub_refcountbt,
+ .setup = xchk_setup_ag_refcountbt,
+ .scrub = xchk_refcountbt,
.has = xfs_sb_version_hasreflink,
- .repair = xfs_repair_notsupported,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_INODE] = { /* inode record */
.type = ST_INODE,
- .setup = xfs_scrub_setup_inode,
- .scrub = xfs_scrub_inode,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_inode,
+ .scrub = xchk_inode,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_BMBTD] = { /* inode data fork */
.type = ST_INODE,
- .setup = xfs_scrub_setup_inode_bmap,
- .scrub = xfs_scrub_bmap_data,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_inode_bmap,
+ .scrub = xchk_bmap_data,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_BMBTA] = { /* inode attr fork */
.type = ST_INODE,
- .setup = xfs_scrub_setup_inode_bmap,
- .scrub = xfs_scrub_bmap_attr,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_inode_bmap,
+ .scrub = xchk_bmap_attr,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_BMBTC] = { /* inode CoW fork */
.type = ST_INODE,
- .setup = xfs_scrub_setup_inode_bmap,
- .scrub = xfs_scrub_bmap_cow,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_inode_bmap,
+ .scrub = xchk_bmap_cow,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_DIR] = { /* directory */
.type = ST_INODE,
- .setup = xfs_scrub_setup_directory,
- .scrub = xfs_scrub_directory,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_directory,
+ .scrub = xchk_directory,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_XATTR] = { /* extended attributes */
.type = ST_INODE,
- .setup = xfs_scrub_setup_xattr,
- .scrub = xfs_scrub_xattr,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_xattr,
+ .scrub = xchk_xattr,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_SYMLINK] = { /* symbolic link */
.type = ST_INODE,
- .setup = xfs_scrub_setup_symlink,
- .scrub = xfs_scrub_symlink,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_symlink,
+ .scrub = xchk_symlink,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_PARENT] = { /* parent pointers */
.type = ST_INODE,
- .setup = xfs_scrub_setup_parent,
- .scrub = xfs_scrub_parent,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_parent,
+ .scrub = xchk_parent,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_RTBITMAP] = { /* realtime bitmap */
.type = ST_FS,
- .setup = xfs_scrub_setup_rt,
- .scrub = xfs_scrub_rtbitmap,
+ .setup = xchk_setup_rt,
+ .scrub = xchk_rtbitmap,
.has = xfs_sb_version_hasrealtime,
- .repair = xfs_repair_notsupported,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_RTSUM] = { /* realtime summary */
.type = ST_FS,
- .setup = xfs_scrub_setup_rt,
- .scrub = xfs_scrub_rtsummary,
+ .setup = xchk_setup_rt,
+ .scrub = xchk_rtsummary,
.has = xfs_sb_version_hasrealtime,
- .repair = xfs_repair_notsupported,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_UQUOTA] = { /* user quota */
.type = ST_FS,
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_quota,
+ .scrub = xchk_quota,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_GQUOTA] = { /* group quota */
.type = ST_FS,
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_quota,
+ .scrub = xchk_quota,
+ .repair = xrep_notsupported,
},
[XFS_SCRUB_TYPE_PQUOTA] = { /* project quota */
.type = ST_FS,
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
- .repair = xfs_repair_notsupported,
+ .setup = xchk_setup_quota,
+ .scrub = xchk_quota,
+ .repair = xrep_notsupported,
},
};
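For readers new to this idiom: meta_scrub_ops is a designated-initializer dispatch table, indexed by scrub type, through which the ioctl path reaches the matching setup/scrub/repair callbacks. A minimal userspace sketch of the same pattern follows; every name in it is a made-up stand-in, not a kernel symbol.

	#include <stdio.h>

	struct chk_ops {
		int (*setup)(void *ctx);
		int (*scrub)(void *ctx);
		int (*repair)(void *ctx);
	};

	static int setup_noop(void *ctx)  { (void)ctx; return 0; }
	static int scrub_probe(void *ctx) { (void)ctx; puts("probe ok"); return 0; }
	static int repair_notsupported(void *ctx) { (void)ctx; return -1; }

	enum { CHK_TYPE_PROBE, CHK_TYPE_NR };

	/* One entry per scrub type; unlisted slots stay zeroed. */
	static const struct chk_ops ops_table[CHK_TYPE_NR] = {
		[CHK_TYPE_PROBE] = {
			.setup  = setup_noop,
			.scrub  = scrub_probe,
			.repair = repair_notsupported,
		},
	};

	int main(void)
	{
		const struct chk_ops *ops = &ops_table[CHK_TYPE_PROBE];
		int error = ops->setup(NULL);

		if (!error)
			error = ops->scrub(NULL);	/* dispatch by type */
		return error;
	}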
/* This isn't a stable feature, warn once per day. */
static inline void
-xfs_scrub_experimental_warning(
+xchk_experimental_warning(
struct xfs_mount *mp)
{
static struct ratelimit_state scrub_warning = RATELIMIT_STATE_INIT(
- "xfs_scrub_warning", 86400 * HZ, 1);
+ "xchk_warning", 86400 * HZ, 1);
ratelimit_set_flags(&scrub_warning, RATELIMIT_MSG_ON_RELEASE);
if (__ratelimit(&scrub_warning))
@@ -358,12 +364,12 @@ xfs_scrub_experimental_warning(
}
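The ratelimit_state above allows one message per 86400 * HZ jiffies, i.e. one warning per day. A rough userspace approximation of that throttling, assuming a plain timestamp comparison instead of the kernel's __ratelimit() and without its locking:

	#include <stdio.h>
	#include <time.h>

	/* Hypothetical stand-in for the once-per-day ratelimited warning;
	 * not thread-safe, unlike the kernel's ratelimit_state. */
	static void warn_experimental(void)
	{
		static time_t last_warning;
		time_t now = time(NULL);

		if (last_warning == 0 || now - last_warning >= 86400) {
			fputs("EXPERIMENTAL online scrub feature in use!\n", stderr);
			last_warning = now;
		}
	}

	int main(void)
	{
		warn_experimental();	/* prints */
		warn_experimental();	/* suppressed: within the 24h window */
		return 0;
	}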
static int
-xfs_scrub_validate_inputs(
+xchk_validate_inputs(
struct xfs_mount *mp,
struct xfs_scrub_metadata *sm)
{
int error;
- const struct xfs_scrub_meta_ops *ops;
+ const struct xchk_meta_ops *ops;
error = -EINVAL;
/* Check our inputs. */
@@ -441,7 +447,7 @@ out:
}
#ifdef CONFIG_XFS_ONLINE_REPAIR
-static inline void xfs_scrub_postmortem(struct xfs_scrub_context *sc)
+static inline void xchk_postmortem(struct xfs_scrub *sc)
{
/*
* Userspace asked us to repair something, we repaired it, rescanned
@@ -451,10 +457,10 @@ static inline void xfs_scrub_postmortem(struct xfs_scrub_context *sc)
if ((sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) &&
(sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
XFS_SCRUB_OFLAG_XCORRUPT)))
- xfs_repair_failure(sc->mp);
+ xrep_failure(sc->mp);
}
#else
-static inline void xfs_scrub_postmortem(struct xfs_scrub_context *sc)
+static inline void xchk_postmortem(struct xfs_scrub *sc)
{
/*
* Userspace asked us to scrub something, it's broken, and we have no
@@ -473,16 +479,16 @@ xfs_scrub_metadata(
struct xfs_inode *ip,
struct xfs_scrub_metadata *sm)
{
- struct xfs_scrub_context sc;
+ struct xfs_scrub sc;
struct xfs_mount *mp = ip->i_mount;
bool try_harder = false;
bool already_fixed = false;
int error = 0;
BUILD_BUG_ON(sizeof(meta_scrub_ops) !=
- (sizeof(struct xfs_scrub_meta_ops) * XFS_SCRUB_TYPE_NR));
+ (sizeof(struct xchk_meta_ops) * XFS_SCRUB_TYPE_NR));
- trace_xfs_scrub_start(ip, sm, error);
+ trace_xchk_start(ip, sm, error);
/* Forbidden if we are shut down or mounted norecovery. */
error = -ESHUTDOWN;
@@ -492,11 +498,11 @@ xfs_scrub_metadata(
if (mp->m_flags & XFS_MOUNT_NORECOVERY)
goto out;
- error = xfs_scrub_validate_inputs(mp, sm);
+ error = xchk_validate_inputs(mp, sm);
if (error)
goto out;
- xfs_scrub_experimental_warning(mp);
+ xchk_experimental_warning(mp);
retry_op:
/* Set up for the operation. */
@@ -518,7 +524,7 @@ retry_op:
* Tear down everything we hold, then set up again with
* preparation for worst-case scenarios.
*/
- error = xfs_scrub_teardown(&sc, ip, 0);
+ error = xchk_teardown(&sc, ip, 0);
if (error)
goto out;
try_harder = true;
@@ -549,13 +555,13 @@ retry_op:
* If it's broken, userspace wants us to fix it, and we haven't
* already tried to fix it, then attempt a repair.
*/
- error = xfs_repair_attempt(ip, &sc, &already_fixed);
+ error = xrep_attempt(ip, &sc, &already_fixed);
if (error == -EAGAIN) {
if (sc.try_harder)
try_harder = true;
- error = xfs_scrub_teardown(&sc, ip, 0);
+ error = xchk_teardown(&sc, ip, 0);
if (error) {
- xfs_repair_failure(mp);
+ xrep_failure(mp);
goto out;
}
goto retry_op;
@@ -563,11 +569,11 @@ retry_op:
}
out_nofix:
- xfs_scrub_postmortem(&sc);
+ xchk_postmortem(&sc);
out_teardown:
- error = xfs_scrub_teardown(&sc, ip, error);
+ error = xchk_teardown(&sc, ip, error);
out:
- trace_xfs_scrub_done(ip, sm, error);
+ trace_xchk_done(ip, sm, error);
if (error == -EFSCORRUPTED || error == -EFSBADCRC) {
sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
error = 0;
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index b295edd5fc0e..af323b229c4b 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -6,58 +6,58 @@
#ifndef __XFS_SCRUB_SCRUB_H__
#define __XFS_SCRUB_SCRUB_H__
-struct xfs_scrub_context;
+struct xfs_scrub;
/* Type info and names for the scrub types. */
-enum xfs_scrub_type {
+enum xchk_type {
ST_NONE = 1, /* disabled */
ST_PERAG, /* per-AG metadata */
ST_FS, /* per-FS metadata */
ST_INODE, /* per-inode metadata */
};
-struct xfs_scrub_meta_ops {
+struct xchk_meta_ops {
/* Acquire whatever resources are needed for the operation. */
- int (*setup)(struct xfs_scrub_context *,
+ int (*setup)(struct xfs_scrub *,
struct xfs_inode *);
/* Examine metadata for errors. */
- int (*scrub)(struct xfs_scrub_context *);
+ int (*scrub)(struct xfs_scrub *);
/* Repair or optimize the metadata. */
- int (*repair)(struct xfs_scrub_context *);
+ int (*repair)(struct xfs_scrub *);
/* Decide if we even have this piece of metadata. */
bool (*has)(struct xfs_sb *);
/* type describing required/allowed inputs */
- enum xfs_scrub_type type;
+ enum xchk_type type;
};
/* Buffer pointers and btree cursors for an entire AG. */
-struct xfs_scrub_ag {
- xfs_agnumber_t agno;
- struct xfs_perag *pag;
+struct xchk_ag {
+ xfs_agnumber_t agno;
+ struct xfs_perag *pag;
/* AG btree roots */
- struct xfs_buf *agf_bp;
- struct xfs_buf *agfl_bp;
- struct xfs_buf *agi_bp;
+ struct xfs_buf *agf_bp;
+ struct xfs_buf *agfl_bp;
+ struct xfs_buf *agi_bp;
/* AG btrees */
- struct xfs_btree_cur *bno_cur;
- struct xfs_btree_cur *cnt_cur;
- struct xfs_btree_cur *ino_cur;
- struct xfs_btree_cur *fino_cur;
- struct xfs_btree_cur *rmap_cur;
- struct xfs_btree_cur *refc_cur;
+ struct xfs_btree_cur *bno_cur;
+ struct xfs_btree_cur *cnt_cur;
+ struct xfs_btree_cur *ino_cur;
+ struct xfs_btree_cur *fino_cur;
+ struct xfs_btree_cur *rmap_cur;
+ struct xfs_btree_cur *refc_cur;
};
-struct xfs_scrub_context {
+struct xfs_scrub {
/* General scrub state. */
struct xfs_mount *mp;
struct xfs_scrub_metadata *sm;
- const struct xfs_scrub_meta_ops *ops;
+ const struct xchk_meta_ops *ops;
struct xfs_trans *tp;
struct xfs_inode *ip;
void *buf;
@@ -66,78 +66,76 @@ struct xfs_scrub_context {
bool has_quotaofflock;
/* State tracking for single-AG operations. */
- struct xfs_scrub_ag sa;
+ struct xchk_ag sa;
};
/* Metadata scrubbers */
-int xfs_scrub_tester(struct xfs_scrub_context *sc);
-int xfs_scrub_superblock(struct xfs_scrub_context *sc);
-int xfs_scrub_agf(struct xfs_scrub_context *sc);
-int xfs_scrub_agfl(struct xfs_scrub_context *sc);
-int xfs_scrub_agi(struct xfs_scrub_context *sc);
-int xfs_scrub_bnobt(struct xfs_scrub_context *sc);
-int xfs_scrub_cntbt(struct xfs_scrub_context *sc);
-int xfs_scrub_inobt(struct xfs_scrub_context *sc);
-int xfs_scrub_finobt(struct xfs_scrub_context *sc);
-int xfs_scrub_rmapbt(struct xfs_scrub_context *sc);
-int xfs_scrub_refcountbt(struct xfs_scrub_context *sc);
-int xfs_scrub_inode(struct xfs_scrub_context *sc);
-int xfs_scrub_bmap_data(struct xfs_scrub_context *sc);
-int xfs_scrub_bmap_attr(struct xfs_scrub_context *sc);
-int xfs_scrub_bmap_cow(struct xfs_scrub_context *sc);
-int xfs_scrub_directory(struct xfs_scrub_context *sc);
-int xfs_scrub_xattr(struct xfs_scrub_context *sc);
-int xfs_scrub_symlink(struct xfs_scrub_context *sc);
-int xfs_scrub_parent(struct xfs_scrub_context *sc);
+int xchk_tester(struct xfs_scrub *sc);
+int xchk_superblock(struct xfs_scrub *sc);
+int xchk_agf(struct xfs_scrub *sc);
+int xchk_agfl(struct xfs_scrub *sc);
+int xchk_agi(struct xfs_scrub *sc);
+int xchk_bnobt(struct xfs_scrub *sc);
+int xchk_cntbt(struct xfs_scrub *sc);
+int xchk_inobt(struct xfs_scrub *sc);
+int xchk_finobt(struct xfs_scrub *sc);
+int xchk_rmapbt(struct xfs_scrub *sc);
+int xchk_refcountbt(struct xfs_scrub *sc);
+int xchk_inode(struct xfs_scrub *sc);
+int xchk_bmap_data(struct xfs_scrub *sc);
+int xchk_bmap_attr(struct xfs_scrub *sc);
+int xchk_bmap_cow(struct xfs_scrub *sc);
+int xchk_directory(struct xfs_scrub *sc);
+int xchk_xattr(struct xfs_scrub *sc);
+int xchk_symlink(struct xfs_scrub *sc);
+int xchk_parent(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
-int xfs_scrub_rtbitmap(struct xfs_scrub_context *sc);
-int xfs_scrub_rtsummary(struct xfs_scrub_context *sc);
+int xchk_rtbitmap(struct xfs_scrub *sc);
+int xchk_rtsummary(struct xfs_scrub *sc);
#else
static inline int
-xfs_scrub_rtbitmap(struct xfs_scrub_context *sc)
+xchk_rtbitmap(struct xfs_scrub *sc)
{
return -ENOENT;
}
static inline int
-xfs_scrub_rtsummary(struct xfs_scrub_context *sc)
+xchk_rtsummary(struct xfs_scrub *sc)
{
return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
-int xfs_scrub_quota(struct xfs_scrub_context *sc);
+int xchk_quota(struct xfs_scrub *sc);
#else
static inline int
-xfs_scrub_quota(struct xfs_scrub_context *sc)
+xchk_quota(struct xfs_scrub *sc)
{
return -ENOENT;
}
#endif
/* cross-referencing helpers */
-void xfs_scrub_xref_is_used_space(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len);
-void xfs_scrub_xref_is_not_inode_chunk(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len);
-void xfs_scrub_xref_is_inode_chunk(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len);
-void xfs_scrub_xref_is_owned_by(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len,
- struct xfs_owner_info *oinfo);
-void xfs_scrub_xref_is_not_owned_by(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len,
- struct xfs_owner_info *oinfo);
-void xfs_scrub_xref_has_no_owner(struct xfs_scrub_context *sc,
- xfs_agblock_t agbno, xfs_extlen_t len);
-void xfs_scrub_xref_is_cow_staging(struct xfs_scrub_context *sc,
- xfs_agblock_t bno, xfs_extlen_t len);
-void xfs_scrub_xref_is_not_shared(struct xfs_scrub_context *sc,
- xfs_agblock_t bno, xfs_extlen_t len);
+void xchk_xref_is_used_space(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len);
+void xchk_xref_is_not_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len);
+void xchk_xref_is_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len);
+void xchk_xref_is_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len, struct xfs_owner_info *oinfo);
+void xchk_xref_is_not_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len, struct xfs_owner_info *oinfo);
+void xchk_xref_has_no_owner(struct xfs_scrub *sc, xfs_agblock_t agbno,
+ xfs_extlen_t len);
+void xchk_xref_is_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
+ xfs_extlen_t len);
+void xchk_xref_is_not_shared(struct xfs_scrub *sc, xfs_agblock_t bno,
+ xfs_extlen_t len);
#ifdef CONFIG_XFS_RT
-void xfs_scrub_xref_is_used_rt_space(struct xfs_scrub_context *sc,
- xfs_rtblock_t rtbno, xfs_extlen_t len);
+void xchk_xref_is_used_rt_space(struct xfs_scrub *sc, xfs_rtblock_t rtbno,
+ xfs_extlen_t len);
#else
-# define xfs_scrub_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
+# define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
#endif
#endif /* __XFS_SCRUB_SCRUB_H__ */
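The tail of this header shows the usual CONFIG-gated stub pattern: when realtime or quota support is compiled out, the entry point collapses to an inline returning -ENOENT or to a no-op macro, so callers never need their own #ifdefs. A tiny sketch of the same idea, with a hypothetical HAVE_RT switch:

	#include <stdio.h>

	#ifdef HAVE_RT
	static void xref_rt(int bno, int len)
	{
		printf("check rt extent %d+%d\n", bno, len);
	}
	#else
	/* Compiled out: calls vanish but still parse and type-check. */
	# define xref_rt(bno, len)	do { } while (0)
	#endif

	int main(void)
	{
		xref_rt(10, 2);	/* no-op unless built with -DHAVE_RT */
		return 0;
	}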
diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c
index 570a89812116..f7ebaa946999 100644
--- a/fs/xfs/scrub/symlink.c
+++ b/fs/xfs/scrub/symlink.c
@@ -25,28 +25,28 @@
/* Set us up to scrub a symbolic link. */
int
-xfs_scrub_setup_symlink(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
+xchk_setup_symlink(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip)
{
/* Allocate the buffer without the inode lock held. */
sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, KM_SLEEP);
if (!sc->buf)
return -ENOMEM;
- return xfs_scrub_setup_inode_contents(sc, ip, 0);
+ return xchk_setup_inode_contents(sc, ip, 0);
}
/* Symbolic links. */
int
-xfs_scrub_symlink(
- struct xfs_scrub_context *sc)
+xchk_symlink(
+ struct xfs_scrub *sc)
{
- struct xfs_inode *ip = sc->ip;
- struct xfs_ifork *ifp;
- loff_t len;
- int error = 0;
+ struct xfs_inode *ip = sc->ip;
+ struct xfs_ifork *ifp;
+ loff_t len;
+ int error = 0;
if (!S_ISLNK(VFS_I(ip)->i_mode))
return -ENOENT;
@@ -55,7 +55,7 @@ xfs_scrub_symlink(
/* Plausible size? */
if (len > XFS_SYMLINK_MAXLEN || len <= 0) {
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
@@ -63,16 +63,16 @@ xfs_scrub_symlink(
if (ifp->if_flags & XFS_IFINLINE) {
if (len > XFS_IFORK_DSIZE(ip) ||
len > strnlen(ifp->if_u1.if_data, XFS_IFORK_DSIZE(ip)))
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out;
}
/* Remote symlink; must read the contents. */
error = xfs_readlink_bmap_ilocked(sc->ip, sc->buf);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
if (strnlen(sc->buf, XFS_SYMLINK_MAXLEN) < len)
- xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
out:
return error;
}
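xchk_symlink's corruption tests boil down to two properties: the recorded length must be positive and no larger than the maximum, and the stored string must not end early with an embedded NUL. A userspace sketch of the same plausibility checks against a live symlink via readlink(2) — illustrative only, since the kernel reads the inode fork contents directly:

	#include <limits.h>
	#include <string.h>
	#include <unistd.h>

	/* Return 0 if the symlink target passes basic plausibility checks. */
	static int check_symlink(const char *path)
	{
		char buf[PATH_MAX + 1];
		ssize_t len = readlink(path, buf, PATH_MAX);

		if (len <= 0 || len > PATH_MAX)
			return -1;			/* implausible size */
		buf[len] = '\0';
		if (strnlen(buf, PATH_MAX) < (size_t)len)
			return -1;			/* ends early: corrupt */
		return 0;
	}

	int main(int argc, char **argv)
	{
		if (argc != 2)
			return 2;
		return check_symlink(argv[1]) ? 1 : 0;
	}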
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
index 7c76d8b5cb05..96feaf8dcdec 100644
--- a/fs/xfs/scrub/trace.c
+++ b/fs/xfs/scrub/trace.c
@@ -22,9 +22,9 @@
/* Figure out which block the btree cursor was pointing to. */
static inline xfs_fsblock_t
-xfs_scrub_btree_cur_fsbno(
- struct xfs_btree_cur *cur,
- int level)
+xchk_btree_cur_fsbno(
+ struct xfs_btree_cur *cur,
+ int level)
{
if (level < cur->bc_nlevels && cur->bc_bufs[level])
return XFS_DADDR_TO_FSB(cur->bc_mp, cur->bc_bufs[level]->b_bn);
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index cec3e5ece5a1..4e20f0e48232 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -12,7 +12,7 @@
#include <linux/tracepoint.h>
#include "xfs_bit.h"
-DECLARE_EVENT_CLASS(xfs_scrub_class,
+DECLARE_EVENT_CLASS(xchk_class,
TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm,
int error),
TP_ARGS(ip, sm, error),
@@ -47,19 +47,19 @@ DECLARE_EVENT_CLASS(xfs_scrub_class,
__entry->error)
)
#define DEFINE_SCRUB_EVENT(name) \
-DEFINE_EVENT(xfs_scrub_class, name, \
+DEFINE_EVENT(xchk_class, name, \
TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm, \
int error), \
TP_ARGS(ip, sm, error))
-DEFINE_SCRUB_EVENT(xfs_scrub_start);
-DEFINE_SCRUB_EVENT(xfs_scrub_done);
-DEFINE_SCRUB_EVENT(xfs_scrub_deadlock_retry);
-DEFINE_SCRUB_EVENT(xfs_repair_attempt);
-DEFINE_SCRUB_EVENT(xfs_repair_done);
+DEFINE_SCRUB_EVENT(xchk_start);
+DEFINE_SCRUB_EVENT(xchk_done);
+DEFINE_SCRUB_EVENT(xchk_deadlock_retry);
+DEFINE_SCRUB_EVENT(xrep_attempt);
+DEFINE_SCRUB_EVENT(xrep_done);
-TRACE_EVENT(xfs_scrub_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+TRACE_EVENT(xchk_op_error,
+ TP_PROTO(struct xfs_scrub *sc, xfs_agnumber_t agno,
xfs_agblock_t bno, int error, void *ret_ip),
TP_ARGS(sc, agno, bno, error, ret_ip),
TP_STRUCT__entry(
@@ -87,8 +87,8 @@ TRACE_EVENT(xfs_scrub_op_error,
__entry->ret_ip)
);
-TRACE_EVENT(xfs_scrub_file_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, int whichfork,
+TRACE_EVENT(xchk_file_op_error,
+ TP_PROTO(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset, int error, void *ret_ip),
TP_ARGS(sc, whichfork, offset, error, ret_ip),
TP_STRUCT__entry(
@@ -119,8 +119,8 @@ TRACE_EVENT(xfs_scrub_file_op_error,
__entry->ret_ip)
);
-DECLARE_EVENT_CLASS(xfs_scrub_block_error_class,
- TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, void *ret_ip),
+DECLARE_EVENT_CLASS(xchk_block_error_class,
+ TP_PROTO(struct xfs_scrub *sc, xfs_daddr_t daddr, void *ret_ip),
TP_ARGS(sc, daddr, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -153,16 +153,16 @@ DECLARE_EVENT_CLASS(xfs_scrub_block_error_class,
)
#define DEFINE_SCRUB_BLOCK_ERROR_EVENT(name) \
-DEFINE_EVENT(xfs_scrub_block_error_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, \
+DEFINE_EVENT(xchk_block_error_class, name, \
+ TP_PROTO(struct xfs_scrub *sc, xfs_daddr_t daddr, \
void *ret_ip), \
TP_ARGS(sc, daddr, ret_ip))
-DEFINE_SCRUB_BLOCK_ERROR_EVENT(xfs_scrub_block_error);
-DEFINE_SCRUB_BLOCK_ERROR_EVENT(xfs_scrub_block_preen);
+DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_error);
+DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_preen);
-DECLARE_EVENT_CLASS(xfs_scrub_ino_error_class,
- TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, void *ret_ip),
+DECLARE_EVENT_CLASS(xchk_ino_error_class,
+ TP_PROTO(struct xfs_scrub *sc, xfs_ino_t ino, void *ret_ip),
TP_ARGS(sc, ino, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -184,17 +184,17 @@ DECLARE_EVENT_CLASS(xfs_scrub_ino_error_class,
)
#define DEFINE_SCRUB_INO_ERROR_EVENT(name) \
-DEFINE_EVENT(xfs_scrub_ino_error_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, \
+DEFINE_EVENT(xchk_ino_error_class, name, \
+ TP_PROTO(struct xfs_scrub *sc, xfs_ino_t ino, \
void *ret_ip), \
TP_ARGS(sc, ino, ret_ip))
-DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_error);
-DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_preen);
-DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_warning);
+DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_error);
+DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_preen);
+DEFINE_SCRUB_INO_ERROR_EVENT(xchk_ino_warning);
-DECLARE_EVENT_CLASS(xfs_scrub_fblock_error_class,
- TP_PROTO(struct xfs_scrub_context *sc, int whichfork,
+DECLARE_EVENT_CLASS(xchk_fblock_error_class,
+ TP_PROTO(struct xfs_scrub *sc, int whichfork,
xfs_fileoff_t offset, void *ret_ip),
TP_ARGS(sc, whichfork, offset, ret_ip),
TP_STRUCT__entry(
@@ -223,16 +223,16 @@ DECLARE_EVENT_CLASS(xfs_scrub_fblock_error_class,
);
#define DEFINE_SCRUB_FBLOCK_ERROR_EVENT(name) \
-DEFINE_EVENT(xfs_scrub_fblock_error_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, int whichfork, \
+DEFINE_EVENT(xchk_fblock_error_class, name, \
+ TP_PROTO(struct xfs_scrub *sc, int whichfork, \
xfs_fileoff_t offset, void *ret_ip), \
TP_ARGS(sc, whichfork, offset, ret_ip))
-DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xfs_scrub_fblock_error);
-DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xfs_scrub_fblock_warning);
+DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xchk_fblock_error);
+DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xchk_fblock_warning);
-TRACE_EVENT(xfs_scrub_incomplete,
- TP_PROTO(struct xfs_scrub_context *sc, void *ret_ip),
+TRACE_EVENT(xchk_incomplete,
+ TP_PROTO(struct xfs_scrub *sc, void *ret_ip),
TP_ARGS(sc, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -250,8 +250,8 @@ TRACE_EVENT(xfs_scrub_incomplete,
__entry->ret_ip)
);
-TRACE_EVENT(xfs_scrub_btree_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+TRACE_EVENT(xchk_btree_op_error,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, int error, void *ret_ip),
TP_ARGS(sc, cur, level, error, ret_ip),
TP_STRUCT__entry(
@@ -266,7 +266,7 @@ TRACE_EVENT(xfs_scrub_btree_op_error,
__field(void *, ret_ip)
),
TP_fast_assign(
- xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type;
@@ -290,8 +290,8 @@ TRACE_EVENT(xfs_scrub_btree_op_error,
__entry->ret_ip)
);
-TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+TRACE_EVENT(xchk_ifork_btree_op_error,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, int error, void *ret_ip),
TP_ARGS(sc, cur, level, error, ret_ip),
TP_STRUCT__entry(
@@ -308,7 +308,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
__field(void *, ret_ip)
),
TP_fast_assign(
- xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->ino = sc->ip->i_ino;
__entry->whichfork = cur->bc_private.b.whichfork;
@@ -335,8 +335,8 @@ TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
__entry->ret_ip)
);
-TRACE_EVENT(xfs_scrub_btree_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+TRACE_EVENT(xchk_btree_error,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, void *ret_ip),
TP_ARGS(sc, cur, level, ret_ip),
TP_STRUCT__entry(
@@ -350,7 +350,7 @@ TRACE_EVENT(xfs_scrub_btree_error,
__field(void *, ret_ip)
),
TP_fast_assign(
- xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type;
__entry->btnum = cur->bc_btnum;
@@ -371,8 +371,8 @@ TRACE_EVENT(xfs_scrub_btree_error,
__entry->ret_ip)
);
-TRACE_EVENT(xfs_scrub_ifork_btree_error,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+TRACE_EVENT(xchk_ifork_btree_error,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level, void *ret_ip),
TP_ARGS(sc, cur, level, ret_ip),
TP_STRUCT__entry(
@@ -388,7 +388,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_error,
__field(void *, ret_ip)
),
TP_fast_assign(
- xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->ino = sc->ip->i_ino;
__entry->whichfork = cur->bc_private.b.whichfork;
@@ -413,8 +413,8 @@ TRACE_EVENT(xfs_scrub_ifork_btree_error,
__entry->ret_ip)
);
-DECLARE_EVENT_CLASS(xfs_scrub_sbtree_class,
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+DECLARE_EVENT_CLASS(xchk_sbtree_class,
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
int level),
TP_ARGS(sc, cur, level),
TP_STRUCT__entry(
@@ -428,7 +428,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_sbtree_class,
__field(int, ptr)
),
TP_fast_assign(
- xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
__entry->dev = sc->mp->m_super->s_dev;
__entry->type = sc->sm->sm_type;
@@ -450,16 +450,16 @@ DECLARE_EVENT_CLASS(xfs_scrub_sbtree_class,
__entry->ptr)
)
#define DEFINE_SCRUB_SBTREE_EVENT(name) \
-DEFINE_EVENT(xfs_scrub_sbtree_class, name, \
- TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, \
+DEFINE_EVENT(xchk_sbtree_class, name, \
+ TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur, \
int level), \
TP_ARGS(sc, cur, level))
-DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_rec);
-DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_key);
+DEFINE_SCRUB_SBTREE_EVENT(xchk_btree_rec);
+DEFINE_SCRUB_SBTREE_EVENT(xchk_btree_key);
-TRACE_EVENT(xfs_scrub_xref_error,
- TP_PROTO(struct xfs_scrub_context *sc, int error, void *ret_ip),
+TRACE_EVENT(xchk_xref_error,
+ TP_PROTO(struct xfs_scrub *sc, int error, void *ret_ip),
TP_ARGS(sc, error, ret_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -483,7 +483,7 @@ TRACE_EVENT(xfs_scrub_xref_error,
/* repair tracepoints */
#if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR)
-DECLARE_EVENT_CLASS(xfs_repair_extent_class,
+DECLARE_EVENT_CLASS(xrep_extent_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len),
TP_ARGS(mp, agno, agbno, len),
@@ -506,15 +506,14 @@ DECLARE_EVENT_CLASS(xfs_repair_extent_class,
__entry->len)
);
#define DEFINE_REPAIR_EXTENT_EVENT(name) \
-DEFINE_EVENT(xfs_repair_extent_class, name, \
+DEFINE_EVENT(xrep_extent_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
xfs_agblock_t agbno, xfs_extlen_t len), \
TP_ARGS(mp, agno, agbno, len))
-DEFINE_REPAIR_EXTENT_EVENT(xfs_repair_dispose_btree_extent);
-DEFINE_REPAIR_EXTENT_EVENT(xfs_repair_collect_btree_extent);
-DEFINE_REPAIR_EXTENT_EVENT(xfs_repair_agfl_insert);
+DEFINE_REPAIR_EXTENT_EVENT(xrep_dispose_btree_extent);
+DEFINE_REPAIR_EXTENT_EVENT(xrep_agfl_insert);
-DECLARE_EVENT_CLASS(xfs_repair_rmap_class,
+DECLARE_EVENT_CLASS(xrep_rmap_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len,
uint64_t owner, uint64_t offset, unsigned int flags),
@@ -547,17 +546,17 @@ DECLARE_EVENT_CLASS(xfs_repair_rmap_class,
__entry->flags)
);
#define DEFINE_REPAIR_RMAP_EVENT(name) \
-DEFINE_EVENT(xfs_repair_rmap_class, name, \
+DEFINE_EVENT(xrep_rmap_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
xfs_agblock_t agbno, xfs_extlen_t len, \
uint64_t owner, uint64_t offset, unsigned int flags), \
TP_ARGS(mp, agno, agbno, len, owner, offset, flags))
-DEFINE_REPAIR_RMAP_EVENT(xfs_repair_alloc_extent_fn);
-DEFINE_REPAIR_RMAP_EVENT(xfs_repair_ialloc_extent_fn);
-DEFINE_REPAIR_RMAP_EVENT(xfs_repair_rmap_extent_fn);
-DEFINE_REPAIR_RMAP_EVENT(xfs_repair_bmap_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_alloc_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_ialloc_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_rmap_extent_fn);
+DEFINE_REPAIR_RMAP_EVENT(xrep_bmap_extent_fn);
-TRACE_EVENT(xfs_repair_refcount_extent_fn,
+TRACE_EVENT(xrep_refcount_extent_fn,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
struct xfs_refcount_irec *irec),
TP_ARGS(mp, agno, irec),
@@ -583,7 +582,7 @@ TRACE_EVENT(xfs_repair_refcount_extent_fn,
__entry->refcount)
)
-TRACE_EVENT(xfs_repair_init_btblock,
+TRACE_EVENT(xrep_init_btblock,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
xfs_btnum_t btnum),
TP_ARGS(mp, agno, agbno, btnum),
@@ -605,7 +604,7 @@ TRACE_EVENT(xfs_repair_init_btblock,
__entry->agbno,
__entry->btnum)
)
-TRACE_EVENT(xfs_repair_findroot_block,
+TRACE_EVENT(xrep_findroot_block,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
uint32_t magic, uint16_t level),
TP_ARGS(mp, agno, agbno, magic, level),
@@ -630,7 +629,7 @@ TRACE_EVENT(xfs_repair_findroot_block,
__entry->magic,
__entry->level)
)
-TRACE_EVENT(xfs_repair_calc_ag_resblks,
+TRACE_EVENT(xrep_calc_ag_resblks,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agino_t icount, xfs_agblock_t aglen, xfs_agblock_t freelen,
xfs_agblock_t usedlen),
@@ -659,7 +658,7 @@ TRACE_EVENT(xfs_repair_calc_ag_resblks,
__entry->freelen,
__entry->usedlen)
)
-TRACE_EVENT(xfs_repair_calc_ag_resblks_btsize,
+TRACE_EVENT(xrep_calc_ag_resblks_btsize,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t bnobt_sz, xfs_agblock_t inobt_sz,
xfs_agblock_t rmapbt_sz, xfs_agblock_t refcbt_sz),
@@ -688,7 +687,7 @@ TRACE_EVENT(xfs_repair_calc_ag_resblks_btsize,
__entry->rmapbt_sz,
__entry->refcbt_sz)
)
-TRACE_EVENT(xfs_repair_reset_counters,
+TRACE_EVENT(xrep_reset_counters,
TP_PROTO(struct xfs_mount *mp),
TP_ARGS(mp),
TP_STRUCT__entry(
@@ -701,7 +700,7 @@ TRACE_EVENT(xfs_repair_reset_counters,
MAJOR(__entry->dev), MINOR(__entry->dev))
)
-TRACE_EVENT(xfs_repair_ialloc_insert,
+TRACE_EVENT(xrep_ialloc_insert,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agino_t startino, uint16_t holemask, uint8_t count,
uint8_t freecount, uint64_t freemask),
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 583a9f539bf1..f6ffb4f248f7 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -8,7 +8,6 @@
#ifdef CONFIG_XFS_DEBUG
#define DEBUG 1
-#define XFS_BUF_LOCK_TRACKING 1
#endif
#ifdef CONFIG_XFS_ASSERT_FATAL
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 8eb3ba3d4d00..49f5f5896a43 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2016-2018 Christoph Hellwig.
* All Rights Reserved.
*/
#include "xfs.h"
@@ -20,9 +21,6 @@
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
-#include <linux/gfp.h>
-#include <linux/mpage.h>
-#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
@@ -30,31 +28,11 @@
*/
struct xfs_writepage_ctx {
struct xfs_bmbt_irec imap;
- bool imap_valid;
unsigned int io_type;
+ unsigned int cow_seq;
struct xfs_ioend *ioend;
- sector_t last_block;
};
-void
-xfs_count_page_state(
- struct page *page,
- int *delalloc,
- int *unwritten)
-{
- struct buffer_head *bh, *head;
-
- *delalloc = *unwritten = 0;
-
- bh = head = page_buffers(page);
- do {
- if (buffer_unwritten(bh))
- (*unwritten) = 1;
- else if (buffer_delay(bh))
- (*delalloc) = 1;
- } while ((bh = bh->b_this_page) != head);
-}
-
struct block_device *
xfs_find_bdev_for_inode(
struct inode *inode)
@@ -81,60 +59,23 @@ xfs_find_daxdev_for_inode(
return mp->m_ddev_targp->bt_daxdev;
}
-/*
- * We're now finished for good with this page. Update the page state via the
- * associated buffer_heads, paying attention to the start and end offsets that
- * we need to process on the page.
- *
- * Note that we open code the action in end_buffer_async_write here so that we
- * only have to iterate over the buffers attached to the page once. This is not
- * only more efficient, but also ensures that we only calls end_page_writeback
- * at the end of the iteration, and thus avoids the pitfall of having the page
- * and buffers potentially freed after every call to end_buffer_async_write.
- */
static void
xfs_finish_page_writeback(
struct inode *inode,
struct bio_vec *bvec,
int error)
{
- struct buffer_head *head = page_buffers(bvec->bv_page), *bh = head;
- bool busy = false;
- unsigned int off = 0;
- unsigned long flags;
-
- ASSERT(bvec->bv_offset < PAGE_SIZE);
- ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
- ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
- ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
-
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
- do {
- if (off >= bvec->bv_offset &&
- off < bvec->bv_offset + bvec->bv_len) {
- ASSERT(buffer_async_write(bh));
- ASSERT(bh->b_end_io == NULL);
-
- if (error) {
- mark_buffer_write_io_error(bh);
- clear_buffer_uptodate(bh);
- SetPageError(bvec->bv_page);
- } else {
- set_buffer_uptodate(bh);
- }
- clear_buffer_async_write(bh);
- unlock_buffer(bh);
- } else if (buffer_async_write(bh)) {
- ASSERT(buffer_locked(bh));
- busy = true;
- }
- off += bh->b_size;
- } while ((bh = bh->b_this_page) != head);
- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
- local_irq_restore(flags);
+ struct iomap_page *iop = to_iomap_page(bvec->bv_page);
+
+ if (error) {
+ SetPageError(bvec->bv_page);
+ mapping_set_error(inode->i_mapping, -EIO);
+ }
+
+ ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
+ ASSERT(!iop || atomic_read(&iop->write_count) > 0);
- if (!busy)
+ if (!iop || atomic_dec_and_test(&iop->write_count))
end_page_writeback(bvec->bv_page);
}
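With buffer heads gone, sub-page writeback completion is tracked by iop->write_count: each block I/O added to a bio takes a reference on its page, and only the completion that drops the last reference calls end_page_writeback(). A self-contained sketch of that accounting with C11 atomics, using hypothetical names:

	#include <stdatomic.h>
	#include <stdio.h>

	struct page_state {
		atomic_int write_count;	/* in-flight block I/Os on this page */
	};

	static void finish_block_io(struct page_state *ps)
	{
		/* Analog of atomic_dec_and_test(): true only for the
		 * completion that drops the final reference. */
		if (atomic_fetch_sub(&ps->write_count, 1) == 1)
			puts("end_page_writeback()");
	}

	int main(void)
	{
		struct page_state ps = { .write_count = 2 };

		finish_block_io(&ps);	/* first block done: page still busy */
		finish_block_io(&ps);	/* last block done: writeback ends */
		return 0;
	}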
@@ -170,7 +111,6 @@ xfs_destroy_ioend(
/* walk each page on bio, ending page IO on them */
bio_for_each_segment_all(bvec, bio, i)
xfs_finish_page_writeback(inode, bvec, error);
-
bio_put(bio);
}
@@ -363,39 +303,66 @@ xfs_end_bio(
STATIC int
xfs_map_blocks(
+ struct xfs_writepage_ctx *wpc,
struct inode *inode,
- loff_t offset,
- struct xfs_bmbt_irec *imap,
- int type)
+ loff_t offset)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
ssize_t count = i_blocksize(inode);
- xfs_fileoff_t offset_fsb, end_fsb;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset), end_fsb;
+ xfs_fileoff_t cow_fsb = NULLFILEOFF;
+ struct xfs_bmbt_irec imap;
+ int whichfork = XFS_DATA_FORK;
+ struct xfs_iext_cursor icur;
+ bool imap_valid;
int error = 0;
- int bmapi_flags = XFS_BMAPI_ENTIRE;
- int nimaps = 1;
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
+ /*
+ * We have to make sure the cached mapping is within EOF to protect
+ * against eofblocks trimming on file release leaving us with a stale
+ * mapping. Otherwise, a page for a subsequent file extending buffered
+ * write could get picked up by this writeback cycle and written to the
+ * wrong blocks.
+ *
+ * Note that what we really want here is a generic mapping invalidation
+ * mechanism to protect us from arbitrary extent modifying contexts, not
+ * just eofblocks.
+ */
+ xfs_trim_extent_eof(&wpc->imap, ip);
/*
- * Truncate can race with writeback since writeback doesn't take the
- * iolock and truncate decreases the file size before it starts
- * truncating the pages between new_size and old_size. Therefore, we
- * can end up in the situation where writeback gets a CoW fork mapping
- * but the truncate makes the mapping invalid and we end up in here
- * trying to get a new mapping. Bail out here so that we simply never
- * get a valid mapping and so we drop the write altogether. The page
- * truncation will kill the contents anyway.
+ * COW fork blocks can overlap data fork blocks even if the blocks
+ * aren't shared. COW I/O always takes precedence, so we must always
+ * check for overlap on reflink inodes unless the mapping is already a
+ * COW one, or the COW fork hasn't changed from the last time we looked
+ * at it.
+ *
+ * It's safe to check the COW fork if_seq here without the ILOCK because
+ * we've indirectly protected against concurrent updates: writeback has
+ * the page locked, which prevents concurrent invalidations by reflink
+ * and directio and prevents concurrent buffered writes to the same
+ * page. Changes to if_seq always happen under i_lock, which protects
+ * against concurrent updates and provides a memory barrier on the way
+ * out that ensures that we always see the current value.
*/
- if (type == XFS_IO_COW && offset > i_size_read(inode))
+ imap_valid = offset_fsb >= wpc->imap.br_startoff &&
+ offset_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount;
+ if (imap_valid &&
+ (!xfs_inode_has_cow_data(ip) ||
+ wpc->io_type == XFS_IO_COW ||
+ wpc->cow_seq == READ_ONCE(ip->i_cowfp->if_seq)))
return 0;
- ASSERT(type != XFS_IO_COW);
- if (type == XFS_IO_UNWRITTEN)
- bmapi_flags |= XFS_BMAPI_IGSTATE;
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+ /*
+ * If we don't have a valid map, now it's time to get a new one for this
+ * offset. This will convert delayed allocations (including COW ones)
+ * into real extents. If we return without a valid map, it means we
+ * landed in a hole and we skip the block.
+ */
xfs_ilock(ip, XFS_ILOCK_SHARED);
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
(ip->i_df.if_flags & XFS_IFEXTENTS));
@@ -404,109 +371,96 @@ xfs_map_blocks(
if (offset > mp->m_super->s_maxbytes - count)
count = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
- imap, &nimaps, bmapi_flags);
+
/*
- * Truncate an overwrite extent if there's a pending CoW
- * reservation before the end of this extent. This forces us
- * to come back to writepage to take care of the CoW.
+ * Check if this offset is covered by a COW extent, and if so use
+ * it directly instead of looking up anything in the data fork.
*/
- if (nimaps && type == XFS_IO_OVERWRITE)
- xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- if (error)
- return error;
-
- if (type == XFS_IO_DELALLOC &&
- (!nimaps || isnullstartblock(imap->br_startblock))) {
- error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
- imap);
- if (!error)
- trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
- return error;
- }
-
-#ifdef DEBUG
- if (type == XFS_IO_UNWRITTEN) {
- ASSERT(nimaps);
- ASSERT(imap->br_startblock != HOLESTARTBLOCK);
- ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+ if (xfs_inode_has_cow_data(ip) &&
+ xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
+ cow_fsb = imap.br_startoff;
+ if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
+ wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ /*
+ * Truncate can race with writeback since writeback doesn't
+ * take the iolock and truncate decreases the file size before
+ * it starts truncating the pages between new_size and old_size.
+ * Therefore, we can end up in the situation where writeback
+ * gets a CoW fork mapping but the truncate makes the mapping
+ * invalid and we end up in here trying to get a new mapping.
+ * Bail out here so that we simply never get a valid mapping
+ * and so we drop the write altogether. The page truncation
+ * will kill the contents anyway.
+ */
+ if (offset > i_size_read(inode)) {
+ wpc->io_type = XFS_IO_HOLE;
+ return 0;
+ }
+ whichfork = XFS_COW_FORK;
+ wpc->io_type = XFS_IO_COW;
+ goto allocate_blocks;
}
-#endif
- if (nimaps)
- trace_xfs_map_blocks_found(ip, offset, count, type, imap);
- return 0;
-}
-
-STATIC bool
-xfs_imap_valid(
- struct inode *inode,
- struct xfs_bmbt_irec *imap,
- xfs_off_t offset)
-{
- offset >>= inode->i_blkbits;
/*
- * We have to make sure the cached mapping is within EOF to protect
- * against eofblocks trimming on file release leaving us with a stale
- * mapping. Otherwise, a page for a subsequent file extending buffered
- * write could get picked up by this writeback cycle and written to the
- * wrong blocks.
- *
- * Note that what we really want here is a generic mapping invalidation
- * mechanism to protect us from arbitrary extent modifying contexts, not
- * just eofblocks.
+ * Map valid and no COW extent in the way? We're done.
*/
- xfs_trim_extent_eof(imap, XFS_I(inode));
-
- return offset >= imap->br_startoff &&
- offset < imap->br_startoff + imap->br_blockcount;
-}
-
-STATIC void
-xfs_start_buffer_writeback(
- struct buffer_head *bh)
-{
- ASSERT(buffer_mapped(bh));
- ASSERT(buffer_locked(bh));
- ASSERT(!buffer_delay(bh));
- ASSERT(!buffer_unwritten(bh));
-
- bh->b_end_io = NULL;
- set_buffer_async_write(bh);
- set_buffer_uptodate(bh);
- clear_buffer_dirty(bh);
-}
-
-STATIC void
-xfs_start_page_writeback(
- struct page *page,
- int clear_dirty)
-{
- ASSERT(PageLocked(page));
- ASSERT(!PageWriteback(page));
+ if (imap_valid) {
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ return 0;
+ }
/*
- * if the page was not fully cleaned, we need to ensure that the higher
- * layers come back to it correctly. That means we need to keep the page
- * dirty, and for WB_SYNC_ALL writeback we need to ensure the
- * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
- * write this page in this writeback sweep will be made.
+ * If we don't have a valid map, now it's time to get a new one for this
+ * offset. This will convert delayed allocations (including COW ones)
+ * into real extents.
*/
- if (clear_dirty) {
- clear_page_dirty_for_io(page);
- set_page_writeback(page);
- } else
- set_page_writeback_keepwrite(page);
+ if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
+ imap.br_startoff = end_fsb; /* fake a hole past EOF */
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
- unlock_page(page);
-}
+ if (imap.br_startoff > offset_fsb) {
+ /* landed in a hole or beyond EOF */
+ imap.br_blockcount = imap.br_startoff - offset_fsb;
+ imap.br_startoff = offset_fsb;
+ imap.br_startblock = HOLESTARTBLOCK;
+ wpc->io_type = XFS_IO_HOLE;
+ } else {
+ /*
+ * Truncate to the next COW extent if there is one. This is the
+ * only opportunity to do this because we can skip COW fork
+ * lookups for the subsequent blocks in the mapping; however,
+ * the requirement to treat the COW range separately remains.
+ */
+ if (cow_fsb != NULLFILEOFF &&
+ cow_fsb < imap.br_startoff + imap.br_blockcount)
+ imap.br_blockcount = cow_fsb - imap.br_startoff;
+
+ if (isnullstartblock(imap.br_startblock)) {
+ /* got a delalloc extent */
+ wpc->io_type = XFS_IO_DELALLOC;
+ goto allocate_blocks;
+ }
-static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
-{
- return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+ if (imap.br_state == XFS_EXT_UNWRITTEN)
+ wpc->io_type = XFS_IO_UNWRITTEN;
+ else
+ wpc->io_type = XFS_IO_OVERWRITE;
+ }
+
+ wpc->imap = imap;
+ trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
+ return 0;
+allocate_blocks:
+ error = xfs_iomap_write_allocate(ip, whichfork, offset, &imap,
+ &wpc->cow_seq);
+ if (error)
+ return error;
+ ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
+ imap.br_startoff + imap.br_blockcount <= cow_fsb);
+ wpc->imap = imap;
+ trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
+ return 0;
}
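The rewritten xfs_map_blocks revalidates its cached mapping on two axes: the offset must fall inside the cached extent, and on reflink inodes the cached COW fork sequence number (cow_seq) must still match the fork's current if_seq. A stripped-down sketch of that cache-plus-sequence check; all types and fields here are hypothetical stand-ins:

	#include <stdbool.h>
	#include <stdint.h>

	struct mapping {
		uint64_t start;		/* first block covered */
		uint64_t count;		/* number of blocks */
	};

	struct map_cache {
		struct mapping map;
		uint32_t seq;		/* publisher's seq when map was cached */
	};

	/* Trust the cache only if the offset is in range and the publisher
	 * has not bumped its sequence number since we cached the map. */
	static bool cache_valid(const struct map_cache *c, uint64_t off,
				uint32_t current_seq)
	{
		bool in_range = off >= c->map.start &&
				off < c->map.start + c->map.count;

		return in_range && c->seq == current_seq;
	}

	int main(void)
	{
		struct map_cache c = { .map = { .start = 8, .count = 4 }, .seq = 42 };

		return !(cache_valid(&c, 10, 42) &&	/* hit: in range, seq unchanged */
			 !cache_valid(&c, 10, 43));	/* miss: publisher bumped seq */
	}

On a miss, the caller drops the cached map and repeats the extent lookup under the lock, exactly as the code above falls through to xfs_iext_lookup_extent().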
/*
@@ -574,27 +528,20 @@ xfs_submit_ioend(
return 0;
}
-static void
-xfs_init_bio_from_bh(
- struct bio *bio,
- struct buffer_head *bh)
-{
- bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio_set_dev(bio, bh->b_bdev);
-}
-
static struct xfs_ioend *
xfs_alloc_ioend(
struct inode *inode,
unsigned int type,
xfs_off_t offset,
- struct buffer_head *bh)
+ struct block_device *bdev,
+ sector_t sector)
{
struct xfs_ioend *ioend;
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
- xfs_init_bio_from_bh(bio, bh);
+ bio_set_dev(bio, bdev);
+ bio->bi_iter.bi_sector = sector;
ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
INIT_LIST_HEAD(&ioend->io_list);
@@ -619,13 +566,14 @@ static void
xfs_chain_bio(
struct xfs_ioend *ioend,
struct writeback_control *wbc,
- struct buffer_head *bh)
+ struct block_device *bdev,
+ sector_t sector)
{
struct bio *new;
new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
- xfs_init_bio_from_bh(new, bh);
-
+ bio_set_dev(new, bdev);
+ new->bi_iter.bi_sector = sector;
bio_chain(ioend->io_bio, new);
bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
@@ -635,122 +583,47 @@ xfs_chain_bio(
}
/*
- * Test to see if we've been building up a completion structure for
- * earlier buffers -- if so, we try to append to this ioend if we
- * can, otherwise we finish off any current ioend and start another.
- * Return the ioend we finished off so that the caller can submit it
- * once it has finished processing the dirty page.
+ * Test to see if we have an existing ioend structure that we could append to
+ * first, otherwise finish off the current ioend and start another.
*/
STATIC void
xfs_add_to_ioend(
struct inode *inode,
- struct buffer_head *bh,
xfs_off_t offset,
+ struct page *page,
+ struct iomap_page *iop,
struct xfs_writepage_ctx *wpc,
struct writeback_control *wbc,
struct list_head *iolist)
{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ struct block_device *bdev = xfs_find_bdev_for_inode(inode);
+ unsigned len = i_blocksize(inode);
+ unsigned poff = offset & (PAGE_SIZE - 1);
+ sector_t sector;
+
+ sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
+ ((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
+
if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
- bh->b_blocknr != wpc->last_block + 1 ||
+ sector != bio_end_sector(wpc->ioend->io_bio) ||
offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
if (wpc->ioend)
list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
+ wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset,
+ bdev, sector);
}
- /*
- * If the buffer doesn't fit into the bio we need to allocate a new
- * one. This shouldn't happen more than once for a given buffer.
- */
- while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
- xfs_chain_bio(wpc->ioend, wbc, bh);
-
- wpc->ioend->io_size += bh->b_size;
- wpc->last_block = bh->b_blocknr;
- xfs_start_buffer_writeback(bh);
-}
-
-STATIC void
-xfs_map_buffer(
- struct inode *inode,
- struct buffer_head *bh,
- struct xfs_bmbt_irec *imap,
- xfs_off_t offset)
-{
- sector_t bn;
- struct xfs_mount *m = XFS_I(inode)->i_mount;
- xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
- xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
-
- ASSERT(imap->br_startblock != HOLESTARTBLOCK);
- ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
-
- bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
- ((offset - iomap_offset) >> inode->i_blkbits);
-
- ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
-
- bh->b_blocknr = bn;
- set_buffer_mapped(bh);
-}
-
-STATIC void
-xfs_map_at_offset(
- struct inode *inode,
- struct buffer_head *bh,
- struct xfs_bmbt_irec *imap,
- xfs_off_t offset)
-{
- ASSERT(imap->br_startblock != HOLESTARTBLOCK);
- ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
-
- xfs_map_buffer(inode, bh, imap, offset);
- set_buffer_mapped(bh);
- clear_buffer_delay(bh);
- clear_buffer_unwritten(bh);
-}
-
-/*
- * Test if a given page contains at least one buffer of a given @type.
- * If @check_all_buffers is true, then we walk all the buffers in the page to
- * try to find one of the type passed in. If it is not set, then the caller only
- * needs to check the first buffer on the page for a match.
- */
-STATIC bool
-xfs_check_page_type(
- struct page *page,
- unsigned int type,
- bool check_all_buffers)
-{
- struct buffer_head *bh;
- struct buffer_head *head;
-
- if (PageWriteback(page))
- return false;
- if (!page->mapping)
- return false;
- if (!page_has_buffers(page))
- return false;
-
- bh = head = page_buffers(page);
- do {
- if (buffer_unwritten(bh)) {
- if (type == XFS_IO_UNWRITTEN)
- return true;
- } else if (buffer_delay(bh)) {
- if (type == XFS_IO_DELALLOC)
- return true;
- } else if (buffer_dirty(bh) && buffer_mapped(bh)) {
- if (type == XFS_IO_OVERWRITE)
- return true;
- }
-
- /* If we are only checking the first buffer, we are done now. */
- if (!check_all_buffers)
- break;
- } while ((bh = bh->b_this_page) != head);
+ if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff)) {
+ if (iop)
+ atomic_inc(&iop->write_count);
+ if (bio_full(wpc->ioend->io_bio))
+ xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
+ __bio_add_page(wpc->ioend->io_bio, page, len, poff);
+ }
- return false;
+ wpc->ioend->io_size += len;
}
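The sector computation above takes the disk address of the extent's first block and adds the byte distance from the start of the extent, converted to 512-byte sectors by the >> 9, which matches the units of bio->bi_iter.bi_sector. A worked example with made-up constants:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t blocksize = 4096;		/* fs block size in bytes */
		uint64_t extent_start_block = 100;	/* extent start, in fs blocks */
		uint64_t extent_start_sector = 81920;	/* its disk address, in sectors */
		uint64_t file_offset = 100 * blocksize + 8192; /* byte being written */

		/* 8192 bytes past the extent start = 16 sectors */
		uint64_t sector = extent_start_sector +
			((file_offset - extent_start_block * blocksize) >> 9);

		printf("sector %llu\n", (unsigned long long)sector); /* 81936 */
		return 0;
	}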
STATIC void
@@ -759,34 +632,20 @@ xfs_vm_invalidatepage(
unsigned int offset,
unsigned int length)
{
- trace_xfs_invalidatepage(page->mapping->host, page, offset,
- length);
-
- /*
- * If we are invalidating the entire page, clear the dirty state from it
- * so that we can check for attempts to release dirty cached pages in
- * xfs_vm_releasepage().
- */
- if (offset == 0 && length >= PAGE_SIZE)
- cancel_dirty_page(page);
- block_invalidatepage(page, offset, length);
+ trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
+ iomap_invalidatepage(page, offset, length);
}
/*
- * If the page has delalloc buffers on it, we need to punch them out before we
- * invalidate the page. If we don't, we leave a stale delalloc mapping on the
- * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
- * is done on that same region - the delalloc extent is returned when none is
- * supposed to be there.
- *
- * We prevent this by truncating away the delalloc regions on the page before
- * invalidating it. Because they are delalloc, we can do this without needing a
- * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
- * truncation without a transaction as there is no space left for block
- * reservation (typically why we see a ENOSPC in writeback).
+ * If the page has delalloc blocks on it, we need to punch them out before we
+ * invalidate the page. If we don't, we leave a stale delalloc mapping on the
+ * inode that can trip up a later direct I/O read operation on the same region.
*
- * This is not a performance critical path, so for now just do the punching a
- * buffer head at a time.
+ * We prevent this by truncating away the delalloc regions on the page. Because
+ * they are delalloc, we can do this without needing a transaction. Indeed - if
+ * we get ENOSPC errors, we have to be able to do this truncation without a
+ * transaction as there is no space left for block reservation (typically why
+ * we see an ENOSPC in writeback).
*/
STATIC void
xfs_aops_discard_page(
@@ -794,104 +653,31 @@ xfs_aops_discard_page(
{
struct inode *inode = page->mapping->host;
struct xfs_inode *ip = XFS_I(inode);
- struct buffer_head *bh, *head;
+ struct xfs_mount *mp = ip->i_mount;
loff_t offset = page_offset(page);
+ xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, offset);
+ int error;
- if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
- goto out_invalidate;
-
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ if (XFS_FORCED_SHUTDOWN(mp))
goto out_invalidate;
- xfs_alert(ip->i_mount,
+ xfs_alert(mp,
"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
page, ip->i_ino, offset);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- bh = head = page_buffers(page);
- do {
- int error;
- xfs_fileoff_t start_fsb;
-
- if (!buffer_delay(bh))
- goto next_buffer;
-
- start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
- error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
- if (error) {
- /* something screwed, just bail */
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_alert(ip->i_mount,
- "page discard unable to remove delalloc mapping.");
- }
- break;
- }
-next_buffer:
- offset += i_blocksize(inode);
-
- } while ((bh = bh->b_this_page) != head);
-
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
+ PAGE_SIZE / i_blocksize(inode));
+ if (error && !XFS_FORCED_SHUTDOWN(mp))
+ xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
- return;
-}
-
-static int
-xfs_map_cow(
- struct xfs_writepage_ctx *wpc,
- struct inode *inode,
- loff_t offset,
- unsigned int *new_type)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_bmbt_irec imap;
- bool is_cow = false;
- int error;
-
- /*
- * If we already have a valid COW mapping keep using it.
- */
- if (wpc->io_type == XFS_IO_COW) {
- wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
- if (wpc->imap_valid) {
- *new_type = XFS_IO_COW;
- return 0;
- }
- }
-
- /*
- * Else we need to check if there is a COW mapping at this offset.
- */
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- if (!is_cow)
- return 0;
-
- /*
- * And if the COW mapping has a delayed extent here we need to
- * allocate real space for it now.
- */
- if (isnullstartblock(imap.br_startblock)) {
- error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
- &imap);
- if (error)
- return error;
- }
-
- wpc->io_type = *new_type = XFS_IO_COW;
- wpc->imap_valid = true;
- wpc->imap = imap;
- return 0;
}
/*
* We implement an immediate ioend submission policy here to avoid needing to
* chain multiple ioends and hence nest mempool allocations which can violate
* forward progress guarantees we need to provide. The current ioend we are
- * adding buffers to is cached on the writepage context, and if the new buffer
+ * adding blocks to is cached on the writepage context, and if the new block
* does not append to the cached ioend it will create a new ioend and cache that
* instead.
*
@@ -912,138 +698,99 @@ xfs_writepage_map(
uint64_t end_offset)
{
LIST_HEAD(submit_list);
+ struct iomap_page *iop = to_iomap_page(page);
+ unsigned len = i_blocksize(inode);
struct xfs_ioend *ioend, *next;
- struct buffer_head *bh, *head;
- ssize_t len = i_blocksize(inode);
- uint64_t offset;
- int error = 0;
- int count = 0;
- int uptodate = 1;
- unsigned int new_type;
-
- bh = head = page_buffers(page);
- offset = page_offset(page);
- do {
- if (offset >= end_offset)
- break;
- if (!buffer_uptodate(bh))
- uptodate = 0;
+ uint64_t file_offset; /* file offset of page */
+ int error = 0, count = 0, i;
- /*
- * set_page_dirty dirties all buffers in a page, independent
- * of their state. The dirty state however is entirely
- * meaningless for holes (!mapped && uptodate), so skip
- * buffers covering holes here.
- */
- if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
- wpc->imap_valid = false;
- continue;
- }
+ ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
+ ASSERT(!iop || atomic_read(&iop->write_count) == 0);
- if (buffer_unwritten(bh))
- new_type = XFS_IO_UNWRITTEN;
- else if (buffer_delay(bh))
- new_type = XFS_IO_DELALLOC;
- else if (buffer_uptodate(bh))
- new_type = XFS_IO_OVERWRITE;
- else {
- if (PageUptodate(page))
- ASSERT(buffer_mapped(bh));
- /*
- * This buffer is not uptodate and will not be
- * written to disk. Ensure that we will put any
- * subsequent writeable buffers into a new
- * ioend.
- */
- wpc->imap_valid = false;
+ /*
+ * Walk through the page to find areas to write back. If we run off the
+ * end of the current map or find the current map invalid, grab a new
+ * one.
+ */
+ for (i = 0, file_offset = page_offset(page);
+ i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
+ i++, file_offset += len) {
+ if (iop && !test_bit(i, iop->uptodate))
continue;
- }
- if (xfs_is_reflink_inode(XFS_I(inode))) {
- error = xfs_map_cow(wpc, inode, offset, &new_type);
- if (error)
- goto out;
- }
-
- if (wpc->io_type != new_type) {
- wpc->io_type = new_type;
- wpc->imap_valid = false;
- }
-
- if (wpc->imap_valid)
- wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
- offset);
- if (!wpc->imap_valid) {
- error = xfs_map_blocks(inode, offset, &wpc->imap,
- wpc->io_type);
- if (error)
- goto out;
- wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
- offset);
- }
- if (wpc->imap_valid) {
- lock_buffer(bh);
- if (wpc->io_type != XFS_IO_OVERWRITE)
- xfs_map_at_offset(inode, bh, &wpc->imap, offset);
- xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
- count++;
- }
-
- } while (offset += len, ((bh = bh->b_this_page) != head));
-
- if (uptodate && bh == head)
- SetPageUptodate(page);
+ error = xfs_map_blocks(wpc, inode, file_offset);
+ if (error)
+ break;
+ if (wpc->io_type == XFS_IO_HOLE)
+ continue;
+ xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
+ &submit_list);
+ count++;
+ }
ASSERT(wpc->ioend || list_empty(&submit_list));
+ ASSERT(PageLocked(page));
+ ASSERT(!PageWriteback(page));
-out:
/*
- * On error, we have to fail the ioend here because we have locked
- * buffers in the ioend. If we don't do this, we'll deadlock
- * invalidating the page as that tries to lock the buffers on the page.
- * Also, because we may have set pages under writeback, we have to make
- * sure we run IO completion to mark the error state of the IO
- * appropriately, so we can't cancel the ioend directly here. That means
- * we have to mark this page as under writeback if we included any
- * buffers from it in the ioend chain so that completion treats it
- * correctly.
+ * On error, we have to fail the ioend here because we may have set
+ * pages under writeback. We have to make sure we run IO completion to
+ * mark the error state of the IO appropriately, so we can't cancel the
+ * ioend directly here. That means we have to mark this page as under
+ * writeback if we included any blocks from it in the ioend chain so
+ * that completion treats it correctly.
*
* If we didn't include the page in the ioend, then on error we can
* simply discard and unlock it as there are no other users of the page
- * or it's buffers right now. The caller will still need to trigger
- * submission of outstanding ioends on the writepage context so they are
- * treated correctly on error.
+ * now. The caller will still need to trigger submission of outstanding
+ * ioends on the writepage context so they are treated correctly on
+ * error.
*/
- if (count) {
- xfs_start_page_writeback(page, !error);
+ if (unlikely(error)) {
+ if (!count) {
+ xfs_aops_discard_page(page);
+ ClearPageUptodate(page);
+ unlock_page(page);
+ goto done;
+ }
/*
- * Preserve the original error if there was one, otherwise catch
- * submission errors here and propagate into subsequent ioend
- * submissions.
+ * If the page was not fully cleaned, we need to ensure that the
+ * higher layers come back to it correctly. That means we need
+ * to keep the page dirty, and for WB_SYNC_ALL writeback we need
+ * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
+ * so another attempt to write this page in this writeback sweep
+ * will be made.
*/
- list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
- int error2;
-
- list_del_init(&ioend->io_list);
- error2 = xfs_submit_ioend(wbc, ioend, error);
- if (error2 && !error)
- error = error2;
- }
- } else if (error) {
- xfs_aops_discard_page(page);
- ClearPageUptodate(page);
- unlock_page(page);
+ set_page_writeback_keepwrite(page);
} else {
- /*
- * We can end up here with no error and nothing to write if we
- * race with a partial page truncate on a sub-page block sized
- * filesystem. In that case we need to mark the page clean.
- */
- xfs_start_page_writeback(page, 1);
- end_page_writeback(page);
+ clear_page_dirty_for_io(page);
+ set_page_writeback(page);
}
+ unlock_page(page);
+
+ /*
+ * Preserve the original error if there was one, otherwise catch
+ * submission errors here and propagate into subsequent ioend
+ * submissions.
+ */
+ list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
+ int error2;
+
+ list_del_init(&ioend->io_list);
+ error2 = xfs_submit_ioend(wbc, ioend, error);
+ if (error2 && !error)
+ error = error2;
+ }
+
+ /*
+ * We can end up here with no error and nothing to write only if we race
+ * with a partial page truncate on a sub-page block sized filesystem.
+ */
+ if (!count)
+ end_page_writeback(page);
+done:
mapping_set_error(page->mapping, error);
return error;
}
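
The rewritten xfs_writepage_map() loop above walks the page block by block: per-block uptodate state comes from the per-page iomap structure, and blocks that xfs_map_blocks() reports as holes are skipped rather than mapped. A minimal standalone sketch of that control flow — illustrative names and a made-up four-block page, not the kernel's helpers:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096u
#define BLOCK_SIZE	1024u			/* a sub-page block size */
#define BLOCKS_PER_PAGE	(PAGE_SIZE / BLOCK_SIZE)

enum io_type { IO_HOLE, IO_OVERWRITE };

struct page_state {				/* stand-in for iop->uptodate */
	unsigned long uptodate;			/* one bit per block */
};

static int test_bit(unsigned int i, unsigned long bits)
{
	return (bits >> i) & 1;
}

static enum io_type map_block(unsigned int i)	/* stand-in for xfs_map_blocks */
{
	return i == 2 ? IO_HOLE : IO_OVERWRITE;	/* block 2 is a hole here */
}

int main(void)
{
	struct page_state iop = { .uptodate = 0xdUL };	/* blocks 0, 2, 3 */
	uint64_t file_offset = 0;
	unsigned int i, count = 0;

	for (i = 0; i < BLOCKS_PER_PAGE; i++, file_offset += BLOCK_SIZE) {
		if (!test_bit(i, iop.uptodate))
			continue;		/* never read in: nothing to write */
		if (map_block(i) == IO_HOLE)
			continue;		/* no allocation behind this block */
		printf("add block %u (offset %llu) to ioend\n",
		       i, (unsigned long long)file_offset);
		count++;
	}
	printf("%u blocks queued for writeback\n", count);
	return 0;
}

Only blocks 0 and 3 end up in the ioend: block 1 fails the uptodate test and block 2 maps to a hole, mirroring the two continue paths in the new loop.
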
@@ -1054,7 +801,6 @@ out:
* For delalloc space on the page we need to allocate space and flush it.
* For unwritten space on the page we need to start the conversion to
* regular allocated space.
- * For any other dirty buffer heads on the page we should flush them.
*/
STATIC int
xfs_do_writepage(
@@ -1070,8 +816,6 @@ xfs_do_writepage(
trace_xfs_writepage(inode, page, 0, 0);
- ASSERT(page_has_buffers(page));
-
/*
* Refuse to write the page out if we are called from reclaim context.
*
@@ -1210,166 +954,13 @@ xfs_dax_writepages(
xfs_find_bdev_for_inode(mapping->host), wbc);
}
-/*
- * Called to move a page into cleanable state - and from there
- * to be released. The page should already be clean. We always
- * have buffer heads in this call.
- *
- * Returns 1 if the page is ok to release, 0 otherwise.
- */
STATIC int
xfs_vm_releasepage(
struct page *page,
gfp_t gfp_mask)
{
- int delalloc, unwritten;
-
trace_xfs_releasepage(page->mapping->host, page, 0, 0);
-
- /*
- * mm accommodates an old ext3 case where clean pages might not have had
- * the dirty bit cleared. Thus, it can send actual dirty pages to
- * ->releasepage() via shrink_active_list(). Conversely,
- * block_invalidatepage() can send pages that are still marked dirty but
- * otherwise have invalidated buffers.
- *
- * We want to release the latter to avoid unnecessary buildup of the
- * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
- * that are entirely invalidated and need to be released. Hence the
- * only time we should get dirty pages here is through
- * shrink_active_list() and so we can simply skip those now.
- *
- * warn if we've left any lingering delalloc/unwritten buffers on clean
- * or invalidated pages we are about to release.
- */
- if (PageDirty(page))
- return 0;
-
- xfs_count_page_state(page, &delalloc, &unwritten);
-
- if (WARN_ON_ONCE(delalloc))
- return 0;
- if (WARN_ON_ONCE(unwritten))
- return 0;
-
- return try_to_free_buffers(page);
-}
-
-/*
- * If this is O_DIRECT or the mpage code calling tell them how large the mapping
- * is, so that we can avoid repeated get_blocks calls.
- *
- * If the mapping spans EOF, then we have to break the mapping up as the mapping
- * for blocks beyond EOF must be marked new so that sub block regions can be
- * correctly zeroed. We can't do this for mappings within EOF unless the mapping
- * was just allocated or is unwritten, otherwise the callers would overwrite
- * existing data with zeros. Hence we have to split the mapping into a range up
- * to and including EOF, and a second mapping for beyond EOF.
- */
-static void
-xfs_map_trim_size(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- struct xfs_bmbt_irec *imap,
- xfs_off_t offset,
- ssize_t size)
-{
- xfs_off_t mapping_size;
-
- mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
- mapping_size <<= inode->i_blkbits;
-
- ASSERT(mapping_size > 0);
- if (mapping_size > size)
- mapping_size = size;
- if (offset < i_size_read(inode) &&
- (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
- /* limit mapping to block that spans EOF */
- mapping_size = roundup_64(i_size_read(inode) - offset,
- i_blocksize(inode));
- }
- if (mapping_size > LONG_MAX)
- mapping_size = LONG_MAX;
-
- bh_result->b_size = mapping_size;
-}
-
-static int
-xfs_get_blocks(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- int create)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb, end_fsb;
- int error = 0;
- int lockmode = 0;
- struct xfs_bmbt_irec imap;
- int nimaps = 1;
- xfs_off_t offset;
- ssize_t size;
-
- BUG_ON(create);
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- offset = (xfs_off_t)iblock << inode->i_blkbits;
- ASSERT(bh_result->b_size >= i_blocksize(inode));
- size = bh_result->b_size;
-
- if (offset >= i_size_read(inode))
- return 0;
-
- /*
- * Direct I/O is usually done on preallocated files, so try getting
- * a block mapping without an exclusive lock first.
- */
- lockmode = xfs_ilock_data_map_shared(ip);
-
- ASSERT(offset <= mp->m_super->s_maxbytes);
- if (offset > mp->m_super->s_maxbytes - size)
- size = mp->m_super->s_maxbytes - offset;
- end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
-
- error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
- &nimaps, 0);
- if (error)
- goto out_unlock;
- if (!nimaps) {
- trace_xfs_get_blocks_notfound(ip, offset, size);
- goto out_unlock;
- }
-
- trace_xfs_get_blocks_found(ip, offset, size,
- imap.br_state == XFS_EXT_UNWRITTEN ?
- XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
- xfs_iunlock(ip, lockmode);
-
- /* trim mapping down to size requested */
- xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);
-
- /*
- * For unwritten extents do not report a disk address in the buffered
- * read case (treat as if we're reading into a hole).
- */
- if (xfs_bmap_is_real_extent(&imap))
- xfs_map_buffer(inode, bh_result, &imap, offset);
-
- /*
- * If this is a realtime file, data may be on a different device.
- * to that pointed to from the buffer_head b_bdev currently.
- */
- bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
- return 0;
-
-out_unlock:
- xfs_iunlock(ip, lockmode);
- return error;
+ return iomap_releasepage(page, gfp_mask);
}
STATIC sector_t
@@ -1401,7 +992,7 @@ xfs_vm_readpage(
struct page *page)
{
trace_xfs_vm_readpage(page->mapping->host, 1);
- return mpage_readpage(page, xfs_get_blocks);
+ return iomap_readpage(page, &xfs_iomap_ops);
}
STATIC int
@@ -1412,63 +1003,7 @@ xfs_vm_readpages(
unsigned nr_pages)
{
trace_xfs_vm_readpages(mapping->host, nr_pages);
- return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
-}
-
-/*
- * This is basically a copy of __set_page_dirty_buffers() with one
- * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
- * dirty, we'll never be able to clean them because we don't write buffers
- * beyond EOF, and that means we can't invalidate pages that span EOF
- * that have been marked dirty. Further, the dirty state can leak into
- * the file interior if the file is extended, resulting in all sorts of
- * bad things happening as the state does not match the underlying data.
- *
- * XXX: this really indicates that bufferheads in XFS need to die. Warts like
- * this only exist because of bufferheads and how the generic code manages them.
- */
-STATIC int
-xfs_vm_set_page_dirty(
- struct page *page)
-{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
- loff_t end_offset;
- loff_t offset;
- int newly_dirty;
-
- if (unlikely(!mapping))
- return !TestSetPageDirty(page);
-
- end_offset = i_size_read(inode);
- offset = page_offset(page);
-
- spin_lock(&mapping->private_lock);
- if (page_has_buffers(page)) {
- struct buffer_head *head = page_buffers(page);
- struct buffer_head *bh = head;
-
- do {
- if (offset < end_offset)
- set_buffer_dirty(bh);
- bh = bh->b_this_page;
- offset += i_blocksize(inode);
- } while (bh != head);
- }
- /*
- * Lock out page->mem_cgroup migration to keep PageDirty
- * synchronized with per-memcg dirty page counters.
- */
- lock_page_memcg(page);
- newly_dirty = !TestSetPageDirty(page);
- spin_unlock(&mapping->private_lock);
-
- if (newly_dirty)
- __set_page_dirty(page, mapping, 1);
- unlock_page_memcg(page);
- if (newly_dirty)
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- return newly_dirty;
+ return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}
static int
@@ -1486,13 +1021,13 @@ const struct address_space_operations xfs_address_space_operations = {
.readpages = xfs_vm_readpages,
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
- .set_page_dirty = xfs_vm_set_page_dirty,
+ .set_page_dirty = iomap_set_page_dirty,
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
.bmap = xfs_vm_bmap,
.direct_IO = noop_direct_IO,
- .migratepage = buffer_migrate_page,
- .is_partially_uptodate = block_is_partially_uptodate,
+ .migratepage = iomap_migrate_page,
+ .is_partially_uptodate = iomap_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
.swap_activate = xfs_iomap_swapfile_activate,
};
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 25bc6d4a1231..9af867951a10 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -17,6 +17,7 @@ enum {
XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
XFS_IO_OVERWRITE, /* covers already allocated extent */
XFS_IO_COW, /* covers copy-on-write extent */
+ XFS_IO_HOLE, /* covers region without any block allocation */
};
#define XFS_IO_TYPES \
@@ -24,7 +25,8 @@ enum {
{ XFS_IO_DELALLOC, "delalloc" }, \
{ XFS_IO_UNWRITTEN, "unwritten" }, \
{ XFS_IO_OVERWRITE, "overwrite" }, \
- { XFS_IO_COW, "CoW" }
+ { XFS_IO_COW, "CoW" }, \
+ { XFS_IO_HOLE, "hole" }
/*
* Structure for buffered I/O completions.
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 7ce10055f275..228821b2ebe0 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -26,6 +26,7 @@
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_dir2.h"
+#include "xfs_defer.h"
/*
* Look at all the extents for this logical region,
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index f9ca80154c9c..a58034049995 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -87,7 +87,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
*/
if (context->bufsize == 0 ||
(XFS_ISRESET_CURSOR(cursor) &&
- (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
+ (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
context->put_listent(context,
sfe->flags,
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 956ebd583e27..ce45f066995e 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -375,9 +375,8 @@ xfs_bud_init(
*/
int
xfs_bui_recover(
- struct xfs_mount *mp,
- struct xfs_bui_log_item *buip,
- struct xfs_defer_ops *dfops)
+ struct xfs_trans *parent_tp,
+ struct xfs_bui_log_item *buip)
{
int error = 0;
unsigned int bui_type;
@@ -393,6 +392,7 @@ xfs_bui_recover(
struct xfs_trans *tp;
struct xfs_inode *ip = NULL;
struct xfs_bmbt_irec irec;
+ struct xfs_mount *mp = parent_tp->t_mountp;
ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
@@ -441,6 +441,12 @@ xfs_bui_recover(
XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
if (error)
return error;
+ /*
+ * Recovery stashes all deferred ops during intent processing and
+ * finishes them on completion. Transfer current dfops state to this
+ * transaction and transfer the result back before we return.
+ */
+ xfs_defer_move(tp, parent_tp);
budp = xfs_trans_get_bud(tp, buip);
/* Grab the inode. */
@@ -469,9 +475,8 @@ xfs_bui_recover(
xfs_trans_ijoin(tp, ip, 0);
count = bmap->me_len;
- error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
- ip, whichfork, bmap->me_startoff,
- bmap->me_startblock, &count, state);
+ error = xfs_trans_log_finish_bmap_update(tp, budp, type, ip, whichfork,
+ bmap->me_startoff, bmap->me_startblock, &count, state);
if (error)
goto err_inode;
@@ -481,23 +486,25 @@ xfs_bui_recover(
irec.br_blockcount = count;
irec.br_startoff = bmap->me_startoff;
irec.br_state = state;
- error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
+ error = xfs_bmap_unmap_extent(tp, ip, &irec);
if (error)
goto err_inode;
}
set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
+ xfs_defer_move(parent_tp, tp);
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- IRELE(ip);
+ xfs_irele(ip);
return error;
err_inode:
+ xfs_defer_move(parent_tp, tp);
xfs_trans_cancel(tp);
if (ip) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- IRELE(ip);
+ xfs_irele(ip);
}
return error;
}
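
The xfs_defer_move() calls added above bracket the recovery work: pending deferred ops move from the parent recovery transaction into the new BUI transaction, and move back before commit or cancel. A toy standalone model of that ownership handoff, with invented names:

#include <stdio.h>

struct txn {
	int ndefer;			/* pending deferred ops */
};

static void defer_move(struct txn *dst, struct txn *src)
{
	dst->ndefer += src->ndefer;	/* ownership moves wholesale */
	src->ndefer = 0;
}

int main(void)
{
	struct txn parent = { .ndefer = 3 };	/* ops stashed during recovery */
	struct txn tp = { .ndefer = 0 };	/* the new BUI transaction */

	defer_move(&tp, &parent);	/* xfs_defer_move(tp, parent_tp) */
	tp.ndefer += 1;			/* the unmap queues more work */
	defer_move(&parent, &tp);	/* hand everything back pre-commit */

	printf("parent owns %d deferred ops, tp owns %d\n",
	       parent.ndefer, tp.ndefer);
	return 0;
}
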
diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h
index fd1a1b13df51..89e043a88bb8 100644
--- a/fs/xfs/xfs_bmap_item.h
+++ b/fs/xfs/xfs_bmap_item.h
@@ -79,7 +79,6 @@ struct xfs_bud_log_item *xfs_bud_init(struct xfs_mount *,
struct xfs_bui_log_item *);
void xfs_bui_item_free(struct xfs_bui_log_item *);
void xfs_bui_release(struct xfs_bui_log_item *);
-int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip,
- struct xfs_defer_ops *dfops);
+int xfs_bui_recover(struct xfs_trans *parent_tp, struct xfs_bui_log_item *buip);
#endif /* __XFS_BMAP_ITEM_H__ */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 83b1e8c6c18f..addbd74ecd8e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -702,16 +702,15 @@ xfs_bmap_punch_delalloc_range(
struct xfs_iext_cursor icur;
int error = 0;
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
if (error)
- return error;
+ goto out_unlock;
}
if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
- return 0;
+ goto out_unlock;
while (got.br_startoff + got.br_blockcount > start_fsb) {
del = got;
@@ -735,6 +734,8 @@ xfs_bmap_punch_delalloc_range(
break;
}
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
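
With the ASSERT replaced by xfs_ilock(), xfs_bmap_punch_delalloc_range() now takes the inode lock itself and funnels every exit through out_unlock. A minimal standalone sketch of that lock-in-callee pattern, with a pthread mutex standing in for the XFS inode lock:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;

static int punch_range(int have_extents)
{
	int error = 0;

	pthread_mutex_lock(&ilock);	/* was the caller's job before */
	if (!have_extents) {
		error = -5;		/* e.g. xfs_iread_extents() failed */
		goto out_unlock;
	}
	/* ... walk the extent list and punch delalloc ranges ... */
out_unlock:
	pthread_mutex_unlock(&ilock);
	return error;
}

int main(void)
{
	printf("punch_range -> %d\n", punch_range(1));
	printf("punch_range -> %d\n", punch_range(0));
	return 0;
}
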
@@ -872,13 +873,11 @@ xfs_alloc_file_space(
xfs_filblks_t allocatesize_fsb;
xfs_extlen_t extsz, temp;
xfs_fileoff_t startoffset_fsb;
- xfs_fsblock_t firstfsb;
int nimaps;
int quota_flag;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imaps[1], *imapp;
- struct xfs_defer_ops dfops;
uint qblocks, resblks, resrtextents;
int error;
@@ -971,20 +970,15 @@ xfs_alloc_file_space(
xfs_trans_ijoin(tp, ip, 0);
- xfs_defer_init(&dfops, &firstfsb);
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
- allocatesize_fsb, alloc_type, &firstfsb,
- resblks, imapp, &nimaps, &dfops);
+ allocatesize_fsb, alloc_type, resblks,
+ imapp, &nimaps);
if (error)
goto error0;
/*
* Complete the transaction
*/
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto error0;
-
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
@@ -1003,8 +997,7 @@ xfs_alloc_file_space(
return error;
-error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
- xfs_defer_cancel(&dfops);
+error0: /* unlock inode, unreserve quota blocks, cancel trans */
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
error1: /* Just cancel transaction */
@@ -1022,8 +1015,6 @@ xfs_unmap_extent(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t firstfsb;
uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
int error;
@@ -1041,24 +1032,15 @@ xfs_unmap_extent(
xfs_trans_ijoin(tp, ip, 0);
- xfs_defer_init(&dfops, &firstfsb);
- error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
- &dfops, done);
- if (error)
- goto out_bmap_cancel;
-
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(&tp, &dfops);
+ error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
error = xfs_trans_commit(tp);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
goto out_unlock;
@@ -1279,7 +1261,7 @@ xfs_prepare_shift(
* we've flushed all the dirty data out to disk to avoid having
* CoW extents at the wrong offsets.
*/
- if (xfs_is_reflink_inode(ip)) {
+ if (xfs_inode_has_cow_data(ip)) {
error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
true);
if (error)
@@ -1310,8 +1292,6 @@ xfs_collapse_file_space(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
@@ -1344,22 +1324,16 @@ xfs_collapse_file_space(
goto out_trans_cancel;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_defer_init(&dfops, &first_block);
error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
- &done, &first_block, &dfops);
+ &done);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
error = xfs_trans_commit(tp);
}
return error;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
return error;
@@ -1386,8 +1360,6 @@ xfs_insert_file_space(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
xfs_fileoff_t next_fsb = NULLFSBLOCK;
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
@@ -1423,22 +1395,17 @@ xfs_insert_file_space(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_defer_init(&dfops, &first_block);
error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
- &done, stop_fsb, &first_block, &dfops);
+ &done, stop_fsb);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
error = xfs_trans_commit(tp);
}
return error;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
+out_trans_cancel:
xfs_trans_cancel(tp);
return error;
}
@@ -1566,14 +1533,13 @@ xfs_swap_extent_rmap(
struct xfs_inode *ip,
struct xfs_inode *tip)
{
+ struct xfs_trans *tp = *tpp;
struct xfs_bmbt_irec irec;
struct xfs_bmbt_irec uirec;
struct xfs_bmbt_irec tirec;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
xfs_filblks_t count_fsb;
- xfs_fsblock_t firstfsb;
- struct xfs_defer_ops dfops;
int error;
xfs_filblks_t ilen;
xfs_filblks_t rlen;
@@ -1609,7 +1575,7 @@ xfs_swap_extent_rmap(
/* Unmap the old blocks in the source file. */
while (tirec.br_blockcount) {
- xfs_defer_init(&dfops, &firstfsb);
+ ASSERT(tp->t_firstblock == NULLFSBLOCK);
trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
/* Read extent from the source file */
@@ -1631,33 +1597,29 @@ xfs_swap_extent_rmap(
trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
/* Remove the mapping from the donor file. */
- error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
- tip, &uirec);
+ error = xfs_bmap_unmap_extent(tp, tip, &uirec);
if (error)
goto out_defer;
/* Remove the mapping from the source file. */
- error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
- ip, &irec);
+ error = xfs_bmap_unmap_extent(tp, ip, &irec);
if (error)
goto out_defer;
/* Map the donor file's blocks into the source file. */
- error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
- ip, &uirec);
+ error = xfs_bmap_map_extent(tp, ip, &uirec);
if (error)
goto out_defer;
/* Map the source file's blocks into the donor file. */
- error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
- tip, &irec);
+ error = xfs_bmap_map_extent(tp, tip, &irec);
if (error)
goto out_defer;
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(tpp, &dfops);
+ error = xfs_defer_finish(tpp);
+ tp = *tpp;
if (error)
- goto out_defer;
+ goto out;
tirec.br_startoff += rlen;
if (tirec.br_startblock != HOLESTARTBLOCK &&
@@ -1675,7 +1637,7 @@ xfs_swap_extent_rmap(
return 0;
out_defer:
- xfs_defer_cancel(&dfops);
+ xfs_defer_cancel(tp);
out:
trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
tip->i_d.di_flags2 = tip_flags2;
@@ -1691,7 +1653,6 @@ xfs_swap_extent_forks(
int *src_log_flags,
int *target_log_flags)
{
- struct xfs_ifork tempifp, *ifp, *tifp;
xfs_filblks_t aforkblks = 0;
xfs_filblks_t taforkblks = 0;
xfs_extnum_t junk;
@@ -1733,11 +1694,7 @@ xfs_swap_extent_forks(
/*
* Swap the data forks of the inodes
*/
- ifp = &ip->i_df;
- tifp = &tip->i_df;
- tempifp = *ifp; /* struct copy */
- *ifp = *tifp; /* struct copy */
- *tifp = tempifp; /* struct copy */
+ swap(ip->i_df, tip->i_df);
/*
* Fix the on-disk inode values
@@ -1746,13 +1703,8 @@ xfs_swap_extent_forks(
ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
- tmp = (uint64_t) ip->i_d.di_nextents;
- ip->i_d.di_nextents = tip->i_d.di_nextents;
- tip->i_d.di_nextents = tmp;
-
- tmp = (uint64_t) ip->i_d.di_format;
- ip->i_d.di_format = tip->i_d.di_format;
- tip->i_d.di_format = tmp;
+ swap(ip->i_d.di_nextents, tip->i_d.di_nextents);
+ swap(ip->i_d.di_format, tip->i_d.di_format);
/*
* The extents in the source inode could still contain speculative
@@ -1846,7 +1798,6 @@ xfs_swap_extents(
int src_log_flags, target_log_flags;
int error = 0;
int lock_flags;
- struct xfs_ifork *cowfp;
uint64_t f;
int resblks = 0;
@@ -1987,18 +1938,11 @@ xfs_swap_extents(
/* Swap the cow forks. */
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
- xfs_extnum_t extnum;
-
ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
- extnum = ip->i_cnextents;
- ip->i_cnextents = tip->i_cnextents;
- tip->i_cnextents = extnum;
-
- cowfp = ip->i_cowfp;
- ip->i_cowfp = tip->i_cowfp;
- tip->i_cowfp = cowfp;
+ swap(ip->i_cnextents, tip->i_cnextents);
+ swap(ip->i_cowfp, tip->i_cowfp);
if (ip->i_cowfp && ip->i_cowfp->if_bytes)
xfs_inode_set_cowblocks_tag(ip);
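
The open-coded three-line struct copies are replaced with the kernel's generic swap() macro, which at this point lived in include/linux/kernel.h and expands roughly as below. A standalone equivalent (GNU C typeof, as the kernel uses):

#include <stdio.h>

/* standalone copy of the kernel macro (GNU C typeof) */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	unsigned long nextents = 7, tnextents = 42;

	swap(nextents, tnextents);	/* any two assignable lvalues */
	printf("nextents=%lu tnextents=%lu\n", nextents, tnextents);
	return 0;
}

Because the macro works on whole lvalues, it swaps struct xfs_ifork values and fork pointers just as readily as the integer counts above.
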
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index e9c058e3761c..e839907e8492 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -34,16 +34,6 @@
static kmem_zone_t *xfs_buf_zone;
-#ifdef XFS_BUF_LOCK_TRACKING
-# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
-# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
-# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
-#else
-# define XB_SET_OWNER(bp) do { } while (0)
-# define XB_CLEAR_OWNER(bp) do { } while (0)
-# define XB_GET_OWNER(bp) do { } while (0)
-#endif
-
#define xb_to_gfp(flags) \
((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
@@ -226,7 +216,6 @@ _xfs_buf_alloc(
INIT_LIST_HEAD(&bp->b_li_list);
sema_init(&bp->b_sema, 0); /* held, no waiters */
spin_lock_init(&bp->b_lock);
- XB_SET_OWNER(bp);
bp->b_target = target;
bp->b_flags = flags;
@@ -757,11 +746,7 @@ _xfs_buf_read(
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
- if (flags & XBF_ASYNC) {
- xfs_buf_submit(bp);
- return 0;
- }
- return xfs_buf_submit_wait(bp);
+ return xfs_buf_submit(bp);
}
xfs_buf_t *
@@ -846,7 +831,7 @@ xfs_buf_read_uncached(
bp->b_flags |= XBF_READ;
bp->b_ops = ops;
- xfs_buf_submit_wait(bp);
+ xfs_buf_submit(bp);
if (bp->b_error) {
int error = bp->b_error;
xfs_buf_relse(bp);
@@ -1095,12 +1080,10 @@ xfs_buf_trylock(
int locked;
locked = down_trylock(&bp->b_sema) == 0;
- if (locked) {
- XB_SET_OWNER(bp);
+ if (locked)
trace_xfs_buf_trylock(bp, _RET_IP_);
- } else {
+ else
trace_xfs_buf_trylock_fail(bp, _RET_IP_);
- }
return locked;
}
@@ -1122,7 +1105,6 @@ xfs_buf_lock(
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);
down(&bp->b_sema);
- XB_SET_OWNER(bp);
trace_xfs_buf_lock_done(bp, _RET_IP_);
}
@@ -1133,9 +1115,7 @@ xfs_buf_unlock(
{
ASSERT(xfs_buf_islocked(bp));
- XB_CLEAR_OWNER(bp);
up(&bp->b_sema);
-
trace_xfs_buf_unlock(bp, _RET_IP_);
}
@@ -1249,7 +1229,7 @@ xfs_bwrite(
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
XBF_WRITE_FAIL | XBF_DONE);
- error = xfs_buf_submit_wait(bp);
+ error = xfs_buf_submit(bp);
if (error) {
xfs_force_shutdown(bp->b_target->bt_mount,
SHUTDOWN_META_IO_ERROR);
@@ -1453,29 +1433,55 @@ _xfs_buf_ioapply(
}
/*
- * Asynchronous IO submission path. This transfers the buffer lock ownership and
- * the current reference to the IO. It is not safe to reference the buffer after
- * a call to this function unless the caller holds an additional reference
- * itself.
+ * Wait for I/O completion of a sync buffer and return the I/O error code.
*/
-void
-xfs_buf_submit(
+static int
+xfs_buf_iowait(
struct xfs_buf *bp)
{
+ ASSERT(!(bp->b_flags & XBF_ASYNC));
+
+ trace_xfs_buf_iowait(bp, _RET_IP_);
+ wait_for_completion(&bp->b_iowait);
+ trace_xfs_buf_iowait_done(bp, _RET_IP_);
+
+ return bp->b_error;
+}
+
+/*
+ * Buffer I/O submission path, read or write. Asynchronous submission transfers
+ * the buffer lock ownership and the current reference to the IO. It is not
+ * safe to reference the buffer after a call to this function unless the caller
+ * holds an additional reference itself.
+ */
+int
+__xfs_buf_submit(
+ struct xfs_buf *bp,
+ bool wait)
+{
+ int error = 0;
+
trace_xfs_buf_submit(bp, _RET_IP_);
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
- ASSERT(bp->b_flags & XBF_ASYNC);
/* on shutdown we stale and complete the buffer immediately */
if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
xfs_buf_ioerror(bp, -EIO);
bp->b_flags &= ~XBF_DONE;
xfs_buf_stale(bp);
- xfs_buf_ioend(bp);
- return;
+ if (bp->b_flags & XBF_ASYNC)
+ xfs_buf_ioend(bp);
+ return -EIO;
}
+ /*
+ * Grab a reference so the buffer does not go away underneath us. For
+	 * async buffers, I/O completion drops the caller's reference, which
+ * could occur before submission returns.
+ */
+ xfs_buf_hold(bp);
+
if (bp->b_flags & XBF_WRITE)
xfs_buf_wait_unpin(bp);
@@ -1483,22 +1489,13 @@ xfs_buf_submit(
bp->b_io_error = 0;
/*
- * The caller's reference is released during I/O completion.
- * This occurs some time after the last b_io_remaining reference is
- * released, so after we drop our Io reference we have to have some
- * other reference to ensure the buffer doesn't go away from underneath
- * us. Take a direct reference to ensure we have safe access to the
- * buffer until we are finished with it.
- */
- xfs_buf_hold(bp);
-
- /*
* Set the count to 1 initially, this will stop an I/O completion
* callout which happens before we have started all the I/O from calling
* xfs_buf_ioend too early.
*/
atomic_set(&bp->b_io_remaining, 1);
- xfs_buf_ioacct_inc(bp);
+ if (bp->b_flags & XBF_ASYNC)
+ xfs_buf_ioacct_inc(bp);
_xfs_buf_ioapply(bp);
/*
@@ -1507,74 +1504,19 @@ xfs_buf_submit(
* that we don't return to the caller with completion still pending.
*/
if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
- if (bp->b_error)
+ if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
xfs_buf_ioend(bp);
else
xfs_buf_ioend_async(bp);
}
- xfs_buf_rele(bp);
- /* Note: it is not safe to reference bp now we've dropped our ref */
-}
-
-/*
- * Synchronous buffer IO submission path, read or write.
- */
-int
-xfs_buf_submit_wait(
- struct xfs_buf *bp)
-{
- int error;
-
- trace_xfs_buf_submit_wait(bp, _RET_IP_);
-
- ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
-
- if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
- xfs_buf_ioerror(bp, -EIO);
- xfs_buf_stale(bp);
- bp->b_flags &= ~XBF_DONE;
- return -EIO;
- }
-
- if (bp->b_flags & XBF_WRITE)
- xfs_buf_wait_unpin(bp);
-
- /* clear the internal error state to avoid spurious errors */
- bp->b_io_error = 0;
+ if (wait)
+ error = xfs_buf_iowait(bp);
/*
- * For synchronous IO, the IO does not inherit the submitters reference
- * count, nor the buffer lock. Hence we cannot release the reference we
- * are about to take until we've waited for all IO completion to occur,
- * including any xfs_buf_ioend_async() work that may be pending.
- */
- xfs_buf_hold(bp);
-
- /*
- * Set the count to 1 initially, this will stop an I/O completion
- * callout which happens before we have started all the I/O from calling
- * xfs_buf_ioend too early.
- */
- atomic_set(&bp->b_io_remaining, 1);
- _xfs_buf_ioapply(bp);
-
- /*
- * make sure we run completion synchronously if it raced with us and is
- * already complete.
- */
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
- xfs_buf_ioend(bp);
-
- /* wait for completion before gathering the error from the buffer */
- trace_xfs_buf_iowait(bp, _RET_IP_);
- wait_for_completion(&bp->b_iowait);
- trace_xfs_buf_iowait_done(bp, _RET_IP_);
- error = bp->b_error;
-
- /*
- * all done now, we can release the hold that keeps the buffer
- * referenced for the entire IO.
+ * Release the hold that keeps the buffer referenced for the entire
+ * I/O. Note that if the buffer is async, it is not safe to reference
+ * after this release.
*/
xfs_buf_rele(bp);
return error;
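
The unified __xfs_buf_submit() above hinges on one ordering rule: take a hold before issuing I/O, optionally wait, then drop the hold — after which an async buffer may already be gone. A toy standalone model of that reference discipline, with invented names:

#include <stdio.h>
#include <stdbool.h>

struct buf {
	int refcount;
	int error;			/* set by I/O completion */
};

static void buf_hold(struct buf *bp) { bp->refcount++; }
static void buf_rele(struct buf *bp) { bp->refcount--; }

static int buf_iowait(struct buf *bp)
{
	/* wait_for_completion(&bp->b_iowait) in the real code */
	return bp->error;
}

static int buf_submit(struct buf *bp, bool wait)
{
	int error = 0;

	buf_hold(bp);			/* keep bp alive past submission */
	/* ... issue the bios here ... */
	if (wait)
		error = buf_iowait(bp);
	buf_rele(bp);			/* async: bp may be freed after this */
	return error;
}

int main(void)
{
	struct buf bp = { .refcount = 1, .error = 0 };

	printf("sync submit -> %d (refcount %d)\n",
	       buf_submit(&bp, true), bp.refcount);
	return 0;
}
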
@@ -1972,16 +1914,11 @@ xfs_buf_cmp(
}
/*
- * submit buffers for write.
- *
- * When we have a large buffer list, we do not want to hold all the buffers
- * locked while we block on the request queue waiting for IO dispatch. To avoid
- * this problem, we lock and submit buffers in groups of 50, thereby minimising
- * the lock hold times for lists which may contain thousands of objects.
- *
- * To do this, we sort the buffer list before we walk the list to lock and
- * submit buffers, and we plug and unplug around each group of buffers we
- * submit.
+ * Submit buffers for write. If wait_list is specified, the buffers are
+ * submitted using sync I/O and placed on the wait list such that the caller can
+ * iowait each buffer. Otherwise async I/O is used and the buffers are released
+ * at I/O completion time. In either case, a buffer remains locked until its
+ * I/O completes and it is released from the queue.
*/
static int
xfs_buf_delwri_submit_buffers(
@@ -2023,21 +1960,21 @@ xfs_buf_delwri_submit_buffers(
trace_xfs_buf_delwri_split(bp, _RET_IP_);
/*
- * We do all IO submission async. This means if we need
- * to wait for IO completion we need to take an extra
- * reference so the buffer is still valid on the other
- * side. We need to move the buffer onto the io_list
- * at this point so the caller can still access it.
+ * If we have a wait list, each buffer (and associated delwri
+ * queue reference) transfers to it and is submitted
+ * synchronously. Otherwise, drop the buffer from the delwri
+ * queue and submit async.
*/
bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
- bp->b_flags |= XBF_WRITE | XBF_ASYNC;
+ bp->b_flags |= XBF_WRITE;
if (wait_list) {
- xfs_buf_hold(bp);
+ bp->b_flags &= ~XBF_ASYNC;
list_move_tail(&bp->b_list, wait_list);
- } else
+ } else {
+ bp->b_flags |= XBF_ASYNC;
list_del_init(&bp->b_list);
-
- xfs_buf_submit(bp);
+ }
+ __xfs_buf_submit(bp, false);
}
blk_finish_plug(&plug);
@@ -2084,9 +2021,11 @@ xfs_buf_delwri_submit(
list_del_init(&bp->b_list);
- /* locking the buffer will wait for async IO completion. */
- xfs_buf_lock(bp);
- error2 = bp->b_error;
+ /*
+ * Wait on the locked buffer, check for errors and unlock and
+ * release the delwri queue reference.
+ */
+ error2 = xfs_buf_iowait(bp);
xfs_buf_relse(bp);
if (!error)
error = error2;
@@ -2132,23 +2071,18 @@ xfs_buf_delwri_pushbuf(
/*
* Delwri submission clears the DELWRI_Q buffer flag and returns with
- * the buffer on the wait list with an associated reference. Rather than
+ * the buffer on the wait list with the original reference. Rather than
* bounce the buffer from a local wait list back to the original list
* after I/O completion, reuse the original list as the wait list.
*/
xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
/*
- * The buffer is now under I/O and wait listed as during typical delwri
- * submission. Lock the buffer to wait for I/O completion. Rather than
- * remove the buffer from the wait list and release the reference, we
- * want to return with the buffer queued to the original list. The
- * buffer already sits on the original list with a wait list reference,
- * however. If we let the queue inherit that wait list reference, all we
- * need to do is reset the DELWRI_Q flag.
+ * The buffer is now locked, under I/O and wait listed on the original
+ * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
+ * return with the buffer unlocked and on the original queue.
*/
- xfs_buf_lock(bp);
- error = bp->b_error;
+ error = xfs_buf_iowait(bp);
bp->b_flags |= _XBF_DELWRI_Q;
xfs_buf_unlock(bp);
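
The delwri branch above encodes the new convention: a wait list implies sync submission with the queue reference transferred to the list, while the async path drops the buffer from the queue at submission time. A small standalone model, names invented:

#include <stdio.h>
#include <stdbool.h>

struct buf {
	const char *name;
	bool async;
};

static void delwri_submit_buffers(struct buf *bufs, int n, bool have_wait_list)
{
	for (int i = 0; i < n; i++) {
		if (have_wait_list) {
			bufs[i].async = false;	/* caller iowaits it later */
			printf("%s: sync, moved to wait list\n", bufs[i].name);
		} else {
			bufs[i].async = true;	/* dropped at completion */
			printf("%s: async, off the delwri queue\n", bufs[i].name);
		}
	}
}

int main(void)
{
	struct buf bufs[] = { { "bp0", false }, { "bp1", false } };

	delwri_submit_buffers(bufs, 2, true);	/* the delwri_submit() case */
	return 0;
}
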
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index d24dbd4dac39..4e3171acd0f8 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -12,7 +12,6 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
-#include <linux/buffer_head.h>
#include <linux/uio.h>
#include <linux/list_lru.h>
@@ -199,10 +198,6 @@ typedef struct xfs_buf {
int b_last_error;
const struct xfs_buf_ops *b_ops;
-
-#ifdef XFS_BUF_LOCK_TRACKING
- int b_last_holder;
-#endif
} xfs_buf_t;
/* Finding and Reading Buffers */
@@ -298,8 +293,14 @@ extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
-extern void xfs_buf_submit(struct xfs_buf *bp);
-extern int xfs_buf_submit_wait(struct xfs_buf *bp);
+
+extern int __xfs_buf_submit(struct xfs_buf *bp, bool);
+static inline int xfs_buf_submit(struct xfs_buf *bp)
+{
+ bool wait = bp->b_flags & XBF_ASYNC ? false : true;
+ return __xfs_buf_submit(bp, wait);
+}
+
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 678a5fcd7576..93f07edafd81 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -128,7 +128,7 @@ next_extent:
}
out_del_cursor:
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
xfs_buf_relse(agbp);
out_put_perag:
xfs_perag_put(pag);
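
xfs_btree_del_cursor() now takes the raw error code instead of the XFS_BTREE_ERROR/XFS_BTREE_NOERROR tristate, so callers no longer translate at every teardown site. A trivial standalone model of the calling-convention change:

#include <stdio.h>

static void del_cursor(int error)
{
	/* the callee derives the abort decision itself now */
	printf("cursor teardown: %s\n",
	       error ? "error path, abort" : "clean release");
}

int main(void)
{
	int error = -5;			/* e.g. -EIO from a btree lookup */

	/* old style: del_cursor(error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); */
	del_cursor(error);		/* new style: pass the error straight in */
	return 0;
}
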
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 0973a0423bed..87e6dd5326d5 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -286,17 +286,15 @@ xfs_dquot_disk_alloc(
struct xfs_buf **bpp)
{
struct xfs_bmbt_irec map;
- struct xfs_defer_ops dfops;
- struct xfs_mount *mp = (*tpp)->t_mountp;
+ struct xfs_trans *tp = *tpp;
+ struct xfs_mount *mp = tp->t_mountp;
struct xfs_buf *bp;
struct xfs_inode *quotip = xfs_quota_inode(mp, dqp->dq_flags);
- xfs_fsblock_t firstblock;
int nmaps = 1;
int error;
trace_xfs_dqalloc(dqp);
- xfs_defer_init(&dfops, &firstblock);
xfs_ilock(quotip, XFS_ILOCK_EXCL);
if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
/*
@@ -308,13 +306,12 @@ xfs_dquot_disk_alloc(
}
/* Create the block mapping. */
- xfs_trans_ijoin(*tpp, quotip, XFS_ILOCK_EXCL);
- error = xfs_bmapi_write(*tpp, quotip, dqp->q_fileoffset,
+ xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
+ error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
- &firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
- &map, &nmaps, &dfops);
+ XFS_QM_DQALLOC_SPACE_RES(mp), &map, &nmaps);
if (error)
- goto error0;
+ return error;
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
ASSERT(nmaps == 1);
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
@@ -326,19 +323,17 @@ xfs_dquot_disk_alloc(
dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
/* now we can just get the buffer (there's nothing to read yet) */
- bp = xfs_trans_get_buf(*tpp, mp->m_ddev_targp, dqp->q_blkno,
+ bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen, 0);
- if (!bp) {
- error = -ENOMEM;
- goto error1;
- }
+ if (!bp)
+ return -ENOMEM;
bp->b_ops = &xfs_dquot_buf_ops;
/*
* Make a chunk of dquots out of this buffer and log
* the entire thing.
*/
- xfs_qm_init_dquot_blk(*tpp, mp, be32_to_cpu(dqp->q_core.d_id),
+ xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
xfs_buf_set_ref(bp, XFS_DQUOT_REF);
@@ -352,10 +347,8 @@ xfs_dquot_disk_alloc(
* the buffer locked across the _defer_finish call. We can now do
* this correctly with xfs_defer_bjoin.
*
- * Above, we allocated a disk block for the dquot information and
- * used get_buf to initialize the dquot. If the _defer_bjoin fails,
- * the buffer is still locked to *tpp, so we must _bhold_release and
- * then _trans_brelse the buffer. If the _defer_finish fails, the old
+ * Above, we allocated a disk block for the dquot information and used
+ * get_buf to initialize the dquot. If the _defer_finish fails, the old
* transaction is gone but the new buffer is not joined or held to any
* transaction, so we must _buf_relse it.
*
@@ -364,25 +357,15 @@ xfs_dquot_disk_alloc(
* is responsible for unlocking any buffer passed back, either
* manually or by committing the transaction.
*/
- xfs_trans_bhold(*tpp, bp);
- error = xfs_defer_bjoin(&dfops, bp);
- if (error) {
- xfs_trans_bhold_release(*tpp, bp);
- xfs_trans_brelse(*tpp, bp);
- goto error1;
- }
- error = xfs_defer_finish(tpp, &dfops);
+ xfs_trans_bhold(tp, bp);
+ error = xfs_defer_finish(tpp);
+ tp = *tpp;
if (error) {
xfs_buf_relse(bp);
- goto error1;
+ return error;
}
*bpp = bp;
return 0;
-
-error1:
- xfs_defer_cancel(&dfops);
-error0:
- return error;
}
/*
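
The simplified error handling above works because xfs_trans_bhold() keeps the dquot buffer locked while xfs_defer_finish() rolls the transaction, so on failure only a plain xfs_buf_relse() is needed. A toy standalone model of the hold-across-roll rule, names invented:

#include <stdio.h>
#include <stdbool.h>

struct buf {
	bool locked;
	bool held;
};

static void trans_bhold(struct buf *bp) { bp->held = true; }

static void trans_roll(struct buf *bp)
{
	if (!bp->held)
		bp->locked = false;	/* a normal commit unlocks the buffer */
	bp->held = false;		/* the hold survives only one roll */
}

int main(void)
{
	struct buf bp = { .locked = true, .held = false };

	trans_bhold(&bp);		/* keep bp locked across the roll */
	trans_roll(&bp);		/* xfs_defer_finish() rolls the trans */
	printf("buffer still locked: %s\n", bp.locked ? "yes" : "no");
	return 0;
}
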
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 0470114a8d80..9866f542e77b 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -50,6 +50,7 @@ static unsigned int xfs_errortag_random_default[] = {
XFS_RANDOM_LOG_ITEM_PIN,
XFS_RANDOM_BUF_LRU_REF,
XFS_RANDOM_FORCE_SCRUB_REPAIR,
+ XFS_RANDOM_FORCE_SUMMARY_RECALC,
};
struct xfs_errortag_attr {
@@ -157,6 +158,7 @@ XFS_ERRORTAG_ATTR_RW(log_bad_crc, XFS_ERRTAG_LOG_BAD_CRC);
XFS_ERRORTAG_ATTR_RW(log_item_pin, XFS_ERRTAG_LOG_ITEM_PIN);
XFS_ERRORTAG_ATTR_RW(buf_lru_ref, XFS_ERRTAG_BUF_LRU_REF);
XFS_ERRORTAG_ATTR_RW(force_repair, XFS_ERRTAG_FORCE_SCRUB_REPAIR);
+XFS_ERRORTAG_ATTR_RW(bad_summary, XFS_ERRTAG_FORCE_SUMMARY_RECALC);
static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(noerror),
@@ -192,6 +194,7 @@ static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(log_item_pin),
XFS_ERRORTAG_ATTR_LIST(buf_lru_ref),
XFS_ERRORTAG_ATTR_LIST(force_repair),
+ XFS_ERRORTAG_ATTR_LIST(bad_summary),
NULL,
};
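
The new bad_summary knob joins the errortag table: each tag carries a default random frequency and, once enabled via sysfs, fires on roughly one in N evaluations. A standalone model of that 1-in-N injection roll (the frequency value and names here are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

enum { TAG_BAD_SUMMARY, TAG_MAX };

static unsigned int tag_freq[TAG_MAX] = {
	[TAG_BAD_SUMMARY] = 4,		/* fire ~1 time in 4 (model value) */
};

static int errortag_test(int tag)
{
	return tag_freq[tag] && (rand() % tag_freq[tag]) == 0;
}

int main(void)
{
	srand(42);
	for (int i = 0; i < 8; i++)
		printf("check %d: %s\n", i,
		       errortag_test(TAG_BAD_SUMMARY) ? "inject failure" : "ok");
	return 0;
}
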
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 3cf4682e2510..f2284ceb129f 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -150,7 +150,7 @@ xfs_nfs_get_inode(
}
if (VFS_I(ip)->i_generation != generation) {
- IRELE(ip);
+ xfs_irele(ip);
return ERR_PTR(-ESTALE);
}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index a3e7767a5715..61a5ad2600e8 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -721,12 +721,10 @@ xfs_file_write_iter(
static void
xfs_wait_dax_page(
- struct inode *inode,
- bool *did_unlock)
+ struct inode *inode)
{
struct xfs_inode *ip = XFS_I(inode);
- *did_unlock = true;
xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
schedule();
xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
@@ -735,8 +733,7 @@ xfs_wait_dax_page(
static int
xfs_break_dax_layouts(
struct inode *inode,
- uint iolock,
- bool *did_unlock)
+ bool *retry)
{
struct page *page;
@@ -746,9 +743,10 @@ xfs_break_dax_layouts(
if (!page)
return 0;
+ *retry = true;
return ___wait_var_event(&page->_refcount,
atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
- 0, 0, xfs_wait_dax_page(inode, did_unlock));
+ 0, 0, xfs_wait_dax_page(inode));
}
int
@@ -766,7 +764,7 @@ xfs_break_layouts(
retry = false;
switch (reason) {
case BREAK_UNMAP:
- error = xfs_break_dax_layouts(inode, *iolock, &retry);
+ error = xfs_break_dax_layouts(inode, &retry);
if (error || retry)
break;
/* fall through */
@@ -933,31 +931,16 @@ xfs_file_clone_range(
len, false);
}
-STATIC ssize_t
+STATIC int
xfs_file_dedupe_range(
- struct file *src_file,
- u64 loff,
- u64 len,
- struct file *dst_file,
- u64 dst_loff)
+ struct file *file_in,
+ loff_t pos_in,
+ struct file *file_out,
+ loff_t pos_out,
+ u64 len)
{
- struct inode *srci = file_inode(src_file);
- u64 max_dedupe;
- int error;
-
- /*
- * Since we have to read all these pages in to compare them, cut
- * it off at MAX_RW_COUNT/2 rounded down to the nearest block.
- * That means we won't do more than MAX_RW_COUNT IO per request.
- */
- max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1);
- if (len > max_dedupe)
- len = max_dedupe;
- error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
+ return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
len, true);
- if (error)
- return error;
- return len;
}
STATIC int
@@ -1171,7 +1154,7 @@ xfs_file_mmap(
file_accessed(filp);
vma->vm_ops = &xfs_file_vm_ops;
if (IS_DAX(file_inode(filp)))
- vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ vma->vm_flags |= VM_HUGEPAGE;
return 0;
}
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 2d2c5ab9143c..182501373af2 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -19,6 +19,8 @@
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_ag_resv.h"
+#include "xfs_trans.h"
+#include "xfs_shared.h"
struct xfs_fstrm_item {
struct xfs_mru_cache_elem mru;
@@ -339,7 +341,7 @@ xfs_filestream_lookup_ag(
if (xfs_filestream_pick_ag(pip, startag, &ag, 0, 0))
ag = NULLAGNUMBER;
out:
- IRELE(pip);
+ xfs_irele(pip);
return ag;
}
@@ -377,7 +379,7 @@ xfs_filestream_new_ag(
if (xfs_alloc_is_userdata(ap->datatype))
flags |= XFS_PICK_USERDATA;
- if (ap->dfops->dop_low)
+ if (ap->tp->t_flags & XFS_TRANS_LOWMODE)
flags |= XFS_PICK_LOWSPACE;
err = xfs_filestream_pick_ag(pip, startag, agp, flags, minlen);
@@ -388,7 +390,7 @@ xfs_filestream_new_ag(
if (mru)
xfs_fstrm_free_func(mp, mru);
- IRELE(pip);
+ xfs_irele(pip);
exit:
if (*agp == NULLAGNUMBER)
*agp = 0;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index c7157bc48bd1..3d76a9e35870 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -214,12 +214,12 @@ xfs_getfsmap_is_shared(
/* Are there any shared blocks here? */
flen = 0;
cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
- info->agno, NULL);
+ info->agno);
error = xfs_refcount_find_shared(cur, rec->rm_startblock,
rec->rm_blockcount, &fbno, &flen, false);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
if (error)
return error;
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 3f2bd6032cf8..7c00b8bedfe3 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -536,7 +536,7 @@ xfs_fs_reserve_ag_blocks(
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
pag = xfs_perag_get(mp, agno);
- err2 = xfs_ag_resv_init(pag);
+ err2 = xfs_ag_resv_init(pag, NULL);
xfs_perag_put(pag);
if (err2 && !error)
error = err2;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 47f417d20a30..245483cc282b 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -66,7 +66,7 @@ xfs_inode_alloc(
ip->i_cowfp = NULL;
ip->i_cnextents = 0;
ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
- memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
+ memset(&ip->i_df, 0, sizeof(ip->i_df));
ip->i_flags = 0;
ip->i_delayed_blks = 0;
memset(&ip->i_d, 0, sizeof(ip->i_d));
@@ -716,7 +716,7 @@ xfs_icache_inode_is_allocated(
return error;
*inuse = !!(VFS_I(ip)->i_mode);
- IRELE(ip);
+ xfs_irele(ip);
return 0;
}
@@ -856,7 +856,7 @@ restart:
xfs_iflags_test(batch[i], XFS_INEW))
xfs_inew_wait(batch[i]);
error = execute(batch[i], flags, args);
- IRELE(batch[i]);
+ xfs_irele(batch[i]);
if (error == -EAGAIN) {
skipped++;
continue;
@@ -1697,14 +1697,13 @@ xfs_inode_clear_eofblocks_tag(
*/
static bool
xfs_prep_free_cowblocks(
- struct xfs_inode *ip,
- struct xfs_ifork *ifp)
+ struct xfs_inode *ip)
{
/*
* Just clear the tag if we have an empty cow fork or none at all. It's
* possible the inode was fully unshared since it was originally tagged.
*/
- if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
+ if (!xfs_inode_has_cow_data(ip)) {
trace_xfs_inode_free_cowblocks_invalid(ip);
xfs_inode_clear_cowblocks_tag(ip);
return false;
@@ -1742,11 +1741,10 @@ xfs_inode_free_cowblocks(
void *args)
{
struct xfs_eofblocks *eofb = args;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
int match;
int ret = 0;
- if (!xfs_prep_free_cowblocks(ip, ifp))
+ if (!xfs_prep_free_cowblocks(ip))
return 0;
if (eofb) {
@@ -1771,7 +1769,7 @@ xfs_inode_free_cowblocks(
* Check again, nobody else should be able to dirty blocks or change
* the reflink iflag now that we have the first two locks held.
*/
- if (xfs_prep_free_cowblocks(ip, ifp))
+ if (xfs_prep_free_cowblocks(ip))
ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
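
xfs_prep_free_cowblocks() now keys off the new xfs_inode_has_cow_data() helper, which is true only when a COW fork exists and actually holds bytes. A standalone model of the predicate and the two states it distinguishes:

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct ifork { long if_bytes; };
struct inode { struct ifork *i_cowfp; };

static bool inode_has_cow_data(struct inode *ip)
{
	return ip->i_cowfp && ip->i_cowfp->if_bytes;
}

int main(void)
{
	struct ifork cow = { .if_bytes = 0 };
	struct inode plain = { .i_cowfp = NULL };	/* never shared */
	struct inode shared = { .i_cowfp = &cow };	/* reflink, no pending COW */

	printf("plain=%d shared=%d\n",
	       inode_has_cow_data(&plain), inode_has_cow_data(&shared));
	cow.if_bytes = 4096;				/* COW extents queued */
	printf("shared after COW write: %d\n", inode_has_cow_data(&shared));
	return 0;
}
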
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 5df4de666cc1..d957a46dc1cb 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -927,7 +927,7 @@ xfs_ialloc(
case S_IFLNK:
ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
ip->i_df.if_flags = XFS_IFEXTENTS;
- ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
+ ip->i_df.if_bytes = 0;
ip->i_df.if_u1.if_root = NULL;
break;
default:
@@ -1142,8 +1142,6 @@ xfs_create(
struct xfs_inode *ip = NULL;
struct xfs_trans *tp = NULL;
int error;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
bool unlock_dp_on_error = false;
prid_t prid;
struct xfs_dquot *udqp = NULL;
@@ -1195,9 +1193,6 @@ xfs_create(
xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
unlock_dp_on_error = true;
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
-
/*
* Reserve disk quota and the inode.
*/
@@ -1226,7 +1221,7 @@ xfs_create(
unlock_dp_on_error = false;
error = xfs_dir_createname(tp, dp, name, ip->i_ino,
- &first_block, &dfops, resblks ?
+ resblks ?
resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
if (error) {
ASSERT(error != -ENOSPC);
@@ -1238,11 +1233,11 @@ xfs_create(
if (is_dir) {
error = xfs_dir_init(tp, ip, dp);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
error = xfs_bumplink(tp, dp);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
/*
@@ -1260,10 +1255,6 @@ xfs_create(
*/
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
if (error)
goto out_release_inode;
@@ -1275,8 +1266,6 @@ xfs_create(
*ipp = ip;
return 0;
- out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_inode:
@@ -1287,7 +1276,7 @@ xfs_create(
*/
if (ip) {
xfs_finish_inode_setup(ip);
- IRELE(ip);
+ xfs_irele(ip);
}
xfs_qm_dqrele(udqp);
@@ -1382,7 +1371,7 @@ xfs_create_tmpfile(
*/
if (ip) {
xfs_finish_inode_setup(ip);
- IRELE(ip);
+ xfs_irele(ip);
}
xfs_qm_dqrele(udqp);
@@ -1401,8 +1390,6 @@ xfs_link(
xfs_mount_t *mp = tdp->i_mount;
xfs_trans_t *tp;
int error;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
int resblks;
trace_xfs_link(tdp, target_name);
@@ -1451,9 +1438,6 @@ xfs_link(
goto error_return;
}
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
-
/*
* Handle initial link state of O_TMPFILE inode
*/
@@ -1464,7 +1448,7 @@ xfs_link(
}
error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
- &first_block, &dfops, resblks);
+ resblks);
if (error)
goto error_return;
xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -1482,12 +1466,6 @@ xfs_link(
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_defer_finish(&tp, &dfops);
- if (error) {
- xfs_defer_cancel(&dfops);
- goto error_return;
- }
-
return xfs_trans_commit(tp);
error_return:
@@ -1545,8 +1523,6 @@ xfs_itruncate_extents_flags(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp = *tpp;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
xfs_fileoff_t first_unmap_block;
xfs_fileoff_t last_block;
xfs_filblks_t unmap_len;
@@ -1583,10 +1559,9 @@ xfs_itruncate_extents_flags(
ASSERT(first_unmap_block < last_block);
unmap_len = last_block - first_unmap_block + 1;
while (!done) {
- xfs_defer_init(&dfops, &first_block);
+ ASSERT(tp->t_firstblock == NULLFSBLOCK);
error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
- XFS_ITRUNC_MAX_EXTENTS, &first_block,
- &dfops, &done);
+ XFS_ITRUNC_MAX_EXTENTS, &done);
if (error)
goto out_bmap_cancel;
@@ -1594,10 +1569,9 @@ xfs_itruncate_extents_flags(
* Duplicate the transaction that has the permanent
* reservation and commit the old transaction.
*/
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(&tp, &dfops);
+ error = xfs_defer_finish(&tp);
if (error)
- goto out_bmap_cancel;
+ goto out;
error = xfs_trans_roll_inode(&tp, ip);
if (error)
@@ -1631,7 +1605,7 @@ out_bmap_cancel:
* the transaction can be properly aborted. We just need to make sure
* we're not holding any resources that we were not when we came in.
*/
- xfs_defer_cancel(&dfops);
+ xfs_defer_cancel(tp);
goto out;
}
@@ -1733,7 +1707,6 @@ xfs_inactive_truncate(
ASSERT(XFS_FORCED_SHUTDOWN(mp));
return error;
}
-
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
@@ -1774,8 +1747,6 @@ STATIC int
xfs_inactive_ifree(
struct xfs_inode *ip)
{
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
@@ -1812,9 +1783,7 @@ xfs_inactive_ifree(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
- error = xfs_ifree(tp, ip, &dfops);
+ error = xfs_ifree(tp, ip);
if (error) {
/*
* If we fail to free the inode, shut down. The cancel
@@ -1840,12 +1809,6 @@ xfs_inactive_ifree(
* Just ignore errors at this point. There is nothing we can do except
* to try to keep going. Make sure it's not a silent error.
*/
- error = xfs_defer_finish(&tp, &dfops);
- if (error) {
- xfs_notice(mp, "%s: xfs_defer_finish returned error %d",
- __func__, error);
- xfs_defer_cancel(&dfops);
- }
error = xfs_trans_commit(tp);
if (error)
xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
@@ -1868,7 +1831,6 @@ xfs_inactive(
xfs_inode_t *ip)
{
struct xfs_mount *mp;
- struct xfs_ifork *cow_ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
int error;
int truncate = 0;
@@ -1877,7 +1839,6 @@ xfs_inactive(
* to clean up here.
*/
if (VFS_I(ip)->i_mode == 0) {
- ASSERT(ip->i_df.if_real_bytes == 0);
ASSERT(ip->i_df.if_broot_bytes == 0);
return;
}
@@ -1890,7 +1851,7 @@ xfs_inactive(
return;
/* Try to clean out the cow blocks if there are any. */
- if (xfs_is_reflink_inode(ip) && cow_ifp->if_bytes > 0)
+ if (xfs_inode_has_cow_data(ip))
xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
if (VFS_I(ip)->i_nlink != 0) {
@@ -2445,9 +2406,8 @@ xfs_ifree_local_data(
*/
int
xfs_ifree(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- struct xfs_defer_ops *dfops)
+ struct xfs_trans *tp,
+ struct xfs_inode *ip)
{
int error;
struct xfs_icluster xic = { 0 };
@@ -2466,7 +2426,7 @@ xfs_ifree(
if (error)
return error;
- error = xfs_difree(tp, ip->i_ino, dfops, &xic);
+ error = xfs_difree(tp, ip->i_ino, &xic);
if (error)
return error;
@@ -2577,8 +2537,6 @@ xfs_remove(
xfs_trans_t *tp = NULL;
int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
int error = 0;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
uint resblks;
trace_xfs_remove(dp, name);
@@ -2658,13 +2616,10 @@ xfs_remove(
if (error)
goto out_trans_cancel;
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
- error = xfs_dir_removename(tp, dp, name, ip->i_ino,
- &first_block, &dfops, resblks);
+ error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
if (error) {
ASSERT(error != -ENOENT);
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
/*
@@ -2675,10 +2630,6 @@ xfs_remove(
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
if (error)
goto std_return;
@@ -2688,8 +2639,6 @@ xfs_remove(
return 0;
- out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
std_return:
@@ -2749,11 +2698,8 @@ xfs_sort_for_rename(
static int
xfs_finish_rename(
- struct xfs_trans *tp,
- struct xfs_defer_ops *dfops)
+ struct xfs_trans *tp)
{
- int error;
-
/*
* If this is a synchronous mount, make sure that the rename transaction
* goes to disk before returning to the user.
@@ -2761,13 +2707,6 @@ xfs_finish_rename(
if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_defer_finish(&tp, dfops);
- if (error) {
- xfs_defer_cancel(dfops);
- xfs_trans_cancel(tp);
- return error;
- }
-
return xfs_trans_commit(tp);
}
@@ -2785,8 +2724,6 @@ xfs_cross_rename(
struct xfs_inode *dp2,
struct xfs_name *name2,
struct xfs_inode *ip2,
- struct xfs_defer_ops *dfops,
- xfs_fsblock_t *first_block,
int spaceres)
{
int error = 0;
@@ -2795,16 +2732,12 @@ xfs_cross_rename(
int dp2_flags = 0;
/* Swap inode number for dirent in first parent */
- error = xfs_dir_replace(tp, dp1, name1,
- ip2->i_ino,
- first_block, dfops, spaceres);
+ error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
if (error)
goto out_trans_abort;
/* Swap inode number for dirent in second parent */
- error = xfs_dir_replace(tp, dp2, name2,
- ip1->i_ino,
- first_block, dfops, spaceres);
+ error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
if (error)
goto out_trans_abort;
@@ -2818,8 +2751,7 @@ xfs_cross_rename(
if (S_ISDIR(VFS_I(ip2)->i_mode)) {
error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
- dp1->i_ino, first_block,
- dfops, spaceres);
+ dp1->i_ino, spaceres);
if (error)
goto out_trans_abort;
@@ -2845,8 +2777,7 @@ xfs_cross_rename(
if (S_ISDIR(VFS_I(ip1)->i_mode)) {
error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
- dp2->i_ino, first_block,
- dfops, spaceres);
+ dp2->i_ino, spaceres);
if (error)
goto out_trans_abort;
@@ -2885,10 +2816,9 @@ xfs_cross_rename(
}
xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
- return xfs_finish_rename(tp, dfops);
+ return xfs_finish_rename(tp);
out_trans_abort:
- xfs_defer_cancel(dfops);
xfs_trans_cancel(tp);
return error;
}
@@ -2943,8 +2873,6 @@ xfs_rename(
{
struct xfs_mount *mp = src_dp->i_mount;
struct xfs_trans *tp;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
struct xfs_inode *wip = NULL; /* whiteout inode */
struct xfs_inode *inodes[__XFS_SORT_INODES];
int num_inodes = __XFS_SORT_INODES;
@@ -3026,14 +2954,11 @@ xfs_rename(
goto out_trans_cancel;
}
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
-
/* RENAME_EXCHANGE is unique from here on. */
if (flags & RENAME_EXCHANGE)
return xfs_cross_rename(tp, src_dp, src_name, src_ip,
target_dp, target_name, target_ip,
- &dfops, &first_block, spaceres);
+ spaceres);
/*
* Set up the target.
@@ -3054,10 +2979,9 @@ xfs_rename(
* to account for the ".." reference from the new entry.
*/
error = xfs_dir_createname(tp, target_dp, target_name,
- src_ip->i_ino, &first_block,
- &dfops, spaceres);
+ src_ip->i_ino, spaceres);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
xfs_trans_ichgtime(tp, target_dp,
XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -3065,7 +2989,7 @@ xfs_rename(
if (new_parent && src_is_directory) {
error = xfs_bumplink(tp, target_dp);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
} else { /* target_ip != NULL */
/*
@@ -3094,10 +3018,9 @@ xfs_rename(
* name at the destination directory, remove it first.
*/
error = xfs_dir_replace(tp, target_dp, target_name,
- src_ip->i_ino,
- &first_block, &dfops, spaceres);
+ src_ip->i_ino, spaceres);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
xfs_trans_ichgtime(tp, target_dp,
XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -3108,7 +3031,7 @@ xfs_rename(
*/
error = xfs_droplink(tp, target_ip);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
if (src_is_directory) {
/*
@@ -3116,7 +3039,7 @@ xfs_rename(
*/
error = xfs_droplink(tp, target_ip);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
} /* target_ip != NULL */
@@ -3129,11 +3052,10 @@ xfs_rename(
* directory.
*/
error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
- target_dp->i_ino,
- &first_block, &dfops, spaceres);
+ target_dp->i_ino, spaceres);
ASSERT(error != -EEXIST);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
/*
@@ -3159,7 +3081,7 @@ xfs_rename(
*/
error = xfs_droplink(tp, src_dp);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
/*
@@ -3169,12 +3091,12 @@ xfs_rename(
*/
if (wip) {
error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
- &first_block, &dfops, spaceres);
+ spaceres);
} else
error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
- &first_block, &dfops, spaceres);
+ spaceres);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
/*
* For whiteouts, we need to bump the link count on the whiteout inode.
@@ -3188,10 +3110,10 @@ xfs_rename(
ASSERT(VFS_I(wip)->i_nlink == 0);
error = xfs_bumplink(tp, wip);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
error = xfs_iunlink_remove(tp, wip);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
/*
@@ -3207,18 +3129,16 @@ xfs_rename(
if (new_parent)
xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
- error = xfs_finish_rename(tp, &dfops);
+ error = xfs_finish_rename(tp);
if (wip)
- IRELE(wip);
+ xfs_irele(wip);
return error;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_wip:
if (wip)
- IRELE(wip);
+ xfs_irele(wip);
return error;
}
@@ -3674,3 +3594,12 @@ xfs_iflush_int(
corrupt_out:
return -EFSCORRUPTED;
}
+
+/* Release an inode. */
+void
+xfs_irele(
+ struct xfs_inode *ip)
+{
+ trace_xfs_irele(ip, _RET_IP_);
+ iput(VFS_I(ip));
+}
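The new xfs_irele() replaces the old IRELE() statement macro (removed in the
xfs_inode.h hunk below) with a traced, type-checked function. A minimal sketch
of a converted caller, with a hypothetical function name:

	/* Hypothetical error path showing the conversion pattern. */
	static void example_cleanup(struct xfs_inode *ip)
	{
		/* was: IRELE(ip); a macro expanding to trace + iput */
		xfs_irele(ip);
	}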
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 2ed63a49e890..be2014520155 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -15,7 +15,6 @@
struct xfs_dinode;
struct xfs_inode;
struct xfs_buf;
-struct xfs_defer_ops;
struct xfs_bmbt_irec;
struct xfs_inode_log_item;
struct xfs_mount;
@@ -34,9 +33,9 @@ typedef struct xfs_inode {
struct xfs_imap i_imap; /* location for xfs_imap() */
/* Extent information. */
- xfs_ifork_t *i_afp; /* attribute fork pointer */
- xfs_ifork_t *i_cowfp; /* copy on write extents */
- xfs_ifork_t i_df; /* data fork */
+ struct xfs_ifork *i_afp; /* attribute fork pointer */
+ struct xfs_ifork *i_cowfp; /* copy on write extents */
+ struct xfs_ifork i_df; /* data fork */
/* operations vectors */
const struct xfs_dir_ops *d_ops; /* directory ops vector */
@@ -199,6 +198,15 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
}
/*
+ * Check if an inode has any data in the COW fork. This might often be false
+ * even for inodes with the reflink flag when there is no pending COW operation.
+ */
+static inline bool xfs_inode_has_cow_data(struct xfs_inode *ip)
+{
+ return ip->i_cowfp && ip->i_cowfp->if_bytes;
+}
+
+/*
* In-core inode flags.
*/
#define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
@@ -415,8 +423,7 @@ uint xfs_ilock_data_map_shared(struct xfs_inode *);
uint xfs_ilock_attr_map_shared(struct xfs_inode *);
uint xfs_ip2xflags(struct xfs_inode *);
-int xfs_ifree(struct xfs_trans *, xfs_inode_t *,
- struct xfs_defer_ops *);
+int xfs_ifree(struct xfs_trans *, struct xfs_inode *);
int xfs_itruncate_extents_flags(struct xfs_trans **,
struct xfs_inode *, int, xfs_fsize_t, int);
void xfs_iext_realloc(xfs_inode_t *, int, int);
@@ -484,18 +491,7 @@ static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
xfs_finish_inode_setup(ip);
}
-#define IHOLD(ip) \
-do { \
- ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
- ihold(VFS_I(ip)); \
- trace_xfs_ihold(ip, _THIS_IP_); \
-} while (0)
-
-#define IRELE(ip) \
-do { \
- trace_xfs_irele(ip, _THIS_IP_); \
- iput(VFS_I(ip)); \
-} while (0)
+void xfs_irele(struct xfs_inode *ip);
extern struct kmem_zone *xfs_inode_zone;
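xfs_inode_has_cow_data() gives callers a cheap check that the COW fork both
exists and holds extents. A sketch of the intended use, following the
xfs_reflink.c conversion later in this series:

	/* Bail out early when nothing is staged in the COW fork. */
	if (!xfs_inode_has_cow_data(ip))
		return 0;	/* no pending COW extents to cancel or remap */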
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 2389c34c172d..fa1c4fe2ffbf 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -194,8 +194,6 @@ xfs_inode_item_format_data_fork(
* to be there by xfs_idata_realloc().
*/
data_bytes = roundup(ip->i_df.if_bytes, 4);
- ASSERT(ip->i_df.if_real_bytes == 0 ||
- ip->i_df.if_real_bytes >= data_bytes);
ASSERT(ip->i_df.if_u1.if_data != NULL);
ASSERT(ip->i_d.di_size > 0);
xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
@@ -280,8 +278,6 @@ xfs_inode_item_format_attr_fork(
* to be there by xfs_idata_realloc().
*/
data_bytes = roundup(ip->i_afp->if_bytes, 4);
- ASSERT(ip->i_afp->if_real_bytes == 0 ||
- ip->i_afp->if_real_bytes >= data_bytes);
ASSERT(ip->i_afp->if_u1.if_data != NULL);
xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
ip->i_afp->if_u1.if_data,
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 55876dd02f0c..6320aca39f39 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * Copyright (c) 2016 Christoph Hellwig.
+ * Copyright (c) 2016-2018 Christoph Hellwig.
* All Rights Reserved.
*/
#include <linux/iomap.h>
@@ -152,13 +152,11 @@ xfs_iomap_write_direct(
xfs_fileoff_t offset_fsb;
xfs_fileoff_t last_fsb;
xfs_filblks_t count_fsb, resaligned;
- xfs_fsblock_t firstfsb;
xfs_extlen_t extsz;
int nimaps;
int quota_flag;
int rt;
xfs_trans_t *tp;
- struct xfs_defer_ops dfops;
uint qblocks, resblks, resrtextents;
int error;
int lockmode;
@@ -254,21 +252,15 @@ xfs_iomap_write_direct(
* From this point onwards we overwrite the imap pointer that the
* caller gave to us.
*/
- xfs_defer_init(&dfops, &firstfsb);
nimaps = 1;
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
- bmapi_flags, &firstfsb, resblks, imap,
- &nimaps, &dfops);
+ bmapi_flags, resblks, imap, &nimaps);
if (error)
- goto out_bmap_cancel;
+ goto out_res_cancel;
/*
* Complete the transaction
*/
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
if (error)
goto out_unlock;
@@ -288,8 +280,7 @@ out_unlock:
xfs_iunlock(ip, lockmode);
return error;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
+out_res_cancel:
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
xfs_trans_cancel(tp);
@@ -626,7 +617,7 @@ retry:
* Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
* them out if the write happens to fail.
*/
- iomap->flags = IOMAP_F_NEW;
+ iomap->flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
if (isnullstartblock(got.br_startblock))
@@ -660,13 +651,13 @@ xfs_iomap_write_allocate(
xfs_inode_t *ip,
int whichfork,
xfs_off_t offset,
- xfs_bmbt_irec_t *imap)
+ xfs_bmbt_irec_t *imap,
+ unsigned int *cow_seq)
{
xfs_mount_t *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_fileoff_t offset_fsb, last_block;
xfs_fileoff_t end_fsb, map_start_fsb;
- xfs_fsblock_t first_block;
- struct xfs_defer_ops dfops;
xfs_filblks_t count_fsb;
xfs_trans_t *tp;
int nimaps;
@@ -716,8 +707,6 @@ xfs_iomap_write_allocate(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- xfs_defer_init(&dfops, &first_block);
-
/*
* it is possible that the extents have changed since
* we did the read call as we dropped the ilock for a
@@ -770,13 +759,8 @@ xfs_iomap_write_allocate(
* pointer that the caller gave to us.
*/
error = xfs_bmapi_write(tp, ip, map_start_fsb,
- count_fsb, flags, &first_block,
- nres, imap, &nimaps,
- &dfops);
- if (error)
- goto trans_cancel;
-
- error = xfs_defer_finish(&tp, &dfops);
+ count_fsb, flags, nres, imap,
+ &nimaps);
if (error)
goto trans_cancel;
@@ -784,6 +768,8 @@ xfs_iomap_write_allocate(
if (error)
goto error0;
+ if (whichfork == XFS_COW_FORK)
+ *cow_seq = READ_ONCE(ifp->if_seq);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
@@ -810,7 +796,6 @@ xfs_iomap_write_allocate(
}
trans_cancel:
- xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
error0:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -828,11 +813,9 @@ xfs_iomap_write_unwritten(
xfs_fileoff_t offset_fsb;
xfs_filblks_t count_fsb;
xfs_filblks_t numblks_fsb;
- xfs_fsblock_t firstfsb;
int nimaps;
xfs_trans_t *tp;
xfs_bmbt_irec_t imap;
- struct xfs_defer_ops dfops;
struct inode *inode = VFS_I(ip);
xfs_fsize_t i_size;
uint resblks;
@@ -877,11 +860,10 @@ xfs_iomap_write_unwritten(
/*
* Modify the unwritten extent state of the buffer.
*/
- xfs_defer_init(&dfops, &firstfsb);
nimaps = 1;
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
- XFS_BMAPI_CONVERT, &firstfsb, resblks,
- &imap, &nimaps, &dfops);
+ XFS_BMAPI_CONVERT, resblks, &imap,
+ &nimaps);
if (error)
goto error_on_bmapi_transaction;
@@ -901,10 +883,6 @@ xfs_iomap_write_unwritten(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto error_on_bmapi_transaction;
-
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
@@ -928,7 +906,6 @@ xfs_iomap_write_unwritten(
return 0;
error_on_bmapi_transaction:
- xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
@@ -1132,7 +1109,7 @@ xfs_file_iomap_begin(
if (error)
return error;
- iomap->flags = IOMAP_F_NEW;
+ iomap->flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
out_finish:
@@ -1202,11 +1179,8 @@ xfs_file_iomap_end_delalloc(
truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
XFS_FSB_TO_B(mp, end_fsb) - 1);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
end_fsb - start_fsb);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
if (error && !XFS_FORCED_SHUTDOWN(mp)) {
xfs_alert(mp, "%s: unable to clean up ino %lld",
__func__, ip->i_ino);
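Note the two conversions above from "iomap->flags = IOMAP_F_NEW" to "|=": plain
assignment clobbered any flag set earlier in the mapping path. A tiny sketch of
the hazard; IOMAP_F_SHARED is used here as a plausible earlier flag, not taken
from this diff:

	struct iomap iomap = { .flags = IOMAP_F_SHARED };	/* hypothetically set earlier */

	iomap.flags |= IOMAP_F_NEW;	/* keeps IOMAP_F_SHARED */
	/* iomap.flags = IOMAP_F_NEW;	   would have silently dropped it */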
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 83474c9cede9..c6170548831b 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -14,7 +14,7 @@ struct xfs_bmbt_irec;
int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
struct xfs_bmbt_irec *, int);
int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
- struct xfs_bmbt_irec *);
+ struct xfs_bmbt_irec *, unsigned int *);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
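xfs_iomap_write_allocate() now hands the COW fork sequence number back to its
caller. A sketch of the revalidation this enables, assumed from the new
signature (the writeback-side user is not shown in this section); ifp here is
XFS_IFORK_PTR(ip, XFS_COW_FORK):

	unsigned int cow_seq = READ_ONCE(ifp->if_seq);	/* sampled under ILOCK */

	error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
			&imap, &cow_seq);
	/* ... later, before trusting a cached COW mapping ... */
	if (READ_ONCE(ifp->if_seq) != cow_seq) {
		/* COW fork changed underneath us; look the extent up again */
	}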
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 0fa29f39d658..c3e74f9128e8 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -26,6 +26,7 @@
#include "xfs_dir2.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
+#include "xfs_defer.h"
#include <linux/capability.h>
#include <linux/xattr.h>
@@ -208,7 +209,7 @@ xfs_generic_create(
xfs_finish_inode_setup(ip);
if (!tmpfile)
xfs_cleanup_inode(dir, inode, dentry);
- iput(inode);
+ xfs_irele(ip);
goto out_free_acl;
}
@@ -390,7 +391,7 @@ xfs_vn_symlink(
out_cleanup_inode:
xfs_finish_inode_setup(cip);
xfs_cleanup_inode(dir, inode, dentry);
- iput(inode);
+ xfs_irele(cip);
out:
return error;
}
@@ -1253,7 +1254,7 @@ xfs_setup_inode(
inode_sb_list_add(inode);
/* make the inode look hashed for the writeback code */
- hlist_add_fake(&inode->i_hash);
+ inode_fake_hash(inode);
inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid);
inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 24f4f1c555b5..e9508ba01ed1 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -114,7 +114,7 @@ xfs_bulkstat_one_int(
break;
}
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- IRELE(ip);
+ xfs_irele(ip);
error = formatter(buffer, ubsize, ubused, buf);
if (!error)
@@ -458,8 +458,7 @@ xfs_bulkstat(
* pending error, then we are done.
*/
del_cursor:
- xfs_btree_del_cursor(cur, error ?
- XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
xfs_buf_relse(agbp);
if (error)
break;
@@ -632,8 +631,7 @@ next_ag:
kmem_free(buffer);
if (cur)
- xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
- XFS_BTREE_NOERROR));
+ xfs_btree_del_cursor(cur, error);
if (agbp)
xfs_buf_relse(agbp);
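xfs_btree_del_cursor() now takes the raw error code instead of a translated
XFS_BTREE_ERROR/XFS_BTREE_NOERROR value, so every caller loses a ternary. The
resulting shape, with a hypothetical helper name:

	error = walk_btree_records(cur);	/* hypothetical */
	xfs_btree_del_cursor(cur, error);	/* nonzero marks the cursor as errored */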
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 5e56f3b93d4b..c3b610b687d1 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -410,7 +410,7 @@ out_error:
}
/*
- * Reserve log space and return a ticket corresponding the reservation.
+ * Reserve log space and return a ticket corresponding to the reservation.
*
* Each reservation is going to reserve extra space for a log record header.
* When writes happen to the on-disk log, we don't subtract the length of the
@@ -826,6 +826,88 @@ xfs_log_mount_cancel(
* deallocation must not be done until source-end.
*/
+/* Actually write the unmount record to disk. */
+static void
+xfs_log_write_unmount_record(
+ struct xfs_mount *mp)
+{
+ /* the data section must be 32 bit size aligned */
+ struct xfs_unmount_log_format magic = {
+ .magic = XLOG_UNMOUNT_TYPE,
+ };
+ struct xfs_log_iovec reg = {
+ .i_addr = &magic,
+ .i_len = sizeof(magic),
+ .i_type = XLOG_REG_TYPE_UNMOUNT,
+ };
+ struct xfs_log_vec vec = {
+ .lv_niovecs = 1,
+ .lv_iovecp = &reg,
+ };
+ struct xlog *log = mp->m_log;
+ struct xlog_in_core *iclog;
+ struct xlog_ticket *tic = NULL;
+ xfs_lsn_t lsn;
+ uint flags = XLOG_UNMOUNT_TRANS;
+ int error;
+
+ error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
+ if (error)
+ goto out_err;
+
+ /*
+ * If we think the summary counters are bad, clear the unmount header
+ * flag in the unmount record so that the summary counters will be
+ * recalculated during log recovery at next mount. Refer to
+ * xlog_check_unmount_rec for more details.
+ */
+ if (XFS_TEST_ERROR((mp->m_flags & XFS_MOUNT_BAD_SUMMARY), mp,
+ XFS_ERRTAG_FORCE_SUMMARY_RECALC)) {
+ xfs_alert(mp, "%s: will fix summary counters at next mount",
+ __func__);
+ flags &= ~XLOG_UNMOUNT_TRANS;
+ }
+
+ /* remove inited flag, and account for space used */
+ tic->t_flags = 0;
+ tic->t_curr_res -= sizeof(magic);
+ error = xlog_write(log, &vec, tic, &lsn, NULL, flags);
+ /*
+ * At this point, we're unmounting anyway, so there's no point in
+ * transitioning log state to IOERROR. Just continue...
+ */
+out_err:
+ if (error)
+ xfs_alert(mp, "%s: unmount record failed", __func__);
+
+ spin_lock(&log->l_icloglock);
+ iclog = log->l_iclog;
+ atomic_inc(&iclog->ic_refcnt);
+ xlog_state_want_sync(log, iclog);
+ spin_unlock(&log->l_icloglock);
+ error = xlog_state_release_iclog(log, iclog);
+
+ spin_lock(&log->l_icloglock);
+ switch (iclog->ic_state) {
+ default:
+ if (!XLOG_FORCED_SHUTDOWN(log)) {
+ xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+ break;
+ }
+ /* fall through */
+ case XLOG_STATE_ACTIVE:
+ case XLOG_STATE_DIRTY:
+ spin_unlock(&log->l_icloglock);
+ break;
+ }
+
+ if (tic) {
+ trace_xfs_log_umount_write(log, tic);
+ xlog_ungrant_log_space(log, tic);
+ xfs_log_ticket_put(tic);
+ }
+}
+
/*
* Unmount record used to have a string "Unmount filesystem--" in the
* data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
@@ -842,8 +924,6 @@ xfs_log_unmount_write(xfs_mount_t *mp)
#ifdef DEBUG
xlog_in_core_t *first_iclog;
#endif
- xlog_ticket_t *tic = NULL;
- xfs_lsn_t lsn;
int error;
/*
@@ -870,66 +950,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
} while (iclog != first_iclog);
#endif
if (! (XLOG_FORCED_SHUTDOWN(log))) {
- error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
- if (!error) {
- /* the data section must be 32 bit size aligned */
- struct {
- uint16_t magic;
- uint16_t pad1;
- uint32_t pad2; /* may as well make it 64 bits */
- } magic = {
- .magic = XLOG_UNMOUNT_TYPE,
- };
- struct xfs_log_iovec reg = {
- .i_addr = &magic,
- .i_len = sizeof(magic),
- .i_type = XLOG_REG_TYPE_UNMOUNT,
- };
- struct xfs_log_vec vec = {
- .lv_niovecs = 1,
- .lv_iovecp = &reg,
- };
-
- /* remove inited flag, and account for space used */
- tic->t_flags = 0;
- tic->t_curr_res -= sizeof(magic);
- error = xlog_write(log, &vec, tic, &lsn,
- NULL, XLOG_UNMOUNT_TRANS);
- /*
- * At this point, we're umounting anyway,
- * so there's no point in transitioning log state
- * to IOERROR. Just continue...
- */
- }
-
- if (error)
- xfs_alert(mp, "%s: unmount record failed", __func__);
-
-
- spin_lock(&log->l_icloglock);
- iclog = log->l_iclog;
- atomic_inc(&iclog->ic_refcnt);
- xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
- error = xlog_state_release_iclog(log, iclog);
-
- spin_lock(&log->l_icloglock);
- if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
- iclog->ic_state == XLOG_STATE_DIRTY)) {
- if (!XLOG_FORCED_SHUTDOWN(log)) {
- xlog_wait(&iclog->ic_force_wait,
- &log->l_icloglock);
- } else {
- spin_unlock(&log->l_icloglock);
- }
- } else {
- spin_unlock(&log->l_icloglock);
- }
- if (tic) {
- trace_xfs_log_umount_write(log, tic);
- xlog_ungrant_log_space(log, tic);
- xfs_log_ticket_put(tic);
- }
+ xfs_log_write_unmount_record(mp);
} else {
/*
* We're already in forced_shutdown mode, couldn't
@@ -4083,3 +4104,12 @@ xfs_log_check_lsn(
return valid;
}
+
+bool
+xfs_log_in_recovery(
+ struct xfs_mount *mp)
+{
+ struct xlog *log = mp->m_log;
+
+ return log->l_flags & XLOG_ACTIVE_RECOVERY;
+}
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 3c1f6a8b4b70..73a64bf32f6f 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -153,5 +153,6 @@ bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
void xfs_log_work_queue(struct xfs_mount *mp);
void xfs_log_quiesce(struct xfs_mount *mp);
bool xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
+bool xfs_log_in_recovery(struct xfs_mount *);
#endif /* __XFS_LOG_H__ */
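The one-region log vector built by xfs_log_write_unmount_record() is a general
pattern for small fixed-size records. A condensed sketch; the payload struct is
hypothetical, and i_len must stay 32-bit size aligned:

	struct my_payload payload = { 0 };	/* hypothetical, sizeof % 4 == 0 */
	struct xfs_log_iovec reg = {
		.i_addr = &payload,
		.i_len  = sizeof(payload),
		.i_type = XLOG_REG_TYPE_UNMOUNT,
	};
	struct xfs_log_vec vec = {
		.lv_niovecs = 1,
		.lv_iovecp  = &reg,
	};

	error = xlog_write(log, &vec, tic, &lsn, NULL, flags);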
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b181b5f57a19..a21dc61ec09e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -196,7 +196,7 @@ xlog_bread_noalign(
bp->b_io_length = nbblks;
bp->b_error = 0;
- error = xfs_buf_submit_wait(bp);
+ error = xfs_buf_submit(bp);
if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
xfs_buf_ioerror_alert(bp, __func__);
return error;
@@ -4733,10 +4733,9 @@ xlog_recover_cancel_rui(
/* Recover the CUI if necessary. */
STATIC int
xlog_recover_process_cui(
- struct xfs_mount *mp,
+ struct xfs_trans *parent_tp,
struct xfs_ail *ailp,
- struct xfs_log_item *lip,
- struct xfs_defer_ops *dfops)
+ struct xfs_log_item *lip)
{
struct xfs_cui_log_item *cuip;
int error;
@@ -4749,7 +4748,7 @@ xlog_recover_process_cui(
return 0;
spin_unlock(&ailp->ail_lock);
- error = xfs_cui_recover(mp, cuip, dfops);
+ error = xfs_cui_recover(parent_tp, cuip);
spin_lock(&ailp->ail_lock);
return error;
@@ -4774,10 +4773,9 @@ xlog_recover_cancel_cui(
/* Recover the BUI if necessary. */
STATIC int
xlog_recover_process_bui(
- struct xfs_mount *mp,
+ struct xfs_trans *parent_tp,
struct xfs_ail *ailp,
- struct xfs_log_item *lip,
- struct xfs_defer_ops *dfops)
+ struct xfs_log_item *lip)
{
struct xfs_bui_log_item *buip;
int error;
@@ -4790,7 +4788,7 @@ xlog_recover_process_bui(
return 0;
spin_unlock(&ailp->ail_lock);
- error = xfs_bui_recover(mp, buip, dfops);
+ error = xfs_bui_recover(parent_tp, buip);
spin_lock(&ailp->ail_lock);
return error;
@@ -4829,9 +4827,9 @@ static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
/* Take all the collected deferred ops and finish them in order. */
static int
xlog_finish_defer_ops(
- struct xfs_mount *mp,
- struct xfs_defer_ops *dfops)
+ struct xfs_trans *parent_tp)
{
+ struct xfs_mount *mp = parent_tp->t_mountp;
struct xfs_trans *tp;
int64_t freeblks;
uint resblks;
@@ -4854,16 +4852,10 @@ xlog_finish_defer_ops(
0, XFS_TRANS_RESERVE, &tp);
if (error)
return error;
-
- error = xfs_defer_finish(&tp, dfops);
- if (error)
- goto out_cancel;
+ /* transfer all collected dfops to this transaction */
+ xfs_defer_move(tp, parent_tp);
return xfs_trans_commit(tp);
-
-out_cancel:
- xfs_trans_cancel(tp);
- return error;
}
/*
@@ -4886,23 +4878,34 @@ STATIC int
xlog_recover_process_intents(
struct xlog *log)
{
- struct xfs_defer_ops dfops;
+ struct xfs_trans *parent_tp;
struct xfs_ail_cursor cur;
struct xfs_log_item *lip;
struct xfs_ail *ailp;
- xfs_fsblock_t firstfsb;
- int error = 0;
+ int error;
#if defined(DEBUG) || defined(XFS_WARN)
xfs_lsn_t last_lsn;
#endif
+ /*
+ * The intent recovery handlers commit transactions to complete recovery
+ * for individual intents, but any new deferred operations that are
+ * queued during that process are held off until the very end. The
+ * purpose of this transaction is to serve as a container for deferred
+ * operations. Each intent recovery handler must transfer dfops here
+ * before its local transaction commits, and we'll finish the entire
+ * list below.
+ */
+ error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
+ if (error)
+ return error;
+
ailp = log->l_ailp;
spin_lock(&ailp->ail_lock);
lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
#if defined(DEBUG) || defined(XFS_WARN)
last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
#endif
- xfs_defer_init(&dfops, &firstfsb);
while (lip != NULL) {
/*
* We're done when we see something other than an intent.
@@ -4937,12 +4940,10 @@ xlog_recover_process_intents(
error = xlog_recover_process_rui(log->l_mp, ailp, lip);
break;
case XFS_LI_CUI:
- error = xlog_recover_process_cui(log->l_mp, ailp, lip,
- &dfops);
+ error = xlog_recover_process_cui(parent_tp, ailp, lip);
break;
case XFS_LI_BUI:
- error = xlog_recover_process_bui(log->l_mp, ailp, lip,
- &dfops);
+ error = xlog_recover_process_bui(parent_tp, ailp, lip);
break;
}
if (error)
@@ -4952,10 +4953,9 @@ xlog_recover_process_intents(
out:
xfs_trans_ail_cursor_done(&cur);
spin_unlock(&ailp->ail_lock);
- if (error)
- xfs_defer_cancel(&dfops);
- else
- error = xlog_finish_defer_ops(log->l_mp, &dfops);
+ if (!error)
+ error = xlog_finish_defer_ops(parent_tp);
+ xfs_trans_cancel(parent_tp);
return error;
}
@@ -5094,11 +5094,11 @@ xlog_recover_process_one_iunlink(
*/
ip->i_d.di_dmevmask = 0;
- IRELE(ip);
+ xfs_irele(ip);
return agino;
fail_iput:
- IRELE(ip);
+ xfs_irele(ip);
fail:
/*
* We can't read in the inode this bucket points to, or this inode
@@ -5707,7 +5707,7 @@ xlog_do_recover(
bp->b_flags |= XBF_READ;
bp->b_ops = &xfs_sb_buf_ops;
- error = xfs_buf_submit_wait(bp);
+ error = xfs_buf_submit(bp);
if (error) {
if (!XFS_FORCED_SHUTDOWN(mp)) {
xfs_buf_ioerror_alert(bp, __func__);
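The xlog_recover_process_intents() hunks above condense to the following
control flow (a sketch, not literal code): an empty transaction acts as the
container for deferred ops queued by each intent handler, and everything is
finished in one transaction at the end.

	error = xfs_trans_alloc_empty(log->l_mp, &parent_tp);
	if (error)
		return error;

	/* each handler moves any dfops it queues into parent_tp */
	error = xfs_cui_recover(parent_tp, cuip);

	if (!error)
		error = xlog_finish_defer_ops(parent_tp);	/* defer_move + commit */
	xfs_trans_cancel(parent_tp);	/* empty tp: cancel is the release path */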
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index a3378252baa1..02d15098dbee 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -207,6 +207,9 @@ xfs_initialize_perag(
if (xfs_buf_hash_init(pag))
goto out_free_pag;
init_waitqueue_head(&pag->pagb_wait);
+ spin_lock_init(&pag->pagb_lock);
+ pag->pagb_count = 0;
+ pag->pagb_tree = RB_ROOT;
if (radix_tree_preload(GFP_NOFS))
goto out_hash_destroy;
@@ -606,6 +609,57 @@ xfs_default_resblks(xfs_mount_t *mp)
return resblks;
}
+/* Ensure the summary counts are correct. */
+STATIC int
+xfs_check_summary_counts(
+ struct xfs_mount *mp)
+{
+ /*
+ * The AG0 superblock verifier rejects in-progress filesystems,
+ * so we should never see the flag set this far into mounting.
+ */
+ if (mp->m_sb.sb_inprogress) {
+ xfs_err(mp, "sb_inprogress set after log recovery??");
+ WARN_ON(1);
+ return -EFSCORRUPTED;
+ }
+
+ /*
+ * Now the log is mounted, we know if it was an unclean shutdown or
+ * not. If it was, then with the first phase of recovery completed, we
+ * have consistent AG blocks on disk. We have not recovered EFIs yet,
+ * but they are recovered transactionally in the second recovery phase
+ * later.
+ *
+ * If the log was clean when we mounted, we can check the summary
+ * counters. If any of them are obviously incorrect, we can recompute
+ * them from the AGF headers in the next step.
+ */
+ if (XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
+ (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
+ !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
+ mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
+ mp->m_flags |= XFS_MOUNT_BAD_SUMMARY;
+
+ /*
+ * We can safely re-initialise incore superblock counters from the
+ * per-ag data. These may not be correct if the filesystem was not
+ * cleanly unmounted, so we waited for recovery to finish before doing
+ * this.
+ *
+ * If the filesystem was cleanly unmounted or the previous check did
+ * not flag anything weird, then we can trust the values in the
+ * superblock to be correct and we don't need to do anything here.
+ * Otherwise, recalculate the summary counters.
+ */
+ if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+ XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
+ !(mp->m_flags & XFS_MOUNT_BAD_SUMMARY))
+ return 0;
+
+ return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
+}
+
/*
* This function does the following on an initial mount of a file system:
* - reads the superblock from disk and init the mount struct
@@ -831,32 +885,10 @@ xfs_mountfs(
goto out_fail_wait;
}
- /*
- * Now the log is mounted, we know if it was an unclean shutdown or
- * not. If it was, with the first phase of recovery has completed, we
- * have consistent AG blocks on disk. We have not recovered EFIs yet,
- * but they are recovered transactionally in the second recovery phase
- * later.
- *
- * Hence we can safely re-initialise incore superblock counters from
- * the per-ag data. These may not be correct if the filesystem was not
- * cleanly unmounted, so we need to wait for recovery to finish before
- * doing this.
- *
- * If the filesystem was cleanly unmounted, then we can trust the
- * values in the superblock to be correct and we don't need to do
- * anything here.
- *
- * If we are currently making the filesystem, the initialisation will
- * fail as the perag data is in an undefined state.
- */
- if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
- !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
- !mp->m_sb.sb_inprogress) {
- error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
- if (error)
- goto out_log_dealloc;
- }
+ /* Make sure the summary counts are ok. */
+ error = xfs_check_summary_counts(mp);
+ if (error)
+ goto out_log_dealloc;
/*
* Get and sanity-check the root inode.
@@ -1011,7 +1043,7 @@ xfs_mountfs(
out_rtunmount:
xfs_rtunmount_inodes(mp);
out_rele_rip:
- IRELE(rip);
+ xfs_irele(rip);
/* Clean out dquots that might be in memory after quotacheck. */
xfs_qm_unmount(mp);
/*
@@ -1067,7 +1099,7 @@ xfs_unmountfs(
xfs_fs_unreserve_ag_blocks(mp);
xfs_qm_unmount_quotas(mp);
xfs_rtunmount_inodes(mp);
- IRELE(mp->m_rootip);
+ xfs_irele(mp->m_rootip);
/*
* We can potentially deadlock here if we have an inode cluster
@@ -1395,3 +1427,16 @@ xfs_dev_is_read_only(
}
return 0;
}
+
+/* Force the summary counters to be recalculated at next mount. */
+void
+xfs_force_summary_recalc(
+ struct xfs_mount *mp)
+{
+ if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
+ return;
+
+ spin_lock(&mp->m_sb_lock);
+ mp->m_flags |= XFS_MOUNT_BAD_SUMMARY;
+ spin_unlock(&mp->m_sb_lock);
+}
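xfs_force_summary_recalc() pairs with the unmount-record change in xfs_log.c
above: once XFS_MOUNT_BAD_SUMMARY is set, the unmount record omits
XLOG_UNMOUNT_TRANS and the next mount recomputes the counters from the AGFs. A
sketch of a hypothetical detection site:

	/* Hypothetical check: an impossible free-block count was observed. */
	if (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks)
		xfs_force_summary_recalc(mp);	/* recount at next mount */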
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 245349d1e23f..7964513c3128 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -202,6 +202,7 @@ typedef struct xfs_mount {
must be synchronous except
for space allocations */
#define XFS_MOUNT_UNMOUNTING (1ULL << 1) /* filesystem is unmounting */
+#define XFS_MOUNT_BAD_SUMMARY (1ULL << 2) /* summary counters are bad */
#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
operations, typically for
@@ -216,7 +217,6 @@ typedef struct xfs_mount {
#define XFS_MOUNT_SMALL_INUMS (1ULL << 14) /* user wants 32bit inodes */
#define XFS_MOUNT_32BITINODES (1ULL << 15) /* inode32 allocator active */
#define XFS_MOUNT_NOUUID (1ULL << 16) /* ignore uuid during mount */
-#define XFS_MOUNT_BARRIER (1ULL << 17)
#define XFS_MOUNT_IKEEP (1ULL << 18) /* keep empty inode clusters*/
#define XFS_MOUNT_SWALLOC (1ULL << 19) /* turn on stripe width
* allocation */
@@ -434,5 +434,6 @@ int xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
int error_class, int error);
+void xfs_force_summary_recalc(struct xfs_mount *mp);
#endif /* __XFS_MOUNT_H__ */
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 9ceb85cce33a..52ed7904df10 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -231,15 +231,15 @@ xfs_qm_unmount_quotas(
*/
if (mp->m_quotainfo) {
if (mp->m_quotainfo->qi_uquotaip) {
- IRELE(mp->m_quotainfo->qi_uquotaip);
+ xfs_irele(mp->m_quotainfo->qi_uquotaip);
mp->m_quotainfo->qi_uquotaip = NULL;
}
if (mp->m_quotainfo->qi_gquotaip) {
- IRELE(mp->m_quotainfo->qi_gquotaip);
+ xfs_irele(mp->m_quotainfo->qi_gquotaip);
mp->m_quotainfo->qi_gquotaip = NULL;
}
if (mp->m_quotainfo->qi_pquotaip) {
- IRELE(mp->m_quotainfo->qi_pquotaip);
+ xfs_irele(mp->m_quotainfo->qi_pquotaip);
mp->m_quotainfo->qi_pquotaip = NULL;
}
}
@@ -1200,12 +1200,12 @@ xfs_qm_dqusage_adjust(
goto error0;
}
- IRELE(ip);
+ xfs_irele(ip);
*res = BULKSTAT_RV_DIDONE;
return 0;
error0:
- IRELE(ip);
+ xfs_irele(ip);
*res = BULKSTAT_RV_GIVEUP;
return error;
}
@@ -1575,11 +1575,11 @@ xfs_qm_init_quotainos(
error_rele:
if (uip)
- IRELE(uip);
+ xfs_irele(uip);
if (gip)
- IRELE(gip);
+ xfs_irele(gip);
if (pip)
- IRELE(pip);
+ xfs_irele(pip);
return error;
}
@@ -1588,15 +1588,15 @@ xfs_qm_destroy_quotainos(
xfs_quotainfo_t *qi)
{
if (qi->qi_uquotaip) {
- IRELE(qi->qi_uquotaip);
+ xfs_irele(qi->qi_uquotaip);
qi->qi_uquotaip = NULL; /* paranoia */
}
if (qi->qi_gquotaip) {
- IRELE(qi->qi_gquotaip);
+ xfs_irele(qi->qi_gquotaip);
qi->qi_gquotaip = NULL;
}
if (qi->qi_pquotaip) {
- IRELE(qi->qi_pquotaip);
+ xfs_irele(qi->qi_pquotaip);
qi->qi_pquotaip = NULL;
}
}
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index abc8a21e3a82..b3190890f096 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -22,6 +22,7 @@
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_defer.h"
STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
@@ -189,15 +190,15 @@ xfs_qm_scall_quotaoff(
* Release our quotainode references if we don't need them anymore.
*/
if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
- IRELE(q->qi_uquotaip);
+ xfs_irele(q->qi_uquotaip);
q->qi_uquotaip = NULL;
}
if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
- IRELE(q->qi_gquotaip);
+ xfs_irele(q->qi_gquotaip);
q->qi_gquotaip = NULL;
}
if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
- IRELE(q->qi_pquotaip);
+ xfs_irele(q->qi_pquotaip);
q->qi_pquotaip = NULL;
}
@@ -250,7 +251,7 @@ xfs_qm_scall_trunc_qfile(
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
- IRELE(ip);
+ xfs_irele(ip);
return error;
}
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 205fbb2a77e4..a7c0c657dfaf 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -45,7 +45,7 @@ xfs_qm_fill_state(
tstate->ino_warnlimit = q->qi_iwarnlimit;
tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit;
if (tempqip)
- IRELE(ip);
+ xfs_irele(ip);
}
/*
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 472a73e9d331..fce38b56b962 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -380,9 +380,8 @@ xfs_cud_init(
*/
int
xfs_cui_recover(
- struct xfs_mount *mp,
- struct xfs_cui_log_item *cuip,
- struct xfs_defer_ops *dfops)
+ struct xfs_trans *parent_tp,
+ struct xfs_cui_log_item *cuip)
{
int i;
int error = 0;
@@ -398,6 +397,7 @@ xfs_cui_recover(
xfs_extlen_t new_len;
struct xfs_bmbt_irec irec;
bool requeue_only = false;
+ struct xfs_mount *mp = parent_tp->t_mountp;
ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
@@ -452,6 +452,12 @@ xfs_cui_recover(
mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
if (error)
return error;
+ /*
+ * Recovery stashes all deferred ops during intent processing and
+ * finishes them on completion. Transfer current dfops state to this
+ * transaction and transfer the result back before we return.
+ */
+ xfs_defer_move(tp, parent_tp);
cudp = xfs_trans_get_cud(tp, cuip);
for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
@@ -473,7 +479,7 @@ xfs_cui_recover(
new_len = refc->pe_len;
} else
error = xfs_trans_log_finish_refcount_update(tp, cudp,
- dfops, type, refc->pe_startblock, refc->pe_len,
+ type, refc->pe_startblock, refc->pe_len,
&new_fsb, &new_len, &rcur);
if (error)
goto abort_error;
@@ -484,22 +490,18 @@ xfs_cui_recover(
irec.br_blockcount = new_len;
switch (type) {
case XFS_REFCOUNT_INCREASE:
- error = xfs_refcount_increase_extent(
- tp->t_mountp, dfops, &irec);
+ error = xfs_refcount_increase_extent(tp, &irec);
break;
case XFS_REFCOUNT_DECREASE:
- error = xfs_refcount_decrease_extent(
- tp->t_mountp, dfops, &irec);
+ error = xfs_refcount_decrease_extent(tp, &irec);
break;
case XFS_REFCOUNT_ALLOC_COW:
- error = xfs_refcount_alloc_cow_extent(
- tp->t_mountp, dfops,
+ error = xfs_refcount_alloc_cow_extent(tp,
irec.br_startblock,
irec.br_blockcount);
break;
case XFS_REFCOUNT_FREE_COW:
- error = xfs_refcount_free_cow_extent(
- tp->t_mountp, dfops,
+ error = xfs_refcount_free_cow_extent(tp,
irec.br_startblock,
irec.br_blockcount);
break;
@@ -514,11 +516,13 @@ xfs_cui_recover(
xfs_refcount_finish_one_cleanup(tp, rcur, error);
set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
+ xfs_defer_move(parent_tp, tp);
error = xfs_trans_commit(tp);
return error;
abort_error:
xfs_refcount_finish_one_cleanup(tp, rcur, error);
+ xfs_defer_move(parent_tp, tp);
xfs_trans_cancel(tp);
return error;
}
diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h
index dd830b69cd1e..3896dcc2368f 100644
--- a/fs/xfs/xfs_refcount_item.h
+++ b/fs/xfs/xfs_refcount_item.h
@@ -82,7 +82,6 @@ struct xfs_cud_log_item *xfs_cud_init(struct xfs_mount *,
struct xfs_cui_log_item *);
void xfs_cui_item_free(struct xfs_cui_log_item *);
void xfs_cui_release(struct xfs_cui_log_item *);
-int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip,
- struct xfs_defer_ops *dfops);
+int xfs_cui_recover(struct xfs_trans *parent_tp, struct xfs_cui_log_item *cuip);
#endif /* __XFS_REFCOUNT_ITEM_H__ */
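The xfs_cui_recover() conversion above shows the transfer bracket that every
intent-recovery handler now follows. In outline (a sketch):

	xfs_defer_move(tp, parent_tp);	/* pull pending dfops into the local tp */
	/* ... replay the intent, possibly queueing new deferred work ... */
	xfs_defer_move(parent_tp, tp);	/* hand everything back, even on error */
	error = xfs_trans_commit(tp);	/* or xfs_trans_cancel(tp) on failure */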
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 592fb2071a03..38f405415b88 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -157,12 +157,12 @@ xfs_reflink_find_shared(
if (!agbp)
return -ENOMEM;
- cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
+ cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
find_end_of_shared);
- xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_btree_del_cursor(cur, error);
xfs_trans_brelse(tp, agbp);
return error;
@@ -312,10 +312,8 @@ xfs_reflink_convert_cow_extent(
struct xfs_inode *ip,
struct xfs_bmbt_irec *imap,
xfs_fileoff_t offset_fsb,
- xfs_filblks_t count_fsb,
- struct xfs_defer_ops *dfops)
+ xfs_filblks_t count_fsb)
{
- xfs_fsblock_t first_block = NULLFSBLOCK;
int nimaps = 1;
if (imap->br_state == XFS_EXT_NORM)
@@ -326,8 +324,8 @@ xfs_reflink_convert_cow_extent(
if (imap->br_blockcount == 0)
return 0;
return xfs_bmapi_write(NULL, ip, imap->br_startoff, imap->br_blockcount,
- XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT, &first_block,
- 0, imap, &nimaps, dfops);
+ XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT, 0, imap,
+ &nimaps);
}
/* Convert all of the unwritten CoW extents in a file's range to real ones. */
@@ -342,8 +340,6 @@ xfs_reflink_convert_cow(
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
xfs_filblks_t count_fsb = end_fsb - offset_fsb;
struct xfs_bmbt_irec imap;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block = NULLFSBLOCK;
int nimaps = 1, error = 0;
ASSERT(count != 0);
@@ -351,8 +347,7 @@ xfs_reflink_convert_cow(
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_bmapi_write(NULL, ip, offset_fsb, count_fsb,
XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT |
- XFS_BMAPI_CONVERT_ONLY, &first_block, 0, &imap, &nimaps,
- &dfops);
+ XFS_BMAPI_CONVERT_ONLY, 0, &imap, &nimaps);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
@@ -369,9 +364,7 @@ xfs_reflink_allocate_cow(
xfs_fileoff_t offset_fsb = imap->br_startoff;
xfs_filblks_t count_fsb = imap->br_blockcount;
struct xfs_bmbt_irec got;
- struct xfs_defer_ops dfops;
struct xfs_trans *tp = NULL;
- xfs_fsblock_t first_block;
int nimaps, error = 0;
bool trimmed;
xfs_filblks_t resaligned;
@@ -430,23 +423,18 @@ retry:
xfs_trans_ijoin(tp, ip, 0);
- xfs_defer_init(&dfops, &first_block);
nimaps = 1;
/* Allocate the entire reservation as unwritten blocks. */
error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
- XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, &first_block,
- resblks, imap, &nimaps, &dfops);
+ XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
+ resblks, imap, &nimaps);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
xfs_inode_set_cowblocks_tag(ip);
/* Finish up. */
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
if (error)
return error;
@@ -458,10 +446,8 @@ retry:
if (nimaps == 0)
return -ENOSPC;
convert:
- return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb,
- &dfops);
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
+ return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
+out_trans_cancel:
xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
XFS_QMOPT_RES_REGBLKS);
out:
@@ -471,69 +457,6 @@ out:
}
/*
- * Find the CoW reservation for a given byte offset of a file.
- */
-bool
-xfs_reflink_find_cow_mapping(
- struct xfs_inode *ip,
- xfs_off_t offset,
- struct xfs_bmbt_irec *imap)
-{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
- xfs_fileoff_t offset_fsb;
- struct xfs_bmbt_irec got;
- struct xfs_iext_cursor icur;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
-
- if (!xfs_is_reflink_inode(ip))
- return false;
- offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
- if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
- return false;
- if (got.br_startoff > offset_fsb)
- return false;
-
- trace_xfs_reflink_find_cow_mapping(ip, offset, 1, XFS_IO_OVERWRITE,
- &got);
- *imap = got;
- return true;
-}
-
-/*
- * Trim an extent to end at the next CoW reservation past offset_fsb.
- */
-void
-xfs_reflink_trim_irec_to_next_cow(
- struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb,
- struct xfs_bmbt_irec *imap)
-{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
- struct xfs_bmbt_irec got;
- struct xfs_iext_cursor icur;
-
- if (!xfs_is_reflink_inode(ip))
- return;
-
- /* Find the extent in the CoW fork. */
- if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
- return;
-
- /* This is the extent before; try sliding up one. */
- if (got.br_startoff < offset_fsb) {
- if (!xfs_iext_next_extent(ifp, &icur, &got))
- return;
- }
-
- if (got.br_startoff >= imap->br_startoff + imap->br_blockcount)
- return;
-
- imap->br_blockcount = got.br_startoff - imap->br_startoff;
- trace_xfs_reflink_trim_irec(ip, imap);
-}
-
-/*
* Cancel CoW reservations for some block range of an inode.
*
* If cancel_real is true this function cancels all COW fork extents for the
@@ -553,11 +476,9 @@ xfs_reflink_cancel_cow_blocks(
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got, del;
struct xfs_iext_cursor icur;
- xfs_fsblock_t firstfsb;
- struct xfs_defer_ops dfops;
int error = 0;
- if (!xfs_is_reflink_inode(ip))
+ if (!xfs_inode_has_cow_data(ip))
return 0;
if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
return 0;
@@ -581,26 +502,21 @@ xfs_reflink_cancel_cow_blocks(
if (error)
break;
} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
- xfs_defer_init(&dfops, &firstfsb);
+ ASSERT((*tpp)->t_firstblock == NULLFSBLOCK);
/* Free the CoW orphan record. */
- error = xfs_refcount_free_cow_extent(ip->i_mount,
- &dfops, del.br_startblock,
- del.br_blockcount);
+ error = xfs_refcount_free_cow_extent(*tpp,
+ del.br_startblock, del.br_blockcount);
if (error)
break;
- xfs_bmap_add_free(ip->i_mount, &dfops,
- del.br_startblock, del.br_blockcount,
- NULL);
+ xfs_bmap_add_free(*tpp, del.br_startblock,
+ del.br_blockcount, NULL);
/* Roll the transaction */
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(tpp, &dfops);
- if (error) {
- xfs_defer_cancel(&dfops);
+ error = xfs_defer_finish(tpp);
+ if (error)
break;
- }
/* Remove the mapping from the CoW fork. */
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
@@ -623,7 +539,6 @@ next_extent:
/* clear tag if cow fork is emptied */
if (!ifp->if_bytes)
xfs_inode_clear_cowblocks_tag(ip);
-
return error;
}
@@ -696,8 +611,6 @@ xfs_reflink_end_cow(
struct xfs_trans *tp;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
- xfs_fsblock_t firstfsb;
- struct xfs_defer_ops dfops;
int error;
unsigned int resblks;
xfs_filblks_t rlen;
@@ -764,12 +677,11 @@ xfs_reflink_end_cow(
goto prev_extent;
/* Unmap the old blocks in the data fork. */
- xfs_defer_init(&dfops, &firstfsb);
+ ASSERT(tp->t_firstblock == NULLFSBLOCK);
rlen = del.br_blockcount;
- error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1,
- &firstfsb, &dfops);
+ error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
if (error)
- goto out_defer;
+ goto out_cancel;
/* Trim the extent to whatever got unmapped. */
if (rlen) {
@@ -779,15 +691,15 @@ xfs_reflink_end_cow(
trace_xfs_reflink_cow_remap(ip, &del);
/* Free the CoW orphan record. */
- error = xfs_refcount_free_cow_extent(tp->t_mountp, &dfops,
- del.br_startblock, del.br_blockcount);
+ error = xfs_refcount_free_cow_extent(tp, del.br_startblock,
+ del.br_blockcount);
if (error)
- goto out_defer;
+ goto out_cancel;
/* Map the new blocks into the data fork. */
- error = xfs_bmap_map_extent(tp->t_mountp, &dfops, ip, &del);
+ error = xfs_bmap_map_extent(tp, ip, &del);
if (error)
- goto out_defer;
+ goto out_cancel;
/* Charge this new data fork mapping to the on-disk quota. */
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
@@ -796,10 +708,9 @@ xfs_reflink_end_cow(
/* Remove the mapping from the CoW fork. */
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(&tp, &dfops);
+ error = xfs_defer_finish(&tp);
if (error)
- goto out_defer;
+ goto out_cancel;
if (!xfs_iext_get_extent(ifp, &icur, &got))
break;
continue;
@@ -814,8 +725,6 @@ prev_extent:
goto out;
return 0;
-out_defer:
- xfs_defer_cancel(&dfops);
out_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1070,9 +979,7 @@ xfs_reflink_remap_extent(
struct xfs_mount *mp = ip->i_mount;
bool real_extent = xfs_bmap_is_real_extent(irec);
struct xfs_trans *tp;
- xfs_fsblock_t firstfsb;
unsigned int resblks;
- struct xfs_defer_ops dfops;
struct xfs_bmbt_irec uirec;
xfs_filblks_t rlen;
xfs_filblks_t unmap_len;
@@ -1113,11 +1020,10 @@ xfs_reflink_remap_extent(
/* Unmap the old blocks in the data fork. */
rlen = unmap_len;
while (rlen) {
- xfs_defer_init(&dfops, &firstfsb);
- error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1,
- &firstfsb, &dfops);
+ ASSERT(tp->t_firstblock == NULLFSBLOCK);
+ error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1);
if (error)
- goto out_defer;
+ goto out_cancel;
/*
* Trim the extent to whatever got unmapped.
@@ -1136,14 +1042,14 @@ xfs_reflink_remap_extent(
uirec.br_blockcount, uirec.br_startblock);
/* Update the refcount tree */
- error = xfs_refcount_increase_extent(mp, &dfops, &uirec);
+ error = xfs_refcount_increase_extent(tp, &uirec);
if (error)
- goto out_defer;
+ goto out_cancel;
/* Map the new blocks into the data fork. */
- error = xfs_bmap_map_extent(mp, &dfops, ip, &uirec);
+ error = xfs_bmap_map_extent(tp, ip, &uirec);
if (error)
- goto out_defer;
+ goto out_cancel;
/* Update quota accounting. */
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
@@ -1162,10 +1068,9 @@ xfs_reflink_remap_extent(
next_extent:
/* Process all the deferred stuff. */
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(&tp, &dfops);
+ error = xfs_defer_finish(&tp);
if (error)
- goto out_defer;
+ goto out_cancel;
}
error = xfs_trans_commit(tp);
@@ -1174,8 +1079,6 @@ next_extent:
goto out;
return 0;
-out_defer:
- xfs_defer_cancel(&dfops);
out_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
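Throughout xfs_reflink.c the separate out_defer labels disappear because
deferred ops now hang off the transaction, so cancelling the transaction also
cleans them up. The resulting error-path shape (sketch):

	error = xfs_defer_finish(&tp);
	if (error)
		goto out_cancel;	/* xfs_trans_cancel() drops pending dfops too */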
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 1532827ba911..c585ad9552b2 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -18,10 +18,6 @@ extern int xfs_reflink_allocate_cow(struct xfs_inode *ip,
struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode);
extern int xfs_reflink_convert_cow(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t count);
-extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
- struct xfs_bmbt_irec *imap);
-extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap);
extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
struct xfs_trans **tpp, xfs_fileoff_t offset_fsb,
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 329d4d26c13e..926ed314ffba 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -761,8 +761,6 @@ xfs_growfs_rt_alloc(
struct xfs_buf *bp; /* temporary buffer for zeroing */
xfs_daddr_t d; /* disk block address */
int error; /* error return value */
- xfs_fsblock_t firstblock;/* first block allocated in xaction */
- struct xfs_defer_ops dfops; /* list of freed blocks */
xfs_fsblock_t fsbno; /* filesystem block for bno */
struct xfs_bmbt_irec map; /* block map output */
int nmap; /* number of block maps */
@@ -787,24 +785,20 @@ xfs_growfs_rt_alloc(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_defer_init(&dfops, &firstblock);
/*
* Allocate blocks to the bitmap file.
*/
nmap = 1;
error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
- XFS_BMAPI_METADATA, &firstblock,
- resblks, &map, &nmap, &dfops);
+ XFS_BMAPI_METADATA, resblks, &map,
+ &nmap);
if (!error && nmap < 1)
error = -ENOSPC;
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
/*
* Free any blocks freed up in the transaction, then commit.
*/
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
error = xfs_trans_commit(tp);
if (error)
return error;
@@ -854,8 +848,6 @@ xfs_growfs_rt_alloc(
return 0;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
return error;
@@ -1215,7 +1207,7 @@ xfs_rtmount_inodes(
ASSERT(sbp->sb_rsumino != NULLFSINO);
error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip);
if (error) {
- IRELE(mp->m_rbmip);
+ xfs_irele(mp->m_rbmip);
return error;
}
ASSERT(mp->m_rsumip != NULL);
@@ -1227,9 +1219,9 @@ xfs_rtunmount_inodes(
struct xfs_mount *mp)
{
if (mp->m_rbmip)
- IRELE(mp->m_rbmip);
+ xfs_irele(mp->m_rbmip);
if (mp->m_rsumip)
- IRELE(mp->m_rsumip);
+ xfs_irele(mp->m_rsumip);
}
/*
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 9d791f158dfe..207ee302b1bb 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -65,11 +65,10 @@ enum {
Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
Opt_mtpt, Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
- Opt_allocsize, Opt_norecovery, Opt_barrier, Opt_nobarrier,
- Opt_inode64, Opt_inode32, Opt_ikeep, Opt_noikeep,
- Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2, Opt_filestreams,
- Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, Opt_prjquota,
- Opt_uquota, Opt_gquota, Opt_pquota,
+ Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
+ Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
+ Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
+ Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
};
@@ -118,14 +117,7 @@ static const match_table_t tokens = {
{Opt_qnoenforce, "qnoenforce"}, /* same as uqnoenforce */
{Opt_discard, "discard"}, /* Discard unused blocks */
{Opt_nodiscard, "nodiscard"}, /* Do not discard unused blocks */
-
{Opt_dax, "dax"}, /* Enable direct access to bdev pages */
-
- /* Deprecated mount options scheduled for removal */
- {Opt_barrier, "barrier"}, /* use writer barriers for log write and
- * unwritten extent conversion */
- {Opt_nobarrier, "nobarrier"}, /* .. disable */
-
{Opt_err, NULL},
};
@@ -209,7 +201,6 @@ xfs_parseargs(
* Set some default flags that could be cleared by the mount option
* parsing.
*/
- mp->m_flags |= XFS_MOUNT_BARRIER;
mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
/*
@@ -362,14 +353,6 @@ xfs_parseargs(
mp->m_flags |= XFS_MOUNT_DAX;
break;
#endif
- case Opt_barrier:
- xfs_warn(mp, "%s option is deprecated, ignoring.", p);
- mp->m_flags |= XFS_MOUNT_BARRIER;
- break;
- case Opt_nobarrier:
- xfs_warn(mp, "%s option is deprecated, ignoring.", p);
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- break;
default:
xfs_warn(mp, "unknown mount option [%s].", p);
return -EINVAL;
@@ -487,7 +470,6 @@ xfs_showargs(
static struct proc_xfs_info xfs_info_unset[] = {
/* the few simple ones we can get from the mount struct */
{ XFS_MOUNT_COMPAT_IOSIZE, ",largeio" },
- { XFS_MOUNT_BARRIER, ",nobarrier" },
{ XFS_MOUNT_SMALL_INUMS, ",inode64" },
{ 0, NULL }
};
@@ -1278,14 +1260,6 @@ xfs_fs_remount(
token = match_token(p, tokens, args);
switch (token) {
- case Opt_barrier:
- xfs_warn(mp, "%s option is deprecated, ignoring.", p);
- mp->m_flags |= XFS_MOUNT_BARRIER;
- break;
- case Opt_nobarrier:
- xfs_warn(mp, "%s option is deprecated, ignoring.", p);
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- break;
case Opt_inode64:
mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
@@ -1860,7 +1834,7 @@ MODULE_ALIAS_FS("xfs");
STATIC int __init
xfs_init_zones(void)
{
- if (bioset_init(&xfs_ioend_bioset, 4 * MAX_BUF_PER_PAGE,
+ if (bioset_init(&xfs_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
offsetof(struct xfs_ioend, io_inline_bio),
BIOSET_NEED_BVECS))
goto out;
@@ -1886,7 +1860,7 @@ xfs_init_zones(void)
if (!xfs_da_state_zone)
goto out_destroy_btree_cur_zone;
- xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
+ xfs_ifork_zone = kmem_zone_init(sizeof(struct xfs_ifork), "xfs_ifork");
if (!xfs_ifork_zone)
goto out_destroy_da_state_zone;
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 3783afcb68d2..a3e98c64b6e3 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -163,8 +163,6 @@ xfs_symlink(
struct xfs_inode *ip = NULL;
int error = 0;
int pathlen;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
bool unlock_dp_on_error = false;
xfs_fileoff_t first_fsb;
xfs_filblks_t fs_blocks;
@@ -243,13 +241,6 @@ xfs_symlink(
goto out_trans_cancel;
/*
- * Initialize the bmap freelist prior to calling either
- * bmapi or the directory create code.
- */
- xfs_defer_init(&dfops, &first_block);
- tp->t_agfl_dfops = &dfops;
-
- /*
* Allocate an inode for the symlink.
*/
error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
@@ -290,10 +281,9 @@ xfs_symlink(
nmaps = XFS_SYMLINK_MAPS;
error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks,
- XFS_BMAPI_METADATA, &first_block, resblks,
- mval, &nmaps, &dfops);
+ XFS_BMAPI_METADATA, resblks, mval, &nmaps);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
if (resblks)
resblks -= fs_blocks;
@@ -311,7 +301,7 @@ xfs_symlink(
BTOBB(byte_cnt), 0);
if (!bp) {
error = -ENOMEM;
- goto out_bmap_cancel;
+ goto out_trans_cancel;
}
bp->b_ops = &xfs_symlink_buf_ops;
@@ -338,10 +328,9 @@ xfs_symlink(
/*
* Create the directory entry for the symlink.
*/
- error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
- &first_block, &dfops, resblks);
+ error = xfs_dir_createname(tp, dp, link_name, ip->i_ino, resblks);
if (error)
- goto out_bmap_cancel;
+ goto out_trans_cancel;
xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
@@ -354,10 +343,6 @@ xfs_symlink(
xfs_trans_set_sync(tp);
}
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
if (error)
goto out_release_inode;
@@ -369,8 +354,6 @@ xfs_symlink(
*ipp = ip;
return 0;
-out_bmap_cancel:
- xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_inode:
@@ -381,7 +364,7 @@ out_release_inode:
*/
if (ip) {
xfs_finish_inode_setup(ip);
- IRELE(ip);
+ xfs_irele(ip);
}
xfs_qm_dqrele(udqp);
@@ -403,8 +386,6 @@ xfs_inactive_symlink_rmt(
xfs_buf_t *bp;
int done;
int error;
- xfs_fsblock_t first_block;
- struct xfs_defer_ops dfops;
int i;
xfs_mount_t *mp;
xfs_bmbt_irec_t mval[XFS_SYMLINK_MAPS];
@@ -443,7 +424,6 @@ xfs_inactive_symlink_rmt(
* Find the block(s) so we can inval and unmap them.
*/
done = 0;
- xfs_defer_init(&dfops, &first_block);
nmaps = ARRAY_SIZE(mval);
error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
mval, &nmaps, 0);
@@ -458,28 +438,21 @@ xfs_inactive_symlink_rmt(
XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
if (!bp) {
error = -ENOMEM;
- goto error_bmap_cancel;
+ goto error_trans_cancel;
}
xfs_trans_binval(tp, bp);
}
/*
* Unmap the dead block(s) to the dfops.
*/
- error = xfs_bunmapi(tp, ip, 0, size, 0, nmaps,
- &first_block, &dfops, &done);
+ error = xfs_bunmapi(tp, ip, 0, size, 0, nmaps, &done);
if (error)
- goto error_bmap_cancel;
+ goto error_trans_cancel;
ASSERT(done);
- /*
- * Commit the first transaction. This logs the EFI and the inode.
- */
- xfs_defer_ijoin(&dfops, ip);
- error = xfs_defer_finish(&tp, &dfops);
- if (error)
- goto error_bmap_cancel;
/*
- * Commit the transaction containing extent freeing and EFDs.
+ * Commit the transaction. This first logs the EFI and the inode, then
+ * rolls and commits the transaction that frees the extents.
*/
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_trans_commit(tp);
@@ -498,8 +471,6 @@ xfs_inactive_symlink_rmt(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return 0;
-error_bmap_cancel:
- xfs_defer_cancel(&dfops);
error_trans_cancel:
xfs_trans_cancel(tp);
error_unlock:
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 972d45d28097..ad315e83bc02 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -310,7 +310,6 @@ DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_submit);
-DEFINE_BUF_EVENT(xfs_buf_submit_wait);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
DEFINE_BUF_EVENT(xfs_buf_trylock_fail);
@@ -1153,33 +1152,23 @@ DECLARE_EVENT_CLASS(xfs_page_class,
__field(loff_t, size)
__field(unsigned long, offset)
__field(unsigned int, length)
- __field(int, delalloc)
- __field(int, unwritten)
),
TP_fast_assign(
- int delalloc = -1, unwritten = -1;
-
- if (page_has_buffers(page))
- xfs_count_page_state(page, &delalloc, &unwritten);
__entry->dev = inode->i_sb->s_dev;
__entry->ino = XFS_I(inode)->i_ino;
__entry->pgoff = page_offset(page);
__entry->size = i_size_read(inode);
__entry->offset = off;
__entry->length = len;
- __entry->delalloc = delalloc;
- __entry->unwritten = unwritten;
),
TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
- "length %x delalloc %d unwritten %d",
+ "length %x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->pgoff,
__entry->size,
__entry->offset,
- __entry->length,
- __entry->delalloc,
- __entry->unwritten)
+ __entry->length)
)
#define DEFINE_PAGE_EVENT(name) \
@@ -1263,9 +1252,6 @@ DEFINE_EVENT(xfs_imap_class, name, \
TP_ARGS(ip, offset, count, type, irec))
DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
-DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
-DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
-DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct);
DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
DEFINE_IOMAP_EVENT(xfs_iomap_found);
@@ -1304,7 +1290,6 @@ DEFINE_EVENT(xfs_simple_io_class, name, \
TP_ARGS(ip, offset, count))
DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
-DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof);
DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write);
@@ -1604,7 +1589,7 @@ DECLARE_EVENT_CLASS(xfs_alloc_class,
__entry->wasfromfl = args->wasfromfl;
__entry->resv = args->resv;
__entry->datatype = args->datatype;
- __entry->firstblock = args->firstblock;
+ __entry->firstblock = args->tp->t_firstblock;
),
TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u "
"prod %u minleft %u total %u alignment %u minalignslop %u "
@@ -2228,67 +2213,54 @@ DEFINE_BTREE_CUR_EVENT(xfs_btree_overlapped_query_range);
/* deferred ops */
struct xfs_defer_pending;
-struct xfs_defer_ops;
DECLARE_EVENT_CLASS(xfs_defer_class,
- TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop,
- unsigned long caller_ip),
- TP_ARGS(mp, dop, caller_ip),
+ TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip),
+ TP_ARGS(tp, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(void *, dop)
+ __field(struct xfs_trans *, tp)
__field(char, committed)
- __field(char, low)
__field(unsigned long, caller_ip)
),
TP_fast_assign(
- __entry->dev = mp ? mp->m_super->s_dev : 0;
- __entry->dop = dop;
- __entry->committed = dop->dop_committed;
- __entry->low = dop->dop_low;
+ __entry->dev = tp->t_mountp->m_super->s_dev;
+ __entry->tp = tp;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ops %p committed %d low %d, caller %pS",
+ TP_printk("dev %d:%d tp %p caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->dop,
- __entry->committed,
- __entry->low,
+ __entry->tp,
(char *)__entry->caller_ip)
)
#define DEFINE_DEFER_EVENT(name) \
DEFINE_EVENT(xfs_defer_class, name, \
- TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, \
- unsigned long caller_ip), \
- TP_ARGS(mp, dop, caller_ip))
+ TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), \
+ TP_ARGS(tp, caller_ip))
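
Narrowing TP_PROTO from (mp, dop, caller_ip) to (tp, caller_ip) shrinks every defer-event call site, since the event body now derives the device from tp->t_mountp. Illustrative before/after for one of the events defined below (the call-site location is hypothetical; the event name and arguments follow from the prototypes in this hunk):

/* before: caller supplied the mount and the external dfops */
trace_xfs_defer_cancel(mp, dop, _RET_IP_);

/* after: the transaction is the only handle needed */
trace_xfs_defer_cancel(tp, _RET_IP_);
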
DECLARE_EVENT_CLASS(xfs_defer_error_class,
- TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error),
- TP_ARGS(mp, dop, error),
+ TP_PROTO(struct xfs_trans *tp, int error),
+ TP_ARGS(tp, error),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(void *, dop)
+ __field(struct xfs_trans *, tp)
__field(char, committed)
- __field(char, low)
__field(int, error)
),
TP_fast_assign(
- __entry->dev = mp ? mp->m_super->s_dev : 0;
- __entry->dop = dop;
- __entry->committed = dop->dop_committed;
- __entry->low = dop->dop_low;
+ __entry->dev = tp->t_mountp->m_super->s_dev;
+ __entry->tp = tp;
__entry->error = error;
),
- TP_printk("dev %d:%d ops %p committed %d low %d err %d",
+ TP_printk("dev %d:%d tp %p err %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->dop,
- __entry->committed,
- __entry->low,
+ __entry->tp,
__entry->error)
)
#define DEFINE_DEFER_ERROR_EVENT(name) \
DEFINE_EVENT(xfs_defer_error_class, name, \
- TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error), \
- TP_ARGS(mp, dop, error))
+ TP_PROTO(struct xfs_trans *tp, int error), \
+ TP_ARGS(tp, error))
DECLARE_EVENT_CLASS(xfs_defer_pending_class,
TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp),
@@ -2407,7 +2379,6 @@ DEFINE_EVENT(xfs_map_extent_deferred_class, name, \
xfs_exntst_t state), \
TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state))
-DEFINE_DEFER_EVENT(xfs_defer_init);
DEFINE_DEFER_EVENT(xfs_defer_cancel);
DEFINE_DEFER_EVENT(xfs_defer_trans_roll);
DEFINE_DEFER_EVENT(xfs_defer_trans_abort);
@@ -2417,9 +2388,8 @@ DEFINE_DEFER_EVENT(xfs_defer_finish_done);
DEFINE_DEFER_ERROR_EVENT(xfs_defer_trans_roll_error);
DEFINE_DEFER_ERROR_EVENT(xfs_defer_finish_error);
-DEFINE_DEFER_PENDING_EVENT(xfs_defer_intake_work);
-DEFINE_DEFER_PENDING_EVENT(xfs_defer_intake_cancel);
-DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_cancel);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_create_intent);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_cancel_list);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_finish);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_abort);
@@ -3215,8 +3185,6 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow);
DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_bounce_dio_write);
-DEFINE_IOMAP_EVENT(xfs_reflink_find_cow_mapping);
-DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 524f543c5b82..bedc5a5133a5 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -100,6 +100,8 @@ xfs_trans_dup(
ntp->t_mountp = tp->t_mountp;
INIT_LIST_HEAD(&ntp->t_items);
INIT_LIST_HEAD(&ntp->t_busy);
+ INIT_LIST_HEAD(&ntp->t_dfops);
+ ntp->t_firstblock = NULLFSBLOCK;
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
ASSERT(tp->t_ticket != NULL);
@@ -118,7 +120,9 @@ xfs_trans_dup(
ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
tp->t_rtx_res = tp->t_rtx_res_used;
ntp->t_pflags = tp->t_pflags;
- ntp->t_agfl_dfops = tp->t_agfl_dfops;
+
+ /* move deferred ops over to the new tp */
+ xfs_defer_move(ntp, tp);
xfs_trans_dup_dqinfo(tp, ntp);
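
Replacing the t_agfl_dfops pointer copy with xfs_defer_move(ntp, tp) changes transaction rolling from "share a pointer to an external dfops" into "splice the pending list from the old transaction to the new one". Below is a self-contained user-space model of that hand-off (mock_* types and names throughout; treating t_firstblock as simply copied over is an assumption of the model, not a claim about the kernel function's exact body):

/* Mock of the dfops hand-off on transaction roll; not kernel code. */
#include <assert.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* move every entry on 'from' to the front of 'to', leaving 'from' empty */
static void list_splice_init(struct list_head *from, struct list_head *to)
{
	if (list_empty(from))
		return;
	from->next->prev = to;
	from->prev->next = to->next;
	to->next->prev = from->prev;
	to->next = from->next;
	list_init(from);
}

#define MOCK_NULLFSBLOCK (~0ULL)

struct mock_trans {
	struct list_head	t_dfops;	/* models tp->t_dfops */
	unsigned long long	t_firstblock;	/* models tp->t_firstblock */
};

/* analogous in spirit to the xfs_defer_move(ntp, tp) call above */
static void mock_defer_move(struct mock_trans *dst, struct mock_trans *src)
{
	list_splice_init(&src->t_dfops, &dst->t_dfops);
	dst->t_firstblock = src->t_firstblock;	/* assumption of this model */
	src->t_firstblock = MOCK_NULLFSBLOCK;
}

int main(void)
{
	struct mock_trans tp, ntp;
	struct list_head item;

	list_init(&tp.t_dfops);
	list_init(&ntp.t_dfops);
	tp.t_firstblock = 123;
	ntp.t_firstblock = MOCK_NULLFSBLOCK;

	list_add_tail(&item, &tp.t_dfops);	/* one pending deferred item */
	mock_defer_move(&ntp, &tp);

	assert(list_empty(&tp.t_dfops));	/* old trans owns nothing */
	assert(ntp.t_dfops.next == &item);	/* new trans owns the work */
	assert(ntp.t_firstblock == 123);
	return 0;
}
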
@@ -273,6 +277,8 @@ xfs_trans_alloc(
tp->t_mountp = mp;
INIT_LIST_HEAD(&tp->t_items);
INIT_LIST_HEAD(&tp->t_busy);
+ INIT_LIST_HEAD(&tp->t_dfops);
+ tp->t_firstblock = NULLFSBLOCK;
error = xfs_trans_reserve(tp, resp, blocks, rtextents);
if (error) {
@@ -914,12 +920,21 @@ __xfs_trans_commit(
int error = 0;
int sync = tp->t_flags & XFS_TRANS_SYNC;
- ASSERT(!tp->t_agfl_dfops ||
- !xfs_defer_has_unfinished_work(tp->t_agfl_dfops) || regrant);
-
trace_xfs_trans_commit(tp, _RET_IP_);
/*
+ * Finish deferred items on final commit. Only permanent transactions
+ * should ever have deferred ops.
+ */
+ WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
+ !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
+ if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
+ error = xfs_defer_finish_noroll(&tp);
+ if (error)
+ goto out_unreserve;
+ }
+
+ /*
* If there is nothing to be logged by the transaction,
* then unlock all of the items associated with the
* transaction and free the transaction structure.
@@ -1008,6 +1023,9 @@ xfs_trans_cancel(
trace_xfs_trans_cancel(tp, _RET_IP_);
+ if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
+ xfs_defer_cancel(tp);
+
/*
* See if the caller is relying on us to shut down the
* filesystem. This happens in paths where we detect
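
Taken together, the two hunks above move ownership of the deferred-op lifecycle into the transaction core: the final commit of a permanent transaction drains tp->t_dfops through xfs_defer_finish_noroll(), a roll (regrant) leaves the list untouched, and xfs_trans_cancel() discards whatever is still pending. A compact model of that gating, reusing the mock types and includes from the splice sketch earlier (mock_* names are hypothetical stand-ins, not kernel functions):

#include <stdbool.h>

/* drain the list in order; the real code executes each pending item */
static int mock_defer_finish(struct mock_trans *tp)
{
	while (!list_empty(&tp->t_dfops)) {
		struct list_head *item = tp->t_dfops.next;

		item->prev->next = item->next;	/* unlink the head item */
		item->next->prev = item->prev;
		/* ->finish_item() would run the deferred work here */
	}
	return 0;
}

/* models the gating added to __xfs_trans_commit() above */
static int mock_trans_commit(struct mock_trans *tp, bool regrant,
			     bool permanent)
{
	/* models the WARN_ON_ONCE(): only permanent trans carry dfops */
	assert(list_empty(&tp->t_dfops) || permanent);

	/*
	 * A roll (regrant) must not drain the list here;
	 * xfs_defer_finish_noroll() itself rolls transactions while it
	 * works through the pending items.
	 */
	if (!regrant && permanent) {
		int error = mock_defer_finish(tp);
		if (error)
			return error;	/* real code jumps to out_unreserve */
	}
	/* ... log items, unlock, free the transaction ... */
	return 0;
}

/* models the xfs_defer_cancel() call added to xfs_trans_cancel() */
static void mock_trans_cancel(struct mock_trans *tp, bool permanent)
{
	if (permanent)
		list_init(&tp->t_dfops);	/* real code frees each item */
	/* ... shutdown checks and unreserve as before ... */
}
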
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 6526314f0b8f..c3d278e96ad1 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -24,7 +24,6 @@ struct xfs_rui_log_item;
struct xfs_btree_cur;
struct xfs_cui_log_item;
struct xfs_cud_log_item;
-struct xfs_defer_ops;
struct xfs_bui_log_item;
struct xfs_bud_log_item;
@@ -90,6 +89,11 @@ void xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
#define XFS_ITEM_LOCKED 2
#define XFS_ITEM_FLUSHING 3
+/*
+ * Deferred operation item relogging limits.
+ */
+#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
+#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */
/*
* This is the structure maintained for every active transaction.
@@ -102,11 +106,11 @@ typedef struct xfs_trans {
unsigned int t_blk_res_used; /* # of resvd blocks used */
unsigned int t_rtx_res; /* # of rt extents resvd */
unsigned int t_rtx_res_used; /* # of resvd rt extents used */
+ unsigned int t_flags; /* misc flags */
+ xfs_fsblock_t t_firstblock; /* first block allocated */
struct xlog_ticket *t_ticket; /* log mgr ticket */
struct xfs_mount *t_mountp; /* ptr to fs mount struct */
struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */
- struct xfs_defer_ops *t_agfl_dfops; /* optional agfl fixup dfops */
- unsigned int t_flags; /* misc flags */
int64_t t_icount_delta; /* superblock icount change */
int64_t t_ifree_delta; /* superblock ifree change */
int64_t t_fdblocks_delta; /* superblock fdblocks chg */
@@ -128,6 +132,7 @@ typedef struct xfs_trans {
int64_t t_rextslog_delta;/* superblocks rextslog chg */
struct list_head t_items; /* log item descriptors */
struct list_head t_busy; /* list of busy extents */
+ struct list_head t_dfops; /* deferred operations */
unsigned long t_pflags; /* saved process flags state */
} xfs_trans_t;
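
With t_dfops and t_firstblock embedded here, the transaction is the single carrier of deferred-op state, and the XFS_DEFER_OPS_NR_INODES/XFS_DEFER_OPS_NR_BUFS constants bound how many inodes and buffers can stay joined to the chain of transactions that finishing the list rolls through. A hedged reconstruction of how the calling convention changes for a typical user (the "before" lines follow the pre-series style from memory; their exact signatures belong to the old code, not this patch):

/* before: on-stack dfops and firstblock threaded through every helper */
struct xfs_defer_ops	dfops;
xfs_fsblock_t		firstfsb;

xfs_defer_init(&dfops, &firstfsb);
/* ... pass &dfops and &firstfsb down the call chain ... */
error = xfs_defer_finish(&tp, &dfops);

/* after: both live in the transaction; commit drains the list */
error = xfs_trans_commit(tp);
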
@@ -258,7 +263,7 @@ void xfs_refcount_update_init_defer_op(void);
struct xfs_cud_log_item *xfs_trans_get_cud(struct xfs_trans *tp,
struct xfs_cui_log_item *cuip);
int xfs_trans_log_finish_refcount_update(struct xfs_trans *tp,
- struct xfs_cud_log_item *cudp, struct xfs_defer_ops *dfops,
+ struct xfs_cud_log_item *cudp,
enum xfs_refcount_intent_type type, xfs_fsblock_t startblock,
xfs_extlen_t blockcount, xfs_fsblock_t *new_fsb,
xfs_extlen_t *new_len, struct xfs_btree_cur **pcur);
@@ -270,9 +275,9 @@ void xfs_bmap_update_init_defer_op(void);
struct xfs_bud_log_item *xfs_trans_get_bud(struct xfs_trans *tp,
struct xfs_bui_log_item *buip);
int xfs_trans_log_finish_bmap_update(struct xfs_trans *tp,
- struct xfs_bud_log_item *rudp, struct xfs_defer_ops *dfops,
- enum xfs_bmap_intent_type type, struct xfs_inode *ip,
- int whichfork, xfs_fileoff_t startoff, xfs_fsblock_t startblock,
- xfs_filblks_t *blockcount, xfs_exntst_t state);
+ struct xfs_bud_log_item *rudp, enum xfs_bmap_intent_type type,
+ struct xfs_inode *ip, int whichfork, xfs_fileoff_t startoff,
+ xfs_fsblock_t startblock, xfs_filblks_t *blockcount,
+ xfs_exntst_t state);
#endif /* __XFS_TRANS_H__ */
diff --git a/fs/xfs/xfs_trans_bmap.c b/fs/xfs/xfs_trans_bmap.c
index a15a5cd867f9..741c558b2179 100644
--- a/fs/xfs/xfs_trans_bmap.c
+++ b/fs/xfs/xfs_trans_bmap.c
@@ -43,7 +43,6 @@ int
xfs_trans_log_finish_bmap_update(
struct xfs_trans *tp,
struct xfs_bud_log_item *budp,
- struct xfs_defer_ops *dop,
enum xfs_bmap_intent_type type,
struct xfs_inode *ip,
int whichfork,
@@ -54,7 +53,7 @@ xfs_trans_log_finish_bmap_update(
{
int error;
- error = xfs_bmap_finish_one(tp, dop, ip, type, whichfork, startoff,
+ error = xfs_bmap_finish_one(tp, ip, type, whichfork, startoff,
startblock, blockcount, state);
/*
@@ -176,7 +175,6 @@ xfs_bmap_update_create_done(
STATIC int
xfs_bmap_update_finish_item(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
struct list_head *item,
void *done_item,
void **state)
@@ -187,7 +185,7 @@ xfs_bmap_update_finish_item(
bmap = container_of(item, struct xfs_bmap_intent, bi_list);
count = bmap->bi_bmap.br_blockcount;
- error = xfs_trans_log_finish_bmap_update(tp, done_item, dop,
+ error = xfs_trans_log_finish_bmap_update(tp, done_item,
bmap->bi_type,
bmap->bi_owner, bmap->bi_whichfork,
bmap->bi_bmap.br_startoff,
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index bd66c76f55e6..855c0b651fd4 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -171,7 +171,6 @@ xfs_extent_free_create_done(
STATIC int
xfs_extent_free_finish_item(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
struct list_head *item,
void *done_item,
void **state)
@@ -226,7 +225,6 @@ static const struct xfs_defer_op_type xfs_extent_free_defer_type = {
STATIC int
xfs_agfl_free_finish_item(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
struct list_head *item,
void *done_item,
void **state)
diff --git a/fs/xfs/xfs_trans_refcount.c b/fs/xfs/xfs_trans_refcount.c
index 46dd4fca8aa7..523c55663954 100644
--- a/fs/xfs/xfs_trans_refcount.c
+++ b/fs/xfs/xfs_trans_refcount.c
@@ -42,7 +42,6 @@ int
xfs_trans_log_finish_refcount_update(
struct xfs_trans *tp,
struct xfs_cud_log_item *cudp,
- struct xfs_defer_ops *dop,
enum xfs_refcount_intent_type type,
xfs_fsblock_t startblock,
xfs_extlen_t blockcount,
@@ -52,7 +51,7 @@ xfs_trans_log_finish_refcount_update(
{
int error;
- error = xfs_refcount_finish_one(tp, dop, type, startblock,
+ error = xfs_refcount_finish_one(tp, type, startblock,
blockcount, new_fsb, new_len, pcur);
/*
@@ -169,7 +168,6 @@ xfs_refcount_update_create_done(
STATIC int
xfs_refcount_update_finish_item(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
struct list_head *item,
void *done_item,
void **state)
@@ -180,7 +178,7 @@ xfs_refcount_update_finish_item(
int error;
refc = container_of(item, struct xfs_refcount_intent, ri_list);
- error = xfs_trans_log_finish_refcount_update(tp, done_item, dop,
+ error = xfs_trans_log_finish_refcount_update(tp, done_item,
refc->ri_type,
refc->ri_startblock,
refc->ri_blockcount,
diff --git a/fs/xfs/xfs_trans_rmap.c b/fs/xfs/xfs_trans_rmap.c
index 726d8e2c0558..05b00e40251f 100644
--- a/fs/xfs/xfs_trans_rmap.c
+++ b/fs/xfs/xfs_trans_rmap.c
@@ -193,7 +193,6 @@ xfs_rmap_update_create_done(
STATIC int
xfs_rmap_update_finish_item(
struct xfs_trans *tp,
- struct xfs_defer_ops *dop,
struct list_head *item,
void *done_item,
void **state)
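
The hunks in xfs_trans_bmap.c, xfs_trans_extfree.c, xfs_trans_refcount.c and xfs_trans_rmap.c apply the same mechanical trimming to every deferred-op type: each ->finish_item callback loses its struct xfs_defer_ops argument because the transaction it already receives now owns the pending list. The narrowed hook, sketched from the signatures visible in this diff (the real xfs_defer_op_type vtable carries additional members beyond those named in the trailing comment):

/* shape of the callback after the series; tp is the only context */
struct xfs_defer_op_type {
	int (*finish_item)(struct xfs_trans *tp, struct list_head *item,
			   void *done_item, void **state);
	/* ... create_intent, create_done, cancel_item, ... */
};
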