Diffstat (limited to 'fs/ntfs3')
-rw-r--r--  fs/ntfs3/attrib.c   | 557
-rw-r--r--  fs/ntfs3/bitmap.c   |  16
-rw-r--r--  fs/ntfs3/file.c     | 131
-rw-r--r--  fs/ntfs3/frecord.c  | 138
-rw-r--r--  fs/ntfs3/fslog.c    |  22
-rw-r--r--  fs/ntfs3/fsntfs.c   | 130
-rw-r--r--  fs/ntfs3/index.c    |  33
-rw-r--r--  fs/ntfs3/inode.c    |  74
-rw-r--r--  fs/ntfs3/namei.c    |   6
-rw-r--r--  fs/ntfs3/ntfs_fs.h  |  31
-rw-r--r--  fs/ntfs3/record.c   |  27
-rw-r--r--  fs/ntfs3/run.c      | 108
-rw-r--r--  fs/ntfs3/super.c    |  39
-rw-r--r--  fs/ntfs3/xattr.c    |  93
14 files changed, 952 insertions, 453 deletions
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index e8c00dda42ad..71f870d497ae 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -84,8 +84,8 @@ static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
/*
* attr_load_runs - Load all runs stored in @attr.
*/
-int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
- struct runs_tree *run, const CLST *vcn)
+static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
+ struct runs_tree *run, const CLST *vcn)
{
int err;
CLST svcn = le64_to_cpu(attr->nres.svcn);
@@ -140,7 +140,10 @@ failed:
}
if (lcn != SPARSE_LCN) {
- mark_as_free_ex(sbi, lcn, clen, trim);
+ if (sbi) {
+ /* mark bitmap range [lcn + clen) as free and trim clusters. */
+ mark_as_free_ex(sbi, lcn, clen, trim);
+ }
dn += clen;
}
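
The `if (sbi)` guard above lets run_deallocate_ex be called with a NULL superblock info purely to count clusters, which the reworked attr_punch_hole later in this patch relies on (`run_deallocate_ex(NULL, ...)`). A minimal user-space sketch of this "NULL context means dry run" idiom; all names here are illustrative, not the kernel API:

#include <stdio.h>

struct ctx;	/* opaque stand-in for struct ntfs_sb_info */

static unsigned long long dealloc_range(struct ctx *ctx,
					unsigned long long lcn,
					unsigned long long clen)
{
	(void)lcn;
	if (ctx) {
		/* destructive path: mark the bitmap range free, trim, ... */
	}
	return clen;	/* either way, report how many clusters were covered */
}

int main(void)
{
	/* dry run: nothing is freed, only the count comes back */
	printf("%llu clusters would be freed\n", dealloc_range(NULL, 100, 8));
	return 0;
}
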
@@ -173,7 +176,6 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
{
int err;
CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
- struct wnd_bitmap *wnd = &sbi->used.bitmap;
size_t cnt = run->count;
for (;;) {
@@ -196,9 +198,7 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
/* Add new fragment into run storage. */
if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
/* Undo last 'ntfs_look_for_free_space' */
- down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
- wnd_set_free(wnd, lcn, flen);
- up_write(&wnd->rw_lock);
+ mark_as_free_ex(sbi, lcn, len, false);
err = -ENOMEM;
goto out;
}
@@ -320,7 +320,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
attr_s->name_len, run, 0, alen,
- attr_s->flags, &attr, NULL);
+ attr_s->flags, &attr, NULL, NULL);
if (err)
goto out3;
@@ -419,40 +419,44 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
struct mft_inode *mi, *mi_b;
CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
CLST next_svcn, pre_alloc = -1, done = 0;
- bool is_ext;
+ bool is_ext, is_bad = false;
u32 align;
struct MFT_REC *rec;
again:
+ alen = 0;
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
&mi_b);
if (!attr_b) {
err = -ENOENT;
- goto out;
+ goto bad_inode;
}
if (!attr_b->non_res) {
err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
&attr_b);
- if (err || !attr_b->non_res)
- goto out;
+ if (err)
+ return err;
+
+ /* Return if file is still resident. */
+ if (!attr_b->non_res)
+ goto ok1;
/* Layout of records may be changed, so do a full search. */
goto again;
}
is_ext = is_attr_ext(attr_b);
-
-again_1:
align = sbi->cluster_size;
-
if (is_ext)
align <<= attr_b->nres.c_unit;
old_valid = le64_to_cpu(attr_b->nres.valid_size);
old_size = le64_to_cpu(attr_b->nres.data_size);
old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
+
+again_1:
old_alen = old_alloc >> cluster_bits;
new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
@@ -475,24 +479,27 @@ again_1:
mi = mi_b;
} else if (!le_b) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
} else {
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
&mi);
if (!attr) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
next_le_1:
svcn = le64_to_cpu(attr->nres.svcn);
evcn = le64_to_cpu(attr->nres.evcn);
}
-
+ /*
+ * Here we have:
+ * attr,mi,le - last attribute segment (containing 'vcn').
+ * attr_b,mi_b,le_b - base (primary) attribute segment.
+ */
next_le:
rec = mi->mrec;
-
err = attr_load_runs(attr, ni, run, NULL);
if (err)
goto out;
@@ -507,6 +514,13 @@ next_le:
goto ok;
}
+ /*
+ * Add clusters. In simple case we have to:
+ * - allocate space (vcn, lcn, len)
+ * - update packed run in 'mi'
+ * - update attr->nres.evcn
+ * - update attr_b->nres.data_size/attr_b->nres.alloc_size
+ */
to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
lcn = 0;
@@ -520,9 +534,11 @@ add_alloc_in_same_attr_seg:
pre_alloc = 0;
if (type == ATTR_DATA && !name_len &&
sbi->options->prealloc) {
- CLST new_alen2 = bytes_to_cluster(
- sbi, get_pre_allocated(new_size));
- pre_alloc = new_alen2 - new_alen;
+ pre_alloc =
+ bytes_to_cluster(
+ sbi,
+ get_pre_allocated(new_size)) -
+ new_alen;
}
/* Get the last LCN to allocate from. */
@@ -580,7 +596,7 @@ add_alloc_in_same_attr_seg:
pack_runs:
err = mi_pack_runs(mi, attr, run, vcn - svcn);
if (err)
- goto out;
+ goto undo_1;
next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
new_alloc_tmp = (u64)next_svcn << cluster_bits;
@@ -614,7 +630,7 @@ pack_runs:
if (type == ATTR_LIST) {
err = ni_expand_list(ni);
if (err)
- goto out;
+ goto undo_2;
if (next_svcn < vcn)
goto pack_runs;
@@ -624,8 +640,9 @@ pack_runs:
if (!ni->attr_list.size) {
err = ni_create_attr_list(ni);
+ /* In case of error layout of records is not changed. */
if (err)
- goto out;
+ goto undo_2;
/* Layout of records is changed. */
}
@@ -637,48 +654,57 @@ pack_runs:
/* Insert new attribute segment. */
err = ni_insert_nonresident(ni, type, name, name_len, run,
next_svcn, vcn - next_svcn,
- attr_b->flags, &attr, &mi);
- if (err)
- goto out;
-
- if (!is_mft)
- run_truncate_head(run, evcn + 1);
-
- svcn = le64_to_cpu(attr->nres.svcn);
- evcn = le64_to_cpu(attr->nres.evcn);
+ attr_b->flags, &attr, &mi, NULL);
- le_b = NULL;
/*
* Layout of records maybe changed.
* Find base attribute to update.
*/
+ le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
NULL, &mi_b);
if (!attr_b) {
- err = -ENOENT;
- goto out;
+ err = -EINVAL;
+ goto bad_inode;
}
- attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
- attr_b->nres.data_size = attr_b->nres.alloc_size;
- attr_b->nres.valid_size = attr_b->nres.alloc_size;
+ if (err) {
+ /* ni_insert_nonresident failed. */
+ attr = NULL;
+ goto undo_2;
+ }
+
+ if (!is_mft)
+ run_truncate_head(run, evcn + 1);
+
+ svcn = le64_to_cpu(attr->nres.svcn);
+ evcn = le64_to_cpu(attr->nres.evcn);
+
+ /*
+ * Attribute is in consistency state.
+ * Save this point to restore to if next steps fail.
+ */
+ old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
+ attr_b->nres.valid_size = attr_b->nres.data_size =
+ attr_b->nres.alloc_size = cpu_to_le64(old_size);
mi_b->dirty = true;
goto again_1;
}
if (new_size != old_size ||
(new_alloc != old_alloc && !keep_prealloc)) {
+ /*
+ * Truncate clusters. In simple case we have to:
+ * - update packed run in 'mi'
+ * - update attr->nres.evcn
+ * - update attr_b->nres.data_size/attr_b->nres.alloc_size
+ * - mark and trim clusters as free (vcn, lcn, len)
+ */
+ CLST dlen = 0;
+
vcn = max(svcn, new_alen);
new_alloc_tmp = (u64)vcn << cluster_bits;
- alen = 0;
- err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
- true);
- if (err)
- goto out;
-
- run_truncate(run, vcn);
-
if (vcn > svcn) {
err = mi_pack_runs(mi, attr, run, vcn - svcn);
if (err)
@@ -697,7 +723,7 @@ pack_runs:
if (!al_remove_le(ni, le)) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
@@ -723,12 +749,20 @@ pack_runs:
attr_b->nres.valid_size =
attr_b->nres.alloc_size;
}
+ mi_b->dirty = true;
- if (is_ext)
+ err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
+ true);
+ if (err)
+ goto out;
+
+ if (is_ext) {
+ /* dlen - really deallocated clusters. */
le64_sub_cpu(&attr_b->nres.total_size,
- ((u64)alen << cluster_bits));
+ ((u64)dlen << cluster_bits));
+ }
- mi_b->dirty = true;
+ run_truncate(run, vcn);
if (new_alloc_tmp <= new_alloc)
goto ok;
@@ -747,7 +781,7 @@ pack_runs:
if (le->type != type || le->name_len != name_len ||
memcmp(le_name(le), name, name_len * sizeof(short))) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
err = ni_load_mi(ni, le, &mi);
@@ -757,7 +791,7 @@ pack_runs:
attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
if (!attr) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
goto next_le_1;
}
@@ -772,13 +806,13 @@ ok:
}
}
-out:
- if (!err && attr_b && ret)
+ok1:
+ if (ret)
*ret = attr_b;
/* Update inode_set_bytes. */
- if (!err && ((type == ATTR_DATA && !name_len) ||
- (type == ATTR_ALLOC && name == I30_NAME))) {
+ if (((type == ATTR_DATA && !name_len) ||
+ (type == ATTR_ALLOC && name == I30_NAME))) {
bool dirty = false;
if (ni->vfs_inode.i_size != new_size) {
@@ -786,7 +820,7 @@ out:
dirty = true;
}
- if (attr_b && attr_b->non_res) {
+ if (attr_b->non_res) {
new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
inode_set_bytes(&ni->vfs_inode, new_alloc);
@@ -800,6 +834,47 @@ out:
}
}
+ return 0;
+
+undo_2:
+ vcn -= alen;
+ attr_b->nres.data_size = cpu_to_le64(old_size);
+ attr_b->nres.valid_size = cpu_to_le64(old_valid);
+ attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
+
+ /* Restore 'attr' and 'mi'. */
+ if (attr)
+ goto restore_run;
+
+ if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
+ svcn <= le64_to_cpu(attr_b->nres.evcn)) {
+ attr = attr_b;
+ le = le_b;
+ mi = mi_b;
+ } else if (!le_b) {
+ err = -EINVAL;
+ goto bad_inode;
+ } else {
+ le = le_b;
+ attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
+ &svcn, &mi);
+ if (!attr)
+ goto bad_inode;
+ }
+
+restore_run:
+ if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
+ is_bad = true;
+
+undo_1:
+ run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
+
+ run_truncate(run, vcn);
+out:
+ if (is_bad) {
+bad_inode:
+ _ntfs_bad_inode(&ni->vfs_inode);
+ }
return err;
}
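
The attr_set_size hunks above replace the single `out:` label with a layered recovery path: undo_2 restores the saved sizes and repacks the run, undo_1 releases the clusters allocated during this call, and bad_inode is reached only when the metadata can no longer be made consistent. A loosely modelled sketch of that snapshot/try/rollback shape, with hypothetical callbacks standing in for the real helpers:

struct attr_sizes { unsigned long long data, valid, alloc; };

/*
 * Grow an attribute: snapshot the old sizes, allocate, try to commit the
 * new layout, and roll everything back if the commit fails.
 */
static int grow_attr(struct attr_sizes *sz, unsigned long long new_alloc,
		     int (*allocate)(unsigned long long),
		     void (*release)(unsigned long long),
		     int (*commit)(const struct attr_sizes *))
{
	struct attr_sizes saved = *sz;	/* old_size / old_valid / old_alloc */
	unsigned long long added = new_alloc - sz->alloc;
	int err;

	err = allocate(added);
	if (err)
		return err;		/* nothing to undo yet */

	sz->alloc = new_alloc;
	err = commit(sz);		/* think mi_pack_runs + attr list updates */
	if (err) {
		*sz = saved;		/* like undo_2: restore the saved sizes */
		release(added);		/* like undo_1: free this pass's clusters */
	}
	return err;
}
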
@@ -855,7 +930,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
goto out;
}
- asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
+ asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
if (vcn >= asize) {
err = -EINVAL;
goto out;
@@ -1047,7 +1122,7 @@ ins_ext:
if (evcn1 > next_svcn) {
err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
next_svcn, evcn1 - next_svcn,
- attr_b->flags, &attr, &mi);
+ attr_b->flags, &attr, &mi, NULL);
if (err)
goto out;
}
@@ -1173,7 +1248,7 @@ int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
{
struct ntfs_sb_info *sbi = ni->mi.sbi;
u8 cluster_bits = sbi->cluster_bits;
- CLST vcn = from >> cluster_bits;
+ CLST vcn;
CLST vcn_last = (to - 1) >> cluster_bits;
CLST lcn, clen;
int err;
@@ -1647,7 +1722,7 @@ ins_ext:
if (evcn1 > next_svcn) {
err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
next_svcn, evcn1 - next_svcn,
- attr_b->flags, &attr, &mi);
+ attr_b->flags, &attr, &mi, NULL);
if (err)
goto out;
}
@@ -1812,18 +1887,12 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
err = ni_insert_nonresident(
ni, ATTR_DATA, NULL, 0, run, next_svcn,
evcn1 - eat - next_svcn, a_flags, &attr,
- &mi);
+ &mi, &le);
if (err)
goto out;
/* Layout of records maybe changed. */
attr_b = NULL;
- le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
- &next_svcn);
- if (!le) {
- err = -EINVAL;
- goto out;
- }
}
/* Free all allocated memory. */
@@ -1918,7 +1987,7 @@ next_attr:
out:
up_write(&ni->file.run_lock);
if (err)
- make_bad_inode(&ni->vfs_inode);
+ _ntfs_bad_inode(&ni->vfs_inode);
return err;
}
@@ -1936,9 +2005,11 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
struct ATTRIB *attr = NULL, *attr_b;
struct ATTR_LIST_ENTRY *le, *le_b;
struct mft_inode *mi, *mi_b;
- CLST svcn, evcn1, vcn, len, end, alen, dealloc;
+ CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
u64 total_size, alloc_size;
u32 mask;
+ __le16 a_flags;
+ struct runs_tree run2;
if (!bytes)
return 0;
@@ -1990,6 +2061,9 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
}
down_write(&ni->file.run_lock);
+ run_init(&run2);
+ run_truncate(run, 0);
+
/*
* Enumerate all attribute segments and punch hole where necessary.
*/
@@ -1997,10 +2071,11 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
vcn = vbo >> sbi->cluster_bits;
len = bytes >> sbi->cluster_bits;
end = vcn + len;
- dealloc = 0;
+ hole = 0;
svcn = le64_to_cpu(attr_b->nres.svcn);
evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
+ a_flags = attr_b->flags;
if (svcn <= vcn && vcn < evcn1) {
attr = attr_b;
@@ -2008,14 +2083,14 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
mi = mi_b;
} else if (!le_b) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
} else {
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
&mi);
if (!attr) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
svcn = le64_to_cpu(attr->nres.svcn);
@@ -2023,49 +2098,91 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
}
while (svcn < end) {
- CLST vcn1, zero, dealloc2;
+ CLST vcn1, zero, hole2 = hole;
err = attr_load_runs(attr, ni, run, &svcn);
if (err)
- goto out;
+ goto done;
vcn1 = max(vcn, svcn);
zero = min(end, evcn1) - vcn1;
- dealloc2 = dealloc;
- err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
+ /*
+ * Check range [vcn1 + zero).
+ * Calculate how many clusters there are.
+ * Don't do any destructive actions.
+ */
+ err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
if (err)
- goto out;
+ goto done;
- if (dealloc2 == dealloc) {
- /* Looks like the required range is already sparsed. */
- } else {
- if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
- false)) {
- err = -ENOMEM;
- goto out;
- }
+ /* Check if required range is already hole. */
+ if (hole2 == hole)
+ goto next_attr;
+
+ /* Make a clone of run to undo. */
+ err = run_clone(run, &run2);
+ if (err)
+ goto done;
+
+ /* Make a hole range (sparse) [vcn1 + zero). */
+ if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
+ err = -ENOMEM;
+ goto done;
+ }
- err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
+ /* Update run in attribute segment. */
+ err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
+ if (err)
+ goto done;
+ next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
+ if (next_svcn < evcn1) {
+ /* Insert new attribute segment. */
+ err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
+ next_svcn,
+ evcn1 - next_svcn, a_flags,
+ &attr, &mi, &le);
if (err)
- goto out;
+ goto undo_punch;
+
+ /* Layout of records maybe changed. */
+ attr_b = NULL;
}
+
+ /* Real deallocate. Should not fail. */
+ run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
+
+next_attr:
/* Free all allocated memory. */
run_truncate(run, 0);
if (evcn1 >= alen)
break;
+ /* Get next attribute segment. */
attr = ni_enum_attr_ex(ni, attr, &le, &mi);
if (!attr) {
err = -EINVAL;
- goto out;
+ goto bad_inode;
}
svcn = le64_to_cpu(attr->nres.svcn);
evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
}
- total_size -= (u64)dealloc << sbi->cluster_bits;
+done:
+ if (!hole)
+ goto out;
+
+ if (!attr_b) {
+ attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
+ &mi_b);
+ if (!attr_b) {
+ err = -EINVAL;
+ goto bad_inode;
+ }
+ }
+
+ total_size -= (u64)hole << sbi->cluster_bits;
attr_b->nres.total_size = cpu_to_le64(total_size);
mi_b->dirty = true;
@@ -2075,9 +2192,263 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
mark_inode_dirty(&ni->vfs_inode);
out:
+ run_close(&run2);
up_write(&ni->file.run_lock);
+ return err;
+
+bad_inode:
+ _ntfs_bad_inode(&ni->vfs_inode);
+ goto out;
+
+undo_punch:
+ /*
+ * Restore packed runs.
+ * 'mi_pack_runs' should not fail, cause we restore original.
+ */
+ if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
+ goto bad_inode;
+
+ goto done;
+}
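
The rewritten punch-hole loop follows a "measure, clone, commit, then destroy" order: a non-destructive pass counts what would become a hole, run_clone keeps a copy for undo_punch, the sparse entry is packed into the record, and only then are the clusters really freed, a step that must not fail. A schematic sketch of that ordering; all helpers below are trivial stubs, not the real routines:

/* Illustrative model only; the real code operates on runs_tree and MFT records. */
struct run { int dummy; };

static int run_clone_stub(const struct run *src, struct run *dst) { *dst = *src; return 0; }
static int pack_sparse_stub(struct run *r) { (void)r; return 0; }	/* reversible step */
static void free_clusters_stub(const struct run *r) { (void)r; }	/* irreversible step */

static int punch_hole_step(struct run *r, struct run *backup)
{
	int err = run_clone_stub(r, backup);	/* keep a way back (run_clone) */
	if (err)
		return err;

	err = pack_sparse_stub(r);		/* reversible: pack the sparse range */
	if (err) {
		pack_sparse_stub(backup);	/* restore original packing; "should not fail" */
		return err;
	}

	free_clusters_stub(backup);		/* only now really free the old clusters */
	return 0;
}
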
+
+/*
+ * attr_insert_range - Insert range (hole) in file.
+ * Not for normal files.
+ */
+int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
+{
+ int err = 0;
+ struct runs_tree *run = &ni->file.run;
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
+ struct ATTRIB *attr = NULL, *attr_b;
+ struct ATTR_LIST_ENTRY *le, *le_b;
+ struct mft_inode *mi, *mi_b;
+ CLST vcn, svcn, evcn1, len, next_svcn;
+ u64 data_size, alloc_size;
+ u32 mask;
+ __le16 a_flags;
+
+ if (!bytes)
+ return 0;
+
+ le_b = NULL;
+ attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
+ if (!attr_b)
+ return -ENOENT;
+
+ if (!is_attr_ext(attr_b)) {
+ /* It was checked above. See fallocate. */
+ return -EOPNOTSUPP;
+ }
+
+ if (!attr_b->non_res) {
+ data_size = le32_to_cpu(attr_b->res.data_size);
+ alloc_size = data_size;
+ mask = sbi->cluster_mask; /* cluster_size - 1 */
+ } else {
+ data_size = le64_to_cpu(attr_b->nres.data_size);
+ alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
+ mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
+ }
+
+ if (vbo > data_size) {
+ /* Insert range after the file size is not allowed. */
+ return -EINVAL;
+ }
+
+ if ((vbo & mask) || (bytes & mask)) {
+ /* Allow to insert only frame aligned ranges. */
+ return -EINVAL;
+ }
+
+ /*
+ * valid_size <= data_size <= alloc_size
+ * Check alloc_size for maximum possible.
+ */
+ if (bytes > sbi->maxbytes_sparse - alloc_size)
+ return -EFBIG;
+
+ vcn = vbo >> sbi->cluster_bits;
+ len = bytes >> sbi->cluster_bits;
+
+ down_write(&ni->file.run_lock);
+
+ if (!attr_b->non_res) {
+ err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
+ data_size + bytes, NULL, false, NULL);
+
+ le_b = NULL;
+ attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
+ &mi_b);
+ if (!attr_b) {
+ err = -EINVAL;
+ goto bad_inode;
+ }
+
+ if (err)
+ goto out;
+
+ if (!attr_b->non_res) {
+ /* Still resident. */
+ char *data = Add2Ptr(attr_b, attr_b->res.data_off);
+
+ memmove(data + bytes, data, bytes);
+ memset(data, 0, bytes);
+ goto done;
+ }
+
+ /* Resident files becomes nonresident. */
+ data_size = le64_to_cpu(attr_b->nres.data_size);
+ alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
+ }
+
+ /*
+ * Enumerate all attribute segments and shift start vcn.
+ */
+ a_flags = attr_b->flags;
+ svcn = le64_to_cpu(attr_b->nres.svcn);
+ evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
+
+ if (svcn <= vcn && vcn < evcn1) {
+ attr = attr_b;
+ le = le_b;
+ mi = mi_b;
+ } else if (!le_b) {
+ err = -EINVAL;
+ goto bad_inode;
+ } else {
+ le = le_b;
+ attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
+ &mi);
+ if (!attr) {
+ err = -EINVAL;
+ goto bad_inode;
+ }
+
+ svcn = le64_to_cpu(attr->nres.svcn);
+ evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+ }
+
+ run_truncate(run, 0); /* clear cached values. */
+ err = attr_load_runs(attr, ni, run, NULL);
+ if (err)
+ goto out;
+
+ if (!run_insert_range(run, vcn, len)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Try to pack in current record as much as possible. */
+ err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
if (err)
- make_bad_inode(&ni->vfs_inode);
+ goto out;
+
+ next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
+
+ while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
+ attr->type == ATTR_DATA && !attr->name_len) {
+ le64_add_cpu(&attr->nres.svcn, len);
+ le64_add_cpu(&attr->nres.evcn, len);
+ if (le) {
+ le->vcn = attr->nres.svcn;
+ ni->attr_list.dirty = true;
+ }
+ mi->dirty = true;
+ }
+
+ if (next_svcn < evcn1 + len) {
+ err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
+ next_svcn, evcn1 + len - next_svcn,
+ a_flags, NULL, NULL, NULL);
+
+ le_b = NULL;
+ attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
+ &mi_b);
+ if (!attr_b) {
+ err = -EINVAL;
+ goto bad_inode;
+ }
+
+ if (err) {
+ /* ni_insert_nonresident failed. Try to undo. */
+ goto undo_insert_range;
+ }
+ }
+
+ /*
+ * Update primary attribute segment.
+ */
+ if (vbo <= ni->i_valid)
+ ni->i_valid += bytes;
+
+ attr_b->nres.data_size = le64_to_cpu(data_size + bytes);
+ attr_b->nres.alloc_size = le64_to_cpu(alloc_size + bytes);
+
+ /* ni->valid may be not equal valid_size (temporary). */
+ if (ni->i_valid > data_size + bytes)
+ attr_b->nres.valid_size = attr_b->nres.data_size;
+ else
+ attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
+ mi_b->dirty = true;
+
+done:
+ ni->vfs_inode.i_size += bytes;
+ ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+ mark_inode_dirty(&ni->vfs_inode);
+
+out:
+ run_truncate(run, 0); /* clear cached values. */
+
+ up_write(&ni->file.run_lock);
return err;
+
+bad_inode:
+ _ntfs_bad_inode(&ni->vfs_inode);
+ goto out;
+
+undo_insert_range:
+ svcn = le64_to_cpu(attr_b->nres.svcn);
+ evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
+
+ if (svcn <= vcn && vcn < evcn1) {
+ attr = attr_b;
+ le = le_b;
+ mi = mi_b;
+ } else if (!le_b) {
+ goto bad_inode;
+ } else {
+ le = le_b;
+ attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
+ &mi);
+ if (!attr) {
+ goto bad_inode;
+ }
+
+ svcn = le64_to_cpu(attr->nres.svcn);
+ evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+ }
+
+ if (attr_load_runs(attr, ni, run, NULL))
+ goto bad_inode;
+
+ if (!run_collapse_range(run, vcn, len))
+ goto bad_inode;
+
+ if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
+ goto bad_inode;
+
+ while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
+ attr->type == ATTR_DATA && !attr->name_len) {
+ le64_sub_cpu(&attr->nres.svcn, len);
+ le64_sub_cpu(&attr->nres.evcn, len);
+ if (le) {
+ le->vcn = attr->nres.svcn;
+ ni->attr_list.dirty = true;
+ }
+ mi->dirty = true;
+ }
+
+ goto out;
}
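
The new attr_insert_range grows the file by shifting everything after the insertion point: run_insert_range opens a gap in the run, then every following ATTR_DATA segment has its svcn/evcn moved up by `len` (and moved back down on the undo path). A toy sketch of that interval shift, detached from the NTFS structures:

#include <stdio.h>

struct seg { unsigned long long svcn, evcn; };

/* Shift every segment after the insertion point by len clusters;
 * call with sub = 1 to undo the same shift. */
static void shift_segments(struct seg *s, int n, unsigned long long len, int sub)
{
	for (int i = 0; i < n; i++) {
		if (sub) {
			s[i].svcn -= len;
			s[i].evcn -= len;
		} else {
			s[i].svcn += len;
			s[i].evcn += len;
		}
	}
}

int main(void)
{
	struct seg tail[2] = { { 16, 31 }, { 32, 63 } };

	shift_segments(tail, 2, 8, 0);	/* insert an 8-cluster hole before vcn 16 */
	printf("first tail segment now [%llu..%llu]\n", tail[0].svcn, tail[0].evcn);
	return 0;
}
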
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index aa184407520f..e92bbd754365 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -51,11 +51,6 @@ void ntfs3_exit_bitmap(void)
kmem_cache_destroy(ntfs_enode_cachep);
}
-static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
-{
- return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
-}
-
/*
* wnd_scan
*
@@ -565,7 +560,7 @@ static int wnd_rescan(struct wnd_bitmap *wnd)
buf = (ulong *)bh->b_data;
- used = __bitmap_weight(buf, wbits);
+ used = bitmap_weight(buf, wbits);
if (used < wbits) {
frb = wbits - used;
wnd->free_bits[iw] = frb;
@@ -1333,9 +1328,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
if (!new_free)
return -ENOMEM;
- if (new_free != wnd->free_bits)
- memcpy(new_free, wnd->free_bits,
- wnd->nwnd * sizeof(short));
+ memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
memset(new_free + wnd->nwnd, 0,
(new_wnd - wnd->nwnd) * sizeof(short));
kfree(wnd->free_bits);
@@ -1371,7 +1364,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
buf = (ulong *)bh->b_data;
__bitmap_clear(buf, b0, blocksize * 8 - b0);
- frb = wbits - __bitmap_weight(buf, wbits);
+ frb = wbits - bitmap_weight(buf, wbits);
wnd->total_zeroes += frb - wnd->free_bits[iw];
wnd->free_bits[iw] = frb;
@@ -1395,9 +1388,8 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
{
- size_t zlen;
+ size_t zlen = wnd->zone_end - wnd->zone_bit;
- zlen = wnd->zone_end - wnd->zone_bit;
if (zlen)
wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 787b53b984ee..4f2ffc7ef296 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -22,20 +22,20 @@ static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
struct fstrim_range __user *user_range;
struct fstrim_range range;
- struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
int err;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!blk_queue_discard(q))
+ if (!bdev_max_discard_sectors(sbi->sb->s_bdev))
return -EOPNOTSUPP;
user_range = (struct fstrim_range __user *)arg;
if (copy_from_user(&range, user_range, sizeof(range)))
return -EFAULT;
- range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);
+ range.minlen = max_t(u32, range.minlen,
+ bdev_discard_granularity(sbi->sb->s_bdev));
err = ntfs_trim_fs(sbi, &range);
if (err < 0)
@@ -115,7 +115,6 @@ static int ntfs_extend_initialized_size(struct file *file,
for (;;) {
u32 zerofrom, len;
struct page *page;
- void *fsdata;
u8 bits;
CLST vcn, lcn, clen;
@@ -157,16 +156,14 @@ static int ntfs_extend_initialized_size(struct file *file,
if (pos + len > new_valid)
len = new_valid - pos;
- err = pagecache_write_begin(file, mapping, pos, len, 0, &page,
- &fsdata);
+ err = ntfs_write_begin(file, mapping, pos, len, &page, NULL);
if (err)
goto out;
zero_user_segment(page, zerofrom, PAGE_SIZE);
/* This function in any case puts page. */
- err = pagecache_write_end(file, mapping, pos, len, len, page,
- fsdata);
+ err = ntfs_write_end(file, mapping, pos, len, len, page, NULL);
if (err < 0)
goto out;
pos += len;
@@ -245,7 +242,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
lock_buffer(bh);
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
- submit_bh(REQ_OP_READ, 0, bh);
+ submit_bh(REQ_OP_READ, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
@@ -495,7 +492,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
down_write(&ni->file.run_lock);
err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
- &new_valid, true, NULL);
+ &new_valid, ni->mi.sbi->options->prealloc, NULL);
up_write(&ni->file.run_lock);
if (new_valid < ni->i_valid)
@@ -533,21 +530,35 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
struct inode *inode = file->f_mapping->host;
+ struct address_space *mapping = inode->i_mapping;
struct super_block *sb = inode->i_sb;
struct ntfs_sb_info *sbi = sb->s_fs_info;
struct ntfs_inode *ni = ntfs_i(inode);
loff_t end = vbo + len;
loff_t vbo_down = round_down(vbo, PAGE_SIZE);
- loff_t i_size;
+ bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
+ loff_t i_size, new_size;
+ bool map_locked;
int err;
/* No support for dir. */
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
- /* Return error if mode is not supported. */
- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
- FALLOC_FL_COLLAPSE_RANGE)) {
+ /*
+ * vfs_fallocate checks all possible combinations of mode.
+ * Do additional checks here before ntfs_set_state(dirty).
+ */
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ if (!is_supported_holes)
+ return -EOPNOTSUPP;
+ } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
+ } else if (mode & FALLOC_FL_INSERT_RANGE) {
+ if (!is_supported_holes)
+ return -EOPNOTSUPP;
+ } else if (mode &
+ ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+ FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
mode);
return -EOPNOTSUPP;
@@ -557,6 +568,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
inode_lock(inode);
i_size = inode->i_size;
+ new_size = max(end, i_size);
+ map_locked = false;
if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
/* Should never be here, see ntfs_file_open. */
@@ -564,38 +577,27 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
goto out;
}
+ if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
+ FALLOC_FL_INSERT_RANGE)) {
+ inode_dio_wait(inode);
+ filemap_invalidate_lock(mapping);
+ map_locked = true;
+ }
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
u32 frame_size;
loff_t mask, vbo_a, end_a, tmp;
- if (!(mode & FALLOC_FL_KEEP_SIZE)) {
- err = -EINVAL;
- goto out;
- }
-
- err = filemap_write_and_wait_range(inode->i_mapping, vbo,
- end - 1);
+ err = filemap_write_and_wait_range(mapping, vbo, end - 1);
if (err)
goto out;
- err = filemap_write_and_wait_range(inode->i_mapping, end,
- LLONG_MAX);
+ err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
if (err)
goto out;
- inode_dio_wait(inode);
-
truncate_pagecache(inode, vbo_down);
- if (!is_sparsed(ni) && !is_compressed(ni)) {
- /*
- * Normal file, can't make hole.
- * TODO: Try to find way to save info about hole.
- */
- err = -EOPNOTSUPP;
- goto out;
- }
-
ni_lock(ni);
err = attr_punch_hole(ni, vbo, len, &frame_size);
ni_unlock(ni);
@@ -627,17 +629,11 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
ni_unlock(ni);
}
} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
- if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
- err = -EINVAL;
- goto out;
- }
-
/*
* Write tail of the last page before removed range since
* it will get removed from the page cache below.
*/
- err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
- vbo);
+ err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
if (err)
goto out;
@@ -645,28 +641,58 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
* Write data that will be shifted to preserve them
* when discarding page cache below.
*/
- err = filemap_write_and_wait_range(inode->i_mapping, end,
- LLONG_MAX);
+ err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
if (err)
goto out;
- /* Wait for existing dio to complete. */
- inode_dio_wait(inode);
-
truncate_pagecache(inode, vbo_down);
ni_lock(ni);
err = attr_collapse_range(ni, vbo, len);
ni_unlock(ni);
+ } else if (mode & FALLOC_FL_INSERT_RANGE) {
+ /* Check new size. */
+ err = inode_newsize_ok(inode, new_size);
+ if (err)
+ goto out;
+
+ /* Write out all dirty pages. */
+ err = filemap_write_and_wait_range(mapping, vbo_down,
+ LLONG_MAX);
+ if (err)
+ goto out;
+ truncate_pagecache(inode, vbo_down);
+
+ ni_lock(ni);
+ err = attr_insert_range(ni, vbo, len);
+ ni_unlock(ni);
} else {
+ /* Check new size. */
+
+ /* generic/213: expected -ENOSPC instead of -EFBIG. */
+ if (!is_supported_holes) {
+ loff_t to_alloc = new_size - inode_get_bytes(inode);
+
+ if (to_alloc > 0 &&
+ (to_alloc >> sbi->cluster_bits) >
+ wnd_zeroes(&sbi->used.bitmap)) {
+ err = -ENOSPC;
+ goto out;
+ }
+ }
+
+ err = inode_newsize_ok(inode, new_size);
+ if (err)
+ goto out;
+
/*
- * Normal file: Allocate clusters, do not change 'valid' size.
+ * Allocate clusters, do not change 'valid' size.
*/
- err = ntfs_set_size(inode, max(end, i_size));
+ err = ntfs_set_size(inode, new_size);
if (err)
goto out;
- if (is_sparsed(ni) || is_compressed(ni)) {
+ if (is_supported_holes) {
CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
CLST vcn = vbo >> sbi->cluster_bits;
CLST cend = bytes_to_cluster(sbi, end);
@@ -714,8 +740,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
}
out:
- if (err == -EFBIG)
- err = -ENOSPC;
+ if (map_locked)
+ filemap_invalidate_unlock(mapping);
if (!err) {
inode->i_ctime = inode->i_mtime = current_time(inode);
@@ -762,7 +788,7 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
}
inode_dio_wait(inode);
- if (attr->ia_size < oldsize)
+ if (attr->ia_size <= oldsize)
err = ntfs_truncate(inode, attr->ia_size);
else if (attr->ia_size > oldsize)
err = ntfs_extend(inode, attr->ia_size, 0, NULL);
@@ -986,7 +1012,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
if (bytes > count)
bytes = count;
- frame = pos >> frame_bits;
frame_vbo = pos & ~(frame_size - 1);
index = frame_vbo >> PAGE_SHIFT;
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 6f47a9c17f89..381a38a06ec2 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -7,6 +7,7 @@
#include <linux/fiemap.h>
#include <linux/fs.h>
+#include <linux/minmax.h>
#include <linux/vmalloc.h>
#include "debug.h"
@@ -468,7 +469,7 @@ ni_ins_new_attr(struct ntfs_inode *ni, struct mft_inode *mi,
&ref, &le);
if (err) {
/* No memory or no space. */
- return NULL;
+ return ERR_PTR(err);
}
le_added = true;
@@ -649,6 +650,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
struct mft_inode *mi;
u32 asize, free;
struct MFT_REF ref;
+ struct MFT_REC *mrec;
__le16 id;
if (!ni->attr_list.dirty)
@@ -692,11 +694,17 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
free -= asize;
}
+ /* Make a copy of primary record to restore if error. */
+ mrec = kmemdup(ni->mi.mrec, sbi->record_size, GFP_NOFS);
+ if (!mrec)
+ return 0; /* Not critical. */
+
/* It seems that attribute list can be removed from primary record. */
mi_remove_attr(NULL, &ni->mi, attr_list);
/*
- * Repeat the cycle above and move all attributes to primary record.
+ * Repeat the cycle above and copy all attributes to primary record.
+ * Do not remove original attributes from subrecords!
* It should be success!
*/
le = NULL;
@@ -707,14 +715,14 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
mi = ni_find_mi(ni, ino_get(&le->ref));
if (!mi) {
/* Should never happened, 'cause already checked. */
- goto bad;
+ goto out;
}
attr = mi_find_attr(mi, NULL, le->type, le_name(le),
le->name_len, &le->id);
if (!attr) {
/* Should never happened, 'cause already checked. */
- goto bad;
+ goto out;
}
asize = le32_to_cpu(attr->size);
@@ -724,18 +732,33 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
le16_to_cpu(attr->name_off));
if (!attr_ins) {
/*
- * Internal error.
- * Either no space in primary record (already checked).
- * Either tried to insert another
- * non indexed attribute (logic error).
+ * No space in primary record (already checked).
*/
- goto bad;
+ goto out;
}
/* Copy all except id. */
id = attr_ins->id;
memcpy(attr_ins, attr, asize);
attr_ins->id = id;
+ }
+
+ /*
+ * Repeat the cycle above and remove all attributes from subrecords.
+ */
+ le = NULL;
+ while ((le = al_enumerate(ni, le))) {
+ if (!memcmp(&le->ref, &ref, sizeof(ref)))
+ continue;
+
+ mi = ni_find_mi(ni, ino_get(&le->ref));
+ if (!mi)
+ continue;
+
+ attr = mi_find_attr(mi, NULL, le->type, le_name(le),
+ le->name_len, &le->id);
+ if (!attr)
+ continue;
/* Remove from original record. */
mi_remove_attr(NULL, mi, attr);
@@ -748,11 +771,13 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
ni->attr_list.le = NULL;
ni->attr_list.dirty = false;
+ kfree(mrec);
+ return 0;
+out:
+ /* Restore primary record. */
+ swap(mrec, ni->mi.mrec);
+ kfree(mrec);
return 0;
-bad:
- ntfs_inode_err(&ni->vfs_inode, "Internal error");
- make_bad_inode(&ni->vfs_inode);
- return -EINVAL;
}
/*
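
ni_try_remove_attr_list above switches to a safer scheme: kmemdup a backup of the primary MFT record, copy all attributes into it, remove them from the subrecords only after every copy succeeded, and on any failure swap the saved record back instead of marking the inode bad. A tiny generic sketch of that backup/swap idea; the helper and callback are invented for illustration:

#include <stdlib.h>
#include <string.h>

/* Try an in-place rewrite of 'rec'; restore the original on failure.
 * Best-effort, like the optimisation in the patch: errors are not fatal. */
static int rewrite_record(void *rec, size_t size, int (*rewrite)(void *))
{
	void *backup = malloc(size);

	if (!backup)
		return 0;			/* not critical: skip the optimisation */
	memcpy(backup, rec, size);

	if (rewrite(rec))
		memcpy(rec, backup, size);	/* roll back to the saved copy */

	free(backup);
	return 0;
}
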
@@ -986,6 +1011,8 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
name_off, svcn, ins_le);
if (!attr)
continue;
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
if (ins_attr)
*ins_attr = attr;
@@ -1007,8 +1034,15 @@ insert_ext:
attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
name_off, svcn, ins_le);
- if (!attr)
+ if (!attr) {
+ err = -EINVAL;
+ goto out2;
+ }
+
+ if (IS_ERR(attr)) {
+ err = PTR_ERR(attr);
goto out2;
+ }
if (ins_attr)
*ins_attr = attr;
@@ -1020,10 +1054,9 @@ insert_ext:
out2:
ni_remove_mi(ni, mi);
mi_put(mi);
- err = -EINVAL;
out1:
- ntfs_mark_rec_free(sbi, rno);
+ ntfs_mark_rec_free(sbi, rno, is_mft);
out:
return err;
@@ -1076,6 +1109,11 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
if (asize <= free) {
attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len,
asize, name_off, svcn, ins_le);
+ if (IS_ERR(attr)) {
+ err = PTR_ERR(attr);
+ goto out;
+ }
+
if (attr) {
if (ins_attr)
*ins_attr = attr;
@@ -1173,6 +1211,11 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
goto out;
}
+ if (IS_ERR(attr)) {
+ err = PTR_ERR(attr);
+ goto out;
+ }
+
if (ins_attr)
*ins_attr = attr;
if (ins_mi)
@@ -1218,7 +1261,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
mft_min = mft_new;
mi_min = mi_new;
} else {
- ntfs_mark_rec_free(sbi, mft_new);
+ ntfs_mark_rec_free(sbi, mft_new, true);
mft_new = 0;
ni_remove_mi(ni, mi_new);
}
@@ -1262,7 +1305,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
done = asize - run_size - SIZEOF_NONRESIDENT;
le32_sub_cpu(&ni->mi.mrec->used, done);
- /* Estimate the size of second part: run_buf=NULL. */
+ /* Estimate packed size (run_buf=NULL). */
err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,
&plen);
if (err < 0)
@@ -1288,10 +1331,16 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
goto out;
}
+ if (IS_ERR(attr)) {
+ err = PTR_ERR(attr);
+ goto out;
+ }
+
attr->non_res = 1;
attr->name_off = SIZEOF_NONRESIDENT_LE;
attr->flags = 0;
+ /* This function can't fail - cause already checked above. */
run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
run_size, &plen);
@@ -1301,7 +1350,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
out:
if (mft_new) {
- ntfs_mark_rec_free(sbi, mft_new);
+ ntfs_mark_rec_free(sbi, mft_new, true);
ni_remove_mi(ni, mi_new);
}
@@ -1367,8 +1416,6 @@ int ni_expand_list(struct ntfs_inode *ni)
/* Split MFT data as much as possible. */
err = ni_expand_mft_list(ni);
- if (err)
- goto out;
out:
return !err && !done ? -EOPNOTSUPP : err;
@@ -1381,7 +1428,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
const __le16 *name, u8 name_len,
const struct runs_tree *run, CLST svcn, CLST len,
__le16 flags, struct ATTRIB **new_attr,
- struct mft_inode **mi)
+ struct mft_inode **mi, struct ATTR_LIST_ENTRY **le)
{
int err;
CLST plen;
@@ -1394,6 +1441,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
u32 run_size, asize;
struct ntfs_sb_info *sbi = ni->mi.sbi;
+ /* Estimate packed size (run_buf=NULL). */
err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off,
&plen);
if (err < 0)
@@ -1414,7 +1462,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
}
err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn,
- &attr, mi, NULL);
+ &attr, mi, le);
if (err)
goto out;
@@ -1423,12 +1471,12 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
attr->name_off = cpu_to_le16(name_off);
attr->flags = flags;
+ /* This function can't fail - cause already checked above. */
run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen);
attr->nres.svcn = cpu_to_le64(svcn);
attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1);
- err = 0;
if (new_attr)
*new_attr = attr;
@@ -1560,7 +1608,7 @@ int ni_delete_all(struct ntfs_inode *ni)
mi->dirty = true;
mi_write(mi, 0);
- ntfs_mark_rec_free(sbi, mi->rno);
+ ntfs_mark_rec_free(sbi, mi->rno, false);
ni_remove_mi(ni, mi);
mi_put(mi);
node = next;
@@ -1571,7 +1619,7 @@ int ni_delete_all(struct ntfs_inode *ni)
ni->mi.dirty = true;
err = mi_write(&ni->mi, 0);
- ntfs_mark_rec_free(sbi, ni->mi.rno);
+ ntfs_mark_rec_free(sbi, ni->mi.rno, false);
return err;
}
@@ -1589,7 +1637,8 @@ struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
struct ATTRIB *attr = NULL;
struct ATTR_FILE_NAME *fname;
- *le = NULL;
+ if (le)
+ *le = NULL;
/* Enumerate all names. */
next:
@@ -1605,7 +1654,7 @@ next:
goto next;
if (!uni)
- goto next;
+ return fname;
if (uni->len != fname->name_len)
goto next;
@@ -1964,10 +2013,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
vcn += clen;
- if (vbo + bytes >= end) {
+ if (vbo + bytes >= end)
bytes = end - vbo;
- flags |= FIEMAP_EXTENT_LAST;
- }
if (vbo + bytes <= valid) {
;
@@ -1977,6 +2024,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
/* vbo < valid && valid < vbo + bytes */
u64 dlen = valid - vbo;
+ if (vbo + dlen >= end)
+ flags |= FIEMAP_EXTENT_LAST;
+
err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
flags);
if (err < 0)
@@ -1995,6 +2045,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
flags |= FIEMAP_EXTENT_UNWRITTEN;
}
+ if (vbo + bytes >= end)
+ flags |= FIEMAP_EXTENT_LAST;
+
err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
if (err < 0)
break;
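
The fiemap hunks above stop setting FIEMAP_EXTENT_LAST as soon as the request end is reached and instead flag whichever reported extent actually touches the end, including the case where a run is split into a valid head and an unwritten tail. A toy model of that flagging; the flag values and printout are placeholders:

#include <stdio.h>

#define EXTENT_LAST      0x1	/* illustrative values, not the uapi flags */
#define EXTENT_UNWRITTEN 0x2

/* Report one run [vbo, vbo + bytes), splitting at 'valid'; flag the piece
 * that reaches 'end' as the last extent. */
static void report_run(unsigned long long vbo, unsigned long long bytes,
		       unsigned long long valid, unsigned long long end)
{
	unsigned flags = 0;

	if (vbo + bytes <= valid) {
		/* fully valid: nothing special */
	} else if (vbo >= valid) {
		flags |= EXTENT_UNWRITTEN;
	} else {
		/* vbo < valid < vbo + bytes: emit the valid head first */
		unsigned long long dlen = valid - vbo;
		unsigned head = (vbo + dlen >= end) ? EXTENT_LAST : 0;

		printf("extent %llu+%llu flags %u\n", vbo, dlen, head);
		vbo = valid;
		bytes -= dlen;
		flags |= EXTENT_UNWRITTEN;
	}

	if (vbo + bytes >= end)
		flags |= EXTENT_LAST;
	printf("extent %llu+%llu flags %u\n", vbo, bytes, flags);
}

int main(void)
{
	report_run(0, 8192, 4096, 8192);	/* valid half, then unwritten last half */
	return 0;
}
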
@@ -2298,10 +2351,8 @@ remove_wof:
out:
kfree(pages);
- if (err) {
- make_bad_inode(inode);
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
- }
+ if (err)
+ _ntfs_bad_inode(inode);
return err;
}
@@ -2940,7 +2991,7 @@ bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
}
/*
- * ni_add_name - Add new name in MFT and in directory.
+ * ni_add_name - Add new name into MFT and into directory.
*/
int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
struct NTFS_DE *de)
@@ -2949,13 +3000,20 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
struct ATTRIB *attr;
struct ATTR_LIST_ENTRY *le;
struct mft_inode *mi;
+ struct ATTR_FILE_NAME *fname;
struct ATTR_FILE_NAME *de_name = (struct ATTR_FILE_NAME *)(de + 1);
u16 de_key_size = le16_to_cpu(de->key_size);
mi_get_ref(&ni->mi, &de->ref);
mi_get_ref(&dir_ni->mi, &de_name->home);
- /* Insert new name in MFT. */
+ /* Fill duplicate from any ATTR_NAME. */
+ fname = ni_fname_name(ni, NULL, NULL, NULL, NULL);
+ if (fname)
+ memcpy(&de_name->dup, &fname->dup, sizeof(fname->dup));
+ de_name->dup.fa = ni->std_fa;
+
+ /* Insert new name into MFT. */
err = ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0, &attr,
&mi, &le);
if (err)
@@ -2963,7 +3021,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de_name, de_key_size);
- /* Insert new name in directory. */
+ /* Insert new name into directory. */
err = indx_insert_entry(&dir_ni->dir, dir_ni, de, ni->mi.sbi, NULL, 0);
if (err)
ni_remove_attr_le(ni, attr, mi, le);
@@ -2987,7 +3045,7 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
* 1) Add new name and remove old name.
* 2) Remove old name and add new name.
*
- * In most cases (not all!) adding new name in MFT and in directory can
+ * In most cases (not all!) adding new name into MFT and into directory can
* allocate additional cluster(s).
* Second way may result to bad inode if we can't add new name
* and then can't restore (add) old name.
@@ -3257,7 +3315,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
err = err2;
if (is_empty) {
- ntfs_mark_rec_free(sbi, mi->rno);
+ ntfs_mark_rec_free(sbi, mi->rno, false);
rb_erase(node, &ni->mi_tree);
mi_put(mi);
}
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 06492f088d60..0d611a6c5511 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -1185,8 +1185,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
if (!r_page)
return -ENOMEM;
- memset(info, 0, sizeof(struct restart_info));
-
/* Determine which restart area we are looking for. */
if (first) {
vbo = 0;
@@ -3791,10 +3789,11 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
if (!log)
return -ENOMEM;
+ memset(&rst_info, 0, sizeof(struct restart_info));
+
log->ni = ni;
log->l_size = l_size;
log->one_page_buf = kmalloc(page_size, GFP_NOFS);
-
if (!log->one_page_buf) {
err = -ENOMEM;
goto out;
@@ -3820,7 +3819,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
}
log_init_pg_hdr(log, page_size, page_size, 1, 1);
- log_create(log, l_size, 0, get_random_int(), false, false);
+ log_create(log, l_size, 0, get_random_u32(), false, false);
log->ra = ra;
@@ -3842,7 +3841,10 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
if (rst_info.vbo)
goto check_restart_area;
+ memset(&rst_info2, 0, sizeof(struct restart_info));
err = log_read_rst(log, l_size, false, &rst_info2);
+ if (err)
+ goto out;
/* Determine which restart area to use. */
if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
@@ -3891,7 +3893,7 @@ check_restart_area:
/* Do some checks based on whether we have a valid log page. */
if (!rst_info.valid_page) {
- open_log_count = get_random_int();
+ open_log_count = get_random_u32();
goto init_log_instance;
}
open_log_count = le32_to_cpu(ra2->open_log_count);
@@ -4042,7 +4044,7 @@ find_oldest:
memcpy(ra->clients, Add2Ptr(ra2, t16),
le16_to_cpu(ra2->ra_len) - t16);
- log->current_openlog_count = get_random_int();
+ log->current_openlog_count = get_random_u32();
ra->open_log_count = cpu_to_le32(log->current_openlog_count);
log->ra_size = offsetof(struct RESTART_AREA, clients) +
sizeof(struct CLIENT_REC);
@@ -4085,8 +4087,10 @@ process_log:
if (client == LFS_NO_CLIENT_LE) {
/* Insert "NTFS" client LogFile. */
client = ra->client_idx[0];
- if (client == LFS_NO_CLIENT_LE)
- return -EINVAL;
+ if (client == LFS_NO_CLIENT_LE) {
+ err = -EINVAL;
+ goto out;
+ }
t16 = le16_to_cpu(client);
cr = ca + t16;
@@ -5055,7 +5059,7 @@ undo_action_next:
goto add_allocated_vcns;
vcn = le64_to_cpu(lrh->target_vcn);
- vcn &= ~(log->clst_per_page - 1);
+ vcn &= ~(u64)(log->clst_per_page - 1);
add_allocated_vcns:
for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 4de9acb16968..4ed15f64b17f 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -703,12 +703,14 @@ out:
/*
* ntfs_mark_rec_free - Mark record as free.
+ * is_mft - true if we are changing MFT
*/
-void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
+void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
{
struct wnd_bitmap *wnd = &sbi->mft.bitmap;
- down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
+ if (!is_mft)
+ down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
if (rno >= wnd->nbits)
goto out;
@@ -727,7 +729,8 @@ void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
sbi->mft.next_free = rno;
out:
- up_write(&wnd->rw_lock);
+ if (!is_mft)
+ up_write(&wnd->rw_lock);
}
/*
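
ntfs_mark_rec_free gains an is_mft flag so callers that already hold the MFT bitmap lock (the MFT-extension paths in frecord.c) can skip retaking it. A generic sketch of this "caller may already hold the lock" convention; the names and pthread lock are invented stand-ins:

#include <pthread.h>

static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Free record 'rno'; when already_locked is set, the caller holds bitmap_lock. */
static void mark_rec_free(unsigned long rno, int already_locked)
{
	if (!already_locked)
		pthread_mutex_lock(&bitmap_lock);

	/* ... clear the bit for rno, update next_free ... */
	(void)rno;

	if (!already_locked)
		pthread_mutex_unlock(&bitmap_lock);
}
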
@@ -780,7 +783,7 @@ out:
*/
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
- CLST zone_limit, zone_max, lcn, vcn, len;
+ CLST lcn, vcn, len;
size_t lcn_s, zlen;
struct wnd_bitmap *wnd = &sbi->used.bitmap;
struct ntfs_inode *ni = sbi->mft.ni;
@@ -789,16 +792,6 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
if (wnd_zone_len(wnd))
return 0;
- /*
- * Compute the MFT zone at two steps.
- * It would be nice if we are able to allocate 1/8 of
- * total clusters for MFT but not more then 512 MB.
- */
- zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
- zone_max = wnd->nbits >> 3;
- if (zone_max > zone_limit)
- zone_max = zone_limit;
-
vcn = bytes_to_cluster(sbi,
(u64)sbi->mft.bitmap.nbits << sbi->record_bits);
@@ -812,13 +805,7 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
lcn_s = lcn + 1;
/* Try to allocate clusters after last MFT run. */
- zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
- if (!zlen) {
- ntfs_notice(sbi->sb, "MftZone: unavailable");
- return 0;
- }
-
- /* Truncate too large zone. */
+ zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
wnd_zone_set(wnd, lcn_s, zlen);
return 0;
@@ -827,16 +814,21 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
/*
* ntfs_update_mftmirr - Update $MFTMirr data.
*/
-int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
+void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
int err;
struct super_block *sb = sbi->sb;
- u32 blocksize = sb->s_blocksize;
+ u32 blocksize;
sector_t block1, block2;
u32 bytes;
+ if (!sb)
+ return;
+
+ blocksize = sb->s_blocksize;
+
if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
- return 0;
+ return;
err = 0;
bytes = sbi->mft.recs_mirr << sbi->record_bits;
@@ -847,16 +839,13 @@ int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
struct buffer_head *bh1, *bh2;
bh1 = sb_bread(sb, block1++);
- if (!bh1) {
- err = -EIO;
- goto out;
- }
+ if (!bh1)
+ return;
bh2 = sb_getblk(sb, block2++);
if (!bh2) {
put_bh(bh1);
- err = -EIO;
- goto out;
+ return;
}
if (buffer_locked(bh2))
@@ -876,13 +865,24 @@ int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
put_bh(bh2);
if (err)
- goto out;
+ return;
}
sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
+}
-out:
- return err;
+/*
+ * ntfs_bad_inode
+ *
+ * Marks inode as bad and marks fs as 'dirty'
+ */
+void ntfs_bad_inode(struct inode *inode, const char *hint)
+{
+ struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
+
+ ntfs_inode_err(inode, "%s", hint);
+ make_bad_inode(inode);
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}
/*
@@ -1395,7 +1395,7 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
if (buffer_locked(bh))
__wait_on_buffer(bh);
- lock_buffer(nb->bh[idx]);
+ lock_buffer(bh);
bh_data = bh->b_data + off;
end_data = Add2Ptr(bh_data, op);
@@ -1443,23 +1443,12 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
return err;
}
-static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
-{
- struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
-
- if (!bio && (current->flags & PF_MEMALLOC)) {
- while (!bio && (nr_vecs /= 2))
- bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
- }
- return bio;
-}
-
/*
* ntfs_bio_pages - Read/write pages from/to disk.
*/
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
- u32 op)
+ enum req_op op)
{
int err = 0;
struct bio *new, *bio = NULL;
@@ -1496,19 +1485,13 @@ int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
lbo = ((u64)lcn << cluster_bits) + off;
len = ((u64)clen << cluster_bits) - off;
new_bio:
- new = ntfs_alloc_bio(nr_pages - page_idx);
- if (!new) {
- err = -ENOMEM;
- goto out;
- }
+ new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
if (bio) {
bio_chain(bio, new);
submit_bio(bio);
}
bio = new;
- bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = lbo >> 9;
- bio->bi_opf = op;
while (len) {
off = vbo & (PAGE_SIZE - 1);
@@ -1599,18 +1582,12 @@ int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
lbo = (u64)lcn << cluster_bits;
len = (u64)clen << cluster_bits;
new_bio:
- new = ntfs_alloc_bio(BIO_MAX_VECS);
- if (!new) {
- err = -ENOMEM;
- break;
- }
+ new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
if (bio) {
bio_chain(bio, new);
submit_bio(bio);
}
bio = new;
- bio_set_dev(bio, bdev);
- bio->bi_opf = REQ_OP_WRITE;
bio->bi_iter.bi_sector = lbo >> 9;
for (;;) {
@@ -1626,11 +1603,10 @@ new_bio:
}
} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
- if (bio) {
- if (!err)
- err = submit_bio_wait(bio);
- bio_put(bio);
- }
+ if (!err)
+ err = submit_bio_wait(bio);
+ bio_put(bio);
+
blk_finish_plug(&plug);
out:
unlock_page(fill);
@@ -2448,7 +2424,7 @@ static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
- CLST end, i;
+ CLST end, i, zone_len, zlen;
struct wnd_bitmap *wnd = &sbi->used.bitmap;
down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
@@ -2483,6 +2459,28 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
ntfs_unmap_and_discard(sbi, lcn, len);
wnd_set_free(wnd, lcn, len);
+ /* append to MFT zone, if possible. */
+ zone_len = wnd_zone_len(wnd);
+ zlen = min(zone_len + len, sbi->zone_max);
+
+ if (zlen == zone_len) {
+ /* MFT zone already has maximum size. */
+ } else if (!zone_len) {
+ /* Create MFT zone only if 'zlen' is large enough. */
+ if (zlen == sbi->zone_max)
+ wnd_zone_set(wnd, lcn, zlen);
+ } else {
+ CLST zone_lcn = wnd_zone_bit(wnd);
+
+ if (lcn + len == zone_lcn) {
+ /* Append into head MFT zone. */
+ wnd_zone_set(wnd, lcn, zlen);
+ } else if (zone_lcn + zone_len == lcn) {
+ /* Append into tail MFT zone. */
+ wnd_zone_set(wnd, zone_lcn, zlen);
+ }
+ }
+
out:
up_write(&wnd->rw_lock);
}
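
mark_as_free_ex now tries to grow the MFT zone from freed clusters, capped at the new sbi->zone_max: nothing changes if the zone is already at the cap, a zone is created only when the freed range alone reaches the cap, and otherwise the range is merged when it touches the zone's head or tail. A standalone model of that decision (zone stored as [start, start + len)); all names are illustrative:

#include <stdio.h>

struct zone { unsigned long long start, len; };

/* Possibly extend the zone with the freed range [lcn, lcn + len). */
static void zone_absorb(struct zone *z, unsigned long long lcn,
			unsigned long long len, unsigned long long zone_max)
{
	unsigned long long zlen = z->len + len;

	if (zlen > zone_max)
		zlen = zone_max;

	if (zlen == z->len) {
		/* zone already has maximum size */
	} else if (!z->len) {
		if (zlen == zone_max) {		/* create only if large enough */
			z->start = lcn;
			z->len = zlen;
		}
	} else if (lcn + len == z->start) {	/* freed range ends at zone head */
		z->start = lcn;
		z->len = zlen;
	} else if (z->start + z->len == lcn) {	/* freed range begins at zone tail */
		z->len = zlen;
	}
}

int main(void)
{
	struct zone z = { 1000, 64 };

	zone_absorb(&z, 1064, 32, 4096);	/* touches the tail: zone grows */
	printf("zone [%llu, %llu)\n", z.start, z.start + z.len);
	return 0;
}
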
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 6f81e3a49abf..440328147e7e 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -1042,19 +1042,16 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
{
int err;
struct NTFS_DE *e;
- const struct INDEX_HDR *hdr;
struct indx_node *node;
if (!root)
root = indx_get_root(&ni->dir, ni, NULL, NULL);
if (!root) {
- err = -EINVAL;
- goto out;
+ /* Should not happen. */
+ return -EINVAL;
}
- hdr = &root->ihdr;
-
/* Check cache. */
e = fnd->level ? fnd->de[fnd->level - 1] : fnd->root_de;
if (e && !de_is_last(e) &&
@@ -1068,39 +1065,35 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
fnd_clear(fnd);
/* Lookup entry that is <= to the search value. */
- e = hdr_find_e(indx, hdr, key, key_len, ctx, diff);
+ e = hdr_find_e(indx, &root->ihdr, key, key_len, ctx, diff);
if (!e)
return -EINVAL;
fnd->root_de = e;
- err = 0;
for (;;) {
node = NULL;
- if (*diff >= 0 || !de_has_vcn_ex(e)) {
- *entry = e;
- goto out;
- }
+ if (*diff >= 0 || !de_has_vcn_ex(e))
+ break;
/* Read next level. */
err = indx_read(indx, ni, de_get_vbn(e), &node);
if (err)
- goto out;
+ return err;
/* Lookup entry that is <= to the search value. */
e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
diff);
if (!e) {
- err = -EINVAL;
put_indx_node(node);
- goto out;
+ return -EINVAL;
}
fnd_push(fnd, node, e);
}
-out:
- return err;
+ *entry = e;
+ return 0;
}
int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
@@ -1354,7 +1347,7 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
goto out;
err = ni_insert_nonresident(ni, ATTR_ALLOC, in->name, in->name_len,
- &run, 0, len, 0, &alloc, NULL);
+ &run, 0, len, 0, &alloc, NULL, NULL);
if (err)
goto out1;
@@ -1685,8 +1678,8 @@ indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
{
int err;
const struct NTFS_DE *sp;
- struct NTFS_DE *e, *de_t, *up_e = NULL;
- struct indx_node *n2 = NULL;
+ struct NTFS_DE *e, *de_t, *up_e;
+ struct indx_node *n2;
struct indx_node *n1 = fnd->nodes[level];
struct INDEX_HDR *hdr1 = &n1->index->ihdr;
struct INDEX_HDR *hdr2;
@@ -1994,7 +1987,7 @@ static int indx_free_children(struct ntfs_index *indx, struct ntfs_inode *ni,
const struct NTFS_DE *e, bool trim)
{
int err;
- struct indx_node *n;
+ struct indx_node *n = NULL;
struct INDEX_HDR *hdr;
CLST vbn = de_get_vbn(e);
size_t i;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index a87ab3ad3cd3..d5a3afbbbfd8 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -430,6 +430,7 @@ end_enum:
} else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
/* Records in $Extend are not a files or general directories. */
+ inode->i_op = &ntfs_file_inode_operations;
} else {
err = -EINVAL;
goto out;
@@ -500,7 +501,7 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
inode = ntfs_read_mft(inode, name, ref);
else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
/* Inode overlaps? */
- make_bad_inode(inode);
+ _ntfs_bad_inode(inode);
}
return inode;
@@ -629,12 +630,9 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
bh->b_size = block_size;
off = vbo & (PAGE_SIZE - 1);
set_bh_page(bh, page, off);
- ll_rw_block(REQ_OP_READ, 0, 1, &bh);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
- err = -EIO;
+ err = bh_read(bh, 0);
+ if (err < 0)
goto out;
- }
zero_user_segment(page, off + voff, off + block_size);
}
}
@@ -676,8 +674,9 @@ static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
}
-static int ntfs_readpage(struct file *file, struct page *page)
+static int ntfs_read_folio(struct file *file, struct folio *folio)
{
+ struct page *page = &folio->page;
int err;
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
@@ -701,7 +700,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
}
/* Normal + sparse files. */
- return mpage_readpage(page, ntfs_get_block);
+ return mpage_read_folio(folio, ntfs_get_block);
}
static void ntfs_readahead(struct readahead_control *rac)
@@ -757,6 +756,7 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
loff_t vbo = iocb->ki_pos;
loff_t end;
int wr = iov_iter_rw(iter) & WRITE;
+ size_t iter_count = iov_iter_count(iter);
loff_t valid;
ssize_t ret;
@@ -770,10 +770,13 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
wr ? ntfs_get_block_direct_IO_W
: ntfs_get_block_direct_IO_R);
- if (ret <= 0)
+ if (ret > 0)
+ end = vbo + ret;
+ else if (wr && ret == -EIOCBQUEUED)
+ end = vbo + iter_count;
+ else
goto out;
- end = vbo + ret;
valid = ni->i_valid;
if (wr) {
if (end > valid && !S_ISBLK(inode->i_mode)) {
@@ -846,12 +849,10 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
static int ntfs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct inode *inode = mapping->host;
- struct ntfs_inode *ni = ntfs_i(inode);
/* Redirect call to 'ntfs_writepage' for resident files. */
- get_block_t *get_block = is_resident(ni) ? NULL : &ntfs_get_block;
-
- return mpage_writepages(mapping, wbc, get_block);
+ if (is_resident(ntfs_i(mapping->host)))
+ return generic_writepages(mapping, wbc);
+ return mpage_writepages(mapping, wbc, ntfs_get_block);
}
static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
@@ -861,9 +862,8 @@ static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
bh_result, create, GET_BLOCK_WRITE_BEGIN);
}
-static int ntfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, u32 len, u32 flags, struct page **pagep,
- void **fsdata)
+int ntfs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, u32 len, struct page **pagep, void **fsdata)
{
int err;
struct inode *inode = mapping->host;
@@ -872,7 +872,7 @@ static int ntfs_write_begin(struct file *file, struct address_space *mapping,
*pagep = NULL;
if (is_resident(ni)) {
struct page *page = grab_cache_page_write_begin(
- mapping, pos >> PAGE_SHIFT, flags);
+ mapping, pos >> PAGE_SHIFT);
if (!page) {
err = -ENOMEM;
@@ -894,7 +894,7 @@ static int ntfs_write_begin(struct file *file, struct address_space *mapping,
goto out;
}
- err = block_write_begin(mapping, pos, len, flags, pagep,
+ err = block_write_begin(mapping, pos, len, pagep,
ntfs_get_block_write_begin);
out:
@@ -904,10 +904,9 @@ out:
/*
* ntfs_write_end - Address_space_operations::write_end.
*/
-static int ntfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, u32 len, u32 copied, struct page *page,
- void *fsdata)
-
+int ntfs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, u32 len, u32 copied, struct page *page,
+ void *fsdata)
{
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
@@ -975,7 +974,7 @@ int reset_log_file(struct inode *inode)
len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
- err = block_write_begin(mapping, pos, len, 0, &page,
+ err = block_write_begin(mapping, pos, len, &page,
ntfs_get_block_write_begin);
if (err)
goto out;
@@ -1631,7 +1630,7 @@ out4:
ni->mi.dirty = false;
discard_new_inode(inode);
out3:
- ntfs_mark_rec_free(sbi, ino);
+ ntfs_mark_rec_free(sbi, ino, false);
out2:
__putname(new_de);
@@ -1654,7 +1653,6 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
struct ntfs_inode *ni = ntfs_i(inode);
struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
struct NTFS_DE *de;
- struct ATTR_FILE_NAME *de_name;
/* Allocate PATH_MAX bytes. */
de = __getname();
@@ -1669,15 +1667,6 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
if (err)
goto out;
- de_name = (struct ATTR_FILE_NAME *)(de + 1);
- /* Fill duplicate info. */
- de_name->dup.cr_time = de_name->dup.m_time = de_name->dup.c_time =
- de_name->dup.a_time = kernel2nt(&inode->i_ctime);
- de_name->dup.alloc_size = de_name->dup.data_size =
- cpu_to_le64(inode->i_size);
- de_name->dup.fa = ni->std_fa;
- de_name->dup.ea_size = de_name->dup.reparse = 0;
-
err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
out:
__putname(de);
@@ -1730,9 +1719,7 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
if (inode->i_nlink)
mark_inode_dirty(inode);
} else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
- make_bad_inode(inode);
- ntfs_inode_err(inode, "failed to undo unlink");
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ _ntfs_bad_inode(inode);
} else {
if (ni_is_dirty(dir))
mark_inode_dirty(dir);
@@ -1937,12 +1924,10 @@ const struct inode_operations ntfs_link_inode_operations = {
.setattr = ntfs3_setattr,
.listxattr = ntfs_listxattr,
.permission = ntfs_permission,
- .get_acl = ntfs_get_acl,
- .set_acl = ntfs_set_acl,
};
const struct address_space_operations ntfs_aops = {
- .readpage = ntfs_readpage,
+ .read_folio = ntfs_read_folio,
.readahead = ntfs_readahead,
.writepage = ntfs_writepage,
.writepages = ntfs_writepages,
@@ -1950,11 +1935,12 @@ const struct address_space_operations ntfs_aops = {
.write_end = ntfs_write_end,
.direct_IO = ntfs_direct_IO,
.bmap = ntfs_bmap,
- .set_page_dirty = __set_page_dirty_buffers,
+ .dirty_folio = block_dirty_folio,
+ .invalidate_folio = block_invalidate_folio,
};
const struct address_space_operations ntfs_aops_cmpr = {
- .readpage = ntfs_readpage,
+ .read_folio = ntfs_read_folio,
.readahead = ntfs_readahead,
};
// clang-format on
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index bc741213ad84..bc22cc321a74 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -208,7 +208,7 @@ static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
}
/*
- * ntfs_rmdir - inode_operations::rm_dir
+ * ntfs_rmdir - inode_operations::rmdir
*/
static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
{
@@ -308,9 +308,7 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *dir,
err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de, &is_bad);
if (is_bad) {
/* Restore after failed rename failed too. */
- make_bad_inode(inode);
- ntfs_inode_err(inode, "failed to undo rename");
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ _ntfs_bad_inode(inode);
} else if (!err) {
inode->i_ctime = dir->i_ctime = dir->i_mtime =
current_time(dir);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 8aaec7e0804e..2c791222c4e2 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -11,7 +11,6 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
-#include <linux/cleancache.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
@@ -221,6 +220,7 @@ struct ntfs_sb_info {
u32 flags; // See NTFS_FLAGS_XXX.
+ CLST zone_max; // Maximum MFT zone length in clusters
CLST bad_clusters; // The count of marked bad clusters.
u16 max_bytes_per_attr; // Maximum attribute size in record.
@@ -409,8 +409,6 @@ enum REPARSE_SIGN {
};
/* Functions from attrib.c */
-int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
- struct runs_tree *run, const CLST *vcn);
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
@@ -441,6 +439,7 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
u64 new_valid);
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
+int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);
/* Functions from attrlist.c */
@@ -529,7 +528,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
const __le16 *name, u8 name_len,
const struct runs_tree *run, CLST svcn, CLST len,
__le16 flags, struct ATTRIB **new_attr,
- struct mft_inode **mi);
+ struct mft_inode **mi, struct ATTR_LIST_ENTRY **le);
int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
enum ATTR_TYPE type, const __le16 *name, u8 name_len,
struct ATTRIB **new_attr, struct mft_inode **mi,
@@ -590,10 +589,12 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
enum ALLOCATE_OPT opt);
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
struct ntfs_inode *ni, struct mft_inode **mi);
-void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno);
+void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft);
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to);
int ntfs_refresh_zone(struct ntfs_sb_info *sbi);
-int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
+void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
+void ntfs_bad_inode(struct inode *inode, const char *hint);
+#define _ntfs_bad_inode(i) ntfs_bad_inode(i, __func__)
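(Editor's note: the two declarations above replace the open-coded error handling that this patch removes from inode.c and namei.c. Reconstructed purely from those removed call sites, the consolidated helper presumably does roughly what the sketch below shows; its real body lives in fs/ntfs3/fsntfs.c, is not part of this hunk, and may differ.)

void ntfs_bad_inode(struct inode *inode, const char *hint)
{
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	ntfs_inode_err(inode, "%s", hint);	/* e.g. "failed to undo unlink" */
	make_bad_inode(inode);			/* detach VFS ops from the broken inode */
	ntfs_set_state(sbi, NTFS_DIRTY_ERROR);	/* flag the volume for chkdsk */
}

The _ntfs_bad_inode() wrapper then supplies the caller's name via __func__, which is why the per-site error strings could be dropped.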
enum NTFS_DIRTY_FLAGS {
NTFS_DIRTY_CLEAR = 0,
NTFS_DIRTY_DIRTY = 1,
@@ -618,7 +619,7 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
struct ntfs_buffers *nb, int sync);
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
- u32 op);
+ enum req_op op);
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
u64 vbo, u64 *lbo, u64 *bytes);
@@ -690,6 +691,11 @@ int ntfs_set_size(struct inode *inode, u64 new_size);
int reset_log_file(struct inode *inode);
int ntfs_get_block(struct inode *inode, sector_t vbn,
struct buffer_head *bh_result, int create);
+int ntfs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, u32 len, struct page **pagep, void **fsdata);
+int ntfs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, u32 len, u32 copied, struct page *page,
+ void *fsdata);
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
int ntfs_sync_inode(struct inode *inode);
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
@@ -734,7 +740,6 @@ static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
int mi_write(struct mft_inode *mi, int wait);
int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
__le16 flags, bool is_mft);
-void mi_mark_free(struct mft_inode *mi);
struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
const __le16 *name, u8 name_len, u32 asize,
u16 name_off);
@@ -776,10 +781,10 @@ bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
void run_truncate(struct runs_tree *run, CLST vcn);
void run_truncate_head(struct runs_tree *run, CLST vcn);
void run_truncate_around(struct runs_tree *run, CLST vcn);
-bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *Index);
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
bool is_mft);
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
+bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len);
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
CLST *lcn, CLST *len);
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn);
@@ -798,6 +803,7 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
#define run_unpack_ex run_unpack
#endif
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn);
+int run_clone(const struct runs_tree *run, struct runs_tree *new_run);
/* Globals from super.c */
void *ntfs_set_shared(void *ptr, u32 bytes);
@@ -892,13 +898,8 @@ static inline struct page *ntfs_map_page(struct address_space *mapping,
{
struct page *page = read_mapping_page(mapping, index, NULL);
- if (!IS_ERR(page)) {
+ if (!IS_ERR(page))
kmap(page);
- if (!PageError(page))
- return page;
- ntfs_unmap_page(page);
- return ERR_PTR(-EIO);
- }
return page;
}
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 861e35791506..7d2fac5ee215 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -395,28 +395,6 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
}
/*
- * mi_mark_free - Mark record as unused and marks it as free in bitmap.
- */
-void mi_mark_free(struct mft_inode *mi)
-{
- CLST rno = mi->rno;
- struct ntfs_sb_info *sbi = mi->sbi;
-
- if (rno >= MFT_REC_RESERVED && rno < MFT_REC_FREE) {
- ntfs_clear_mft_tail(sbi, rno, rno + 1);
- mi->dirty = false;
- return;
- }
-
- if (mi->mrec) {
- clear_rec_inuse(mi->mrec);
- mi->dirty = true;
- mi_write(mi, 0);
- }
- ntfs_mark_rec_free(sbi, rno);
-}
-
-/*
* mi_insert_attr - Reserve space for new attribute.
*
* Return: Not full constructed attribute or NULL if not possible to create.
@@ -445,12 +423,11 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
attr = NULL;
while ((attr = mi_enum_attr(mi, attr))) {
diff = compare_attr(attr, type, name, name_len, upcase);
- if (diff > 0)
- break;
+
if (diff < 0)
continue;
- if (!is_attr_indexed(attr))
+ if (!diff && !is_attr_indexed(attr))
return NULL;
break;
}
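(Editor's note: the reworked loop above changes when mi_insert_attr() refuses to insert: an equal attribute now blocks insertion only if it is not indexed, since indexed attributes such as multiple $FILE_NAME entries may legitimately repeat. Below is a hypothetical stand-alone illustration of that rule, with made-up names and integer keys standing in for compare_attr().)

#include <stdbool.h>
#include <stdio.h>

struct demo_attr {
	int key;
	bool indexed;	/* indexed attributes may appear more than once */
};

/* Return the insertion position for @key, or -1 for a forbidden duplicate. */
static int find_insert_pos(const struct demo_attr *a, int n, int key)
{
	int i;

	for (i = 0; i < n; i++) {
		int diff = a[i].key - key;

		if (diff < 0)
			continue;		/* existing entry sorts first, keep scanning */
		if (!diff && !a[i].indexed)
			return -1;		/* true duplicate: refuse */
		break;				/* insert right here */
	}
	return i;
}

int main(void)
{
	const struct demo_attr list[] = {
		{ 0x10, false }, { 0x30, true }, { 0x30, true }, { 0x80, false },
	};

	printf("%d\n", find_insert_pos(list, 4, 0x30));	/* 1: indexed, duplicates allowed */
	printf("%d\n", find_insert_pos(list, 4, 0x80));	/* -1: non-indexed duplicate refused */
	printf("%d\n", find_insert_pos(list, 4, 0x90));	/* 4: append at the end */
	return 0;
}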
diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
index a8fec651f973..aaaa0d3d35a2 100644
--- a/fs/ntfs3/run.c
+++ b/fs/ntfs3/run.c
@@ -31,7 +31,7 @@ struct ntfs_run {
* Case of entry missing from list 'index' will be set to
* point to insertion position for the entry question.
*/
-bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
+static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
size_t min_idx, max_idx, mid_idx;
struct ntfs_run *r;
@@ -547,6 +547,48 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
return true;
}
+/*
+ * run_insert_range - Helper for attr_insert_range(),
+ * which is a helper for fallocate(insert_range).
+ */
+bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
+{
+ size_t index;
+ struct ntfs_run *r, *e;
+
+ if (WARN_ON(!run_lookup(run, vcn, &index)))
+ return false; /* Should never be here. */
+
+ e = run->runs + run->count;
+ r = run->runs + index;
+
+ if (vcn > r->vcn)
+ r += 1;
+
+ for (; r < e; r++)
+ r->vcn += len;
+
+ r = run->runs + index;
+
+ if (vcn > r->vcn) {
+ /* Split the fragment that contains @vcn. */
+ CLST len1 = vcn - r->vcn;
+ CLST len2 = r->len - len1;
+ CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);
+
+ r->len = len1;
+
+ if (!run_add_entry(run, vcn + len, lcn2, len2, false))
+ return false;
+ }
+
+ if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
+ return false;
+
+ return true;
+}
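(Editor's note: a worked example of the split above, with made-up numbers and user-space types. Inserting 16 sparse clusters at VCN 120 into a fragment mapping VCNs [100, 160) to LCNs [500, 560) leaves a 20-cluster head in place, punches the sparse hole, and re-adds the 40-cluster tail at VCN 136 / LCN 520. Only the arithmetic mirrors run_insert_range.)

#include <stdio.h>

#define DEMO_SPARSE (~0ULL)

struct demo_run { unsigned long long vcn, lcn, len; };

int main(void)
{
	/* One mapped fragment: VCNs [100, 160) stored at LCNs [500, 560). */
	struct demo_run r = { 100, 500, 60 };
	unsigned long long vcn = 120, len = 16;	/* insert 16 sparse clusters at VCN 120 */

	unsigned long long len1 = vcn - r.vcn;	/* 20 clusters keep their place */
	unsigned long long len2 = r.len - len1;	/* 40 clusters move up by @len */
	unsigned long long lcn2 = r.lcn == DEMO_SPARSE ? DEMO_SPARSE : r.lcn + len1;

	printf("head : vcn=%llu lcn=%llu len=%llu\n", r.vcn, r.lcn, len1);	/* 100 500 20 */
	printf("hole : vcn=%llu lcn=SPARSE len=%llu\n", vcn, len);		/* 120 -   16 */
	printf("tail : vcn=%llu lcn=%llu len=%llu\n", vcn + len, lcn2, len2);	/* 136 520 40 */
	return 0;
}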
+
/*
* run_get_entry - Return index-th mapped region.
*/
@@ -778,26 +820,36 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
CLST next_vcn, vcn, lcn;
CLST prev_lcn = 0;
CLST evcn1 = svcn + len;
+ const struct ntfs_run *r, *r_end;
int packed_size = 0;
size_t i;
- bool ok;
s64 dlcn;
int offset_size, size_size, tmp;
- next_vcn = vcn = svcn;
-
*packed_vcns = 0;
if (!len)
goto out;
- ok = run_lookup_entry(run, vcn, &lcn, &len, &i);
+ /* Check that all required entries [svcn, evcn1) are available. */
+ if (!run_lookup(run, svcn, &i))
+ return -ENOENT;
+
+ r_end = run->runs + run->count;
+ r = run->runs + i;
- if (!ok)
- goto error;
+ for (next_vcn = r->vcn + r->len; next_vcn < evcn1;
+ next_vcn = r->vcn + r->len) {
+ if (++r >= r_end || r->vcn != next_vcn)
+ return -ENOENT;
+ }
- if (next_vcn != vcn)
- goto error;
+ /* Repeat the cycle above and pack the runs. Assume no errors. */
+ r = run->runs + i;
+ len = svcn - r->vcn;
+ vcn = svcn;
+ lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len);
+ len = r->len - len;
for (;;) {
next_vcn = vcn + len;
@@ -846,12 +898,10 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
goto out;
- ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
- if (!ok)
- goto error;
-
- if (next_vcn != vcn)
- goto error;
+ r += 1;
+ vcn = r->vcn;
+ lcn = r->lcn;
+ len = r->len;
}
out:
@@ -860,9 +910,6 @@ out:
run_buf[0] = 0;
return packed_size + 1;
-
-error:
- return -EOPNOTSUPP;
}
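(Editor's note: the rewrite above packs in two passes: first verify that every fragment in [svcn, evcn1) is present and contiguous, then walk the array directly without per-entry lookups. Below is a stand-alone sketch of the contiguity check only, with simplified, made-up types.)

#include <stdbool.h>
#include <stdio.h>

struct demo_run { unsigned long long vcn, len; };

static bool covers_range(const struct demo_run *r, int count,
			 unsigned long long svcn, unsigned long long evcn1)
{
	int i = 0;
	unsigned long long next;

	/* Find the fragment containing svcn (run_lookup() does this by binary search). */
	while (i < count && r[i].vcn + r[i].len <= svcn)
		i++;
	if (i == count || r[i].vcn > svcn)
		return false;

	for (next = r[i].vcn + r[i].len; next < evcn1; next = r[i].vcn + r[i].len) {
		if (++i == count || r[i].vcn != next)
			return false;	/* hole between fragments */
	}
	return true;
}

int main(void)
{
	const struct demo_run run[] = { { 0, 8 }, { 8, 4 }, { 16, 4 } };

	printf("%d\n", covers_range(run, 3, 0, 12));	/* 1: [0, 12) fully mapped */
	printf("%d\n", covers_range(run, 3, 0, 20));	/* 0: [12, 16) is missing */
	return 0;
}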
/*
@@ -1109,3 +1156,28 @@ int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
*highest_vcn = vcn64 - 1;
return 0;
}
+
+/*
+ * run_clone - Make a copy of run.
+ */
+int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
+{
+ size_t bytes = run->count * sizeof(struct ntfs_run);
+
+ if (bytes > new_run->allocated) {
+ struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);
+
+ if (!new_ptr)
+ return -ENOMEM;
+
+ kvfree(new_run->runs);
+ new_run->runs = new_ptr;
+ new_run->allocated = bytes;
+ }
+
+ memcpy(new_run->runs, run->runs, bytes);
+ new_run->count = run->count;
+ return 0;
+}
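(Editor's note: run_clone() reallocates only when the destination buffer is too small and otherwise reuses, and never shrinks, it, so repeated snapshot-before-modify cycles settle on a single allocation. A user-space sketch of that grow-only copy, with made-up names:)

#include <stdlib.h>
#include <string.h>

struct demo_run { unsigned long long vcn, lcn, len; };

struct demo_runs {
	struct demo_run *runs;
	size_t count;		/* fragments in use */
	size_t allocated;	/* capacity, in bytes */
};

static int demo_clone(const struct demo_runs *src, struct demo_runs *dst)
{
	size_t bytes = src->count * sizeof(struct demo_run);

	if (bytes > dst->allocated) {
		/* Grow only when the existing buffer is too small. */
		struct demo_run *p = malloc(bytes);

		if (!p)
			return -1;
		free(dst->runs);
		dst->runs = p;
		dst->allocated = bytes;
	}

	memcpy(dst->runs, src->runs, bytes);
	dst->count = src->count;
	return 0;
}

int main(void)
{
	struct demo_run frags[] = { { 0, 100, 8 }, { 8, 200, 4 } };
	const struct demo_runs src = { frags, 2, sizeof(frags) };
	struct demo_runs dst = { NULL, 0, 0 };
	int err = demo_clone(&src, &dst);	/* dst now holds an independent copy */

	free(dst.runs);
	return err;
}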
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 29813200c7af..47012c9bf505 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -30,6 +30,7 @@
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/log2.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/seq_file.h>
@@ -390,7 +391,7 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
return -EINVAL;
}
- memcpy(sbi->options, new_opts, sizeof(*new_opts));
+ swap(sbi->options, fc->fs_private);
return 0;
}
@@ -399,7 +400,7 @@ static struct kmem_cache *ntfs_inode_cachep;
static struct inode *ntfs_alloc_inode(struct super_block *sb)
{
- struct ntfs_inode *ni = kmem_cache_alloc(ntfs_inode_cachep, GFP_NOFS);
+ struct ntfs_inode *ni = alloc_inode_sb(sb, ntfs_inode_cachep, GFP_NOFS);
if (!ni)
return NULL;
@@ -668,9 +669,11 @@ static u32 format_size_gb(const u64 bytes, u32 *mb)
static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
{
- return boot->sectors_per_clusters <= 0x80
- ? boot->sectors_per_clusters
- : (1u << (0 - boot->sectors_per_clusters));
+ if (boot->sectors_per_clusters <= 0x80)
+ return boot->sectors_per_clusters;
+ if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */
+ return 1U << (0 - boot->sectors_per_clusters);
+ return -EINVAL;
}
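(Editor's note: the boot sector stores sectors-per-cluster either as a plain count, for values up to 0x80, or, for large clusters, as the two's-complement of a shift; the new check rejects encodings below 0xf4, which would decode to clusters larger than 2 MB. A hypothetical stand-alone decoder, assuming 512-byte sectors for the printed cluster sizes:)

#include <stdint.h>
#include <stdio.h>

static int32_t demo_sectors_per_clst(uint8_t raw)
{
	if (raw <= 0x80)
		return raw;			/* plain count: 1, 2, 4, ... 128 */
	if (raw >= 0xf4)
		return 1u << (uint8_t)-raw;	/* 0xff..0xf4 -> shift 1..12 (2 MB max) */
	return -1;				/* anything else is rejected as invalid */
}

int main(void)
{
	const uint8_t samples[] = { 0x08, 0xf6, 0xf4, 0xe0 };

	for (unsigned i = 0; i < sizeof(samples); i++) {
		int32_t spc = demo_sectors_per_clst(samples[i]);

		if (spc < 0)
			printf("0x%02x -> invalid\n", samples[i]);
		else
			printf("0x%02x -> %d sectors (%d KiB clusters)\n",
			       samples[i], spc, spc * 512 / 1024);
	}
	return 0;
}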
/*
@@ -713,6 +716,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
/* cluster size: 512, 1K, 2K, 4K, ... 2M */
sct_per_clst = true_sectors_per_clst(boot);
+ if ((int)sct_per_clst < 0)
+ goto out;
if (!is_power_of_2(sct_per_clst))
goto out;
@@ -866,6 +871,13 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
#endif
+ /*
+ * Compute the MFT zone in two steps.
+ * It would be nice to allocate 1/8 of the total
+ * clusters for the MFT, but not more than 512 MB.
+ */
+ sbi->zone_max = min_t(CLST, 0x20000000 >> sbi->cluster_bits, clusters >> 3);
+
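(Editor's note: a quick check of the min_t() cap above, assuming the common 4 KiB cluster size, i.e. cluster_bits == 12: 0x20000000 >> 12 is 131072 clusters, which is 512 MB, so volumes under 4 GiB get clusters/8 for the MFT zone and anything larger is capped at 512 MB. The sketch below just prints a few data points; the names are local to the sketch.)

#include <stdio.h>

int main(void)
{
	const unsigned cluster_bits = 12;				/* 4 KiB clusters */
	const unsigned long long cap = 0x20000000ULL >> cluster_bits;	/* 131072 clusters == 512 MB */
	const unsigned long long volumes[] = {
		1ULL << 18,	/*  1 GiB volume:   262144 clusters */
		1ULL << 20,	/*  4 GiB volume:  1048576 clusters */
		1ULL << 24,	/* 64 GiB volume: 16777216 clusters */
	};

	for (unsigned i = 0; i < 3; i++) {
		unsigned long long clusters = volumes[i];
		unsigned long long zone = clusters >> 3 < cap ? clusters >> 3 : cap;

		printf("%llu clusters -> MFT zone %llu clusters (%llu MB)\n",
		       clusters, zone, zone << cluster_bits >> 20);
	}
	return 0;
}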
err = 0;
out:
@@ -882,7 +894,6 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
int err;
struct ntfs_sb_info *sbi = sb->s_fs_info;
struct block_device *bdev = sb->s_bdev;
- struct request_queue *rq;
struct inode *inode;
struct ntfs_inode *ni;
size_t i, tt;
@@ -897,6 +908,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
ref.high = 0;
sbi->sb = sb;
+ sbi->options = fc->fs_private;
+ fc->fs_private = NULL;
sb->s_flags |= SB_NODIRATIME;
sb->s_magic = 0x7366746e; // "ntfs"
sb->s_op = &ntfs_sops;
@@ -912,15 +925,14 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto out;
}
- rq = bdev_get_queue(bdev);
- if (blk_queue_discard(rq) && rq->limits.discard_granularity) {
- sbi->discard_granularity = rq->limits.discard_granularity;
+ if (bdev_max_discard_sectors(bdev) && bdev_discard_granularity(bdev)) {
+ sbi->discard_granularity = bdev_discard_granularity(bdev);
sbi->discard_granularity_mask_inv =
~(u64)(sbi->discard_granularity - 1);
}
/* Parse boot. */
- err = ntfs_init_from_boot(sb, rq ? queue_logical_block_size(rq) : 512,
+ err = ntfs_init_from_boot(sb, bdev_logical_block_size(bdev),
bdev_nr_bytes(bdev));
if (err)
goto out;
@@ -1260,8 +1272,6 @@ load_root:
goto put_inode_out;
}
- fc->fs_private = NULL;
-
return 0;
put_inode_out:
@@ -1335,7 +1345,7 @@ int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
return 0;
err = blkdev_issue_discard(sb->s_bdev, start >> 9, (end - start) >> 9,
- GFP_NOFS, 0);
+ GFP_NOFS);
if (err == -EOPNOTSUPP)
sbi->flags |= NTFS_FLAGS_NODISCARD;
@@ -1376,7 +1386,7 @@ static const struct fs_context_operations ntfs_context_ops = {
/*
* ntfs_init_fs_context - Initialize spi and opts
*
- * This will called when mount/remount. We will first initiliaze
+ * This will be called on mount/remount. We will first initialize
* options so that if remount we can use just that.
*/
static int ntfs_init_fs_context(struct fs_context *fc)
@@ -1414,7 +1424,6 @@ static int ntfs_init_fs_context(struct fs_context *fc)
mutex_init(&sbi->compress.mtx_lzx);
#endif
- sbi->options = opts;
fc->s_fs_info = sbi;
ok:
fc->fs_private = opts;
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index afd0ddad826f..7de8718c68a9 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -112,13 +112,13 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
return -ENOMEM;
if (!size) {
- ;
+ /* EA info persists, but xattr is empty. Looks like an EA problem. */
} else if (attr_ea->non_res) {
struct runs_tree run;
run_init(&run);
- err = attr_load_runs(attr_ea, ni, &run, NULL);
+ err = attr_load_runs_range(ni, ATTR_EA, NULL, 0, &run, 0, size);
if (!err)
err = ntfs_read_run_nb(sbi, &run, 0, ea_p, size, NULL);
run_close(&run);
@@ -259,7 +259,7 @@ out:
static noinline int ntfs_set_ea(struct inode *inode, const char *name,
size_t name_len, const void *value,
- size_t val_size, int flags)
+ size_t val_size, int flags, bool locked)
{
struct ntfs_inode *ni = ntfs_i(inode);
struct ntfs_sb_info *sbi = ni->mi.sbi;
@@ -278,7 +278,8 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
u64 new_sz;
void *p;
- ni_lock(ni);
+ if (!locked)
+ ni_lock(ni);
run_init(&ea_run);
@@ -443,6 +444,11 @@ update_ea:
/* Delete xattr, ATTR_EA */
ni_remove_attr_le(ni, attr, mi, le);
} else if (attr->non_res) {
+ err = attr_load_runs_range(ni, ATTR_EA, NULL, 0, &ea_run, 0,
+ size);
+ if (err)
+ goto out;
+
err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size, 0);
if (err)
goto out;
@@ -467,7 +473,8 @@ update_ea:
mark_inode_dirty(&ni->vfs_inode);
out:
- ni_unlock(ni);
+ if (!locked)
+ ni_unlock(ni);
run_close(&ea_run);
kfree(ea_all);
@@ -476,8 +483,7 @@ out:
}
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
-static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
- struct inode *inode, int type,
+static struct posix_acl *ntfs_get_acl_ex(struct inode *inode, int type,
int locked)
{
struct ntfs_inode *ni = ntfs_i(inode);
@@ -512,7 +518,7 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
/* Translate extended attribute to acl. */
if (err >= 0) {
- acl = posix_acl_from_xattr(mnt_userns, buf, err);
+ acl = posix_acl_from_xattr(&init_user_ns, buf, err);
} else if (err == -ENODATA) {
acl = NULL;
} else {
@@ -535,37 +541,32 @@ struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu)
if (rcu)
return ERR_PTR(-ECHILD);
- /* TODO: init_user_ns? */
- return ntfs_get_acl_ex(&init_user_ns, inode, type, 0);
+ return ntfs_get_acl_ex(inode, type, 0);
}
static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
struct inode *inode, struct posix_acl *acl,
- int type)
+ int type, bool init_acl)
{
const char *name;
size_t size, name_len;
- void *value = NULL;
- int err = 0;
+ void *value;
+ int err;
int flags;
+ umode_t mode;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
+ mode = inode->i_mode;
switch (type) {
case ACL_TYPE_ACCESS:
- if (acl) {
- umode_t mode = inode->i_mode;
-
+ /* Do not change i_mode if we are in init_acl */
+ if (acl && !init_acl) {
err = posix_acl_update_mode(mnt_userns, inode, &mode,
&acl);
if (err)
- goto out;
-
- if (inode->i_mode != mode) {
- inode->i_mode = mode;
- mark_inode_dirty(inode);
- }
+ return err;
}
name = XATTR_NAME_POSIX_ACL_ACCESS;
name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
@@ -592,17 +593,22 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
value = kmalloc(size, GFP_NOFS);
if (!value)
return -ENOMEM;
- err = posix_acl_to_xattr(mnt_userns, acl, value, size);
+ err = posix_acl_to_xattr(&init_user_ns, acl, value, size);
if (err < 0)
goto out;
flags = 0;
}
- err = ntfs_set_ea(inode, name, name_len, value, size, flags);
+ err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
if (err == -ENODATA && !size)
err = 0; /* Removing non existed xattr. */
- if (!err)
+ if (!err) {
set_cached_acl(inode, type, acl);
+ if (inode->i_mode != mode) {
+ inode->i_mode = mode;
+ mark_inode_dirty(inode);
+ }
+ }
out:
kfree(value);
@@ -616,7 +622,7 @@ out:
int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
struct posix_acl *acl, int type)
{
- return ntfs_set_acl_ex(mnt_userns, inode, acl, type);
+ return ntfs_set_acl_ex(mnt_userns, inode, acl, type, false);
}
/*
@@ -636,19 +642,19 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
if (default_acl) {
err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
- ACL_TYPE_DEFAULT);
+ ACL_TYPE_DEFAULT, true);
posix_acl_release(default_acl);
} else {
inode->i_default_acl = NULL;
}
- if (!acl)
- inode->i_acl = NULL;
- else {
+ if (acl) {
if (!err)
err = ntfs_set_acl_ex(mnt_userns, inode, acl,
- ACL_TYPE_ACCESS);
+ ACL_TYPE_ACCESS, true);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
return err;
@@ -898,9 +904,12 @@ set_new_fa:
}
/* Deal with NTFS extended attribute. */
- err = ntfs_set_ea(inode, name, name_len, value, size, flags);
+ err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
out:
+ inode->i_ctime = current_time(inode);
+ mark_inode_dirty(inode);
+
return err;
}
@@ -913,35 +922,37 @@ int ntfs_save_wsl_perm(struct inode *inode)
{
int err;
__le32 value;
+ struct ntfs_inode *ni = ntfs_i(inode);
- /* TODO: refactor this, so we don't lock 4 times in ntfs_set_ea */
+ ni_lock(ni);
value = cpu_to_le32(i_uid_read(inode));
err = ntfs_set_ea(inode, "$LXUID", sizeof("$LXUID") - 1, &value,
- sizeof(value), 0);
+ sizeof(value), 0, true); /* true == already locked. */
if (err)
goto out;
value = cpu_to_le32(i_gid_read(inode));
err = ntfs_set_ea(inode, "$LXGID", sizeof("$LXGID") - 1, &value,
- sizeof(value), 0);
+ sizeof(value), 0, true);
if (err)
goto out;
value = cpu_to_le32(inode->i_mode);
err = ntfs_set_ea(inode, "$LXMOD", sizeof("$LXMOD") - 1, &value,
- sizeof(value), 0);
+ sizeof(value), 0, true);
if (err)
goto out;
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
value = cpu_to_le32(inode->i_rdev);
err = ntfs_set_ea(inode, "$LXDEV", sizeof("$LXDEV") - 1, &value,
- sizeof(value), 0);
+ sizeof(value), 0, true);
if (err)
goto out;
}
out:
+ ni_unlock(ni);
/* In case of error should we delete all WSL xattr? */
return err;
}
@@ -981,7 +992,7 @@ static bool ntfs_xattr_user_list(struct dentry *dentry)
}
// clang-format off
-static const struct xattr_handler ntfs_xattr_handler = {
+static const struct xattr_handler ntfs_other_xattr_handler = {
.prefix = "",
.get = ntfs_getxattr,
.set = ntfs_setxattr,
@@ -989,7 +1000,11 @@ static const struct xattr_handler ntfs_xattr_handler = {
};
const struct xattr_handler *ntfs_xattr_handlers[] = {
- &ntfs_xattr_handler,
+#ifdef CONFIG_NTFS3_FS_POSIX_ACL
+ &posix_acl_access_xattr_handler,
+ &posix_acl_default_xattr_handler,
+#endif
+ &ntfs_other_xattr_handler,
NULL,
};
// clang-format on