// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Red Hat, Inc.
* Copyright (C) 2016-2023 Christoph Hellwig.
*/
#include <linux/bio-integrity.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include "internal.h"
#include "trace.h"
static DEFINE_SPINLOCK(failed_read_lock);
static struct bio_list failed_read_list = BIO_EMPTY_LIST;
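
/*
 * Complete the folio reads attached to @bio, free any integrity metadata,
 * and drop the bio.  Returns the number of folios that were completed.
 */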
static u32 __iomap_read_end_io(struct bio *bio, int error)
{
struct folio_iter fi;
u32 folio_count = 0;
bio_for_each_folio_all(fi, bio) {
iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
folio_count++;
}
if (bio_integrity(bio))
fs_bio_integrity_free(bio);
bio_put(bio);
return folio_count;
}
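
/*
 * Drain the list of failed read bios and complete each one from process
 * context, rescheduling between bios.
 */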
static void iomap_fail_reads(struct work_struct *work)
{
struct bio *bio;
struct bio_list tmp = BIO_EMPTY_LIST;
unsigned long flags;
spin_lock_irqsave(&failed_read_lock, flags);
bio_list_merge_init(&tmp, &failed_read_list);
spin_unlock_irqrestore(&failed_read_lock, flags);
while ((bio = bio_list_pop(&tmp)) != NULL) {
__iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
cond_resched();
}
}

static DECLARE_WORK(failed_read_work, iomap_fail_reads);

static void iomap_fail_buffered_read(struct bio *bio)
{
unsigned long flags;
/*
* Bounce I/O errors to a workqueue to avoid nested i_lock acquisitions
* in the fserror code. The caller no longer owns the bio reference
* after the spinlock drops.
*/
spin_lock_irqsave(&failed_read_lock, flags);
if (bio_list_empty(&failed_read_list))
WARN_ON_ONCE(!schedule_work(&failed_read_work));
bio_list_add(&failed_read_list, bio);
spin_unlock_irqrestore(&failed_read_lock, flags);
}
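
/*
 * Read bio completion handler.  Successful bios are completed directly,
 * failed ones are deferred to the failed read worker.
 */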
static void iomap_read_end_io(struct bio *bio)
{
if (bio->bi_status) {
iomap_fail_buffered_read(bio);
return;
}
__iomap_read_end_io(bio, 0);
}
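
/*
 * Complete a buffered read ioend.  Returns the number of folios processed.
 */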
u32 iomap_finish_ioend_buffered_read(struct iomap_ioend *ioend)
{
return __iomap_read_end_io(&ioend->io_bio, ioend->io_error);
}
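
/*
 * Submit the read bio built up in ctx->read_ctx, attaching integrity
 * metadata first if the mapping requires it.
 */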
static void iomap_bio_submit_read(const struct iomap_iter *iter,
struct iomap_read_folio_ctx *ctx)
{
struct bio *bio = ctx->read_ctx;
if (iter->iomap.flags & IOMAP_F_INTEGRITY)
fs_bio_integrity_alloc(bio);
submit_bio(bio);
}
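
/*
 * Use the bio_set supplied by the read ops if there is one, else fall
 * back to the generic fs_bio_set.
 */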
static struct bio_set *iomap_read_bio_set(struct iomap_read_folio_ctx *ctx)
{
if (ctx->ops && ctx->ops->bio_set)
return ctx->ops->bio_set;
return &fs_bio_set;
}
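
/*
 * Start a new read bio for the folio range at iter->pos, submitting any
 * bio that was already being built in @ctx first.
 */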
static void iomap_read_alloc_bio(const struct iomap_iter *iter,
struct iomap_read_folio_ctx *ctx, size_t plen)
{
const struct iomap *iomap = &iter->iomap;
unsigned int nr_vecs = DIV_ROUND_UP(iomap_length(iter), PAGE_SIZE);
struct bio_set *bio_set = iomap_read_bio_set(ctx);
struct folio *folio = ctx->cur_folio;
gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
gfp_t orig_gfp = gfp;
struct bio *bio;
/* Submit the existing range if there was one. */
if (ctx->read_ctx)
ctx->ops->submit_read(iter, ctx);
/* Same as readahead_gfp_mask: */
if (ctx->rac)
gfp |= __GFP_NORETRY | __GFP_NOWARN;
/*
* If the bio_alloc fails, try it again for a single page to avoid
* having to deal with partial page reads. This emulates what
* do_mpage_read_folio does.
*/
bio = bio_alloc_bioset(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
gfp, bio_set);
if (!bio)
bio = bio_alloc_bioset(iomap->bdev, 1, REQ_OP_READ, orig_gfp,
bio_set);
if (ctx->rac)
bio->bi_opf |= REQ_RAHEAD;
bio->bi_iter.bi_sector = iomap_sector(iomap, iter->pos);
bio->bi_end_io = iomap_read_end_io;
bio_add_folio_nofail(bio, folio, plen,
offset_in_folio(folio, iter->pos));
ctx->read_ctx = bio;
ctx->read_ctx_file_offset = iter->pos;
}
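
/*
 * Add the folio range at iter->pos to the bio being built, or start a new
 * bio if the range is not contiguous with it, the bio would grow too
 * large, or the folio cannot be added.
 */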
int iomap_bio_read_folio_range(const struct iomap_iter *iter,
struct iomap_read_folio_ctx *ctx, size_t plen)
{
struct folio *folio = ctx->cur_folio;
struct bio *bio = ctx->read_ctx;
if (!bio ||
bio_end_sector(bio) != iomap_sector(&iter->iomap, iter->pos) ||
bio->bi_iter.bi_size > iomap_max_bio_size(&iter->iomap) - plen ||
!bio_add_folio(bio, folio, plen, offset_in_folio(folio, iter->pos)))
iomap_read_alloc_bio(iter, ctx, plen);
return 0;
}
EXPORT_SYMBOL_GPL(iomap_bio_read_folio_range);

const struct iomap_read_ops iomap_bio_read_ops = {
.read_folio_range = iomap_bio_read_folio_range,
.submit_read = iomap_bio_submit_read,
};
EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
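
/*
 * Synchronously read a folio range, verifying and freeing any integrity
 * metadata attached for the read.
 */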
int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
struct folio *folio, loff_t pos, size_t len)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
sector_t sector = iomap_sector(srcmap, pos);
struct bio_vec bvec;
struct bio bio;
int error;
bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = sector;
bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
if (srcmap->flags & IOMAP_F_INTEGRITY)
fs_bio_integrity_alloc(&bio);
error = submit_bio_wait(&bio);
if (srcmap->flags & IOMAP_F_INTEGRITY) {
if (!error)
error = fs_bio_integrity_verify(&bio, sector, len);
fs_bio_integrity_free(&bio);
}
return error;
}