Diffstat (limited to 'fs/splice.c')
 fs/splice.c | 88
 1 file changed, 42 insertions(+), 46 deletions(-)
diff --git a/fs/splice.c b/fs/splice.c
index 5dbce4dcc1a7..0878b852b355 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -46,45 +46,45 @@
static bool page_cache_pipe_buf_try_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- struct page *page = buf->page;
+ struct folio *folio = page_folio(buf->page);
struct address_space *mapping;
- lock_page(page);
+ folio_lock(folio);
- mapping = page_mapping(page);
+ mapping = folio_mapping(folio);
if (mapping) {
- WARN_ON(!PageUptodate(page));
+ WARN_ON(!folio_test_uptodate(folio));
/*
* At least for ext2 with nobh option, we need to wait on
- * writeback completing on this page, since we'll remove it
+ * writeback completing on this folio, since we'll remove it
* from the pagecache. Otherwise truncate wont wait on the
- * page, allowing the disk blocks to be reused by someone else
+ * folio, allowing the disk blocks to be reused by someone else
* before we actually wrote our data to them. fs corruption
* ensues.
*/
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
- if (page_has_private(page) &&
- !try_to_release_page(page, GFP_KERNEL))
+ if (folio_has_private(folio) &&
+ !filemap_release_folio(folio, GFP_KERNEL))
goto out_unlock;
/*
* If we succeeded in removing the mapping, set LRU flag
* and return good.
*/
- if (remove_mapping(mapping, page)) {
+ if (remove_mapping(mapping, folio)) {
buf->flags |= PIPE_BUF_FLAG_LRU;
return true;
}
}
/*
- * Raced with truncate or failed to remove page from current
+ * Raced with truncate or failed to remove folio from current
* address space, unlock and return failure.
*/
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
return false;
}
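
The hunk above is a mechanical page-to-folio conversion: the folio is resolved once with page_folio(), and each page_* helper is replaced by its folio_* counterpart (lock_page -> folio_lock, PageUptodate -> folio_test_uptodate, wait_on_page_writeback -> folio_wait_writeback, try_to_release_page -> filemap_release_folio, unlock_page -> folio_unlock). A minimal sketch of the same pattern, using a made-up helper name and assuming the usual <linux/pagemap.h> declarations:

/* Hypothetical helper, not part of the patch: wait for writeback on the
 * folio backing @page, using the same lock/test/wait/unlock sequence as
 * page_cache_pipe_buf_try_steal() above. */
static void example_wait_folio_writeback(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_lock(folio);
	if (folio_test_uptodate(folio))
		folio_wait_writeback(folio);
	folio_unlock(folio);
}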
@@ -301,11 +301,9 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
{
struct iov_iter to;
struct kiocb kiocb;
- unsigned int i_head;
int ret;
iov_iter_pipe(&to, READ, pipe, len);
- i_head = to.head;
init_sync_kiocb(&kiocb, in);
kiocb.ki_pos = *ppos;
ret = call_read_iter(in, &kiocb, &to);
@@ -313,9 +311,8 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
*ppos = kiocb.ki_pos;
file_accessed(in);
} else if (ret < 0) {
- to.head = i_head;
- to.iov_offset = 0;
- iov_iter_advance(&to, 0); /* to free what was emitted */
+ /* free what was emitted */
+ pipe_discard_from(pipe, to.start_head);
/*
* callers of ->splice_read() expect -EAGAIN on
* "can't put anything in there", rather than -EFAULT.
@@ -814,17 +811,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
{
struct pipe_inode_info *pipe;
long ret, bytes;
- umode_t i_mode;
size_t len;
int i, flags, more;
/*
- * We require the input being a regular file, as we don't want to
- * randomly drop data for eg socket -> socket splicing. Use the
- * piped splicing for that!
+ * We require the input to be seekable, as we don't want to randomly
+ * drop data for eg socket -> socket splicing. Use the piped splicing
+ * for that!
*/
- i_mode = file_inode(in)->i_mode;
- if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
+ if (unlikely(!(in->f_mode & FMODE_LSEEK)))
return -EINVAL;
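
The check now keys off the open file's mode rather than the inode type: any file opened with FMODE_LSEEK (i.e. any seekable file) is accepted as the splice source, which still covers regular and block files but no longer hard-codes those two types. An illustrative side-by-side of the two predicates (these helper names are made up and do not exist in the kernel):

static bool old_splice_src_ok(struct file *in)
{
	umode_t mode = file_inode(in)->i_mode;

	return S_ISREG(mode) || S_ISBLK(mode);	/* type whitelist */
}

static bool new_splice_src_ok(struct file *in)
{
	return in->f_mode & FMODE_LSEEK;	/* capability check: seekable? */
}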
/*
@@ -1163,39 +1158,40 @@ static int iter_to_pipe(struct iov_iter *from,
};
size_t total = 0;
int ret = 0;
- bool failed = false;
- while (iov_iter_count(from) && !failed) {
+ while (iov_iter_count(from)) {
struct page *pages[16];
- ssize_t copied;
+ ssize_t left;
size_t start;
- int n;
+ int i, n;
- copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start);
- if (copied <= 0) {
- ret = copied;
+ left = iov_iter_get_pages2(from, pages, ~0UL, 16, &start);
+ if (left <= 0) {
+ ret = left;
break;
}
- for (n = 0; copied; n++, start = 0) {
- int size = min_t(int, copied, PAGE_SIZE - start);
- if (!failed) {
- buf.page = pages[n];
- buf.offset = start;
- buf.len = size;
- ret = add_to_pipe(pipe, &buf);
- if (unlikely(ret < 0)) {
- failed = true;
- } else {
- iov_iter_advance(from, ret);
- total += ret;
- }
- } else {
- put_page(pages[n]);
+ n = DIV_ROUND_UP(left + start, PAGE_SIZE);
+ for (i = 0; i < n; i++) {
+ int size = min_t(int, left, PAGE_SIZE - start);
+
+ buf.page = pages[i];
+ buf.offset = start;
+ buf.len = size;
+ ret = add_to_pipe(pipe, &buf);
+ if (unlikely(ret < 0)) {
+ iov_iter_revert(from, left);
+ // this one got dropped by add_to_pipe()
+ while (++i < n)
+ put_page(pages[i]);
+ goto out;
}
- copied -= size;
+ total += ret;
+ left -= size;
+ start = 0;
}
}
+out:
return total ? total : ret;
}
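
The rewritten loop computes the page count up front with DIV_ROUND_UP() instead of decrementing a byte counter until it reaches zero, and on an add_to_pipe() failure it reverts the iterator by the bytes that never made it into the pipe and drops the references on the pages that were never handed over (add_to_pipe() already released the one it rejected). A worked example of the arithmetic, with illustrative numbers:

/*
 * Assume iov_iter_get_pages2() returned left = 5000 bytes starting at
 * offset start = 300 within the first page, with PAGE_SIZE = 4096:
 *
 *	n = DIV_ROUND_UP(left + start, PAGE_SIZE)
 *	  = DIV_ROUND_UP(5300, 4096) = 2 pages
 *
 *	i = 0: size = min(5000, 4096 - 300) = 3796, left becomes 1204
 *	i = 1: size = min(1204, 4096 - 0)   = 1204, left becomes 0
 */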