Diffstat (limited to 'include/linux/uio.h')
-rw-r--r--  include/linux/uio.h  220
1 file changed, 143 insertions, 77 deletions
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 9576fd8158d7..2e3134b14ffd 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/thread_info.h>
-#include <crypto/hash.h>
+#include <linux/mm_types.h>
#include <uapi/linux/uio.h>
struct page;
@@ -20,27 +20,38 @@ struct kvec {
enum iter_type {
/* iter types */
- ITER_IOVEC = 4,
- ITER_KVEC = 8,
- ITER_BVEC = 16,
- ITER_PIPE = 32,
- ITER_DISCARD = 64,
+ ITER_IOVEC,
+ ITER_KVEC,
+ ITER_BVEC,
+ ITER_PIPE,
+ ITER_XARRAY,
+ ITER_DISCARD,
+ ITER_UBUF,
};
-struct iov_iter {
- /*
- * Bit 0 is the read/write bit, set if we're writing.
- * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
- * the caller isn't expecting to drop a page reference when done.
- */
- unsigned int type;
+struct iov_iter_state {
size_t iov_offset;
size_t count;
+ unsigned long nr_segs;
+};
+
+struct iov_iter {
+ u8 iter_type;
+ bool nofault;
+ bool data_source;
+ bool user_backed;
+ union {
+ size_t iov_offset;
+ int last_offset;
+ };
+ size_t count;
union {
const struct iovec *iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
+ struct xarray *xarray;
struct pipe_inode_info *pipe;
+ void __user *ubuf;
};
union {
unsigned long nr_segs;
@@ -48,12 +59,26 @@ struct iov_iter {
unsigned int head;
unsigned int start_head;
};
+ loff_t xarray_start;
};
};
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
- return i->type & ~(READ | WRITE);
+ return i->iter_type;
+}
+
+static inline void iov_iter_save_state(struct iov_iter *iter,
+ struct iov_iter_state *state)
+{
+ state->iov_offset = iter->iov_offset;
+ state->count = iter->count;
+ state->nr_segs = iter->nr_segs;
+}
+
+static inline bool iter_is_ubuf(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_UBUF;
}
static inline bool iter_is_iovec(const struct iov_iter *i)
@@ -81,9 +106,19 @@ static inline bool iov_iter_is_discard(const struct iov_iter *i)
return iov_iter_type(i) == ITER_DISCARD;
}
+static inline bool iov_iter_is_xarray(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_XARRAY;
+}
+
static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
- return i->type & (READ | WRITE);
+ return i->data_source ? WRITE : READ;
+}
+
+static inline bool user_backed_iter(const struct iov_iter *i)
+{
+ return i->user_backed;
}
/*
@@ -112,11 +147,12 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
};
}
-size_t iov_iter_copy_from_user_atomic(struct page *page,
- struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
+ size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
+size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
@@ -125,59 +161,62 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
-bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
-bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
+
+static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
+ size_t bytes, struct iov_iter *i)
+{
+ return copy_page_to_iter(&folio->page, offset, bytes, i);
+}
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, true)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, true))
return _copy_to_iter(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, false))
return _copy_from_iter(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return false;
- else
- return _copy_from_iter_full(addr, bytes, i);
+ size_t copied = copy_from_iter(addr, bytes, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
}
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, false))
return _copy_from_iter_nocache(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return false;
- else
- return _copy_from_iter_full_nocache(addr, bytes, i);
+ size_t copied = copy_from_iter_nocache(addr, bytes, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
}
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
* Note, users like pmem that depend on the stricter semantics of
- * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
+ * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
* IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
* destination is flushed from the cache on return.
*/
@@ -186,31 +225,15 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
-#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
-size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
-#define _copy_to_iter_mcsafe _copy_to_iter
+#define _copy_mc_to_iter _copy_to_iter
#endif
-static __always_inline __must_check
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
- return _copy_from_iter_flushcache(addr, bytes, i);
-}
-
-static __always_inline __must_check
-size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(!check_copy_size(addr, bytes, true)))
- return 0;
- else
- return _copy_to_iter_mcsafe(addr, bytes, i);
-}
-
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
+bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
@@ -222,11 +245,14 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
-ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
+void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
+ loff_t start, size_t count);
+ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
-ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
+ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
@@ -261,28 +287,68 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
+
+static inline int
+iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
+{
+ size_t shorted = 0;
+ int npages;
+
+ if (iov_iter_count(i) > max_bytes) {
+ shorted = iov_iter_count(i) - max_bytes;
+ iov_iter_truncate(i, max_bytes);
+ }
+ npages = iov_iter_npages(i, maxpages);
+ if (shorted)
+ iov_iter_reexpand(i, iov_iter_count(i) + shorted);
+
+ return npages;
+}
+
+struct csum_state {
+ __wsum csum;
+ size_t off;
+};
+
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+
+static __always_inline __must_check
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
+ __wsum *csum, struct iov_iter *i)
+{
+ size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
+}
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i);
-ssize_t import_iovec(int type, const struct iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i);
-
-#ifdef CONFIG_COMPAT
-struct compat_iovec;
-ssize_t compat_import_iovec(int type, const struct compat_iovec __user * uvector,
- unsigned nr_segs, unsigned fast_segs,
- struct iovec **iov, struct iov_iter *i);
-#endif
-
+struct iovec *iovec_from_user(const struct iovec __user *uvector,
+ unsigned long nr_segs, unsigned long fast_segs,
+ struct iovec *fast_iov, bool compat);
+ssize_t import_iovec(int type, const struct iovec __user *uvec,
+ unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
+ struct iov_iter *i);
+ssize_t __import_iovec(int type, const struct iovec __user *uvec,
+ unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
+ struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
struct iovec *iov, struct iov_iter *i);
-int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
- int (*f)(struct kvec *vec, void *context),
- void *context);
+static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
+ void __user *buf, size_t count)
+{
+ WARN_ON(direction & ~(READ | WRITE));
+ *i = (struct iov_iter) {
+ .iter_type = ITER_UBUF,
+ .user_backed = true,
+ .data_source = direction,
+ .ubuf = buf,
+ .count = count
+ };
+}
#endif
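
A minimal usage sketch, not part of the patch above: assuming the post-patch API, a caller can describe a single user buffer with iov_iter_ubuf(), snapshot the iterator with iov_iter_save_state(), and rewind it with iov_iter_restore() after a short copy. The helper name demo_fill_user() and its -EFAULT policy are hypothetical.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t demo_fill_user(void __user *ubuf, size_t len, const void *src)
{
	struct iov_iter iter;
	struct iov_iter_state state;
	size_t copied;

	/* READ: the iterator is the destination of the copy. */
	iov_iter_ubuf(&iter, READ, ubuf, len);
	iov_iter_save_state(&iter, &state);

	copied = copy_to_iter(src, len, &iter);
	if (copied != len) {
		/* Short copy: roll the iterator back to the snapshot. */
		iov_iter_restore(&iter, &state);
		return -EFAULT;
	}
	return copied;
}

Since ITER_UBUF sets user_backed = true, user_backed_iter() is true for it just as for ITER_IOVEC, so code that previously tested iter_is_iovec() to decide whether user pages may need faulting in can test user_backed_iter() instead.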
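
A second illustrative sketch, also not part of the patch: the renamed fault-in helper pairs with copy_page_from_iter_atomic() in the usual buffered-write retry loop. fault_in_iov_iter_readable() returns the number of bytes it could not fault in, and the atomic copy may return 0 if a fault is hit while page faults are disabled. demo_copy_to_page() is hypothetical and omits the page locking, write_begin/write_end and bail-out logic a real caller needs; bytes is assumed to be at most PAGE_SIZE.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/uio.h>

static ssize_t demo_copy_to_page(struct page *page, size_t bytes,
				 struct iov_iter *i)
{
	size_t copied;

	do {
		/*
		 * Number of bytes that could NOT be faulted in; if that is
		 * the whole range, the user buffer is unreadable.
		 */
		if (fault_in_iov_iter_readable(i, bytes) == bytes)
			return -EFAULT;

		/* Copies with page faults disabled; 0 means a fault hit. */
		copied = copy_page_from_iter_atomic(page, 0, bytes, i);
	} while (copied == 0);

	return copied;
}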