path: root/include/linux/pmem.h
author	Dan Williams <dan.j.williams@intel.com>	2016-06-01 23:07:43 -0700
committer	Dan Williams <dan.j.williams@intel.com>	2016-07-12 15:13:48 -0700
commit	7c8a6a71904d57ae5fb24140f9661ec22ca9ee85 (patch)
tree	83e6e13de0d4eac94859bc8ae40624abf9a41ffb /include/linux/pmem.h
parent	libnvdimm, pmem: use nvdimm_flush() for namespace I/O writes (diff)
pmem: kill wmb_pmem()
All users have been replaced with flushing in the pmem driver.

Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
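For reference, an illustrative sketch (not part of this patch) of the pattern a block-layer consumer follows after this change: instead of pairing memcpy_to_pmem() with wmb_pmem(), durability is requested through the block device so the pmem driver can flush on its behalf. The helper name pmem_write_durable() below is invented for illustration, and the three-argument blkdev_issue_flush() form is assumed to match this kernel vintage.

#include <linux/blkdev.h>
#include <linux/pmem.h>

/*
 * Illustrative helper (hypothetical): copy a buffer into a persistent
 * memory mapping, then ask the backing pmem block device to flush,
 * replacing the old memcpy_to_pmem() + wmb_pmem() pairing.
 */
static int pmem_write_durable(struct block_device *bdev,
			      void __pmem *dst, const void *src, size_t n)
{
	/* copy that avoids leaving dirty cache lines for the pmem range */
	memcpy_to_pmem(dst, src, n);

	/*
	 * Durability now comes from the block layer: the flush request
	 * reaches the pmem driver, which drains cpu and platform write
	 * buffers for all prior writes to the device.
	 */
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
}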
Diffstat (limited to 'include/linux/pmem.h')
-rw-r--r--	include/linux/pmem.h	47
1 file changed, 4 insertions(+), 43 deletions(-)
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 57d146fe44dd..9e3ea94b8157 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -26,16 +26,6 @@
* calling these symbols with arch_has_pmem_api() and redirect to the
* implementation in asm/pmem.h.
*/
-static inline bool __arch_has_wmb_pmem(void)
-{
- return false;
-}
-
-static inline void arch_wmb_pmem(void)
-{
- BUG();
-}
-
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
size_t n)
{
@@ -101,20 +91,6 @@ static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
return default_memcpy_from_pmem(dst, src, size);
}
-/**
- * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
- *
- * For a given cpu implementation within an architecture it is possible
- * that wmb_pmem() resolves to a nop. In the case this returns
- * false, pmem api users are unable to ensure durability and may want to
- * fall back to a different data consistency model, or otherwise notify
- * the user.
- */
-static inline bool arch_has_wmb_pmem(void)
-{
- return arch_has_pmem_api() && __arch_has_wmb_pmem();
-}
-
/*
* These defaults seek to offer decent performance and minimize the
* window between i/o completion and writes being durable on media.
@@ -152,7 +128,7 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size)
* being effectively evicted from, or never written to, the processor
* cache hierarchy after the copy completes. After memcpy_to_pmem()
* data may still reside in cpu or platform buffers, so this operation
- * must be followed by a wmb_pmem().
+ * must be followed by a blkdev_issue_flush() on the pmem block device.
*/
static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
{
@@ -163,28 +139,13 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
}
/**
- * wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of memcpy_to_pmem() operations this drains data from
- * cpu write buffers and any platform (memory controller) buffers to
- * ensure that written data is durable on persistent memory media.
- */
-static inline void wmb_pmem(void)
-{
- if (arch_has_wmb_pmem())
- arch_wmb_pmem();
- else
- wmb();
-}
-
-/**
* copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
* @bytes: number of bytes to copy
* @i: iterator with source data
*
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- * This function requires explicit ordering with a wmb_pmem() call.
+ * See blkdev_issue_flush() note for memcpy_to_pmem().
*/
static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
struct iov_iter *i)
@@ -200,7 +161,7 @@ static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
* @size: number of bytes to zero
*
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with a wmb_pmem() call.
+ * See blkdev_issue_flush() note for memcpy_to_pmem().
*/
static inline void clear_pmem(void __pmem *addr, size_t size)
{
@@ -230,7 +191,7 @@ static inline void invalidate_pmem(void __pmem *addr, size_t size)
* @size: number of bytes to write back
*
* Write back the processor cache range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with a wmb_pmem() call.
+ * See blkdev_issue_flush() note for memcpy_to_pmem().
*/
static inline void wb_cache_pmem(void __pmem *addr, size_t size)
{