author     Dan Williams <dan.j.williams@intel.com>   2016-06-01 23:07:43 -0700
committer  Dan Williams <dan.j.williams@intel.com>   2016-07-12 15:13:48 -0700
commit     7c8a6a71904d57ae5fb24140f9661ec22ca9ee85 (patch)
tree       83e6e13de0d4eac94859bc8ae40624abf9a41ffb /arch/x86/include/asm/pmem.h
parent     91131dbd1d50637dc338526502a1a2ec5a7f97df (diff)
pmem: kill wmb_pmem()
All users have been replaced with flushing in the pmem driver.
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
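For context, the driver-side flush that replaces wmb_pmem() pairs a store fence with writes to the platform's flush-hint addresses. Below is a minimal kernel-style sketch of that pattern, assuming the nd_region flush-hint plumbing from the companion libnvdimm patches; the helper name pmem_region_flush and the flush_wpq field are illustrative assumptions, not part of this patch:

/*
 * Illustrative sketch only: the kind of flush the pmem driver now
 * issues in place of wmb_pmem().  The names pmem_region_flush and
 * flush_wpq are assumed for illustration, modeled on the companion
 * libnvdimm work in this series.
 */
static void pmem_region_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* sfence: make prior non-temporal stores globally visible */
	wmb();

	/* write each mapping's flush hint to drain posted-write queues */
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd->flush_wpq[i])
			writeq(1, ndrd->flush_wpq[i]);

	/* order the flush-hint writes themselves */
	wmb();
}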
Diffstat (limited to 'arch/x86/include/asm/pmem.h')
-rw-r--r--  arch/x86/include/asm/pmem.h | 36 ++----------------------------------
1 file changed, 2 insertions(+), 34 deletions(-)
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index fbc5e92e1ecc..a8cf2a6b14d9 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -26,8 +26,7 @@
  * @n: length of the copy in bytes
  *
  * Copy data to persistent memory media via non-temporal stores so that
- * a subsequent arch_wmb_pmem() can flush cpu and memory controller
- * write buffers to guarantee durability.
+ * a subsequent pmem driver flush operation will drain posted write queues.
  */
 static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
 		size_t n)
@@ -57,32 +56,12 @@ static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
 }
 
 /**
- * arch_wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of arch_memcpy_to_pmem() operations this drains data
- * from cpu write buffers and any platform (memory controller) buffers
- * to ensure that written data is durable on persistent memory media.
- */
-static inline void arch_wmb_pmem(void)
-{
-	/*
-	 * wmb() to 'sfence' all previous writes such that they are
-	 * architecturally visible to 'pcommit'. Note, that we've
-	 * already arranged for pmem writes to avoid the cache via
-	 * arch_memcpy_to_pmem().
-	 */
-	wmb();
-	pcommit_sfence();
-}
-
-/**
  * arch_wb_cache_pmem - write back a cache range with CLWB
  * @vaddr: virtual start address
  * @size: number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction. This function requires explicit ordering with an
- * arch_wmb_pmem() call.
+ * instruction.
  */
 static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
 {
@@ -113,7 +92,6 @@ static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
  * @i: iterator with source data
  *
  * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- * This function requires explicit ordering with an arch_wmb_pmem() call.
  */
 static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
 		struct iov_iter *i)
@@ -136,7 +114,6 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
  * @size: number of bytes to zero
  *
  * Write zeros into the memory range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with an arch_wmb_pmem() call.
  */
 static inline void arch_clear_pmem(void __pmem *addr, size_t size)
 {
@@ -150,14 +127,5 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
 {
 	clflush_cache_range((void __force *) addr, size);
 }
-
-static inline bool __arch_has_wmb_pmem(void)
-{
-	/*
-	 * We require that wmb() be an 'sfence', that is only guaranteed on
-	 * 64-bit builds
-	 */
-	return static_cpu_has(X86_FEATURE_PCOMMIT);
-}
 #endif /* CONFIG_ARCH_HAS_PMEM_API */
 #endif /* __ASM_X86_PMEM_H__ */
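The header comments above describe two paths to the persistence domain: non-temporal stores that bypass the cache (arch_memcpy_to_pmem()) and cached writes pushed out with CLWB (arch_wb_cache_pmem()). The userspace sketch below reproduces both instruction sequences with compiler intrinsics; it is an illustration only, assuming an x86_64 build with -mclwb, and on ordinary DRAM it demonstrates ordering rather than durability, which would additionally need a DAX mapping of real persistent memory plus a driver-level flush:

/* Build (assumed flags): cc -O2 -mclwb demo.c */
#include <stdlib.h>
#include <string.h>
#include <immintrin.h>

#define CACHELINE 64

/* Path 1: non-temporal stores bypass the cache (arch_memcpy_to_pmem style). */
static void nt_copy(void *dst, const void *src, size_t n)
{
	long long *d = dst;
	const long long *s = src;
	size_t i;

	for (i = 0; i < n / sizeof(long long); i++)
		_mm_stream_si64(&d[i], s[i]);	/* movnti: uncached store */
	_mm_sfence();				/* order the streaming stores */
}

/* Path 2: cached writes followed by CLWB write-back (arch_wb_cache_pmem style). */
static void wb_range(void *addr, size_t n)
{
	char *p = addr;
	size_t i;

	for (i = 0; i < n; i += CACHELINE)
		_mm_clwb(p + i);	/* write back, may retain line in cache */
	_mm_sfence();			/* order the write-backs */
}

int main(void)
{
	long long src[32];
	long long *dst = aligned_alloc(CACHELINE, sizeof(src));

	if (!dst)
		return 1;

	memset(src, 0xab, sizeof(src));
	nt_copy(dst, src, sizeof(src));		/* uncached, fence-ordered path */

	memset(dst, 0xcd, sizeof(src));		/* cached writes ... */
	wb_range(dst, sizeof(src));		/* ... written back with CLWB */

	free(dst);
	return 0;
}

The split mirrors the patch's design point: the per-write fences establish ordering only, while draining platform posted-write queues is now the pmem driver's job rather than a per-call wmb_pmem() responsibility.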