Commit 4e4f00a9 authored by Dan Williams

x86, dax, libnvdimm: remove wb_cache_pmem() indirection

With all handling of the CONFIG_ARCH_HAS_PMEM_API case moved into
libnvdimm and the pmem driver directly, we no longer need to provide
global wrappers and fallbacks for the CONFIG_ARCH_HAS_PMEM_API=n case. The
pmem driver will simply not link to arch_wb_cache_pmem() in that case.
As before, pmem flushing is only defined for x86_64, via
clean_cache_range(), but it is straightforward to add other archs in the
future.

arch_wb_cache_pmem() is an exported function since the pmem module needs
to find it, but it is privately declared in drivers/nvdimm/pmem.h because
there are no consumers outside of the pmem driver.
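
To make the arrangement concrete, here is a minimal userspace sketch (not
part of the patch) of the declare-or-stub pattern: CONFIG_ARCH_HAS_PMEM_API
is mocked with a plain #define, and a printf stands in for the real x86
implementation. With the mock flipped to 0, every call compiles to a no-op
and no external symbol is referenced, which is exactly why there is nothing
left to link.

    /*
     * Userspace sketch only; CONFIG_ARCH_HAS_PMEM_API is mocked,
     * and printf stands in for the arch implementation.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define CONFIG_ARCH_HAS_PMEM_API 1  /* flip to 0 for the =n case */

    #if CONFIG_ARCH_HAS_PMEM_API
    /* Private declaration, as in drivers/nvdimm/pmem.h */
    void arch_wb_cache_pmem(void *addr, size_t size);
    #else
    /* Fallback: a no-op the compiler discards, so no symbol to link */
    static inline void arch_wb_cache_pmem(void *addr, size_t size)
    {
    }
    #endif

    #if CONFIG_ARCH_HAS_PMEM_API
    /* Stands in for the x86 code built on clean_cache_range() */
    void arch_wb_cache_pmem(void *addr, size_t size)
    {
        printf("write back %zu bytes at %p\n", size, addr);
    }
    #endif

    int main(void)
    {
        char buf[64];

        arch_wb_cache_pmem(buf, sizeof(buf));
        return 0;
    }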

Cc: <x86@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Oliver O'Halloran <oohall@gmail.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Parent 81f55870
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -44,27 +44,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 	BUG();
 }
 
-/**
- * arch_wb_cache_pmem - write back a cache range with CLWB
- * @vaddr:	virtual start address
- * @size:	number of bytes to write back
- *
- * Write back a cache range using the CLWB (cache line write back)
- * instruction. Note that @size is internally rounded up to be cache
- * line size aligned.
- */
-static inline void arch_wb_cache_pmem(void *addr, size_t size)
-{
-	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
-	unsigned long clflush_mask = x86_clflush_size - 1;
-	void *vend = addr + size;
-	void *p;
-
-	for (p = (void *)((unsigned long)addr & ~clflush_mask);
-	     p < vend; p += x86_clflush_size)
-		clwb(p);
-}
-
 static inline void arch_invalidate_pmem(void *addr, size_t size)
 {
 	clflush_cache_range(addr, size);
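
The rounding that the removed kernel-doc mentions ("@size is internally
rounded up to be cache line size aligned") is plain pointer arithmetic, and
it survives unchanged inside clean_cache_range(). A standalone userspace
sketch of that arithmetic, with clwb(p) replaced by a printf and 64 standing
in for boot_cpu_data.x86_clflush_size:

    /*
     * The start address is rounded down to a cache-line boundary and
     * the loop steps one line at a time, so the range is effectively
     * rounded up to whole cache lines.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define X86_CLFLUSH_SIZE 64  /* typical x86 cache-line size */

    static void wb_cache_range(void *addr, size_t size)
    {
        uintptr_t clflush_mask = X86_CLFLUSH_SIZE - 1;
        char *vend = (char *)addr + size;
        char *p;

        for (p = (char *)((uintptr_t)addr & ~clflush_mask);
             p < vend; p += X86_CLFLUSH_SIZE)
            printf("clwb %p\n", (void *)p);  /* would be clwb(p) */
    }

    int main(void)
    {
        char buf[256];

        /* A 100-byte range starting 5 bytes into a line still writes
         * back every line it touches, boundary to boundary. */
        wb_cache_range(buf + 5, 100);
        return 0;
    }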
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -97,6 +97,12 @@ static void clean_cache_range(void *addr, size_t size)
 		clwb(p);
 }
 
+void arch_wb_cache_pmem(void *addr, size_t size)
+{
+	clean_cache_range(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
+
 long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 {
 	unsigned long flushed, dest = (unsigned long) dst;
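
Note the shape of this addition: clean_cache_range() stays static to the
file, and only the thin, purpose-named wrapper is exported, which is what a
modular pmem driver resolves at load time. A userspace sketch of that
"static helper, exported wrapper" shape, with EXPORT_SYMBOL_GPL() mocked
since the macro only exists in the kernel:

    #include <stdio.h>
    #include <stddef.h>

    /* Kernel macro; mocked here as a harmless typedef. */
    #define EXPORT_SYMBOL_GPL(sym) typedef int export_stub_##sym

    /* Internal helper, invisible outside this translation unit. */
    static void clean_cache_range(void *addr, size_t size)
    {
        printf("clean %zu bytes at %p\n", size, addr);
    }

    /* Entry point the (modular) pmem driver links against. */
    void arch_wb_cache_pmem(void *addr, size_t size)
    {
        clean_cache_range(addr, size);
    }
    EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

    int main(void)
    {
        char buf[32];

        arch_wb_cache_pmem(buf, sizeof(buf));
        return 0;
    }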
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -245,7 +245,7 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
 		void *addr, size_t size)
 {
-	wb_cache_pmem(addr, size);
+	arch_wb_cache_pmem(addr, size);
 }
 
 static const struct dax_operations pmem_dax_ops = {
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -5,6 +5,14 @@
 #include <linux/pfn_t.h>
 #include <linux/fs.h>
 
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+void arch_wb_cache_pmem(void *addr, size_t size);
+#else
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
+{
+}
+#endif
+
 /* this definition is in it's own header for tools/testing/nvdimm to consume */
 struct pmem_device {
 	/* One contiguous memory region per device */
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -31,11 +31,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 	BUG();
 }
 
-static inline void arch_wb_cache_pmem(void *addr, size_t size)
-{
-	BUG();
-}
-
 static inline void arch_invalidate_pmem(void *addr, size_t size)
 {
 	BUG();
@@ -80,18 +75,4 @@ static inline void invalidate_pmem(void *addr, size_t size)
 	if (arch_has_pmem_api())
 		arch_invalidate_pmem(addr, size);
 }
-
-/**
- * wb_cache_pmem - write back processor cache for PMEM memory range
- * @addr:	virtual start address
- * @size:	number of bytes to write back
- *
- * Write back the processor cache range starting at 'addr' for 'size' bytes.
- * See blkdev_issue_flush() note for memcpy_to_pmem().
- */
-static inline void wb_cache_pmem(void *addr, size_t size)
-{
-	if (arch_has_pmem_api())
-		arch_wb_cache_pmem(addr, size);
-}
 #endif /* __PMEM_H__ */