/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#ifndef __ASM_X86_PMEM_H__
#define __ASM_X86_PMEM_H__

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
/**
 * arch_memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Copy data to persistent memory media via non-temporal stores so that
 * a subsequent arch_wmb_pmem() can flush cpu and memory controller
 * write buffers to guarantee durability.
 */
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
                size_t n)
{
        int unwritten;

        /*
         * We are copying between two kernel buffers; if
         * __copy_from_user_inatomic_nocache() returns an error (page
         * fault) we would have already reported a general protection fault
         * before the WARN+BUG.
         */
        unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
                        (void __user *) src, n);
        if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
                                __func__, dst, src, unwritten))
                BUG();
}
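
/**
 * arch_memcpy_from_pmem - read data from persistent memory
 * @dst: destination buffer
 * @src: source persistent memory address
 * @n: length of the copy in bytes
 *
 * On cpus that advertise machine check recovery, use memcpy_mcsafe() so
 * that a media error encountered while reading is reported to the caller
 * as an error return rather than being fatal; otherwise fall back to a
 * plain memcpy() and return 0.
 */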
static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
                size_t n)
{
        if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
                return memcpy_mcsafe(dst, (void __force *) src, n);
        memcpy(dst, (void __force *) src, n);
        return 0;
}

/**
 * arch_wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of arch_memcpy_to_pmem() operations this drains data
 * from cpu write buffers and any platform (memory controller) buffers
 * to ensure that written data is durable on persistent memory media.
 */
static inline void arch_wmb_pmem(void)
{
        /*
         * wmb() to 'sfence' all previous writes such that they are
         * architecturally visible to 'pcommit'. Note that we've
         * already arranged for pmem writes to avoid the cache via
         * arch_memcpy_to_pmem().
         */
        wmb();
        pcommit_sfence();
}
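
/*
 * Illustrative usage sketch (not part of this header's API): the
 * durability pattern the two helpers above implement is a non-temporal
 * copy followed by a fence, e.g. in a hypothetical caller:
 *
 *        arch_memcpy_to_pmem(dst, buf, len);
 *        arch_wmb_pmem();
 *
 * Kernel code normally reaches these helpers through the generic
 * memcpy_to_pmem() / wmb_pmem() wrappers in <linux/pmem.h> rather than
 * calling the arch_ variants directly.
 */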

/**
 * arch_wb_cache_pmem - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. This function requires explicit ordering with an
 * arch_wmb_pmem() call.
 */
static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
{
        u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
        unsigned long clflush_mask = x86_clflush_size - 1;
        void *vaddr = (void __force *)addr;
        void *vend = vaddr + size;
        void *p;

        for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
             p < vend; p += x86_clflush_size)
                clwb(p);
}

/*
 * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
 * iterators, so for other types (bvec & kvec) we must do a cache write-back.
 */
static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
{
        return iter_is_iovec(i) == false;
}

/**
 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr: PMEM destination address
 * @bytes: number of bytes to copy
 * @i: iterator with source data
 *
 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
                struct iov_iter *i)
{
        void *vaddr = (void __force *)addr;
        size_t len;

        /* TODO: skip the write-back by always using non-temporal stores */
        len = copy_from_iter_nocache(vaddr, bytes, i);

        if (__iter_needs_pmem_wb(i))
                arch_wb_cache_pmem(addr, bytes);

        return len;
}

/**
 * arch_clear_pmem - zero a PMEM memory range
 * @addr: virtual start address
 * @size: number of bytes to zero
 *
 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
 * This function requires explicit ordering with an arch_wmb_pmem() call.
 */
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
        void *vaddr = (void __force *)addr;

        memset(vaddr, 0, size);
        arch_wb_cache_pmem(addr, size);
}
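
/**
 * arch_invalidate_pmem - invalidate the cpu cache for a pmem range
 * @addr: virtual start address
 * @size: number of bytes to invalidate
 *
 * Write back and invalidate any cache lines covering the range so that
 * subsequent reads fetch current data from the persistent memory media
 * rather than from stale cached copies.
 */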
static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
{
        clflush_cache_range((void __force *) addr, size);
}
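
/**
 * __arch_has_wmb_pmem - true if arch_wmb_pmem() can guarantee durability
 *
 * arch_wmb_pmem() depends on the 'pcommit' instruction, so only report
 * support when the cpu advertises X86_FEATURE_PCOMMIT.
 */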
static inline bool __arch_has_wmb_pmem(void)
{
        /*
         * We require that wmb() be an 'sfence', that is only guaranteed on
         * 64-bit builds
         */
        return static_cpu_has(X86_FEATURE_PCOMMIT);
}
#endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */