mm: adjust apply_to_pfn_range interface for dropped token.

The patch "mm/pgtable: drop pgtable_t variable from pte_fn_t functions",
which drops the token, came in via the hmm tree. This caused a lot of
conflicts, but applying this cleanup patch should reduce them to
something easier to handle. Just accept that the token is unused at
this point.
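
For illustration, a leaf callback written against the adjusted interface
now looks roughly like this (the callback name below is made up for the
example and is not part of the patch):

  /* Hypothetical pter_fn_t leaf callback after this change: the
   * pgtable_t token parameter is gone, leaving the pte, the virtual
   * address and the closure.
   */
  static int example_leaf_cb(pte_t *pte, unsigned long addr,
                             struct pfn_range_apply *closure)
  {
          /* Inspect or modify *pte for the page mapped at addr. */
          return 0;
  }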

Signed-off-by: Dave Airlie <airlied@redhat.com>
Dave Airlie 2019-07-15 15:16:20 +10:00
parent f27b99a1ce
commit 6dfc43d3a1
3 changed files with 6 additions and 8 deletions


@@ -2687,7 +2687,7 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
struct pfn_range_apply;
-typedef int (*pter_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
+typedef int (*pter_fn_t)(pte_t *pte, unsigned long addr,
struct pfn_range_apply *closure);
struct pfn_range_apply {
struct mm_struct *mm;


@@ -26,7 +26,6 @@ struct apply_as {
/**
* apply_pt_wrprotect - Leaf pte callback to write-protect a pte
* @pte: Pointer to the pte
- * @token: Page table token, see apply_to_pfn_range()
* @addr: The virtual page address
* @closure: Pointer to a struct pfn_range_apply embedded in a
* struct apply_as
@@ -36,7 +35,7 @@
*
* Return: Always zero.
*/
-static int apply_pt_wrprotect(pte_t *pte, pgtable_t token,
+static int apply_pt_wrprotect(pte_t *pte,
unsigned long addr,
struct pfn_range_apply *closure)
{
@@ -78,7 +77,6 @@ struct apply_as_clean {
/**
* apply_pt_clean - Leaf pte callback to clean a pte
* @pte: Pointer to the pte
- * @token: Page table token, see apply_to_pfn_range()
* @addr: The virtual page address
* @closure: Pointer to a struct pfn_range_apply embedded in a
* struct apply_as_clean
@@ -91,7 +89,7 @@
*
* Return: Always zero.
*/
-static int apply_pt_clean(pte_t *pte, pgtable_t token,
+static int apply_pt_clean(pte_t *pte,
unsigned long addr,
struct pfn_range_apply *closure)
{

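The kernel-doc above describes apply_pt_wrprotect() and apply_pt_clean()
as leaf callbacks whose struct pfn_range_apply argument is embedded in a
larger, caller-private structure (struct apply_as / struct apply_as_clean)
and recovered with container_of(). A minimal sketch of that pattern,
using made-up names, would be:

  /* Hypothetical caller-private closure embedding struct pfn_range_apply. */
  struct example_closure {
          struct pfn_range_apply base;    /* handed to apply_to_pfn_range() */
          unsigned long pages_touched;    /* private bookkeeping */
  };

  static int example_ptefn(pte_t *pte, unsigned long addr,
                           struct pfn_range_apply *closure)
  {
          struct example_closure *ec =
                  container_of(closure, struct example_closure, base);

          ec->pages_touched++;
          return 0;
  }

Before walking a range, the caller would point base.ptefn at
example_ptefn(); the apply_to_pfn_range() call itself is not shown in
this diff, so it is omitted here.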

@@ -2053,7 +2053,7 @@ static int apply_to_pte_range(struct pfn_range_apply *closure, pmd_t *pmd,
token = pmd_pgtable(*pmd);
do {
-err = closure->ptefn(pte++, token, addr, closure);
+err = closure->ptefn(pte++, addr, closure);
if (err)
break;
} while (addr += PAGE_SIZE, addr != end);
@@ -2194,14 +2194,14 @@ struct page_range_apply {
* Callback wrapper to enable use of apply_to_pfn_range for
* the apply_to_page_range interface
*/
-static int apply_to_page_range_wrapper(pte_t *pte, pgtable_t token,
+static int apply_to_page_range_wrapper(pte_t *pte,
unsigned long addr,
struct pfn_range_apply *pter)
{
struct page_range_apply *pra =
container_of(pter, typeof(*pra), pter);
-return pra->fn(pte, token, addr, pra->data);
+return pra->fn(pte, NULL, addr, pra->data);
}
/*
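The wrapper above keeps the original apply_to_page_range() interface
working by always passing NULL where the pte_fn_t token used to be,
relying on the premise of the hmm-tree patch referenced in the commit
message: no remaining pte_fn_t callback looks at the token. A hedged
sketch of a legacy callback that stays correct under this wrapper
(names invented for the example):

  /* Legacy pte_fn_t callback: 'token' must not be dereferenced, since
   * apply_to_page_range_wrapper() now always passes NULL for it.
   */
  static int example_legacy_cb(pte_t *pte, pgtable_t token,
                               unsigned long addr, void *data)
  {
          unsigned long *count = data;    /* private cookie from the caller */

          (*count)++;
          return 0;
  }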