lib: test_hmm add module param for zone device type
In order to configure the device coherent type in test_hmm, two module parameters must be passed, corresponding to the SPM (special purpose memory) start address of each of the two devices: spm_addr_dev0 & spm_addr_dev1. If no parameters are passed, the device private type is configured.

Link: https://lkml.kernel.org/r/20220715150521.18165-10-alex.sierra@amd.com
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 25b80162d5 (parent 188f48268d)
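For orientation, here is a minimal sketch, not part of this patch, of how the two parameters could be consumed to pick the zone device type: the helper name dmirror_choose_device_type() is hypothetical; only spm_addr_dev0, spm_addr_dev1 and the HMM_DMIRROR_MEMORY_DEVICE_* values come from the patch below.

/*
 * Hypothetical helper (not in this patch): if both SPM start addresses
 * were given, use the coherent device type, otherwise fall back to the
 * default device private type.
 */
static int dmirror_choose_device_type(void)
{
	if (spm_addr_dev0 && spm_addr_dev1)
		return HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
	return HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
}

The patch below only reads mdevice->zone_device_type in dmirror_allocate_chunk(); how that field gets set is outside the hunks shown here.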
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -37,6 +37,16 @@
 #define DEVMEM_CHUNK_SIZE		(256 * 1024 * 1024U)
 #define DEVMEM_CHUNKS_RESERVE		16
 
+static unsigned long spm_addr_dev0;
+module_param(spm_addr_dev0, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev0,
+		"Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
+static unsigned long spm_addr_dev1;
+module_param(spm_addr_dev1, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev1,
+		"Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
 static const struct dev_pagemap_ops dmirror_devmem_ops;
 static const struct mmu_interval_notifier_ops dmirror_min_ops;
 static dev_t dmirror_dev;
@@ -455,28 +465,44 @@ fini:
 	return ret;
 }
 
-static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
+static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
 				   struct page **ppage)
 {
 	struct dmirror_chunk *devmem;
-	struct resource *res;
+	struct resource *res = NULL;
 	unsigned long pfn;
 	unsigned long pfn_first;
 	unsigned long pfn_last;
 	void *ptr;
+	int ret = -ENOMEM;
 
 	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
 	if (!devmem)
-		return false;
+		return ret;
 
-	res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
-				      "hmm_dmirror");
-	if (IS_ERR(res))
+	switch (mdevice->zone_device_type) {
+	case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE:
+		res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+					      "hmm_dmirror");
+		if (IS_ERR_OR_NULL(res))
+			goto err_devmem;
+		devmem->pagemap.range.start = res->start;
+		devmem->pagemap.range.end = res->end;
+		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+		break;
+	case HMM_DMIRROR_MEMORY_DEVICE_COHERENT:
+		devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ?
+							spm_addr_dev0 :
+							spm_addr_dev1;
+		devmem->pagemap.range.end = devmem->pagemap.range.start +
+					    DEVMEM_CHUNK_SIZE - 1;
+		devmem->pagemap.type = MEMORY_DEVICE_COHERENT;
+		break;
+	default:
+		ret = -EINVAL;
 		goto err_devmem;
+	}
 
-	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
-	devmem->pagemap.range.start = res->start;
-	devmem->pagemap.range.end = res->end;
 	devmem->pagemap.nr_range = 1;
 	devmem->pagemap.ops = &dmirror_devmem_ops;
 	devmem->pagemap.owner = mdevice;
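As a concrete illustration of the coherent case above, with a made-up SPM start address: each chunk is exactly DEVMEM_CHUNK_SIZE (256MB) and the range end is inclusive.

/* Illustration only; 0x100000000 is a made-up spm_addr_dev0 value. */
#define EXAMPLE_SPM_ADDR	0x100000000UL
#define EXAMPLE_CHUNK_SIZE	(256 * 1024 * 1024UL)	/* same value as DEVMEM_CHUNK_SIZE */

/* range.start = 0x100000000, range.end = 0x10fffffff (256MB, inclusive) */
unsigned long example_start = EXAMPLE_SPM_ADDR;
unsigned long example_end = EXAMPLE_SPM_ADDR + EXAMPLE_CHUNK_SIZE - 1;

Unlike the private case, no region is requested from iomem_resource here (res stays NULL), which is why the error and teardown paths in the later hunks only call release_mem_region() for MEMORY_DEVICE_PRIVATE.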
@@ -497,10 +523,14 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 		mdevice->devmem_capacity = new_capacity;
 		mdevice->devmem_chunks = new_chunks;
 	}
-
 	ptr = memremap_pages(&devmem->pagemap, numa_node_id());
-	if (IS_ERR(ptr))
+	if (IS_ERR_OR_NULL(ptr)) {
+		if (ptr)
+			ret = PTR_ERR(ptr);
+		else
+			ret = -EFAULT;
 		goto err_release;
+	}
 
 	devmem->mdevice = mdevice;
 	pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
@@ -529,15 +559,17 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 	}
 	spin_unlock(&mdevice->lock);
 
-	return true;
+	return 0;
 
 err_release:
 	mutex_unlock(&mdevice->devmem_lock);
-	release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
+	if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+		release_mem_region(devmem->pagemap.range.start,
+				   range_len(&devmem->pagemap.range));
 err_devmem:
 	kfree(devmem);
 
-	return false;
+	return ret;
 }
 
 static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
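The two hunks that follow adapt the callers. As a standalone illustration of the new convention, dmirror_allocate_chunk() now returns 0 on success and a negative errno on failure instead of true/false; example_grow_device() is a made-up wrapper, not part of the patch.

/* Hypothetical caller, only to show the new 0 / -errno convention. */
static int example_grow_device(struct dmirror_device *mdevice)
{
	struct page *dpage = NULL;
	int ret;

	ret = dmirror_allocate_chunk(mdevice, &dpage);
	if (ret)
		return ret;	/* -ENOMEM, -EINVAL, -EFAULT or a PTR_ERR() value */

	return 0;
}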
@@ -562,7 +594,7 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
 		spin_unlock(&mdevice->lock);
 	} else {
 		spin_unlock(&mdevice->lock);
-		if (!dmirror_allocate_chunk(mdevice, &dpage))
+		if (dmirror_allocate_chunk(mdevice, &dpage))
 			goto error;
 	}
 
@@ -1238,10 +1270,8 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
 	if (ret)
 		return ret;
 
-	/* Build a list of free ZONE_DEVICE private struct pages */
-	dmirror_allocate_chunk(mdevice, NULL);
-
-	return 0;
+	/* Build a list of free ZONE_DEVICE struct pages */
+	return dmirror_allocate_chunk(mdevice, NULL);
 }
 
 static void dmirror_device_remove(struct dmirror_device *mdevice)
@@ -1254,8 +1284,9 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
 				mdevice->devmem_chunks[i];
 
 			memunmap_pages(&devmem->pagemap);
-			release_mem_region(devmem->pagemap.range.start,
-					   range_len(&devmem->pagemap.range));
+			if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+				release_mem_region(devmem->pagemap.range.start,
+						   range_len(&devmem->pagemap.range));
 			kfree(devmem);
 		}
 		kfree(mdevice->devmem_chunks);
--- a/lib/test_hmm_uapi.h
+++ b/lib/test_hmm_uapi.h
@@ -66,6 +66,7 @@ enum {
 enum {
 	/* 0 is reserved to catch uninitialized type fields */
 	HMM_DMIRROR_MEMORY_DEVICE_PRIVATE = 1,
+	HMM_DMIRROR_MEMORY_DEVICE_COHERENT,
 };
 
 #endif /* _LIB_TEST_HMM_UAPI_H */