Merge branch 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm fixes from Dan Williams:

 - Two bug fixes for misuse of PAGE_MASK in scatterlist and dma-debug.
   These are tagged for -stable.  The scatterlist impact is potentially
   corrupted dma addresses on HIGHMEM enabled platforms.

 - A minor locking fix for the NFIT hot-add implementation that is new
   in 4.4-rc.  This would only trigger in the case a hot-add raced
   driver removal.

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dma-debug: Fix dma_debug_entry offset calculation
  Revert "scatterlist: use sg_phys()"
  nfit: acpi_nfit_notify(): Do not leave device locked
commit d7637d01be
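The PAGE_MASK misuse behind the scatterlist revert is an integer-width problem: PAGE_MASK is built from an unsigned long, so on 32-bit HIGHMEM/LPAE configurations where phys_addr_t is 64-bit, sg_phys(s) & PAGE_MASK zero-extends a 32-bit mask and silently clears the upper physical-address bits. The dma-debug fix is the complementary confusion: virt & PAGE_MASK keeps the page-aligned part of an address, while the offset within the page needs virt & ~PAGE_MASK. The standalone sketch below only illustrates that arithmetic; the typedefs and page constants are local stand-ins modelled on the kernel's definitions, not its headers.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-ins (assumptions, not kernel headers): model a 32-bit kernel with
 * 64-bit physical addresses (e.g. ARM LPAE with HIGHMEM).  There PAGE_MASK
 * is derived from an unsigned long and is therefore only 32 bits wide.
 */
typedef uint64_t phys_addr_t;
typedef uint32_t kernel_ulong_t;        /* the kernel's 32-bit unsigned long */

#define PAGE_SHIFT      12
#define PAGE_SIZE       ((kernel_ulong_t)1 << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))      /* 0xfffff000, 32-bit wide */

int main(void)
{
        /* A scatterlist element whose backing page lies above 4 GiB. */
        phys_addr_t phys = 0x10002c080ULL;      /* page 0x10002c000 + offset 0x80 */

        /* What the reverted code computed: the 32-bit mask is zero-extended,
         * so the address bits above 4 GiB are silently dropped. */
        printf("phys & PAGE_MASK          = %#llx (truncated)\n",
               (unsigned long long)(phys & PAGE_MASK));

        /* What the revert restores, in spirit: a page-aligned address computed
         * in full phys_addr_t width, as page_to_phys(sg_page(s)) would give. */
        printf("page-aligned, 64-bit mask = %#llx\n",
               (unsigned long long)(phys & ~((phys_addr_t)PAGE_SIZE - 1)));

        /* The dma-debug bug was the opposite mix-up on a virtual address:
         * the offset inside the page is addr & ~PAGE_MASK, not addr & PAGE_MASK. */
        kernel_ulong_t virt = 0xc1234abcU;
        printf("virt & PAGE_MASK          = %#x (page-aligned part, the old bug)\n",
               virt & PAGE_MASK);
        printf("virt & ~PAGE_MASK         = %#x (offset in page, the fix)\n",
               virt & ~PAGE_MASK);
        return 0;
}

Built as-is, the masked physical address prints as 0x2c000 rather than 0x10002c000, which is the kind of corrupted dma address the pull message warns about on HIGHMEM platforms.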
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                 return -ENOMEM;
 
         for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-                phys_addr_t phys = sg_phys(s) & PAGE_MASK;
+                phys_addr_t phys = page_to_phys(sg_page(s));
                 unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
                 if (!is_coherent &&
@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
         /* FIXME this part of code is untested */
         for_each_sg(sgl, sg, nents, i) {
                 sg->dma_address = sg_phys(sg);
-                __dma_sync(sg_phys(sg), sg->length, direction);
+                __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
+                                                        sg->length, direction);
         }
 
         return nents;
@@ -1810,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
         if (!dev->driver) {
                 /* dev->driver may be null if we're being removed */
                 dev_dbg(dev, "%s: no driver found for dev\n", __func__);
-                return;
+                goto out_unlock;
         }
 
         if (!acpi_desc) {
@@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                         sg_res = aligned_nrpages(sg->offset, sg->length);
                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                         sg->dma_length = sg->length;
-                        pteval = (sg_phys(sg) & PAGE_MASK) | prot;
+                        pteval = page_to_phys(sg_page(sg)) | prot;
                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
                 }
 
@@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
         for_each_sg(sglist, sg, nelems, i) {
                 BUG_ON(!sg_page(sg));
-                sg->dma_address = sg_phys(sg);
+                sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
                 sg->dma_length = sg->length;
         }
         return nelems;
@@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
         min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
         for_each_sg(sg, s, nents, i) {
-                phys_addr_t phys = sg_phys(s);
+                phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
 
                 /*
                  * We are mapping on IOMMU page boundaries, so offset within
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 err:
         sg = table->sgl;
         for (i -= 1; i >= 0; i--) {
-                gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+                gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
                               sg->length);
                 sg = sg_next(sg);
         }
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
                                                         DMA_BIDIRECTIONAL);
 
         for_each_sg(table->sgl, sg, table->nents, i) {
-                gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+                gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
                               sg->length);
         }
         chunk_heap->allocated -= allocated_size;
@@ -1464,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
         entry->type      = dma_debug_coherent;
         entry->dev       = dev;
         entry->pfn       = page_to_pfn(virt_to_page(virt));
-        entry->offset    = (size_t) virt & PAGE_MASK;
+        entry->offset    = (size_t) virt & ~PAGE_MASK;
         entry->size      = size;
         entry->dev_addr  = dma_addr;
         entry->direction = DMA_BIDIRECTIONAL;
@@ -1480,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
                 .type           = dma_debug_coherent,
                 .dev            = dev,
                 .pfn            = page_to_pfn(virt_to_page(virt)),
-                .offset         = (size_t) virt & PAGE_MASK,
+                .offset         = (size_t) virt & ~PAGE_MASK,
                 .dev_addr       = addr,
                 .size           = size,
                 .direction      = DMA_BIDIRECTIONAL,