Merge branch 'sg' of git://git.kernel.dk/linux-2.6-block
* 'sg' of git://git.kernel.dk/linux-2.6-block:
  Add CONFIG_DEBUG_SG sg validation
  Change table chaining layout
  Update arch/ to use sg helpers
  Update swiotlb to use sg helpers
  Update net/ to use sg helpers
  Update fs/ to use sg helpers
  [SG] Update drivers to use sg helpers
  [SG] Update crypto/ to sg helpers
  [SG] Update block layer to use sg helpers
  [SG] Add helpers for manipulating SG entries
commit 69450bb5eb
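For orientation, the sketch below (not part of the commit) illustrates the conversion pattern this merge applies tree-wide: open-coded pokes at scatterlist internals are replaced by the new helpers, so an entry may later be a chain pointer rather than a real segment. The function `example_map` and its arguments are hypothetical; the two-argument `sg_set_page()` matches the helper as introduced by this series (later kernels use a four-argument form).

```c
#include <linux/scatterlist.h>

/* Hypothetical snippet: old-style field access vs. the new sg helpers. */
static void example_map(struct scatterlist *sgl, int nents, struct page *pg)
{
    struct scatterlist *sg;
    int i;

    sg_init_table(sgl, nents);    /* instead of memset(sgl, 0, ...) */
    sg_set_page(&sgl[0], pg);     /* instead of sgl[0].page = pg */

    for_each_sg(sgl, sg, nents, i) {
        void *va = sg_virt(sg);       /* page_address(sg->page) + sg->offset */
        struct page *p = sg_page(sg); /* sg->page */
        unsigned long pa = (unsigned long) sg_phys(sg);
                                      /* page_to_phys(sg->page) + sg->offset */

        pr_debug("sg[%d]: va=%p page=%p pa=%#lx len=%u\n",
                 i, va, p, pa, sg->length);
    }
}
```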
@@ -465,7 +465,7 @@ EXPORT_SYMBOL(pci_free_consistent);
    Write dma_length of each leader with the combined lengths of
    the mergable followers.  */
 
-#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
+#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
 #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
 
 static void
@@ -442,7 +442,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
     BUG_ON(dir == DMA_NONE);
 
     for (i = 0; i < nents; i++, sg++) {
-        struct page *page = sg->page;
+        struct page *page = sg_page(sg);
         unsigned int offset = sg->offset;
         unsigned int length = sg->length;
         void *ptr = page_address(page) + offset;
@@ -160,8 +160,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
     BUG_ON(direction == DMA_NONE);
 
     for (i = 0; i < nents; i++, sg++) {
-        sg->dma_address = (dma_addr_t)(page_address(sg->page) +
-                                       sg->offset);
+        sg->dma_address = (dma_addr_t) sg_virt(sg);
 
         invalidate_dcache_range(sg_dma_address(sg),
                                 sg_dma_address(sg) +
@@ -246,7 +246,7 @@ static int reserve_sba_gart = 1;
 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
 static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
 
-#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
+#define sba_sg_address(sg) sg_virt((sg))
 
 #ifdef FULL_VALID_PDIR
 static u64 prefetch_spill_page;
@@ -131,7 +131,7 @@ simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
     stat.fd = desc[sc->device->id];
 
     scsi_for_each_sg(sc, sl, scsi_sg_count(sc), i) {
-        req.addr = __pa(page_address(sl->page) + sl->offset);
+        req.addr = __pa(sg_virt(sl));
         req.len  = sl->length;
         if (DBG)
             printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n",

@@ -212,7 +212,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
         if (!len)
             break;
         thislen = min(len, slp->length);
-        memcpy(page_address(slp->page) + slp->offset, buf, thislen);
+        memcpy(sg_virt(slp), buf, thislen);
         len -= thislen;
     }
 }
@@ -16,7 +16,7 @@
 #include <asm/sn/pcidev.h>
 #include <asm/sn/sn_sal.h>
 
-#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
 #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
 
 /**
@@ -121,7 +121,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
     int i;
 
     for (i = 0; i < nents; sg++, i++) {
-        sg->dma_address = page_to_phys(sg->page) + sg->offset;
+        sg->dma_address = sg_phys(sg);
         dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
     }
     return nents;
@@ -165,12 +165,11 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
     for (i = 0; i < nents; i++, sg++) {
         unsigned long addr;
 
-        addr = (unsigned long) page_address(sg->page);
+        addr = (unsigned long) sg_virt(sg);
         if (!plat_device_is_coherent(dev) && addr)
-            __dma_sync(addr + sg->offset, sg->length, direction);
+            __dma_sync(addr, sg->length, direction);
         sg->dma_address = plat_map_dma_mem(dev,
-                                           (void *)(addr + sg->offset),
-                                           sg->length);
+                                           (void *)addr, sg->length);
     }
 
     return nents;

@@ -223,10 +222,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
     for (i = 0; i < nhwentries; i++, sg++) {
         if (!plat_device_is_coherent(dev) &&
             direction != DMA_TO_DEVICE) {
-            addr = (unsigned long) page_address(sg->page);
+            addr = (unsigned long) sg_virt(sg);
             if (addr)
-                __dma_sync(addr + sg->offset, sg->length,
-                           direction);
+                __dma_sync(addr, sg->length, direction);
         }
         plat_unmap_dma_mem(sg->dma_address);
     }

@@ -304,7 +302,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
     /* Make sure that gcc doesn't leave the empty loop body.  */
     for (i = 0; i < nelems; i++, sg++) {
         if (cpu_is_noncoherent_r10000(dev))
-            __dma_sync((unsigned long)page_address(sg->page),
+            __dma_sync((unsigned long)page_address(sg_page(sg)),
                        sg->length, direction);
         plat_unmap_dma_mem(sg->dma_address);
     }

@@ -322,7 +320,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
     /* Make sure that gcc doesn't leave the empty loop body.  */
     for (i = 0; i < nelems; i++, sg++) {
         if (!plat_device_is_coherent(dev))
-            __dma_sync((unsigned long)page_address(sg->page),
+            __dma_sync((unsigned long)page_address(sg_page(sg)),
                        sg->length, direction);
         plat_unmap_dma_mem(sg->dma_address);
     }
@@ -161,8 +161,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
     int i;
 
     for_each_sg(sgl, sg, nents, i) {
-        sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
-            dma_direct_offset;
+        sg->dma_address = sg_phys(sg) | dma_direct_offset;
         sg->dma_length = sg->length;
     }
 
@@ -102,8 +102,7 @@ static int ibmebus_map_sg(struct device *dev,
     int i;
 
     for_each_sg(sgl, sg, nents, i) {
-        sg->dma_address = (dma_addr_t)page_address(sg->page)
-            + sg->offset;
+        sg->dma_address = (dma_addr_t) sg_virt(sg);
         sg->dma_length = sg->length;
     }
 
@@ -307,7 +307,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
             continue;
         }
         /* Allocate iommu entries for that segment */
-        vaddr = (unsigned long)page_address(s->page) + s->offset;
+        vaddr = (unsigned long) sg_virt(s);
         npages = iommu_num_pages(vaddr, slen);
         entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
 
@@ -628,9 +628,8 @@ static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
     int i;
 
     for_each_sg(sgl, sg, nents, i) {
-        int result = ps3_dma_map(dev->d_region,
-            page_to_phys(sg->page) + sg->offset, sg->length,
-            &sg->dma_address, 0);
+        int result = ps3_dma_map(dev->d_region, sg_phys(sg),
+                                 sg->length, &sg->dma_address, 0);
 
         if (result) {
             pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -727,9 +727,8 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     BUG_ON(direction == PCI_DMA_NONE);
     /* IIep is write-through, not flushing. */
     for_each_sg(sgl, sg, nents, n) {
-        BUG_ON(page_address(sg->page) == NULL);
-        sg->dvma_address =
-            virt_to_phys(page_address(sg->page)) + sg->offset;
+        BUG_ON(page_address(sg_page(sg)) == NULL);
+        sg->dvma_address = virt_to_phys(sg_virt(sg));
         sg->dvma_length = sg->length;
     }
     return nents;

@@ -748,9 +747,9 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     BUG_ON(direction == PCI_DMA_NONE);
     if (direction != PCI_DMA_TODEVICE) {
         for_each_sg(sgl, sg, nents, n) {
-            BUG_ON(page_address(sg->page) == NULL);
+            BUG_ON(page_address(sg_page(sg)) == NULL);
             mmu_inval_dma_area(
-                (unsigned long) page_address(sg->page),
+                (unsigned long) page_address(sg_page(sg)),
                 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
         }
     }

@@ -798,9 +797,9 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
     BUG_ON(direction == PCI_DMA_NONE);
     if (direction != PCI_DMA_TODEVICE) {
         for_each_sg(sgl, sg, nents, n) {
-            BUG_ON(page_address(sg->page) == NULL);
+            BUG_ON(page_address(sg_page(sg)) == NULL);
             mmu_inval_dma_area(
-                (unsigned long) page_address(sg->page),
+                (unsigned long) page_address(sg_page(sg)),
                 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
         }
     }

@@ -814,9 +813,9 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
     BUG_ON(direction == PCI_DMA_NONE);
     if (direction != PCI_DMA_TODEVICE) {
         for_each_sg(sgl, sg, nents, n) {
-            BUG_ON(page_address(sg->page) == NULL);
+            BUG_ON(page_address(sg_page(sg)) == NULL);
             mmu_inval_dma_area(
-                (unsigned long) page_address(sg->page),
+                (unsigned long) page_address(sg_page(sg)),
                 (sg->length + PAGE_SIZE-1) & PAGE_MASK);
         }
     }
@@ -144,7 +144,7 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus
     spin_lock_irqsave(&iounit->lock, flags);
     while (sz != 0) {
         --sz;
-        sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length);
+        sg->dvma_address = iounit_get_area(iounit, sg_virt(sg), sg->length);
         sg->dvma_length = sg->length;
         sg = sg_next(sg);
     }
@@ -238,7 +238,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb
     while (sz != 0) {
         --sz;
         n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-        sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+        sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
         sg->dvma_length = (__u32) sg->length;
         sg = sg_next(sg);
     }

@@ -252,7 +252,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu
     while (sz != 0) {
         --sz;
         n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-        sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+        sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
         sg->dvma_length = (__u32) sg->length;
         sg = sg_next(sg);
     }

@@ -273,7 +273,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
          * XXX Is this a good assumption?
          * XXX What if someone else unmaps it here and races us?
          */
-        if ((page = (unsigned long) page_address(sg->page)) != 0) {
+        if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
             for (i = 0; i < n; i++) {
                 if (page != oldpage) {  /* Already flushed? */
                     flush_page_for_dma(page);

@@ -283,7 +283,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
             }
         }
 
-        sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+        sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
         sg->dvma_length = (__u32) sg->length;
         sg = sg_next(sg);
     }
@@ -1228,7 +1228,7 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *
 {
     while (sz != 0) {
         --sz;
-        sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length);
+        sg->dvma_address = (__u32)sun4c_lockarea(sg_virt(sg), sg->length);
         sg->dvma_length = sg->length;
         sg = sg_next(sg);
     }
@@ -472,8 +472,7 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-#define SG_ENT_PHYS_ADDRESS(SG) \
-    (__pa(page_address((SG)->page)) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
 
 static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                     int nused, int nelems,

@@ -565,9 +564,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
     /* Fast path single entry scatterlists. */
     if (nelems == 1) {
         sglist->dma_address =
-            dma_4u_map_single(dev,
-                              (page_address(sglist->page) +
-                               sglist->offset),
+            dma_4u_map_single(dev, sg_virt(sglist),
                               sglist->length, direction);
         if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
             return 0;
@@ -73,7 +73,7 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
 
     daddr = dma_sg->dma_address;
     sglen = sg->length;
-    sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
+    sgaddr = (unsigned long) sg_virt(sg);
     while (dlen > 0) {
         unsigned long paddr;
 

@@ -123,7 +123,7 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
             sg = sg_next(sg);
             if (--nents <= 0)
                 break;
-            sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
+            sgaddr = (unsigned long) sg_virt(sg);
             sglen = sg->length;
         }
         if (dlen < 0) {

@@ -191,7 +191,7 @@ void verify_sglist(struct scatterlist *sglist, int nents, iopte_t *iopte, int np
         printk("sg(%d): page_addr(%p) off(%x) length(%x) "
                "dma_address[%016x] dma_length[%016x]\n",
                i,
-               page_address(sg->page), sg->offset,
+               page_address(sg_page(sg)), sg->offset,
                sg->length,
                sg->dma_address, sg->dma_length);
     }

@@ -207,15 +207,14 @@ unsigned long prepare_sg(struct scatterlist *sg, int nents)
     unsigned long prev;
     u32 dent_addr, dent_len;
 
-    prev  = (unsigned long) (page_address(sg->page) + sg->offset);
+    prev  = (unsigned long) sg_virt(sg);
     prev += (unsigned long) (dent_len = sg->length);
-    dent_addr = (u32) ((unsigned long)(page_address(sg->page) + sg->offset)
-                       & (IO_PAGE_SIZE - 1UL));
+    dent_addr = (u32) ((unsigned long)(sg_virt(sg)) & (IO_PAGE_SIZE - 1UL));
     while (--nents) {
         unsigned long addr;
 
         sg = sg_next(sg);
-        addr = (unsigned long) (page_address(sg->page) + sg->offset);
+        addr = (unsigned long) sg_virt(sg);
         if (! VCONTIG(prev, addr)) {
             dma_sg->dma_address = dent_addr;
             dma_sg->dma_length = dent_len;
@@ -2057,7 +2057,7 @@ static void fill_cookies(struct cookie_state *sp, unsigned long pa,
 
 static int sg_count_one(struct scatterlist *sg)
 {
-    unsigned long base = page_to_pfn(sg->page) << PAGE_SHIFT;
+    unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
     long len = sg->length;
 
     if ((sg->offset | len) & (8UL - 1))
@@ -365,8 +365,7 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-#define SG_ENT_PHYS_ADDRESS(SG) \
-    (__pa(page_address((SG)->page)) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
 
 static long fill_sg(long entry, struct device *dev,
                     struct scatterlist *sg,

@@ -477,9 +476,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
     /* Fast path single entry scatterlists. */
     if (nelems == 1) {
         sglist->dma_address =
-            dma_4v_map_single(dev,
-                              (page_address(sglist->page) +
-                               sglist->offset),
+            dma_4v_map_single(dev, sg_virt(sglist),
                               sglist->length, direction);
         if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
             return 0;
@@ -1115,7 +1115,7 @@ static void do_ubd_request(struct request_queue *q)
             }
             prepare_request(req, io_req,
                             (unsigned long long) req->sector << 9,
-                            sg->offset, sg->length, sg->page);
+                            sg->offset, sg->length, sg_page(sg));
 
             last_sectors = sg->length >> 9;
             n = os_write_file(thread_fd, &io_req,
@@ -411,8 +411,10 @@ static int calgary_nontranslate_map_sg(struct device* dev,
     int i;
 
     for_each_sg(sg, s, nelems, i) {
-        BUG_ON(!s->page);
-        s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+        struct page *p = sg_page(s);
+
+        BUG_ON(!p);
+        s->dma_address = virt_to_bus(sg_virt(s));
         s->dma_length = s->length;
     }
     return nelems;

@@ -432,9 +434,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
         return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
 
     for_each_sg(sg, s, nelems, i) {
-        BUG_ON(!s->page);
+        BUG_ON(!sg_page(s));
 
-        vaddr = (unsigned long)page_address(s->page) + s->offset;
+        vaddr = (unsigned long) sg_virt(s);
         npages = num_dma_pages(vaddr, s->length);
 
         entry = iommu_range_alloc(tbl, npages);
@@ -302,7 +302,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 #endif
 
     for_each_sg(sg, s, nents, i) {
-        unsigned long addr = page_to_phys(s->page) + s->offset;
+        unsigned long addr = sg_phys(s);
         if (nonforced_iommu(dev, addr, s->length)) {
             addr = dma_map_area(dev, addr, s->length, dir);
             if (addr == bad_dma_address) {

@@ -397,7 +397,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
     start_sg = sgmap = sg;
     ps = NULL; /* shut up gcc */
     for_each_sg(sg, s, nents, i) {
-        dma_addr_t addr = page_to_phys(s->page) + s->offset;
+        dma_addr_t addr = sg_phys(s);
         s->dma_address = addr;
         BUG_ON(s->length == 0);
 
@@ -62,8 +62,8 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
     int i;
 
     for_each_sg(sg, s, nents, i) {
-        BUG_ON(!s->page);
-        s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+        BUG_ON(!sg_page(s));
+        s->dma_address = virt_to_bus(sg_virt(s));
         if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
             return 0;
         s->dma_length = s->length;
@@ -1354,8 +1354,9 @@ new_segment:
         else
             sg = sg_next(sg);
 
-        memset(sg, 0, sizeof(*sg));
-        sg->page = bvec->bv_page;
+        sg_dma_len(sg) = 0;
+        sg_dma_address(sg) = 0;
+        sg_set_page(sg, bvec->bv_page);
         sg->length = nbytes;
         sg->offset = bvec->bv_offset;
         nsegs++;

@@ -1363,6 +1364,9 @@ new_segment:
         bvprv = bvec;
     } /* segments in rq */
 
+    if (sg)
+        __sg_mark_end(sg);
+
     return nsegs;
 }
 
@@ -41,7 +41,7 @@ static int update2(struct hash_desc *desc,
         return 0;
 
     for (;;) {
-        struct page *pg = sg->page;
+        struct page *pg = sg_page(sg);
         unsigned int offset = sg->offset;
         unsigned int l = sg->length;
 
@@ -159,7 +159,8 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
     desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
     sg_set_buf(sg1, ipad, bs);
-    sg1[1].page = (void *)sg;
+
+    sg_set_page(&sg[1], (void *) sg);
     sg1[1].length = 0;
     sg_set_buf(sg2, opad, bs + ds);
 
@@ -54,7 +54,7 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
     if (out) {
         struct page *page;
 
-        page = walk->sg->page + ((walk->offset - 1) >> PAGE_SHIFT);
+        page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
         flush_dcache_page(page);
     }
 
@@ -22,13 +22,13 @@
 
 static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
 {
-    return (++sg)->length ? sg : (void *)sg->page;
+    return (++sg)->length ? sg : (void *) sg_page(sg);
 }
 
 static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
                                                 struct scatter_walk *walk_out)
 {
-    return !(((walk_in->sg->page - walk_out->sg->page) << PAGE_SHIFT) +
+    return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
              (int)(walk_in->offset - walk_out->offset));
 }
 

@@ -60,7 +60,7 @@ static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
 
 static inline struct page *scatterwalk_page(struct scatter_walk *walk)
 {
-    return walk->sg->page + (walk->offset >> PAGE_SHIFT);
+    return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
 }
 
 static inline void scatterwalk_unmap(void *vaddr, int out)
@@ -317,7 +317,7 @@ static void test_cipher(char *algo, int enc,
                 goto out;
             }
 
-            q = kmap(sg[0].page) + sg[0].offset;
+            q = kmap(sg_page(&sg[0])) + sg[0].offset;
             hexdump(q, cipher_tv[i].rlen);
 
             printk("%s\n",

@@ -390,7 +390,7 @@ static void test_cipher(char *algo, int enc,
             temp = 0;
             for (k = 0; k < cipher_tv[i].np; k++) {
                 printk("page %u\n", k);
-                q = kmap(sg[k].page) + sg[k].offset;
+                q = kmap(sg_page(&sg[k])) + sg[k].offset;
                 hexdump(q, cipher_tv[i].tap[k]);
                 printk("%s\n",
                        memcmp(q, cipher_tv[i].result + temp,
@@ -120,7 +120,7 @@ static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
 
     do {
 
-        struct page *pg = sg[i].page;
+        struct page *pg = sg_page(&sg[i]);
         unsigned int offset = sg[i].offset;
         unsigned int slen = sg[i].length;
 
@@ -4296,7 +4296,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
         sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
         if (pad_buf) {
             struct scatterlist *psg = &qc->pad_sgent;
-            void *addr = kmap_atomic(psg->page, KM_IRQ0);
+            void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
             memcpy(addr + psg->offset, pad_buf, qc->pad_len);
             kunmap_atomic(addr, KM_IRQ0);
         }

@@ -4686,11 +4686,11 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
          * data in this function or read data in ata_sg_clean.
          */
         offset = lsg->offset + lsg->length - qc->pad_len;
-        psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+        sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT));
         psg->offset = offset_in_page(offset);
 
         if (qc->tf.flags & ATA_TFLAG_WRITE) {
-            void *addr = kmap_atomic(psg->page, KM_IRQ0);
+            void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
             memcpy(pad_buf, addr + psg->offset, qc->pad_len);
             kunmap_atomic(addr, KM_IRQ0);
         }

@@ -4836,7 +4836,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
     if (qc->curbytes == qc->nbytes - qc->sect_size)
         ap->hsm_task_state = HSM_ST_LAST;
 
-    page = qc->cursg->page;
+    page = sg_page(qc->cursg);
     offset = qc->cursg->offset + qc->cursg_ofs;
 
     /* get the current page and offset */

@@ -4988,7 +4988,7 @@ next_sg:
 
     sg = qc->cursg;
 
-    page = sg->page;
+    page = sg_page(sg);
     offset = sg->offset + qc->cursg_ofs;
 
     /* get the current page and offset */
@@ -1544,7 +1544,7 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
     struct scatterlist *sg = scsi_sglist(cmd);
 
     if (sg) {
-        buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+        buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
         buflen = sg->length;
     } else {
         buf = NULL;
@@ -345,6 +345,7 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
       Command->V1.ScatterGatherList =
         (DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU;
       Command->V1.ScatterGatherListDMA = ScatterGatherDMA;
+      sg_init_table(Command->cmd_sglist, DAC960_V1_ScatterGatherLimit);
     } else {
       Command->cmd_sglist = Command->V2.ScatterList;
       Command->V2.ScatterGatherList =

@@ -353,6 +354,7 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
       Command->V2.RequestSense =
         (DAC960_SCSI_RequestSense_T *)RequestSenseCPU;
       Command->V2.RequestSenseDMA = RequestSenseDMA;
+      sg_init_table(Command->cmd_sglist, DAC960_V2_ScatterGatherLimit);
     }
   }
   return true;
@@ -2610,7 +2610,7 @@ static void do_cciss_request(struct request_queue *q)
            (int)creq->nr_sectors);
 #endif /* CCISS_DEBUG */
 
-    memset(tmp_sg, 0, sizeof(tmp_sg));
+    sg_init_table(tmp_sg, MAXSGENTRIES);
     seg = blk_rq_map_sg(q, creq, tmp_sg);
 
     /* get the DMA records for the setup */

@@ -2621,7 +2621,7 @@ static void do_cciss_request(struct request_queue *q)
 
     for (i = 0; i < seg; i++) {
         c->SG[i].Len = tmp_sg[i].length;
-        temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
+        temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
                                           tmp_sg[i].offset,
                                           tmp_sg[i].length, dir);
         c->SG[i].Addr.lower = temp64.val32.lower;
@@ -918,6 +918,7 @@ queue_next:
 DBGPX(
     printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
 );
+    sg_init_table(tmp_sg, SG_MAX);
     seg = blk_rq_map_sg(q, creq, tmp_sg);
 
     /* Now do all the DMA Mappings */

@@ -929,7 +930,7 @@ DBGPX(
     {
         c->req.sg[i].size = tmp_sg[i].length;
         c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
-                                         tmp_sg[i].page,
+                                         sg_page(&tmp_sg[i]),
                                          tmp_sg[i].offset,
                                          tmp_sg[i].length, dir);
     }
@@ -26,6 +26,7 @@
 #include <linux/crypto.h>
 #include <linux/blkdev.h>
 #include <linux/loop.h>
+#include <linux/scatterlist.h>
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
 

@@ -119,14 +120,17 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
         .tfm = tfm,
         .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
     };
-    struct scatterlist sg_out = { NULL, };
-    struct scatterlist sg_in = { NULL, };
+    struct scatterlist sg_out;
+    struct scatterlist sg_in;
 
     encdec_cbc_t encdecfunc;
     struct page *in_page, *out_page;
     unsigned in_offs, out_offs;
     int err;
 
+    sg_init_table(&sg_out, 1);
+    sg_init_table(&sg_in, 1);
+
     if (cmd == READ) {
         in_page = raw_page;
         in_offs = raw_off;

@@ -146,11 +150,11 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
         u32 iv[4] = { 0, };
         iv[0] = cpu_to_le32(IV & 0xffffffff);
 
-        sg_in.page = in_page;
+        sg_set_page(&sg_in, in_page);
         sg_in.offset = in_offs;
         sg_in.length = sz;
 
-        sg_out.page = out_page;
+        sg_set_page(&sg_out, out_page);
         sg_out.offset = out_offs;
         sg_out.length = sz;
 
@@ -388,6 +388,7 @@ static int __send_request(struct request *req)
         op = VD_OP_BWRITE;
     }
 
+    sg_init_table(sg, port->ring_cookies);
     nsg = blk_rq_map_sg(req->q, req, sg);
 
     len = 0;
@@ -522,6 +522,7 @@ static struct carm_request *carm_get_request(struct carm_host *host)
         host->n_msgs++;
 
         assert(host->n_msgs <= CARM_MAX_REQ);
+        sg_init_table(crq->sg, CARM_MAX_REQ_SG);
         return crq;
     }
 
@@ -25,6 +25,7 @@
 #include <linux/usb_usual.h>
 #include <linux/blkdev.h>
 #include <linux/timer.h>
+#include <linux/scatterlist.h>
 #include <scsi/scsi.h>
 
 #define DRV_NAME "ub"

@@ -656,6 +657,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
     if ((cmd = ub_get_cmd(lun)) == NULL)
         return -1;
     memset(cmd, 0, sizeof(struct ub_scsi_cmd));
+    sg_init_table(cmd->sgv, UB_MAX_REQ_SG);
 
     blkdev_dequeue_request(rq);
 

@@ -1309,9 +1311,8 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
     else
         pipe = sc->send_bulk_pipe;
     sc->last_pipe = pipe;
-    usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
-        page_address(sg->page) + sg->offset, sg->length,
-        ub_urb_complete, sc);
+    usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
+        sg->length, ub_urb_complete, sc);
     sc->work_urb.actual_length = 0;
     sc->work_urb.error_count = 0;
     sc->work_urb.status = 0;

@@ -1427,7 +1428,7 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
     scmd->state = UB_CMDST_INIT;
     scmd->nsg = 1;
     sg = &scmd->sgv[0];
-    sg->page = virt_to_page(sc->top_sense);
+    sg_set_page(sg, virt_to_page(sc->top_sense));
     sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
     sg->length = UB_SENSE_SIZE;
     scmd->len = UB_SENSE_SIZE;

@@ -1863,7 +1864,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
     cmd->state = UB_CMDST_INIT;
     cmd->nsg = 1;
     sg = &cmd->sgv[0];
-    sg->page = virt_to_page(p);
+    sg_set_page(sg, virt_to_page(p));
     sg->offset = (unsigned long)p & (PAGE_SIZE-1);
     sg->length = 8;
     cmd->len = 8;
@@ -41,6 +41,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/completion.h>
 #include <linux/device.h>
+#include <linux/scatterlist.h>
 
 #include <asm/uaccess.h>
 #include <asm/vio.h>

@@ -270,6 +271,7 @@ static int send_request(struct request *req)
     d = req->rq_disk->private_data;
 
     /* Now build the scatter-gather list */
+    sg_init_table(sg, VIOMAXBLOCKDMA);
     nsg = blk_rq_map_sg(req->q, req, sg);
     nsg = dma_map_sg(d->dev, sg, nsg, direction);
 
@@ -935,11 +935,11 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
          * than two possibly non-adjacent physical 4kB pages.
          */
         /* group sequential buffers into one large buffer */
-        addr = page_to_phys(sg->page) + sg->offset;
+        addr = sg_phys(sg);
         size = sg_dma_len(sg);
         while (--i) {
             sg = sg_next(sg);
-            if ((addr + size) != page_to_phys(sg->page) + sg->offset)
+            if ((addr + size) != sg_phys(sg))
                 break;
             size += sg_dma_len(sg);
         }
@@ -1317,12 +1317,14 @@ static int hwif_init(ide_hwif_t *hwif)
     if (!hwif->sg_max_nents)
         hwif->sg_max_nents = PRD_ENTRIES;
 
-    hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
+    hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
                              GFP_KERNEL);
     if (!hwif->sg_table) {
         printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
         goto out;
     }
 
+    sg_init_table(hwif->sg_table, hwif->sg_max_nents);
+
     if (init_irq(hwif) == 0)
         goto done;
@@ -261,7 +261,7 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
         hwif->cursg = sg;
     }
 
-    page = cursg->page;
+    page = sg_page(cursg);
     offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
 
     /* get the current page and offset */
@@ -276,8 +276,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
 
         if (iswrite) {
             if(!put_source_flags(ahwif->tx_chan,
-                                 (void*)(page_address(sg->page)
-                                         + sg->offset),
+                                 (void*) sg_virt(sg),
                                  tc, flags)) {
                 printk(KERN_ERR "%s failed %d\n",
                        __FUNCTION__, __LINE__);

@@ -285,8 +284,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
         } else
         {
             if(!put_dest_flags(ahwif->rx_chan,
-                               (void*)(page_address(sg->page)
-                                       + sg->offset),
+                               (void*) sg_virt(sg),
                                tc, flags)) {
                 printk(KERN_ERR "%s failed %d\n",
                        __FUNCTION__, __LINE__);
@@ -111,7 +111,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
         unsigned long va =
             (unsigned long)dma->kvirt + (i << PAGE_SHIFT);
 
-        dma->sglist[i].page = vmalloc_to_page((void *)va);
+        sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va));
         dma->sglist[i].length = PAGE_SIZE;
     }
 
@@ -1466,7 +1466,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
         cmd->dma_size = sgpnt[0].length;
         cmd->dma_type = CMD_DMA_PAGE;
         cmd->cmd_dma = dma_map_page(hi->host->device.parent,
-                                    sgpnt[0].page, sgpnt[0].offset,
+                                    sg_page(&sgpnt[0]), sgpnt[0].offset,
                                     cmd->dma_size, cmd->dma_dir);
 
         orb->data_descriptor_lo = cmd->cmd_dma;
@@ -55,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
         ib_dma_unmap_sg(dev, chunk->page_list,
                         chunk->nents, DMA_BIDIRECTIONAL);
         for (i = 0; i < chunk->nents; ++i) {
+            struct page *page = sg_page(&chunk->page_list[i]);
+
             if (umem->writable && dirty)
-                set_page_dirty_lock(chunk->page_list[i].page);
-            put_page(chunk->page_list[i].page);
+                set_page_dirty_lock(page);
+            put_page(page);
         }
 
         kfree(chunk);

@@ -164,11 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
             }
 
             chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+            sg_init_table(chunk->page_list, chunk->nents);
             for (i = 0; i < chunk->nents; ++i) {
                 if (vma_list &&
                     !is_vm_hugetlb_page(vma_list[i + off]))
                     umem->hugetlb = 0;
-                chunk->page_list[i].page = page_list[i + off];
+                sg_set_page(&chunk->page_list[i], page_list[i + off]);
                 chunk->page_list[i].offset = 0;
                 chunk->page_list[i].length = PAGE_SIZE;
             }

@@ -179,7 +182,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                             DMA_BIDIRECTIONAL);
             if (chunk->nmap <= 0) {
                 for (i = 0; i < chunk->nents; ++i)
-                    put_page(chunk->page_list[i].page);
+                    put_page(sg_page(&chunk->page_list[i]));
                 kfree(chunk);
 
                 ret = -ENOMEM;
@@ -108,7 +108,7 @@ static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
     BUG_ON(!valid_dma_direction(direction));
 
     for_each_sg(sgl, sg, nents, i) {
-        addr = (u64) page_address(sg->page);
+        addr = (u64) page_address(sg_page(sg));
         /* TODO: handle highmem pages */
         if (!addr) {
             ret = 0;

@@ -127,7 +127,7 @@ static void ipath_unmap_sg(struct ib_device *dev,
 
 static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
 {
-    u64 addr = (u64) page_address(sg->page);
+    u64 addr = (u64) page_address(sg_page(sg));
 
     if (addr)
         addr += sg->offset;

@@ -225,7 +225,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         for (i = 0; i < chunk->nents; i++) {
             void *vaddr;
 
-            vaddr = page_address(chunk->page_list[i].page);
+            vaddr = page_address(sg_page(&chunk->page_list[i]));
             if (!vaddr) {
                 ret = ERR_PTR(-EINVAL);
                 goto bail;
@@ -71,7 +71,7 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
                  PCI_DMA_BIDIRECTIONAL);
 
     for (i = 0; i < chunk->npages; ++i)
-        __free_pages(chunk->mem[i].page,
+        __free_pages(sg_page(&chunk->mem[i]),
                      get_order(chunk->mem[i].length));
 }
 

@@ -81,7 +81,7 @@ static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chun
 
     for (i = 0; i < chunk->npages; ++i) {
         dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
-                          lowmem_page_address(chunk->mem[i].page),
+                          lowmem_page_address(sg_page(&chunk->mem[i])),
                           sg_dma_address(&chunk->mem[i]));
     }
 }

@@ -107,10 +107,13 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
 
 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
 {
-    mem->page = alloc_pages(gfp_mask, order);
-    if (!mem->page)
+    struct page *page;
+
+    page = alloc_pages(gfp_mask, order);
+    if (!page)
         return -ENOMEM;
 
+    sg_set_page(mem, page);
     mem->length = PAGE_SIZE << order;
     mem->offset = 0;
     return 0;

@@ -157,6 +160,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
         if (!chunk)
             goto fail;
 
+        sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
         chunk->npages = 0;
         chunk->nsg    = 0;
         list_add_tail(&chunk->list, &icm->chunk_list);

@@ -304,7 +308,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_h
              * so if we found the page, dma_handle has already
              * been assigned to. */
             if (chunk->mem[i].length > offset) {
-                page = chunk->mem[i].page;
+                page = sg_page(&chunk->mem[i]);
                 goto out;
             }
             offset -= chunk->mem[i].length;

@@ -445,6 +449,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
 int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                       struct mthca_user_db_table *db_tab, int index, u64 uaddr)
 {
+    struct page *pages[1];
     int ret = 0;
     u8 status;
     int i;

@@ -472,16 +477,17 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
     }
 
     ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
-                         &db_tab->page[i].mem.page, NULL);
+                         pages, NULL);
     if (ret < 0)
         goto out;
 
+    sg_set_page(&db_tab->page[i].mem, pages[0]);
     db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
     db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
 
     ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
     if (ret < 0) {
-        put_page(db_tab->page[i].mem.page);
+        put_page(pages[0]);
         goto out;
     }
 

@@ -491,7 +497,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
         ret = -EINVAL;
     if (ret) {
         pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-        put_page(db_tab->page[i].mem.page);
+        put_page(sg_page(&db_tab->page[i].mem));
         goto out;
     }
 

@@ -557,7 +563,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
     if (db_tab->page[i].uvirt) {
         mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
         pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-        put_page(db_tab->page[i].mem.page);
+        put_page(sg_page(&db_tab->page[i].mem));
     }
 }
 
@@ -131,7 +131,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
         p = mem;
         for_each_sg(sgl, sg, data->size, i) {
-            from = kmap_atomic(sg->page, KM_USER0);
+            from = kmap_atomic(sg_page(sg), KM_USER0);
             memcpy(p,
                    from + sg->offset,
                    sg->length);

@@ -191,7 +191,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
         p = mem;
         for_each_sg(sgl, sg, sg_size, i) {
-            to = kmap_atomic(sg->page, KM_SOFTIRQ0);
+            to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
             memcpy(to + sg->offset,
                    p,
                    sg->length);

@@ -300,7 +300,7 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
     for_each_sg(sgl, sg, data->dma_nents, i) {
         /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
            "offset: %ld sz: %ld\n", i,
-           (unsigned long)page_to_phys(sg->page),
+           (unsigned long)sg_phys(sg),
            (unsigned long)sg->offset,
            (unsigned long)sg->length); */
         end_addr = ib_sg_dma_address(ibdev, sg) +

@@ -336,7 +336,7 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
         iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
                  "off:0x%x sz:0x%x dma_len:0x%x\n",
                  i, (unsigned long)ib_sg_dma_address(ibdev, sg),
-                 sg->page, sg->offset,
+                 sg_page(sg), sg->offset,
                  sg->length, ib_sg_dma_len(ibdev, sg));
     }
 
@@ -348,16 +348,17 @@ static int crypt_convert(struct crypt_config *cc,
            ctx->idx_out < ctx->bio_out->bi_vcnt) {
         struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
         struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
-        struct scatterlist sg_in = {
-            .page = bv_in->bv_page,
-            .offset = bv_in->bv_offset + ctx->offset_in,
-            .length = 1 << SECTOR_SHIFT
-        };
-        struct scatterlist sg_out = {
-            .page = bv_out->bv_page,
-            .offset = bv_out->bv_offset + ctx->offset_out,
-            .length = 1 << SECTOR_SHIFT
-        };
+        struct scatterlist sg_in, sg_out;
+
+        sg_init_table(&sg_in, 1);
+        sg_set_page(&sg_in, bv_in->bv_page);
+        sg_in.offset = bv_in->bv_offset + ctx->offset_in;
+        sg_in.length = 1 << SECTOR_SHIFT;
+
+        sg_init_table(&sg_out, 1);
+        sg_set_page(&sg_out, bv_out->bv_page);
+        sg_out.offset = bv_out->bv_offset + ctx->offset_out;
+        sg_out.length = 1 << SECTOR_SHIFT;
 
         ctx->offset_in += sg_in.length;
         if (ctx->offset_in >= bv_in->bv_len) {
@@ -112,12 +112,13 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
     sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
     if (NULL == sglist)
         return NULL;
+    sg_init_table(sglist, nr_pages);
     for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
         pg = vmalloc_to_page(virt);
         if (NULL == pg)
             goto err;
         BUG_ON(PageHighMem(pg));
-        sglist[i].page = pg;
+        sg_set_page(&sglist[i], pg);
         sglist[i].length = PAGE_SIZE;
     }
     return sglist;
@@ -63,10 +63,10 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info
             memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
             kunmap_atomic(src, KM_BOUNCE_READ);
             local_irq_restore(flags);
-            dma->SGlist[map_offset].page = dma->bouncemap[map_offset];
+            sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset]);
         }
         else {
-            dma->SGlist[map_offset].page = dma->map[map_offset];
+            sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset]);
         }
         offset = 0;
         map_offset++;
@@ -60,12 +60,13 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
     sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
     if (NULL == sglist)
         return NULL;
+    sg_init_table(sglist, nr_pages);
     for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
         pg = vmalloc_to_page(virt);
         if (NULL == pg)
             goto err;
         BUG_ON(PageHighMem(pg));
-        sglist[i].page = pg;
+        sg_set_page(&sglist[i], pg);
         sglist[i].length = PAGE_SIZE;
     }
     return sglist;

@@ -86,13 +87,14 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
     sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
     if (NULL == sglist)
         return NULL;
+    sg_init_table(sglist, nr_pages);
 
     if (NULL == pages[0])
         goto nopage;
     if (PageHighMem(pages[0]))
         /* DMA to highmem pages might not work */
         goto highmem;
-    sglist[0].page = pages[0];
+    sg_set_page(&sglist[0], pages[0]);
     sglist[0].offset = offset;
     sglist[0].length = PAGE_SIZE - offset;
     for (i = 1; i < nr_pages; i++) {

@@ -100,7 +102,7 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
             goto nopage;
         if (PageHighMem(pages[i]))
             goto highmem;
-        sglist[i].page = pages[i];
+        sg_set_page(&sglist[i], pages[i]);
         sglist[i].length = PAGE_SIZE;
     }
     return sglist;
@@ -13,6 +13,7 @@
 #include <linux/blkdev.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/scatterlist.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>

@@ -153,19 +154,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
             blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
             blk_queue_max_segment_size(mq->queue, bouncesz);
 
-            mq->sg = kzalloc(sizeof(struct scatterlist),
+            mq->sg = kmalloc(sizeof(struct scatterlist),
                 GFP_KERNEL);
             if (!mq->sg) {
                 ret = -ENOMEM;
                 goto cleanup_queue;
             }
+            sg_init_table(mq->sg, 1);
 
-            mq->bounce_sg = kzalloc(sizeof(struct scatterlist) *
+            mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
                 bouncesz / 512, GFP_KERNEL);
             if (!mq->bounce_sg) {
                 ret = -ENOMEM;
                 goto cleanup_queue;
             }
+            sg_init_table(mq->bounce_sg, bouncesz / 512);
         }
     }
 #endif

@@ -302,12 +305,12 @@ static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
         BUG_ON(dst_len == 0);
 
         if (dst_size == 0) {
-            dst_buf = page_address(dst->page) + dst->offset;
+            dst_buf = sg_virt(dst);
             dst_size = dst->length;
         }
 
         if (src_size == 0) {
-            src_buf = page_address(src->page) + src->offset;
+            src_buf = sg_virt(dst);
             src_size = src->length;
         }
 

@@ -353,9 +356,7 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
         return 1;
     }
 
-    mq->sg[0].page = virt_to_page(mq->bounce_buf);
-    mq->sg[0].offset = offset_in_page(mq->bounce_buf);
-    mq->sg[0].length = 0;
+    sg_init_one(mq->sg, mq->bounce_buf, 0);
 
     while (sg_len) {
         mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
@@ -149,7 +149,7 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
 
         sg = &data->sg[i];
 
-        sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
+        sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
         amount = min(size, sg->length);
         size -= amount;
 

@@ -226,7 +226,7 @@ static void at91_mci_pre_dma_read(struct at91mci_host *host)
         sg = &data->sg[host->transfer_index++];
         pr_debug("sg = %p\n", sg);
 
-        sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);
+        sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);
 
         pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
 

@@ -283,7 +283,7 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
             int index;
 
             /* Swap the contents of the buffer */
-            buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
+            buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
             pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
 
             for (index = 0; index < (sg->length / 4); index++)

@@ -292,7 +292,7 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
             kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
         }
 
-        flush_dcache_page(sg->page);
+        flush_dcache_page(sg_page(sg));
     }
 
     /* Is there another transfer to trigger? */
@@ -340,7 +340,7 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
 
     /* This is the pointer to the data buffer */
     sg = &data->sg[host->pio.index];
-    sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;
+    sg_ptr = sg_virt(sg) + host->pio.offset;
 
     /* This is the space left inside the buffer */
     sg_len = data->sg[host->pio.index].length - host->pio.offset;

@@ -400,7 +400,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
 
     if (host->pio.index < host->dma.len) {
         sg = &data->sg[host->pio.index];
-        sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;
+        sg_ptr = sg_virt(sg) + host->pio.offset;
 
         /* This is the space left inside the buffer */
         sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

@@ -613,14 +613,11 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
 
             if (host->flags & HOST_F_XMIT){
                 ret = au1xxx_dbdma_put_source_flags(channel,
-                    (void *) (page_address(sg->page) +
-                              sg->offset),
-                    len, flags);
+                    (void *) sg_virt(sg), len, flags);
             }
             else {
                 ret = au1xxx_dbdma_put_dest_flags(channel,
-                    (void *) (page_address(sg->page) +
-                              sg->offset),
+                    (void *) sg_virt(sg),
                     len, flags);
             }
 
@@ -262,7 +262,7 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
     }
 
     /* Convert back to virtual address */
-    host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset);
+    host->data_ptr = (u16*)sg_virt(sg);
     host->data_cnt = 0;
 
     clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
@@ -813,7 +813,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
                 && dir == DMA_FROM_DEVICE)
             dir = DMA_BIDIRECTIONAL;
 
-        dma_addr = dma_map_page(dma_dev, sg->page, 0,
+        dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
                                 PAGE_SIZE, dir);
         if (direction == DMA_TO_DEVICE)
             t->tx_dma = dma_addr + sg->offset;

@@ -822,7 +822,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
         }
 
         /* allow pio too; we don't allow highmem */
-        kmap_addr = kmap(sg->page);
+        kmap_addr = kmap(sg_page(sg));
         if (direction == DMA_TO_DEVICE)
             t->tx_buf = kmap_addr + sg->offset;
         else

@@ -855,8 +855,8 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
 
         /* discard mappings */
         if (direction == DMA_FROM_DEVICE)
-            flush_kernel_dcache_page(sg->page);
-        kunmap(sg->page);
+            flush_kernel_dcache_page(sg_page(sg));
+        kunmap(sg_page(sg));
         if (dma_dev)
             dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
 
@@ -24,10 +24,10 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/clk.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
-#include <asm/scatterlist.h>
 #include <asm/mach-types.h>
 
 #include <asm/arch/board.h>

@@ -383,7 +383,7 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host)
 
     sg = host->data->sg + host->sg_idx;
     host->buffer_bytes_left = sg->length;
-    host->buffer = page_address(sg->page) + sg->offset;
+    host->buffer = sg_virt(sg);
     if (host->buffer_bytes_left > host->total_bytes_left)
         host->buffer_bytes_left = host->total_bytes_left;
 }
@@ -231,7 +231,7 @@ static void sdhci_deactivate_led(struct sdhci_host *host)
 
 static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
 {
-    return page_address(host->cur_sg->page) + host->cur_sg->offset;
+    return sg_virt(host->cur_sg);
 }
 
 static inline int sdhci_next_sg(struct sdhci_host* host)
@@ -192,7 +192,7 @@ static void tifm_sd_transfer_data(struct tifm_sd *host)
         }
         off = sg[host->sg_pos].offset + host->block_pos;
 
-        pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT);
+        pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
         p_off = offset_in_page(off);
         p_cnt = PAGE_SIZE - p_off;
         p_cnt = min(p_cnt, cnt);

@@ -241,18 +241,18 @@ static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
         }
         off = sg[host->sg_pos].offset + host->block_pos;
 
-        pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT);
+        pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
         p_off = offset_in_page(off);
         p_cnt = PAGE_SIZE - p_off;
         p_cnt = min(p_cnt, cnt);
         p_cnt = min(p_cnt, t_size);
 
         if (r_data->flags & MMC_DATA_WRITE)
-            tifm_sd_copy_page(host->bounce_buf.page,
+            tifm_sd_copy_page(sg_page(&host->bounce_buf),
                               r_data->blksz - t_size,
                               pg, p_off, p_cnt);
         else if (r_data->flags & MMC_DATA_READ)
-            tifm_sd_copy_page(pg, p_off, host->bounce_buf.page,
+            tifm_sd_copy_page(pg, p_off, sg_page(&host->bounce_buf),
                               r_data->blksz - t_size, p_cnt);
 
         t_size -= p_cnt;
@@ -269,7 +269,7 @@ static inline int wbsd_next_sg(struct wbsd_host *host)
 
 static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
 {
-    return page_address(host->cur_sg->page) + host->cur_sg->offset;
+    return sg_virt(host->cur_sg);
 }
 
 static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)

@@ -283,7 +283,7 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
     len = data->sg_len;
 
     for (i = 0; i < len; i++) {
-        sgbuf = page_address(sg[i].page) + sg[i].offset;
+        sgbuf = sg_virt(&sg[i]);
         memcpy(dmabuf, sgbuf, sg[i].length);
         dmabuf += sg[i].length;
     }

@@ -300,7 +300,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
     len = data->sg_len;
 
     for (i = 0; i < len; i++) {
-        sgbuf = page_address(sg[i].page) + sg[i].offset;
+        sgbuf = sg_virt(&sg[i]);
         memcpy(sgbuf, dmabuf, sg[i].length);
         dmabuf += sg[i].length;
     }
@@ -60,7 +60,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
                 PCI_DMA_BIDIRECTIONAL);
 
     for (i = 0; i < chunk->npages; ++i)
-        __free_pages(chunk->mem[i].page,
+        __free_pages(sg_page(&chunk->mem[i]),
                      get_order(chunk->mem[i].length));
 }
 

@@ -70,7 +70,7 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
 
     for (i = 0; i < chunk->npages; ++i)
         dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
-                          lowmem_page_address(chunk->mem[i].page),
+                          lowmem_page_address(sg_page(&chunk->mem[i])),
                           sg_dma_address(&chunk->mem[i]));
 }
 

@@ -95,10 +95,13 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
 
 static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
 {
-    mem->page = alloc_pages(gfp_mask, order);
-    if (!mem->page)
+    struct page *page;
+
+    page = alloc_pages(gfp_mask, order);
+    if (!page)
         return -ENOMEM;
 
+    sg_set_page(mem, page);
     mem->length = PAGE_SIZE << order;
     mem->offset = 0;
     return 0;

@@ -145,6 +148,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
         if (!chunk)
             goto fail;
 
+        sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
         chunk->npages = 0;
         chunk->nsg    = 0;
         list_add_tail(&chunk->list, &icm->chunk_list);

@@ -334,7 +338,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_han
          * been assigned to.
          */
         if (chunk->mem[i].length > offset) {
-            page = chunk->mem[i].page;
+            page = sg_page(&chunk->mem[i]);
             goto out;
         }
         offset -= chunk->mem[i].length;
@@ -55,7 +55,7 @@
 #include <linux/mm.h>
 #include <linux/ppp_defs.h>
 #include <linux/ppp-comp.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 
 #include "ppp_mppe.h"
 

@@ -68,9 +68,7 @@ MODULE_VERSION("1.0.2");
 static unsigned int
 setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
 {
-    sg[0].page = virt_to_page(address);
-    sg[0].offset = offset_in_page(address);
-    sg[0].length = length;
+    sg_init_one(sg, address, length);
     return length;
 }
 
@@ -1840,7 +1840,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
         (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
         if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) {
             struct scatterlist *sg = scsi_sglist(srb);
-            char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+            char *buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
             memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
             kunmap_atomic(buf - sg->offset, KM_IRQ0);
         }

@@ -1919,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
             char *buf;
             unsigned long flags = 0;
             local_irq_save(flags);
-            buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+            buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
             memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
             kunmap_atomic(buf - sg->offset, KM_IRQ0);
             local_irq_restore(flags);
@ -1469,7 +1469,7 @@ static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
|
|||
struct scatterlist *sg = scsi_sglist(cmd);
|
||||
|
||||
local_irq_save(flags);
|
||||
buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
|
||||
buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
|
||||
transfer_len = min(sg->length, len);
|
||||
|
||||
memcpy(buf, data, transfer_len);
|
||||
|
|
|
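Drivers that bounce data through an atomic mapping keep their logic; only the page lookup changes from sg->page to sg_page(sg). The recurring pattern, sketched with the KM_IRQ0 slot these drivers use (modern kernels drop the slot argument from kmap_atomic()):

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void sg_bounce_copy(struct scatterlist *sg, void *bounce)
{
	char *buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;

	memcpy(bounce, buf, sg->length);
	kunmap_atomic(buf - sg->offset, KM_IRQ0);	/* unmap the page base */
}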
@@ -298,8 +298,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd * cmd)
 	if (cmd->use_sg) {
 		cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg - 1;
-		cmd->SCp.ptr = page_address(cmd->SCp.buffer->page)+
-			       cmd->SCp.buffer->offset;
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 	} else {
 		cmd->SCp.buffer = NULL;
@@ -2143,8 +2142,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
 			++cmd->SCp.buffer;
 			--cmd->SCp.buffers_residual;
 			cmd->SCp.this_residual = cmd->SCp.buffer->length;
-			cmd->SCp.ptr = page_address(cmd->SCp.buffer->page)+
-				       cmd->SCp.buffer->offset;
+			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 			dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
 		}
 		/*
@@ -927,7 +927,7 @@ static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 			esp->dma_mmu_get_scsi_sgl(esp, sp);
 		else
 			sp->SCp.ptr =
-				(char *) virt_to_phys((page_address(sp->SCp.buffer->page) + sp->SCp.buffer->offset));
+				(char *) virt_to_phys(sg_virt(sp->SCp.buffer));
 	}
 }
 
@@ -1748,7 +1748,7 @@ static inline void advance_sg(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 	if (esp->dma_advance_sg)
 		esp->dma_advance_sg (sp);
 	else
-		sp->SCp.ptr = (char *) virt_to_phys((page_address(sp->SCp.buffer->page) + sp->SCp.buffer->offset));
+		sp->SCp.ptr = (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
 
 }
 
@@ -875,8 +875,7 @@ static void NCR53c406a_intr(void *dev_id)
 		outb(TRANSFER_INFO | DMA_OP, CMD_REG);
 #if USE_PIO
 		scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
-			NCR53c406a_pio_write(page_address(sg->page) + sg->offset,
-					     sg->length);
+			NCR53c406a_pio_write(sg_virt(sg), sg->length);
 		}
 		REG0;
 #endif /* USE_PIO */
@@ -897,8 +896,7 @@ static void NCR53c406a_intr(void *dev_id)
 		outb(TRANSFER_INFO | DMA_OP, CMD_REG);
 #if USE_PIO
 		scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
-			NCR53c406a_pio_read(page_address(sg->page) + sg->offset,
-					    sg->length);
+			NCR53c406a_pio_read(sg_virt(sg), sg->length);
 		}
 		REG0;
 #endif /* USE_PIO */
@@ -356,7 +356,7 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
 	int transfer_len;
 	struct scatterlist *sg = scsi_sglist(scsicmd);
 
-	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+	buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 	transfer_len = min(sg->length, len + offset);
 
 	transfer_len -= offset;
@@ -613,7 +613,7 @@ struct aha152x_scdata {
 #define SCNEXT(SCpnt)		SCDATA(SCpnt)->next
 #define SCSEM(SCpnt)		SCDATA(SCpnt)->done
 
-#define SG_ADDRESS(buffer)	((char *) (page_address((buffer)->page)+(buffer)->offset))
+#define SG_ADDRESS(buffer)	((char *) sg_virt((buffer)))
 
 /* state handling */
 static void seldi_run(struct Scsi_Host *shpnt);
@@ -49,7 +49,7 @@
 #include "aha1542.h"
 
 #define SCSI_BUF_PA(address)	isa_virt_to_bus(address)
-#define SCSI_SG_PA(sgent)	(isa_page_to_bus((sgent)->page) + (sgent)->offset)
+#define SCSI_SG_PA(sgent)	(isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
 
 static void BAD_DMA(void *address, unsigned int length)
 {
@@ -66,8 +66,7 @@ static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
 		       int badseg)
 {
 	printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n",
-	       badseg, nseg,
-	       page_address(sgp->page) + sgp->offset,
+	       badseg, nseg, sg_virt(sgp),
 	       (unsigned long long)SCSI_SG_PA(sgp),
 	       sgp->length);
 
@@ -712,8 +711,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 			printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
 			scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
 				printk(KERN_CRIT "%d: %p %d\n", i,
-				       (page_address(sg->page) +
-					sg->offset), sg->length);
+				       sg_virt(sg), sg->length);
 			};
 			printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
 			ptr = (unsigned char *) &cptr[i];
@@ -1343,7 +1343,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
 	/* 4 bytes: Areca io control code */
 
 	sg = scsi_sglist(cmd);
-	buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+	buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 	if (scsi_sg_count(cmd) > 1) {
 		retvalue = ARCMSR_MESSAGE_FAIL;
 		goto message_out;
@@ -1593,7 +1593,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
 	strncpy(&inqdata[32], "R001", 4); /* Product Revision */
 
 	sg = scsi_sglist(cmd);
-	buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+	buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 
 	memcpy(buffer, inqdata, sizeof(inqdata));
 	sg = scsi_sglist(cmd);
@@ -515,8 +515,7 @@ static inline void initialize_SCp(Scsi_Cmnd *cmd)
 	if (cmd->use_sg) {
 		cmd->SCp.buffer = (struct scatterlist *)cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg - 1;
-		cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page) +
-			       cmd->SCp.buffer->offset;
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 		/* ++roman: Try to merge some scatter-buffers if they are at
 		 * contiguous physical addresses.
@@ -2054,8 +2053,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 				++cmd->SCp.buffer;
 				--cmd->SCp.buffers_residual;
 				cmd->SCp.this_residual = cmd->SCp.buffer->length;
-				cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
-					       cmd->SCp.buffer->offset;
+				cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 				/* ++roman: Try to merge some scatter-buffers if
 				 * they are at contiguous physical addresses.
 				 */
@@ -172,7 +172,7 @@ static void IncStat(struct scsi_pointer *SCp, unsigned int Increment)
 			SCp->Status = 0;
 		else {
 			SCp->buffer++;
-			SCp->ptr = page_address(SCp->buffer->page) + SCp->buffer->offset;
+			SCp->ptr = sg_virt(SCp->buffer);
 			SCp->this_residual = SCp->buffer->length;
 		}
 	}
@@ -410,7 +410,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
 	} else {
 		cmd->SCp.buffer = cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg;
-		cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 	}
 	cmd->SCp.Status = (cmd->SCp.this_residual != 0);	/* TRUE as long as bytes
@@ -973,7 +973,7 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
 			if (current_SC->SCp.buffers_residual) {
 				--current_SC->SCp.buffers_residual;
 				++current_SC->SCp.buffer;
-				current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+				current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 				current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 			} else
 				break;
@@ -1006,7 +1006,7 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
 			if (!current_SC->SCp.this_residual && current_SC->SCp.buffers_residual) {
 				--current_SC->SCp.buffers_residual;
 				++current_SC->SCp.buffer;
-				current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+				current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 				current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 			}
 		}
@@ -1109,7 +1109,7 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
 
 	if (current_SC->use_sg) {
 		current_SC->SCp.buffer = (struct scatterlist *) current_SC->request_buffer;
-		current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+		current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 		current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 		current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
 	} else {
@@ -1321,7 +1321,7 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
 			if (current_SC->SCp.buffers_residual) {
 				--current_SC->SCp.buffers_residual;
 				++current_SC->SCp.buffer;
-				current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+				current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 				current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 			} else
 				break;
@@ -1354,7 +1354,7 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
 		    && current_SC->SCp.buffers_residual) {
 			--current_SC->SCp.buffers_residual;
 			++current_SC->SCp.buffer;
-			current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+			current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 			current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 		}
 	}
@@ -1439,8 +1439,7 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
 
 	if (scsi_sg_count(current_SC)) {
 		current_SC->SCp.buffer = scsi_sglist(current_SC);
-		current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page)
-			+ current_SC->SCp.buffer->offset;
+		current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
 		current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
 		current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
 	} else {
@@ -2374,18 +2374,18 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
 		if (cpsum+cpnow > cpcount)
 			cpnow = cpcount - cpsum;
 		cpsum += cpnow;
-		if (!sl->page) {
+		if (!sg_page(sl)) {
 			printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
 			       ha->hanum);
 			return;
 		}
 		local_irq_save(flags);
-		address = kmap_atomic(sl->page, KM_BIO_SRC_IRQ) + sl->offset;
+		address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
 		if (to_buffer)
 			memcpy(buffer, address, cpnow);
 		else
 			memcpy(address, buffer, cpnow);
-		flush_dcache_page(sl->page);
+		flush_dcache_page(sg_page(sl));
 		kunmap_atomic(address, KM_BIO_SRC_IRQ);
 		local_irq_restore(flags);
 		if (cpsum == cpcount)
@@ -1828,7 +1828,7 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
 		BUG_ON(scsi_sg_count(cmd) > 16);
 
 		scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
-			ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg->page) + sg->offset);
+			ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg_page(sg)) + sg->offset);
 			ld(shpnt)[ldn].sge[i].byte_length = sg->length;
 		}
 		scb->enable |= IM_POINTER_TO_LIST;
@@ -175,18 +175,18 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
 
 	while (bcount) {
 		count = min(pc->sg->length - pc->b_count, bcount);
-		if (PageHighMem(pc->sg->page)) {
+		if (PageHighMem(sg_page(pc->sg))) {
 			unsigned long flags;
 
 			local_irq_save(flags);
-			buf = kmap_atomic(pc->sg->page, KM_IRQ0) +
+			buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
 				pc->sg->offset;
 			drive->hwif->atapi_input_bytes(drive,
 						buf + pc->b_count, count);
 			kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
 			local_irq_restore(flags);
 		} else {
-			buf = page_address(pc->sg->page) + pc->sg->offset;
+			buf = sg_virt(pc->sg);
 			drive->hwif->atapi_input_bytes(drive,
 						buf + pc->b_count, count);
 		}
@@ -212,18 +212,18 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
 
 	while (bcount) {
 		count = min(pc->sg->length - pc->b_count, bcount);
-		if (PageHighMem(pc->sg->page)) {
+		if (PageHighMem(sg_page(pc->sg))) {
 			unsigned long flags;
 
 			local_irq_save(flags);
-			buf = kmap_atomic(pc->sg->page, KM_IRQ0) +
+			buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
 				pc->sg->offset;
 			drive->hwif->atapi_output_bytes(drive,
 						buf + pc->b_count, count);
 			kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
 			local_irq_restore(flags);
 		} else {
-			buf = page_address(pc->sg->page) + pc->sg->offset;
+			buf = sg_virt(pc->sg);
 			drive->hwif->atapi_output_bytes(drive,
 						buf + pc->b_count, count);
 		}
@@ -705,9 +705,7 @@ static int imm_completion(struct scsi_cmnd *cmd)
 			cmd->SCp.buffer++;
 			cmd->SCp.this_residual =
 			    cmd->SCp.buffer->length;
-			cmd->SCp.ptr =
-			    page_address(cmd->SCp.buffer->page) +
-			    cmd->SCp.buffer->offset;
+			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 
 			/*
 			 * Make sure that we transfer even number of bytes
@@ -844,9 +842,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
 			cmd->SCp.buffer =
 			    (struct scatterlist *) cmd->request_buffer;
 			cmd->SCp.this_residual = cmd->SCp.buffer->length;
-			cmd->SCp.ptr =
-			    page_address(cmd->SCp.buffer->page) +
-			    cmd->SCp.buffer->offset;
+			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		} else {
 			/* else fill the only available buffer */
 			cmd->SCp.buffer = NULL;
@@ -372,7 +372,7 @@ static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
 	if (cmd->use_sg) {
 		cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
 		cmd->SCp.buffers_residual = cmd->use_sg - 1;
-		cmd->SCp.ptr = (char *) page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
 	} else {
 		cmd->SCp.buffer = NULL;
@@ -764,7 +764,7 @@ static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir)
 		++cmd->SCp.buffer;
 		--cmd->SCp.buffers_residual;
 		cmd->SCp.this_residual = cmd->SCp.buffer->length;
-		cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 	}
 
 /* Set up hardware registers */
@@ -2872,6 +2872,7 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
 	}
 
 	scatterlist = sglist->scatterlist;
+	sg_init_table(scatterlist, num_elem);
 
 	sglist->order = order;
 	sglist->num_sg = num_elem;
@@ -2884,12 +2885,12 @@ static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
 
 			/* Free up what we already allocated */
 			for (j = i - 1; j >= 0; j--)
-				__free_pages(scatterlist[j].page, order);
+				__free_pages(sg_page(&scatterlist[j]), order);
 			kfree(sglist);
 			return NULL;
 		}
 
-		scatterlist[i].page = page;
+		sg_set_page(&scatterlist[i], page);
 	}
 
 	return sglist;
@@ -2910,7 +2911,7 @@ static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
 	int i;
 
 	for (i = 0; i < sglist->num_sg; i++)
-		__free_pages(sglist->scatterlist[i].page, sglist->order);
+		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
 
 	kfree(sglist);
 }
@@ -2940,9 +2941,11 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
 	scatterlist = sglist->scatterlist;
 
 	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
-		kaddr = kmap(scatterlist[i].page);
+		struct page *page = sg_page(&scatterlist[i]);
+
+		kaddr = kmap(page);
 		memcpy(kaddr, buffer, bsize_elem);
-		kunmap(scatterlist[i].page);
+		kunmap(page);
 
 		scatterlist[i].length = bsize_elem;
 
@@ -2953,9 +2956,11 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
 	}
 
 	if (len % bsize_elem) {
-		kaddr = kmap(scatterlist[i].page);
+		struct page *page = sg_page(&scatterlist[i]);
+
+		kaddr = kmap(page);
 		memcpy(kaddr, buffer, len % bsize_elem);
-		kunmap(scatterlist[i].page);
+		kunmap(page);
 
 		scatterlist[i].length = len % bsize_elem;
 	}
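The microcode-download path above may sleep, so it uses the kmap()/kunmap() pair and caches the sg_page() result in a local instead of looking it up twice. A condensed sketch of one loop iteration (sg_copy_chunk is a hypothetical helper name; the entries here start at offset 0, as in the code above):

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void sg_copy_chunk(struct scatterlist *sg, const void *src, size_t len)
{
	struct page *page = sg_page(sg);
	void *kaddr = kmap(page);	/* sleeping mapping, unlike kmap_atomic() */

	memcpy(kaddr, src, len);
	kunmap(page);
	sg->length = len;
}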
@@ -1515,7 +1515,7 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
 		/* kmap_atomic() ensures addressability of the user buffer.*/
 		/* local_irq_save() protects the KM_IRQ0 address slot. */
 		local_irq_save(flags);
-		buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+		buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 		if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
 		    buffer[2] == 'P' && buffer[3] == 'P') {
 			kunmap_atomic(buffer - sg->offset, KM_IRQ0);
@@ -3523,7 +3523,7 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
 		/* kmap_atomic() ensures addressability of the data buffer.*/
 		/* local_irq_save() protects the KM_IRQ0 address slot. */
 		local_irq_save(flags);
-		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset;
 		memcpy(buffer, &cdata[xfer_cnt], min_cnt);
 		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
 		local_irq_restore(flags);
@@ -3556,7 +3556,7 @@ ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
 		/* kmap_atomic() ensures addressability of the data buffer.*/
 		/* local_irq_save() protects the KM_IRQ0 address slot. */
 		local_irq_save(flags);
-		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset;
 		memcpy(&cdata[xfer_cnt], buffer, min_cnt);
 		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
 		local_irq_restore(flags);
@@ -70,9 +70,7 @@ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 static inline void
 iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
 {
-	ibuf->sg.page = virt_to_page(vbuf);
-	ibuf->sg.offset = offset_in_page(vbuf);
-	ibuf->sg.length = size;
+	sg_init_one(&ibuf->sg, vbuf, size);
 	ibuf->sent = 0;
 	ibuf->use_sendmsg = 1;
 }
@@ -80,13 +78,14 @@ iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
 static inline void
 iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
 {
-	ibuf->sg.page = sg->page;
+	sg_init_table(&ibuf->sg, 1);
+	sg_set_page(&ibuf->sg, sg_page(sg));
 	ibuf->sg.offset = sg->offset;
 	ibuf->sg.length = sg->length;
 	/*
 	 * Fastpath: sg element fits into single page
 	 */
-	if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
+	if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg_page(sg)))
 		ibuf->use_sendmsg = 0;
 	else
 		ibuf->use_sendmsg = 1;
@@ -716,7 +715,7 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
 	for (i = tcp_ctask->sg_count; i < scsi_sg_count(sc); i++) {
 		char *dest;
 
-		dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
+		dest = kmap_atomic(sg_page(&sg[i]), KM_SOFTIRQ0);
 		rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
 				      sg[i].length, offset);
 		kunmap_atomic(dest, KM_SOFTIRQ0);
@@ -1103,9 +1102,9 @@ iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
 	 * slab case.
 	 */
 	if (buf->use_sendmsg)
-		res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
+		res = sock_no_sendpage(sk, sg_page(&buf->sg), offset, size, flags);
 	else
-		res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);
+		res = tcp_conn->sendpage(sk, sg_page(&buf->sg), offset, size, flags);
 
 	if (res >= 0) {
 		conn->txdata_octets += res;
@@ -658,7 +658,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
 			struct scatterlist *sg;
 
 			sg = scsi_sglist(cmd);
-			buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+			buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
 
 			memset(buf, 0, cmd->cmnd[4]);
 			kunmap_atomic(buf - sg->offset, KM_IRQ0);
@@ -1542,10 +1542,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
 		if( cmd->cmnd[0] == INQUIRY && !islogical ) {
 
 			sgl = scsi_sglist(cmd);
-			if( sgl->page ) {
-				c = *(unsigned char *)
-					page_address((&sgl[0])->page) +
-					(&sgl[0])->offset;
+			if( sg_page(sgl) ) {
+				c = *(unsigned char *) sg_virt(&sgl[0]);
 			} else {
 				printk(KERN_WARNING
 				       "megaraid: invalid sg.\n");
@@ -1584,10 +1584,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
 			caddr_t vaddr;
 
 			sgl = scsi_sglist(scp);
-			if (sgl->page) {
-				vaddr = (caddr_t)
-					(page_address((&sgl[0])->page)
-					+ (&sgl[0])->offset);
+			if (sg_page(sgl)) {
+				vaddr = (caddr_t) sg_virt(&sgl[0]);
 
 				memset(vaddr, 0, scp->cmnd[4]);
 			}
@@ -2328,10 +2326,8 @@ megaraid_mbox_dpc(unsigned long devp)
 			&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
 
 			sgl = scsi_sglist(scp);
-			if (sgl->page) {
-				c = *(unsigned char *)
-				(page_address((&sgl[0])->page) +
-				(&sgl[0])->offset);
+			if (sg_page(sgl)) {
+				c = *(unsigned char *) sg_virt(&sgl[0]);
 			} else {
 				con_log(CL_ANN, (KERN_WARNING
 						"megaraid mailbox: invalid sg:%d\n",
@@ -550,8 +550,7 @@ void dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 
 void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 {
-	sp->SCp.ptr = page_address(sp->SCp.buffer->page)+
-		sp->SCp.buffer->offset;
+	sp->SCp.ptr = sg_virt(sp->SCp.buffer);
 }
 
 void dma_mmu_release_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
@@ -564,8 +563,7 @@ void dma_mmu_release_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 
 void dma_advance_sg(Scsi_Cmnd *sp)
 {
-	sp->SCp.ptr = page_address(sp->SCp.buffer->page)+
-		sp->SCp.buffer->offset;
+	sp->SCp.ptr = sg_virt(sp->SCp.buffer);
 }
 
@@ -542,7 +542,7 @@ static int osst_verify_frame(struct osst_tape * STp, int frame_seq_number, int q
 	if (STp->raw) {
 		if (STp->buffer->syscall_result) {
 			for (i=0; i < STp->buffer->sg_segs; i++)
-				memset(page_address(STp->buffer->sg[i].page),
+				memset(page_address(sg_page(&STp->buffer->sg[i])),
 				       0, STp->buffer->sg[i].length);
 			strcpy(STp->buffer->b_data, "READ ERROR ON FRAME");
 		} else
@@ -4437,7 +4437,7 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp)
 		for (i = 0, b_size = 0;
 		     (i < STp->buffer->sg_segs) && ((b_size + STp->buffer->sg[i].length) <= OS_DATA_SIZE);
 		     b_size += STp->buffer->sg[i++].length);
-		STp->buffer->aux = (os_aux_t *) (page_address(STp->buffer->sg[i].page) + OS_DATA_SIZE - b_size);
+		STp->buffer->aux = (os_aux_t *) (page_address(sg_page(&STp->buffer->sg[i])) + OS_DATA_SIZE - b_size);
 #if DEBUG
 		printk(OSST_DEB_MSG "%s:D: b_data points to %p in segment 0 at %p\n", name,
 			STp->buffer->b_data, page_address(STp->buffer->sg[0].page));
@@ -5252,25 +5252,26 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
 	/* Try to allocate the first segment up to OS_DATA_SIZE and the others
 	   big enough to reach the goal (code assumes no segments in place) */
 	for (b_size = OS_DATA_SIZE, order = OSST_FIRST_ORDER; b_size >= PAGE_SIZE; order--, b_size /= 2) {
-		STbuffer->sg[0].page = alloc_pages(priority, order);
+		struct page *page = alloc_pages(priority, order);
+
 		STbuffer->sg[0].offset = 0;
-		if (STbuffer->sg[0].page != NULL) {
+		if (page != NULL) {
+			sg_set_page(&STbuffer->sg[0], page);
 			STbuffer->sg[0].length = b_size;
-			STbuffer->b_data = page_address(STbuffer->sg[0].page);
+			STbuffer->b_data = page_address(page);
 			break;
 		}
 	}
-	if (STbuffer->sg[0].page == NULL) {
+	if (sg_page(&STbuffer->sg[0]) == NULL) {
 		printk(KERN_NOTICE "osst :I: Can't allocate tape buffer main segment.\n");
 		return 0;
 	}
 	/* Got initial segment of 'bsize,order', continue with same size if possible, except for AUX */
 	for (segs=STbuffer->sg_segs=1, got=b_size;
 	     segs < max_segs && got < OS_FRAME_SIZE; ) {
-		STbuffer->sg[segs].page =
-			alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
+		struct page *page = alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
+
 		STbuffer->sg[segs].offset = 0;
-		if (STbuffer->sg[segs].page == NULL) {
+		if (page == NULL) {
 			if (OS_FRAME_SIZE - got <= (max_segs - segs) * b_size / 2 && order) {
 				b_size /= 2; /* Large enough for the rest of the buffers */
 				order--;
@@ -5284,6 +5285,7 @@ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma)
 			normalize_buffer(STbuffer);
 			return 0;
 		}
+		sg_set_page(&STbuffer->sg[segs], page);
 		STbuffer->sg[segs].length = (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size;
 		got += STbuffer->sg[segs].length;
 		STbuffer->buffer_size = got;
@@ -5316,7 +5318,7 @@ static void normalize_buffer(struct osst_buffer *STbuffer)
 		     b_size < STbuffer->sg[i].length;
 		     b_size *= 2, order++);
 
-		__free_pages(STbuffer->sg[i].page, order);
+		__free_pages(sg_page(&STbuffer->sg[i]), order);
 		STbuffer->buffer_size -= STbuffer->sg[i].length;
 	}
 #if DEBUG
@@ -5344,7 +5346,7 @@ static int append_to_buffer(const char __user *ubp, struct osst_buffer *st_bp, i
 	for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
 		cnt = st_bp->sg[i].length - offset < do_count ?
 		      st_bp->sg[i].length - offset : do_count;
-		res = copy_from_user(page_address(st_bp->sg[i].page) + offset, ubp, cnt);
+		res = copy_from_user(page_address(sg_page(&st_bp->sg[i])) + offset, ubp, cnt);
 		if (res)
 			return (-EFAULT);
 		do_count -= cnt;
@@ -5377,7 +5379,7 @@ static int from_buffer(struct osst_buffer *st_bp, char __user *ubp, int do_count
 	for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
 		cnt = st_bp->sg[i].length - offset < do_count ?
 		      st_bp->sg[i].length - offset : do_count;
-		res = copy_to_user(ubp, page_address(st_bp->sg[i].page) + offset, cnt);
+		res = copy_to_user(ubp, page_address(sg_page(&st_bp->sg[i])) + offset, cnt);
 		if (res)
 			return (-EFAULT);
 		do_count -= cnt;
@@ -5410,7 +5412,7 @@ static int osst_zero_buffer_tail(struct osst_buffer *st_bp)
 	     i < st_bp->sg_segs && do_count > 0; i++) {
 		cnt = st_bp->sg[i].length - offset < do_count ?
 		      st_bp->sg[i].length - offset : do_count ;
-		memset(page_address(st_bp->sg[i].page) + offset, 0, cnt);
+		memset(page_address(sg_page(&st_bp->sg[i])) + offset, 0, cnt);
 		do_count -= cnt;
 		offset = 0;
 	}
@@ -5430,7 +5432,7 @@ static int osst_copy_to_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
 	for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
 		cnt = st_bp->sg[i].length < do_count ?
 		      st_bp->sg[i].length : do_count ;
-		memcpy(page_address(st_bp->sg[i].page), ptr, cnt);
+		memcpy(page_address(sg_page(&st_bp->sg[i])), ptr, cnt);
 		do_count -= cnt;
 		ptr += cnt;
 	}
@@ -5451,7 +5453,7 @@ static int osst_copy_from_buffer(struct osst_buffer *st_bp, unsigned char *ptr)
 	for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
 		cnt = st_bp->sg[i].length < do_count ?
 		      st_bp->sg[i].length : do_count ;
-		memcpy(ptr, page_address(st_bp->sg[i].page), cnt);
+		memcpy(ptr, page_address(sg_page(&st_bp->sg[i])), cnt);
 		do_count -= cnt;
 		ptr += cnt;
 	}
@@ -393,7 +393,7 @@ enum _burst_mode {
 #define MSG_EXT_SDTR         0x01
 
 /* scatter-gather table */
-#  define BUFFER_ADDR ((char *)((unsigned int)(SCpnt->SCp.buffer->page) + SCpnt->SCp.buffer->offset))
+#  define BUFFER_ADDR ((char *)((sg_virt(SCpnt->SCp.buffer))))
 
 #endif  /*__nsp_cs__*/
 /* end */
@@ -443,8 +443,7 @@ SYM53C500_intr(int irq, void *dev_id)
 
 		scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
 			SYM53C500_pio_write(fast_pio, port_base,
-					    page_address(sg->page) + sg->offset,
-					    sg->length);
+					    sg_virt(sg), sg->length);
 		}
 		REG0(port_base);
 	}
@@ -463,8 +462,7 @@ SYM53C500_intr(int irq, void *dev_id)
 
 		scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
 			SYM53C500_pio_read(fast_pio, port_base,
-					   page_address(sg->page) + sg->offset,
-					   sg->length);
+					   sg_virt(sg), sg->length);
 		}
 		REG0(port_base);
 	}
@@ -608,9 +608,7 @@ static int ppa_completion(struct scsi_cmnd *cmd)
 			cmd->SCp.buffer++;
 			cmd->SCp.this_residual =
 			    cmd->SCp.buffer->length;
-			cmd->SCp.ptr =
-			    page_address(cmd->SCp.buffer->page) +
-			    cmd->SCp.buffer->offset;
+			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		}
 	}
 	/* Now check to see if the drive is ready to comunicate */
@@ -756,8 +754,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
 			/* if many buffers are available, start filling the first */
 			cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
 			cmd->SCp.this_residual = cmd->SCp.buffer->length;
-			cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
-			    cmd->SCp.buffer->offset;
+			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 		} else {
 			/* else fill the only available buffer */
 			cmd->SCp.buffer = NULL;
@@ -111,14 +111,14 @@ static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
 	req_len = act_len = 0;
 	scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
 		if (active) {
-			kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
+			kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
 			len = sgpnt->length;
 			if ((req_len + len) > buflen) {
 				active = 0;
 				len = buflen - req_len;
 			}
 			memcpy(kaddr + sgpnt->offset, buf + req_len, len);
-			flush_kernel_dcache_page(sgpnt->page);
+			flush_kernel_dcache_page(sg_page(sgpnt));
 			kunmap_atomic(kaddr, KM_IRQ0);
 			act_len += len;
 		}
@@ -147,7 +147,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *cmd, void *buf)
 
 	req_len = fin = 0;
 	scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
-		kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
+		kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
 		len = sgpnt->length;
 		if ((req_len + len) > buflen) {
 			len = buflen - req_len;
@@ -317,7 +317,7 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
 				return ((priv->qabort == 1 ?
 					 DID_ABORT : DID_RESET) << 16);
 			}
-			buf = page_address(sg->page) + sg->offset;
+			buf = sg_virt(sg);
 			if (ql_pdma(priv, phase, buf, sg->length))
 				break;
 		}
@@ -625,7 +625,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 	scsi_for_each_sg(scp, sg, scp->use_sg, k) {
 		if (active) {
 			kaddr = (unsigned char *)
-				kmap_atomic(sg->page, KM_USER0);
+				kmap_atomic(sg_page(sg), KM_USER0);
 			if (NULL == kaddr)
 				return (DID_ERROR << 16);
 			kaddr_off = (unsigned char *)kaddr + sg->offset;
@@ -672,7 +672,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 	sg = scsi_sglist(scp);
 	req_len = fin = 0;
 	for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) {
-		kaddr = (unsigned char *)kmap_atomic(sg->page, KM_USER0);
+		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
 		if (NULL == kaddr)
 			return -1;
 		kaddr_off = (unsigned char *)kaddr + sg->offset;
@@ -295,7 +295,7 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
 	int i, err, nr_vecs = 0;
 
 	for_each_sg(sgl, sg, nsegs, i) {
-		page = sg->page;
+		page = sg_page(sg);
 		off = sg->offset;
 		len = sg->length;
 		data_len += len;
@@ -764,7 +764,7 @@ struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 		if (unlikely(!sgl))
 			goto enomem;
 
-		memset(sgl, 0, sizeof(*sgl) * sgp->size);
+		sg_init_table(sgl, sgp->size);
 
 		/*
 		 * first loop through, set initial index and return value
@@ -780,6 +780,13 @@ struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 		if (prev)
 			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
 
+		/*
+		 * if we have nothing left, mark the last segment as
+		 * end-of-list
+		 */
+		if (!left)
+			sg_mark_end(sgl, this);
+
 		/*
 		 * don't allow subsequent mempool allocs to sleep, it would
 		 * violate the mempool principle.
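scsi_alloc_sgtable() now builds chained tables: each pool segment is initialized with sg_init_table(), linked to the next with sg_chain(), and the final segment is terminated with sg_mark_end(). A toy sketch of chaining two fixed arrays under this series' calling conventions (SEG_SZ is a made-up size, and sg_mark_end() here still takes an entry count; its signature changed in later kernels):

#include <linux/scatterlist.h>

#define SEG_SZ	8	/* hypothetical per-segment table size */

static void chain_two(struct scatterlist *a, struct scatterlist *b,
		      unsigned int used_in_b)
{
	sg_init_table(a, SEG_SZ);
	sg_init_table(b, SEG_SZ);

	/* The last slot of 'a' becomes a link entry pointing at 'b',
	 * so 'a' carries SEG_SZ - 1 data entries. */
	sg_chain(a, SEG_SZ, b);
	sg_mark_end(b, used_in_b);
}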
@@ -2353,7 +2360,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
 	*offset = *offset - len_complete + sg->offset;
 
 	/* Assumption: contiguous pages can be accessed as "page + i" */
-	page = nth_page(sg->page, (*offset >> PAGE_SHIFT));
+	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
 	*offset &= ~PAGE_MASK;
 
 	/* Bytes in this sg-entry from *offset to the end of the page */
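scsi_kmap_atomic_sg() highlights that a single scatterlist entry may cover several physically contiguous pages, so the page holding a given byte is found with nth_page() from the entry's first page. A sketch of that index arithmetic (sg_page_for_offset is a hypothetical helper):

#include <linux/mm.h>
#include <linux/scatterlist.h>

static struct page *sg_page_for_offset(struct scatterlist *sg, size_t byte)
{
	size_t off = sg->offset + byte;	/* distance from the first page */

	/* Same assumption as above: pages are contiguous ("page + i"). */
	return nth_page(sg_page(sg), off >> PAGE_SHIFT);
}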
@@ -999,14 +999,14 @@ connect_loop:
 			for (i = 0; i < nobuffs; ++i)
 				printk("scsi%d : buffer %d address = %p length = %d\n",
 				       hostno, i,
-				       page_address(buffer[i].page) + buffer[i].offset,
+				       sg_virt(&buffer[i]),
 				       buffer[i].length);
 		}
 #endif
 
 		buffer = (struct scatterlist *) SCint->request_buffer;
 		len = buffer->length;
-		data = page_address(buffer->page) + buffer->offset;
+		data = sg_virt(buffer);
 	} else {
 		DPRINTK (DEBUG_SG, "scsi%d : scatter gather not requested.\n", hostno);
 		buffer = NULL;
@@ -1239,7 +1239,7 @@ connect_loop:
 				--nobuffs;
 				++buffer;
 				len = buffer->length;
-				data = page_address(buffer->page) + buffer->offset;
+				data = sg_virt(buffer);
 				DPRINTK (DEBUG_SG,
 					 "scsi%d : next scatter-gather buffer len = %d address = %08x\n",
 					 hostno, len, data);
@@ -1396,7 +1396,7 @@ connect_loop:
 				--nobuffs;
 				++buffer;
 				len = buffer->length;
-				data = page_address(buffer->page) + buffer->offset;
+				data = sg_virt(buffer);
 				DPRINTK (DEBUG_SG, "scsi%d : next scatter-gather buffer len = %d address = %08x\n", hostno, len, data);
 			}
 			break;
Some files were not shown because too many files have changed in this diff.