commit 6c92e699b5
parent bc1c56fde6

Fixup rq_for_each_segment() indentation

Remove one level of nesting where appropriate.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
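For context, rq_for_each_segment() (introduced by the companion conversion patch) collapses the old two-level rq_for_each_bio()/bio_for_each_segment() iteration into a single macro driven by a struct req_iterator; that conversion left the loop bodies at their old indentation depth, which this patch re-indents. A minimal sketch of the before/after pattern, illustrative only and not taken from this diff (the placeholder loop bodies and the variable names rq, bvec, iter simply follow the usage visible in the hunks below):

	/* Before: two nested iterator macros, so per-segment code sat
	 * one level deeper than necessary. */
	struct bio *bio;
	struct bio_vec *bvec;
	int i;

	rq_for_each_bio(bio, rq)
		bio_for_each_segment(bvec, bio, i) {
			/* per-segment work, e.g. map bvec->bv_page */
		}

	/* After: one macro, one loop, so the body needs only a single
	 * level of indentation -- the cleanup this patch applies. */
	struct req_iterator iter;
	struct bio_vec *bvec;

	rq_for_each_segment(bvec, rq, iter) {
		/* per-segment work, e.g. map bvec->bv_page */
	}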
@@ -1364,28 +1364,28 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	 */
 	bvprv = NULL;
 	rq_for_each_segment(bvec, rq, iter) {
-			int nbytes = bvec->bv_len;
+		int nbytes = bvec->bv_len;
 
-			if (bvprv && cluster) {
-				if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
-					goto new_segment;
+		if (bvprv && cluster) {
+			if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+				goto new_segment;
 
-				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-					goto new_segment;
-				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-					goto new_segment;
+			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+				goto new_segment;
+			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+				goto new_segment;
 
-				sg[nsegs - 1].length += nbytes;
-			} else {
+			sg[nsegs - 1].length += nbytes;
+		} else {
 new_segment:
-				memset(&sg[nsegs],0,sizeof(struct scatterlist));
-				sg[nsegs].page = bvec->bv_page;
-				sg[nsegs].length = nbytes;
-				sg[nsegs].offset = bvec->bv_offset;
+			memset(&sg[nsegs],0,sizeof(struct scatterlist));
+			sg[nsegs].page = bvec->bv_page;
+			sg[nsegs].length = nbytes;
+			sg[nsegs].offset = bvec->bv_offset;
 
-				nsegs++;
-			}
-			bvprv = bvec;
+			nsegs++;
+		}
+		bvprv = bvec;
 	} /* segments in rq */
 
 	return nsegs;
@@ -147,18 +147,18 @@ static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
 	struct bio_vec *bvec;
 
 	rq_for_each_segment(bvec, req, iter) {
-			/* We told the block layer not to give us too many. */
-			BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
-			/* If we had a zero-length segment, it would look like
-			 * the end of the data referred to by the "struct
-			 * lguest_dma", so make sure that doesn't happen. */
-			BUG_ON(!bvec->bv_len);
-			/* Convert page & offset to a physical address */
-			dma->addr[i] = page_to_phys(bvec->bv_page)
-				+ bvec->bv_offset;
-			dma->len[i] = bvec->bv_len;
-			len += bvec->bv_len;
-			i++;
+		/* We told the block layer not to give us too many. */
+		BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
+		/* If we had a zero-length segment, it would look like
+		 * the end of the data referred to by the "struct
+		 * lguest_dma", so make sure that doesn't happen. */
+		BUG_ON(!bvec->bv_len);
+		/* Convert page & offset to a physical address */
+		dma->addr[i] = page_to_phys(bvec->bv_page)
+			+ bvec->bv_offset;
+		dma->len[i] = bvec->bv_len;
+		len += bvec->bv_len;
+		i++;
 	}
 	/* If the array isn't full, we mark the end with a 0 length */
 	if (i < LGUEST_MAX_DMA_SECTIONS)
@@ -212,19 +212,17 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 		 * whether to set MSG_MORE or not...
 		 */
 		rq_for_each_segment(bvec, req, iter) {
-				flags = 0;
-				if (!rq_iter_last(req, iter))
-					flags = MSG_MORE;
-				dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-						lo->disk->disk_name, req,
-						bvec->bv_len);
-				result = sock_send_bvec(sock, bvec, flags);
-				if (result <= 0) {
-					printk(KERN_ERR "%s: Send data failed (result %d)\n",
-							lo->disk->disk_name,
-							result);
-					goto error_out;
-				}
+			flags = 0;
+			if (!rq_iter_last(req, iter))
+				flags = MSG_MORE;
+			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
+				lo->disk->disk_name, req, bvec->bv_len);
+			result = sock_send_bvec(sock, bvec, flags);
+			if (result <= 0) {
+				printk(KERN_ERR "%s: Send data failed (result %d)\n",
+					lo->disk->disk_name, result);
+				goto error_out;
+			}
 		}
 	}
 	return 0;
@@ -323,16 +321,15 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
 		struct bio_vec *bvec;
 
 		rq_for_each_segment(bvec, req, iter) {
-				result = sock_recv_bvec(sock, bvec);
-				if (result <= 0) {
-					printk(KERN_ERR "%s: Receive data failed (result %d)\n",
-							lo->disk->disk_name,
-							result);
-					req->errors++;
-					return req;
-				}
-				dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-					lo->disk->disk_name, req, bvec->bv_len);
+			result = sock_recv_bvec(sock, bvec);
+			if (result <= 0) {
+				printk(KERN_ERR "%s: Receive data failed (result %d)\n",
+					lo->disk->disk_name, result);
+				req->errors++;
+				return req;
+			}
+			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
+				lo->disk->disk_name, req, bvec->bv_len);
 		}
 	}
 	return req;
@@ -105,16 +105,15 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 			bio_sectors(iter.bio),
 			(unsigned long)iter.bio->bi_sector);
 
-			size = bvec->bv_len;
-			buf = bvec_kmap_irq(bvec, &flags);
-			if (gather)
-				memcpy(dev->bounce_buf+offset, buf, size);
-			else
-				memcpy(buf, dev->bounce_buf+offset, size);
-			offset += size;
-			flush_kernel_dcache_page(bvec->bv_page);
-			bvec_kunmap_irq(bvec, &flags);
-
+		size = bvec->bv_len;
+		buf = bvec_kmap_irq(bvec, &flags);
+		if (gather)
+			memcpy(dev->bounce_buf+offset, buf, size);
+		else
+			memcpy(buf, dev->bounce_buf+offset, size);
+		offset += size;
+		flush_kernel_dcache_page(bvec->bv_page);
+		bvec_kunmap_irq(bvec, &flags);
 		i++;
 	}
 }
@@ -186,31 +186,30 @@ static int blkif_queue_request(struct request *req)
 
 	ring_req->nr_segments = 0;
 	rq_for_each_segment(bvec, req, iter) {
-			BUG_ON(ring_req->nr_segments
-			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
-			buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
-			fsect = bvec->bv_offset >> 9;
-			lsect = fsect + (bvec->bv_len >> 9) - 1;
-			/* install a grant reference. */
-			ref = gnttab_claim_grant_reference(&gref_head);
-			BUG_ON(ref == -ENOSPC);
+		BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
+		buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
+		fsect = bvec->bv_offset >> 9;
+		lsect = fsect + (bvec->bv_len >> 9) - 1;
+		/* install a grant reference. */
+		ref = gnttab_claim_grant_reference(&gref_head);
+		BUG_ON(ref == -ENOSPC);
 
-			gnttab_grant_foreign_access_ref(
+		gnttab_grant_foreign_access_ref(
 				ref,
 				info->xbdev->otherend_id,
 				buffer_mfn,
 				rq_data_dir(req) );
 
-			info->shadow[id].frame[ring_req->nr_segments] =
+		info->shadow[id].frame[ring_req->nr_segments] =
 				mfn_to_pfn(buffer_mfn);
 
-			ring_req->seg[ring_req->nr_segments] =
+		ring_req->seg[ring_req->nr_segments] =
 				(struct blkif_request_segment) {
 					.gref = ref,
 					.first_sect = fsect,
 					.last_sect = lsect };
 
-			ring_req->nr_segments++;
+		ring_req->nr_segments++;
 	}
 
 	info->ring.req_prod_pvt++;
@@ -612,18 +612,18 @@ static void idefloppy_input_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, uns
 	int count, done = 0;
 
 	rq_for_each_segment(bvec, rq, iter) {
-			if (!bcount)
-				break;
+		if (!bcount)
+			break;
 
-			count = min(bvec->bv_len, bcount);
+		count = min(bvec->bv_len, bcount);
 
-			data = bvec_kmap_irq(bvec, &flags);
-			drive->hwif->atapi_input_bytes(drive, data, count);
-			bvec_kunmap_irq(data, &flags);
+		data = bvec_kmap_irq(bvec, &flags);
+		drive->hwif->atapi_input_bytes(drive, data, count);
+		bvec_kunmap_irq(data, &flags);
 
-			bcount -= count;
-			pc->b_count += count;
-			done += count;
+		bcount -= count;
+		pc->b_count += count;
+		done += count;
 	}
 
 	idefloppy_do_end_request(drive, 1, done >> 9);
@@ -644,18 +644,18 @@ static void idefloppy_output_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, un
 	char *data;
 
 	rq_for_each_segment(bvec, rq, iter) {
-			if (!bcount)
-				break;
+		if (!bcount)
+			break;
 
-			count = min(bvec->bv_len, bcount);
+		count = min(bvec->bv_len, bcount);
 
-			data = bvec_kmap_irq(bvec, &flags);
-			drive->hwif->atapi_output_bytes(drive, data, count);
-			bvec_kunmap_irq(data, &flags);
+		data = bvec_kmap_irq(bvec, &flags);
+		drive->hwif->atapi_output_bytes(drive, data, count);
+		bvec_kunmap_irq(data, &flags);
 
-			bcount -= count;
-			pc->b_count += count;
-			done += count;
+		bcount -= count;
+		pc->b_count += count;
+		done += count;
 	}
 
 	idefloppy_do_end_request(drive, 1, done >> 9);
@@ -493,10 +493,10 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
 	/* Check struct bio and count the number of blocks for the request. */
 	count = 0;
 	rq_for_each_segment(bv, req, iter) {
-			if (bv->bv_len & (blksize - 1))
-				/* Fba can only do full blocks. */
-				return ERR_PTR(-EINVAL);
-			count += bv->bv_len >> (device->s2b_shift + 9);
+		if (bv->bv_len & (blksize - 1))
+			/* Fba can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv->bv_len >> (device->s2b_shift + 9);
 	}
 	/* Paranoia. */
 	if (count != last_rec - first_rec + 1)
@@ -514,16 +514,16 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
 	dbio = dreq->bio;
 	recid = first_rec;
 	rq_for_each_segment(bv, req, iter) {
-			dst = page_address(bv->bv_page) + bv->bv_offset;
-			for (off = 0; off < bv->bv_len; off += blksize) {
-				memset(dbio, 0, sizeof (struct dasd_diag_bio));
-				dbio->type = rw_cmd;
-				dbio->block_number = recid + 1;
-				dbio->buffer = dst;
-				dbio++;
-				dst += blksize;
-				recid++;
-			}
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += blksize) {
+			memset(dbio, 0, sizeof (struct dasd_diag_bio));
+			dbio->type = rw_cmd;
+			dbio->block_number = recid + 1;
+			dbio->buffer = dst;
+			dbio++;
+			dst += blksize;
+			recid++;
+		}
 	}
 	cqr->retries = DIAG_MAX_RETRIES;
 	cqr->buildclk = get_clock();
@@ -1206,14 +1206,13 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
 	count = 0;
 	cidaw = 0;
 	rq_for_each_segment(bv, req, iter) {
-			if (bv->bv_len & (blksize - 1))
-				/* Eckd can only do full blocks. */
-				return ERR_PTR(-EINVAL);
-			count += bv->bv_len >> (device->s2b_shift + 9);
+		if (bv->bv_len & (blksize - 1))
+			/* Eckd can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv->bv_len >> (device->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-			if (idal_is_needed (page_address(bv->bv_page),
-					    bv->bv_len))
-				cidaw += bv->bv_len >> (device->s2b_shift + 9);
+		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+			cidaw += bv->bv_len >> (device->s2b_shift + 9);
 #endif
 	}
 	/* Paranoia. */
@@ -257,14 +257,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 	count = 0;
 	cidaw = 0;
 	rq_for_each_segment(bv, req, iter) {
-			if (bv->bv_len & (blksize - 1))
-				/* Fba can only do full blocks. */
-				return ERR_PTR(-EINVAL);
-			count += bv->bv_len >> (device->s2b_shift + 9);
+		if (bv->bv_len & (blksize - 1))
+			/* Fba can only do full blocks. */
+			return ERR_PTR(-EINVAL);
+		count += bv->bv_len >> (device->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
-			if (idal_is_needed (page_address(bv->bv_page),
-					    bv->bv_len))
-				cidaw += bv->bv_len / blksize;
+		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+			cidaw += bv->bv_len / blksize;
 #endif
 	}
 	/* Paranoia. */
@@ -1173,16 +1173,15 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
 	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
 
 	rq_for_each_segment(bv, req, iter) {
-			dst = kmap(bv->bv_page) + bv->bv_offset;
-			for (off = 0; off < bv->bv_len;
-			     off += TAPEBLOCK_HSEC_SIZE) {
-				ccw->flags = CCW_FLAG_CC;
-				ccw->cmd_code = READ_FORWARD;
-				ccw->count = TAPEBLOCK_HSEC_SIZE;
-				set_normalized_cda(ccw, (void*) __pa(dst));
-				ccw++;
-				dst += TAPEBLOCK_HSEC_SIZE;
-			}
+		dst = kmap(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+			ccw->flags = CCW_FLAG_CC;
+			ccw->cmd_code = READ_FORWARD;
+			ccw->count = TAPEBLOCK_HSEC_SIZE;
+			set_normalized_cda(ccw, (void*) __pa(dst));
+			ccw++;
+			dst += TAPEBLOCK_HSEC_SIZE;
+		}
 	}
 
 	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
@@ -652,18 +652,17 @@ tape_3590_bread(struct tape_device *device, struct request *req)
 	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
 
 	rq_for_each_segment(bv, req, iter) {
-			dst = page_address(bv->bv_page) + bv->bv_offset;
-			for (off = 0; off < bv->bv_len;
-			     off += TAPEBLOCK_HSEC_SIZE) {
-				ccw->flags = CCW_FLAG_CC;
-				ccw->cmd_code = READ_FORWARD;
-				ccw->count = TAPEBLOCK_HSEC_SIZE;
-				set_normalized_cda(ccw, (void *) __pa(dst));
-				ccw++;
-				dst += TAPEBLOCK_HSEC_SIZE;
-			}
-			if (off > bv->bv_len)
-				BUG();
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+			ccw->flags = CCW_FLAG_CC;
+			ccw->cmd_code = READ_FORWARD;
+			ccw->count = TAPEBLOCK_HSEC_SIZE;
+			set_normalized_cda(ccw, (void *) __pa(dst));
+			ccw++;
+			dst += TAPEBLOCK_HSEC_SIZE;
+		}
+		if (off > bv->bv_len)
+			BUG();
 	}
 	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
 	DBF_EVENT(6, "xBREDccwg\n");