diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index f5c369ce7e73..cd27cb92ac6d 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -117,13 +117,19 @@ out:
 static void scm_request_done(struct scm_request *scmrq)
 {
-	struct msb *msb = &scmrq->aob->msb[0];
-	u64 aidaw = msb->data_addr;
 	unsigned long flags;
+	struct msb *msb;
+	u64 aidaw;
+	int i;
 
-	if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
-	    IS_ALIGNED(aidaw, PAGE_SIZE))
-		mempool_free(virt_to_page(aidaw), aidaw_pool);
+	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++) {
+		msb = &scmrq->aob->msb[i];
+		aidaw = msb->data_addr;
+
+		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+		    IS_ALIGNED(aidaw, PAGE_SIZE))
+			mempool_free(virt_to_page(aidaw), aidaw_pool);
+	}
 
 	spin_lock_irqsave(&list_lock, flags);
 	list_add(&scmrq->list, &inactive_requests);
 	spin_unlock_irqrestore(&list_lock, flags);
@@ -167,51 +173,57 @@ static int scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct msb *msb = &scmrq->aob->msb[0];
+	int pos = scmrq->aob->request.msb_count;
+	struct msb *msb = &scmrq->aob->msb[pos];
+	struct request *req = scmrq->request[pos];
 	struct req_iterator iter;
 	struct aidaw *aidaw;
 	struct bio_vec bv;
 
-	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(scmrq->request));
+	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
 	if (!aidaw)
 		return -ENOMEM;
 
 	msb->bs = MSB_BS_4K;
-	scmrq->aob->request.msb_count = 1;
-	msb->scm_addr = scmdev->address +
-		((u64) blk_rq_pos(scmrq->request) << 9);
-	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
-		MSB_OC_READ : MSB_OC_WRITE;
+	scmrq->aob->request.msb_count++;
+	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
 	msb->flags |= MSB_FLAG_IDA;
 	msb->data_addr = (u64) aidaw;
 
-	rq_for_each_segment(bv, scmrq->request, iter) {
+	rq_for_each_segment(bv, req, iter) {
 		WARN_ON(bv.bv_offset);
 		msb->blk_count += bv.bv_len >> 12;
 		aidaw->data_addr = (u64) page_address(bv.bv_page);
 		aidaw++;
 	}
 
+	scmrq->next_aidaw = aidaw;
 	return 0;
 }
 
+static inline void scm_request_set(struct scm_request *scmrq,
+				   struct request *req)
+{
+	scmrq->request[scmrq->aob->request.msb_count] = req;
+}
+
 static inline void scm_request_init(struct scm_blk_dev *bdev,
-				    struct scm_request *scmrq,
-				    struct request *req)
+				    struct scm_request *scmrq)
 {
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 	struct aob *aob = scmrq->aob;
 
+	memset(scmrq->request, 0, sizeof(scmrq->request));
 	memset(aob, 0, sizeof(*aob));
 	aobrq->scmdev = bdev->scmdev;
 	aob->request.cmd_code = ARQB_CMD_MOVE;
 	aob->request.data = (u64) aobrq;
-	scmrq->request = req;
 	scmrq->bdev = bdev;
 	scmrq->retries = 4;
 	scmrq->error = 0;
 	/* We don't use all msbs - place aidaws at the end of the aob page. */
-	scmrq->next_aidaw = (void *) &aob->msb[1];
+	scmrq->next_aidaw = (void *) &aob->msb[SCM_RQ_PER_IO];
 
 	scm_request_cluster_init(scmrq);
 }
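The two hunks above carry the core of the change: scm_request_prepare() now fills msb slot msb_count instead of always slot 0 and saves the advanced aidaw pointer, so the next request appended to the same aob continues where the previous one stopped, while scm_request_init() reserves room for the whole msb array before the aidaw area. This is presumably also why scm_request_done() frees only page-aligned aidaws: aidaws carved out of the aob page itself sit right behind the msbs and are never page-aligned, so only the mempool fallback pages pass the IS_ALIGNED() check. Below is a userspace sketch of that page layout; the struct sizes are stand-ins for the real asm/eadm.h definitions and "two aidaws per request" is an arbitrary assumption, so treat it as an illustration rather than driver code.

/*
 * Userspace sketch (not driver code) of the aob page layout this patch
 * sets up: one page holds SCM_RQ_PER_IO msb slots, and the aidaws for
 * all batched requests are carved from the space behind the last slot.
 * The struct sizes are stand-ins for the real asm/eadm.h definitions.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SCM_RQ_PER_IO 8
#define PAGE_SIZE 4096

struct msb { uint64_t data_addr; };		/* stand-in */
struct aidaw { uint64_t data_addr; };		/* stand-in */
struct aob { struct msb msb[SCM_RQ_PER_IO]; };	/* the real aob is bigger */

static _Alignas(PAGE_SIZE) unsigned char page[PAGE_SIZE];

int main(void)
{
	struct aob *aob = (struct aob *)page;
	/* scm_request_init(): aidaws start behind the whole msb array */
	struct aidaw *next_aidaw = (struct aidaw *)&aob->msb[SCM_RQ_PER_IO];
	int pos;

	memset(page, 0, sizeof(page));
	for (pos = 0; pos < SCM_RQ_PER_IO; pos++) {
		/* scm_request_prepare(): msb 'pos' points at its aidaws */
		aob->msb[pos].data_addr = (uint64_t)(uintptr_t)next_aidaw;
		next_aidaw += 2;	/* assume two 4K segments per request */
		/* the advanced pointer is what scmrq->next_aidaw saves */
	}
	assert((unsigned char *)next_aidaw <= page + PAGE_SIZE);
	printf("aidaw bytes used: %td\n",
	       (unsigned char *)next_aidaw - (page + sizeof(*aob)));
	return 0;
}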
@@ -227,9 +239,12 @@ static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
 void scm_request_requeue(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
 
 	scm_release_cluster(scmrq);
-	blk_requeue_request(bdev->rq, scmrq->request);
+	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++)
+		blk_requeue_request(bdev->rq, scmrq->request[i]);
+
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 	scm_ensure_queue_restart(bdev);
@@ -238,20 +253,41 @@ void scm_request_requeue(struct scm_request *scmrq)
 void scm_request_finish(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
 
 	scm_release_cluster(scmrq);
-	blk_end_request_all(scmrq->request, scmrq->error);
+	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++)
+		blk_end_request_all(scmrq->request[i], scmrq->error);
+
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 }
 
+static int scm_request_start(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	int ret;
+
+	atomic_inc(&bdev->queued_reqs);
+	if (!scmrq->aob->request.msb_count) {
+		scm_request_requeue(scmrq);
+		return -EINVAL;
+	}
+
+	ret = eadm_start_aob(scmrq->aob);
+	if (ret) {
+		SCM_LOG(5, "no subchannel");
+		scm_request_requeue(scmrq);
+	}
+	return ret;
+}
+
 static void scm_blk_request(struct request_queue *rq)
 {
 	struct scm_device *scmdev = rq->queuedata;
 	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
-	struct scm_request *scmrq;
+	struct scm_request *scmrq = NULL;
 	struct request *req;
-	int ret;
 
 	while ((req = blk_peek_request(rq))) {
 		if (req->cmd_type != REQ_TYPE_FS) {
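scm_request_start() becomes the single submission point: it accounts the request, treats an empty aob as a caller bug (requeue plus -EINVAL) and otherwise hands the aob to eadm_start_aob(), requeueing everything when no subchannel is available. The dispatcher rewrite in the next hunk turns the old one-request-per-aob loop into an accumulate-and-flush loop. A standalone sketch of that shape, with mock types in place of the driver's:

/*
 * Userspace sketch (mock types, not driver code) of the accumulate-and-
 * flush pattern the rewritten scm_blk_request() uses: requests are
 * added to the current batch until it is full or the queue drains, and
 * a partial batch is flushed on the way out.
 */
#include <stdio.h>

#define BATCH_MAX 8	/* mirrors SCM_RQ_PER_IO */

struct batch { int count; int items[BATCH_MAX]; };

static int start_batch(struct batch *b)	/* mirrors scm_request_start() */
{
	if (!b->count)
		return -1;	/* nothing to submit */
	printf("submitting %d request(s) in one aob\n", b->count);
	b->count = 0;
	return 0;
}

int main(void)
{
	struct batch b = { 0 };
	int req;

	for (req = 0; req < 11; req++) {	/* 11 pending requests */
		b.items[b.count++] = req;	/* mirrors scm_request_set() */
		if (b.count < BATCH_MAX)
			continue;		/* keep batching */
		start_batch(&b);		/* aob full: submit now */
	}
	start_batch(&b);	/* flush the partial batch, as at out: */
	return 0;
}

A full batch is submitted from inside the loop; the out: label flushes whatever partial batch is left once the queue drains or an error path is taken.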
@@ -261,47 +297,64 @@ static void scm_blk_request(struct request_queue *rq)
 			continue;
 		}
 
-		if (!scm_permit_request(bdev, req)) {
-			scm_ensure_queue_restart(bdev);
-			return;
-		}
-		scmrq = scm_request_fetch();
+		if (!scm_permit_request(bdev, req))
+			goto out;
+
 		if (!scmrq) {
-			SCM_LOG(5, "no request");
-			scm_ensure_queue_restart(bdev);
-			return;
+			scmrq = scm_request_fetch();
+			if (!scmrq) {
+				SCM_LOG(5, "no request");
+				goto out;
+			}
+			scm_request_init(bdev, scmrq);
 		}
-		scm_request_init(bdev, scmrq, req);
+		scm_request_set(scmrq, req);
+
 		if (!scm_reserve_cluster(scmrq)) {
 			SCM_LOG(5, "cluster busy");
+			scm_request_set(scmrq, NULL);
+			if (scmrq->aob->request.msb_count)
+				goto out;
+
+			scm_request_done(scmrq);
 			return;
 		}
+
 		if (scm_need_cluster_request(scmrq)) {
-			atomic_inc(&bdev->queued_reqs);
-			blk_start_request(req);
-			scm_initiate_cluster_request(scmrq);
-			return;
+			if (scmrq->aob->request.msb_count) {
+				/* Start cluster requests separately. */
+				scm_request_set(scmrq, NULL);
+				if (scm_request_start(scmrq))
+					return;
+			} else {
+				atomic_inc(&bdev->queued_reqs);
+				blk_start_request(req);
+				scm_initiate_cluster_request(scmrq);
+			}
+			scmrq = NULL;
+			continue;
 		}
 		if (scm_request_prepare(scmrq)) {
-			SCM_LOG(5, "no aidaw");
-			scm_release_cluster(scmrq);
-			scm_request_done(scmrq);
-			scm_ensure_queue_restart(bdev);
-			return;
+			SCM_LOG(5, "aidaw alloc failed");
+			scm_request_set(scmrq, NULL);
+			goto out;
 		}
-
-		atomic_inc(&bdev->queued_reqs);
 		blk_start_request(req);
 
-		ret = eadm_start_aob(scmrq->aob);
-		if (ret) {
-			SCM_LOG(5, "no subchannel");
-			scm_request_requeue(scmrq);
+		if (scmrq->aob->request.msb_count < SCM_RQ_PER_IO)
+			continue;
+
+		if (scm_request_start(scmrq))
 			return;
-		}
+
+		scmrq = NULL;
 	}
+out:
+	if (scmrq)
+		scm_request_start(scmrq);
+	else
+		scm_ensure_queue_restart(bdev);
 }
 
 static void __scmrq_log_error(struct scm_request *scmrq)
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 6334e1609208..3dae0a3570ce 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -11,6 +11,7 @@
 #include <asm/eadm.h>
 
 #define SCM_NR_PARTS 8
+#define SCM_RQ_PER_IO 8
 #define SCM_QUEUE_DELAY 5
 
 struct scm_blk_dev {
@@ -31,7 +32,7 @@ struct scm_blk_dev {
 struct scm_request {
 	struct scm_blk_dev *bdev;
 	struct aidaw *next_aidaw;
-	struct request *request;
+	struct request *request[SCM_RQ_PER_IO];
 	struct aob *aob;
 	struct list_head list;
 	u8 retries;
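The header change is small: struct scm_request now carries a fixed array of SCM_RQ_PER_IO block layer requests instead of a single pointer. scm_request_init() zeroes the array and scm_request_set() fills it in order, so the unused tail stays NULL, which is the sentinel that the loops in scm_request_done(), scm_request_requeue() and scm_request_finish() rely on. A minimal illustration of that convention (mock types, not driver code):

/*
 * Userspace sketch (mock types, not driver code) of the NULL-sentinel
 * convention behind the new request[] array: the array is zeroed up
 * front and filled in order, so iteration stops at the first NULL.
 */
#include <stdio.h>

#define SCM_RQ_PER_IO 8

struct mock_req { long sector; };

int main(void)
{
	struct mock_req a = { 0 }, b = { 8 };
	struct mock_req *batch[SCM_RQ_PER_IO] = { &a, &b };	/* tail NULL */
	int i;

	/* same shape as the loops in scm_request_done/requeue/finish */
	for (i = 0; i < SCM_RQ_PER_IO && batch[i]; i++)
		printf("slot %d: sector %ld\n", i, batch[i]->sector);
	return 0;
}

Capping the batch at eight msbs presumably balances batching against the aidaw space left behind the msb array in the aob page.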
+ */ + add = 0; + continue; + } + for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { + if (clusters_intersect(req, iter->request[pos]) && + (rq_data_dir(req) == WRITE || + rq_data_dir(iter->request[pos]) == WRITE)) { + spin_unlock(&bdev->lock); + return false; + } } } - list_add(&scmrq->cluster.list, &bdev->cluster_list); + if (add) + list_add(&scmrq->cluster.list, &bdev->cluster_list); spin_unlock(&bdev->lock); return true; @@ -118,7 +131,7 @@ static int scm_prepare_cluster_request(struct scm_request *scmrq) { struct scm_blk_dev *bdev = scmrq->bdev; struct scm_device *scmdev = bdev->gendisk->private_data; - struct request *req = scmrq->request; + struct request *req = scmrq->request[0]; struct msb *msb = &scmrq->aob->msb[0]; struct req_iterator iter; struct aidaw *aidaw; @@ -183,10 +196,12 @@ static int scm_prepare_cluster_request(struct scm_request *scmrq) bool scm_need_cluster_request(struct scm_request *scmrq) { - if (rq_data_dir(scmrq->request) == READ) + int pos = scmrq->aob->request.msb_count; + + if (rq_data_dir(scmrq->request[pos]) == READ) return false; - return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE; + return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE; } /* Called with queue lock held. */