block: centralize PI remapping logic to the block layer
Currently the t10_pi_prepare/t10_pi_complete functions are called during the NVMe and SCSI layers' command preparation/completion, but their proper place is the block layer, since T10-PI is a general data integrity feature that is used by block storage protocols. Introduce .prepare_fn and .complete_fn callbacks within the integrity profile that each type can implement according to its needs.

Suggested-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Suggested-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>

Fixed to not call queue integrity functions if BLK_DEV_INTEGRITY isn't defined in the config.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 54d4e6ab91
parent 5eaed68dd3
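For illustration, here is a minimal sketch (not part of this commit; the example_* names are hypothetical) of how an integrity provider would implement the new callbacks and expose them through a blk_integrity_profile, which a driver then hands to the block layer via the existing blk_integrity_register() interface:

#include <linux/blkdev.h>

/* Remap PI reference tags from the virtual LBAs seen at submission time
 * to the physical LBAs actually sent to the device (what
 * t10_pi_type1_prepare() does for Type 1/Type 2 below). */
static void example_pi_prepare(struct request *rq)
{
	/* walk the request's bios and rewrite each tuple's ref tag */
}

/* Undo the remapping for the completed portion of a successful read;
 * nr_bytes is the number of data bytes completed so far. */
static void example_pi_complete(struct request *rq, unsigned int nr_bytes)
{
}

static const struct blk_integrity_profile example_pi_profile = {
	.name		= "EXAMPLE-PI",
	.generate_fn	= NULL,	/* generation/verification may live elsewhere,
				 * as with the DM-DIF-EXT-TAG profile below */
	.verify_fn	= NULL,
	.prepare_fn	= example_pi_prepare,
	.complete_fn	= example_pi_complete,
};

/* A driver would register the profile for its disk, e.g.:
 *
 *	struct blk_integrity bi = {
 *		.profile      = &example_pi_profile,
 *		.tuple_size   = sizeof(struct t10_pi_tuple),
 *		.interval_exp = ilog2(512),
 *	};
 *	blk_integrity_register(disk, &bi);
 *
 * after which blk_mq_start_request() invokes .prepare_fn for writes and
 * blk_update_request() invokes .complete_fn for successful reads, as the
 * hunks below show.
 */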
block/blk-core.c
@@ -34,6 +34,7 @@
 #include <linux/ratelimit.h>
 #include <linux/pm_runtime.h>
 #include <linux/blk-cgroup.h>
+#include <linux/t10-pi.h>
 #include <linux/debugfs.h>
 #include <linux/bpf.h>
 #include <linux/psi.h>
@@ -1436,6 +1437,12 @@ bool blk_update_request(struct request *req, blk_status_t error,
 	if (!req->bio)
 		return false;
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
+	    error == BLK_STS_OK)
+		req->q->integrity.profile->complete_fn(req, nr_bytes);
+#endif
+
 	if (unlikely(error && !blk_rq_is_passthrough(req) &&
 		     !(req->rq_flags & RQF_QUIET)))
 		print_req_error(req, error, __func__);
block/blk-integrity.c
@@ -368,10 +368,21 @@ static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 	return BLK_STS_OK;
 }
 
+static void blk_integrity_nop_prepare(struct request *rq)
+{
+}
+
+static void blk_integrity_nop_complete(struct request *rq,
+				       unsigned int nr_bytes)
+{
+}
+
 static const struct blk_integrity_profile nop_profile = {
 	.name = "nop",
 	.generate_fn = blk_integrity_nop_fn,
 	.verify_fn = blk_integrity_nop_fn,
+	.prepare_fn = blk_integrity_nop_prepare,
+	.complete_fn = blk_integrity_nop_complete,
 };
 
 /**
block/blk-mq.c
@@ -30,6 +30,7 @@
 #include <trace/events/block.h>
 
 #include <linux/blk-mq.h>
+#include <linux/t10-pi.h>
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-debugfs.h"
@@ -700,6 +701,11 @@ void blk_mq_start_request(struct request *rq)
 		 */
 		rq->nr_phys_segments++;
 	}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
+		q->integrity.profile->prepare_fn(rq);
+#endif
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
block/t10-pi.c (144 lines changed)
@@ -120,76 +120,22 @@ static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
 	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
 }
 
-static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
-{
-	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
-{
-	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
-{
-	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
-{
-	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-const struct blk_integrity_profile t10_pi_type1_crc = {
-	.name = "T10-DIF-TYPE1-CRC",
-	.generate_fn = t10_pi_type1_generate_crc,
-	.verify_fn = t10_pi_type1_verify_crc,
-};
-EXPORT_SYMBOL(t10_pi_type1_crc);
-
-const struct blk_integrity_profile t10_pi_type1_ip = {
-	.name = "T10-DIF-TYPE1-IP",
-	.generate_fn = t10_pi_type1_generate_ip,
-	.verify_fn = t10_pi_type1_verify_ip,
-};
-EXPORT_SYMBOL(t10_pi_type1_ip);
-
-const struct blk_integrity_profile t10_pi_type3_crc = {
-	.name = "T10-DIF-TYPE3-CRC",
-	.generate_fn = t10_pi_type3_generate_crc,
-	.verify_fn = t10_pi_type3_verify_crc,
-};
-EXPORT_SYMBOL(t10_pi_type3_crc);
-
-const struct blk_integrity_profile t10_pi_type3_ip = {
-	.name = "T10-DIF-TYPE3-IP",
-	.generate_fn = t10_pi_type3_generate_ip,
-	.verify_fn = t10_pi_type3_verify_ip,
-};
-EXPORT_SYMBOL(t10_pi_type3_ip);
-
 /**
- * t10_pi_prepare - prepare PI prior submitting request to device
+ * t10_pi_type1_prepare - prepare PI prior submitting request to device
  * @rq: request with PI that should be prepared
- * @protection_type: PI type (Type 1/Type 2/Type 3)
  *
  * For Type 1/Type 2, the virtual start sector is the one that was
  * originally submitted by the block layer for the ref_tag usage. Due to
  * partitioning, MD/DM cloning, etc. the actual physical start sector is
  * likely to be different. Remap protection information to match the
  * physical LBA.
- *
- * Type 3 does not have a reference tag so no remapping is required.
  */
-void t10_pi_prepare(struct request *rq, u8 protection_type)
+static void t10_pi_type1_prepare(struct request *rq)
 {
 	const int tuple_sz = rq->q->integrity.tuple_size;
 	u32 ref_tag = t10_pi_ref_tag(rq);
 	struct bio *bio;
 
-	if (protection_type == T10_PI_TYPE3_PROTECTION)
-		return;
-
 	__rq_for_each_bio(bio, rq) {
 		struct bio_integrity_payload *bip = bio_integrity(bio);
 		u32 virt = bip_get_seed(bip) & 0xffffffff;
@@ -222,13 +168,11 @@ void t10_pi_prepare(struct request *rq, u8 protection_type)
 		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
 	}
 }
-EXPORT_SYMBOL(t10_pi_prepare);
 
 /**
- * t10_pi_complete - prepare PI prior returning request to the block layer
+ * t10_pi_type1_complete - prepare PI prior returning request to the blk layer
  * @rq: request with PI that should be prepared
- * @protection_type: PI type (Type 1/Type 2/Type 3)
- * @intervals: total elements to prepare
+ * @nr_bytes: total bytes to prepare
  *
  * For Type 1/Type 2, the virtual start sector is the one that was
  * originally submitted by the block layer for the ref_tag usage. Due to
@@ -236,19 +180,14 @@ EXPORT_SYMBOL(t10_pi_prepare);
  * likely to be different. Since the physical start sector was submitted
  * to the device, we should remap it back to virtual values expected by the
  * block layer.
- *
- * Type 3 does not have a reference tag so no remapping is required.
  */
-void t10_pi_complete(struct request *rq, u8 protection_type,
-		     unsigned int intervals)
+static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
 {
+	unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
 	const int tuple_sz = rq->q->integrity.tuple_size;
 	u32 ref_tag = t10_pi_ref_tag(rq);
 	struct bio *bio;
 
-	if (protection_type == T10_PI_TYPE3_PROTECTION)
-		return;
-
 	__rq_for_each_bio(bio, rq) {
 		struct bio_integrity_payload *bip = bio_integrity(bio);
 		u32 virt = bip_get_seed(bip) & 0xffffffff;
@@ -276,4 +215,73 @@ void t10_pi_complete(struct request *rq, u8 protection_type,
 		}
 	}
 }
-EXPORT_SYMBOL(t10_pi_complete);
+
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+{
+	return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+{
+	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+{
+	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+{
+	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+/**
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+static void t10_pi_type3_prepare(struct request *rq)
+{
+}
+
+/**
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
+{
+}
+
+const struct blk_integrity_profile t10_pi_type1_crc = {
+	.name = "T10-DIF-TYPE1-CRC",
+	.generate_fn = t10_pi_type1_generate_crc,
+	.verify_fn = t10_pi_type1_verify_crc,
+	.prepare_fn = t10_pi_type1_prepare,
+	.complete_fn = t10_pi_type1_complete,
+};
+EXPORT_SYMBOL(t10_pi_type1_crc);
+
+const struct blk_integrity_profile t10_pi_type1_ip = {
+	.name = "T10-DIF-TYPE1-IP",
+	.generate_fn = t10_pi_type1_generate_ip,
+	.verify_fn = t10_pi_type1_verify_ip,
+	.prepare_fn = t10_pi_type1_prepare,
+	.complete_fn = t10_pi_type1_complete,
+};
+EXPORT_SYMBOL(t10_pi_type1_ip);
+
+const struct blk_integrity_profile t10_pi_type3_crc = {
+	.name = "T10-DIF-TYPE3-CRC",
+	.generate_fn = t10_pi_type3_generate_crc,
+	.verify_fn = t10_pi_type3_verify_crc,
+	.prepare_fn = t10_pi_type3_prepare,
+	.complete_fn = t10_pi_type3_complete,
+};
+EXPORT_SYMBOL(t10_pi_type3_crc);
+
+const struct blk_integrity_profile t10_pi_type3_ip = {
+	.name = "T10-DIF-TYPE3-IP",
+	.generate_fn = t10_pi_type3_generate_ip,
+	.verify_fn = t10_pi_type3_verify_ip,
+	.prepare_fn = t10_pi_type3_prepare,
+	.complete_fn = t10_pi_type3_complete,
+};
+EXPORT_SYMBOL(t10_pi_type3_ip);
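Worth noting in the hunk above: the completion callback now takes the completed byte count rather than a precomputed interval count, and derives the number of PI tuples itself via nr_bytes >> interval_exp. A standalone sketch of that arithmetic (plain C outside the kernel, hypothetical helper name, assuming 512-byte protection intervals):

#include <assert.h>

/* Each protection interval of (1 << interval_exp) data bytes carries one
 * PI tuple, so the number of tuples to remap is nr_bytes >> interval_exp. */
static unsigned int pi_intervals(unsigned int nr_bytes, unsigned int interval_exp)
{
	return nr_bytes >> interval_exp;
}

int main(void)
{
	/* 4 KiB completed with 512-byte intervals -> 8 reference tags */
	assert(pi_intervals(4096, 9) == 8);
	return 0;
}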
drivers/md/dm-integrity.c
@@ -345,6 +345,14 @@ static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
 #define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
 #endif
 
+static void dm_integrity_prepare(struct request *rq)
+{
+}
+
+static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
+{
+}
+
 /*
  * DM Integrity profile, protection is performed layer above (dm-crypt)
  */
@@ -352,6 +360,8 @@ static const struct blk_integrity_profile dm_integrity_profile = {
 	.name = "DM-DIF-EXT-TAG",
 	.generate_fn = NULL,
 	.verify_fn = NULL,
+	.prepare_fn = dm_integrity_prepare,
+	.complete_fn = dm_integrity_complete,
 };
 
 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
drivers/nvme/host/core.c
@@ -666,8 +666,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
 				return BLK_STS_NOTSUPP;
 			control |= NVME_RW_PRINFO_PRACT;
-		} else if (req_op(req) == REQ_OP_WRITE) {
-			t10_pi_prepare(req, ns->pi_type);
 		}
 
 		switch (ns->pi_type) {
@@ -690,13 +688,6 @@
 
 void nvme_cleanup_cmd(struct request *req)
 {
-	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
-	    nvme_req(req)->status == 0) {
-		struct nvme_ns *ns = req->rq_disk->private_data;
-
-		t10_pi_complete(req, ns->pi_type,
-				blk_rq_bytes(req) >> ns->lba_shift);
-	}
 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
 		struct nvme_ns *ns = req->rq_disk->private_data;
 		struct page *page = req->special_vec.bv_page;
drivers/scsi/sd.c
@@ -1211,9 +1211,6 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
 	dix = scsi_prot_sg_count(cmd);
 	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
 
-	if (write && dix)
-		t10_pi_prepare(cmd->request, sdkp->protection_type);
-
 	if (dif || dix)
 		protect = sd_setup_protect_cmnd(cmd, dix, dif);
 	else
@@ -2054,11 +2051,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 					   "sd_done: completed %d of %d bytes\n",
 					   good_bytes, scsi_bufflen(SCpnt)));
 
-	if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) &&
-	    good_bytes)
-		t10_pi_complete(SCpnt->request, sdkp->protection_type,
-				good_bytes / scsi_prot_interval(SCpnt));
-
 	return good_bytes;
 }
 
include/linux/blkdev.h
@@ -1522,10 +1522,14 @@ struct blk_integrity_iter {
 };
 
 typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
+typedef void (integrity_prepare_fn) (struct request *);
+typedef void (integrity_complete_fn) (struct request *, unsigned int);
 
 struct blk_integrity_profile {
 	integrity_processing_fn		*generate_fn;
 	integrity_processing_fn		*verify_fn;
+	integrity_prepare_fn		*prepare_fn;
+	integrity_complete_fn		*complete_fn;
 	const char			*name;
 };
 
include/linux/t10-pi.h
@@ -53,18 +53,4 @@ extern const struct blk_integrity_profile t10_pi_type1_ip;
 extern const struct blk_integrity_profile t10_pi_type3_crc;
 extern const struct blk_integrity_profile t10_pi_type3_ip;
 
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-extern void t10_pi_prepare(struct request *rq, u8 protection_type);
-extern void t10_pi_complete(struct request *rq, u8 protection_type,
-			    unsigned int intervals);
-#else
-static inline void t10_pi_complete(struct request *rq, u8 protection_type,
-				   unsigned int intervals)
-{
-}
-static inline void t10_pi_prepare(struct request *rq, u8 protection_type)
-{
-}
-#endif
-
 #endif