Merge tag 'nvme-6.4-2023-06-01' of git://git.infradead.org/nvme into block-6.4

Pull NVMe fixes from Keith:

"nvme fixes for Linux 6.4

 - Fixes for spurious Keep Alive timeouts (Uday)
 - Fix for command type check on passthrough actions (Min)
 - Fix for nvme command name for error logging (Christoph)"

* tag 'nvme-6.4-2023-06-01' of git://git.infradead.org/nvme:
  nvme: fix the name of Zone Append for verbose logging
  nvme: improve handling of long keep alives
  nvme: check IO start time when deciding to defer KA
  nvme: double KA polling frequency to avoid KATO with TBKAS on
  nvme: fix miss command type check
commit 2e45a49531
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
@@ -21,7 +21,7 @@ static const char * const nvme_ops[] = {
 	[nvme_cmd_resv_release] = "Reservation Release",
 	[nvme_cmd_zone_mgmt_send] = "Zone Management Send",
 	[nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
-	[nvme_cmd_zone_append] = "Zone Management Append",
+	[nvme_cmd_zone_append] = "Zone Append",
 };
 
 static const char * const nvme_admin_ops[] = {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
@@ -397,7 +397,16 @@ void nvme_complete_rq(struct request *req)
 	trace_nvme_complete_rq(req);
 	nvme_cleanup_cmd(req);
 
-	if (ctrl->kas)
+	/*
+	 * Completions of long-running commands should not be able to
+	 * defer sending of periodic keep alives, since the controller
+	 * may have completed processing such commands a long time ago
+	 * (arbitrarily close to command submission time).
+	 * req->deadline - req->timeout is the command submission time
+	 * in jiffies.
+	 */
+	if (ctrl->kas &&
+	    req->deadline - req->timeout >= ctrl->ka_last_check_time)
 		ctrl->comp_seen = true;
 
 	switch (nvme_decide_disposition(req)) {
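The hunk above recovers the command's submission time from fields the block layer already tracks: req->deadline is set at dispatch to submission time plus req->timeout, so deadline minus timeout gives back when the IO started. Only completions of commands submitted after the last keep-alive check are allowed to defer the next keep alive. A minimal standalone sketch of that check, where the stub types, jiffy values, and main() are illustrative assumptions rather than kernel code:

/* Sketch only: field names mirror the kernel's, everything else is invented. */
#include <stdbool.h>
#include <stdio.h>

struct req  { unsigned long deadline, timeout; };
struct ctrl { unsigned long ka_last_check_time; bool kas, comp_seen; };

static void complete_rq(struct ctrl *c, const struct req *r)
{
	/* deadline - timeout is the submission time in jiffies */
	if (c->kas && r->deadline - r->timeout >= c->ka_last_check_time)
		c->comp_seen = true;	/* this traffic may defer the next KA */
}

int main(void)
{
	struct ctrl c = { .ka_last_check_time = 1000, .kas = true };
	struct req old   = { .deadline =  900 + 300, .timeout = 300 };
	struct req fresh = { .deadline = 1100 + 300, .timeout = 300 };

	complete_rq(&c, &old);	/* submitted at 900, before the last check */
	printf("after old IO:   comp_seen=%d\n", c.comp_seen);	/* prints 0 */
	complete_rq(&c, &fresh);	/* submitted at 1100, after the check */
	printf("after fresh IO: comp_seen=%d\n", c.comp_seen);	/* prints 1 */
	return 0;
}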
@@ -1115,7 +1124,7 @@ u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 }
 EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);
 
-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
 		struct nvme_command *cmd, int status)
 {
 	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
@@ -1132,6 +1141,8 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
 		nvme_queue_scan(ctrl);
 		flush_work(&ctrl->scan_work);
 	}
+	if (ns)
+		return;
 
 	switch (cmd->common.opcode) {
 	case nvme_admin_set_features:
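The new ns check matters because admin and I/O opcodes occupy separate numbering spaces that reuse the same values, while the switch below only knows admin opcodes; a namespace-scoped (I/O) passthrough command must therefore bail out before it. For instance, per the NVMe spec the I/O opcode 0x09 (Dataset Management) has the same value as the admin opcode 0x09 (Set Features). A sketch of the disambiguation, where classify() and main() are invented for illustration:

/* Sketch only: opcode alone cannot distinguish the command sets,
 * but the presence of a namespace (ns != NULL) can. */
#include <stdio.h>

enum { nvme_admin_set_features = 0x09 };	/* admin command set */
enum { nvme_cmd_dsm            = 0x09 };	/* I/O command set   */

static const char *classify(unsigned char opcode, const void *ns)
{
	if (ns)			/* namespace present: an I/O command */
		return "I/O command, skip admin side effects";
	switch (opcode) {
	case nvme_admin_set_features:
		return "admin Set Features, apply side effects";
	default:
		return "other admin command";
	}
}

int main(void)
{
	int dummy_ns;
	printf("%s\n", classify(nvme_cmd_dsm, &dummy_ns));
	printf("%s\n", classify(nvme_admin_set_features, NULL));
	return 0;
}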
@@ -1161,9 +1172,25 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
  * The host should send Keep Alive commands at half of the Keep Alive Timeout
  * accounting for transport roundtrip times [..].
  */
+static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
+{
+	unsigned long delay = ctrl->kato * HZ / 2;
+
+	/*
+	 * When using Traffic Based Keep Alive, we need to run
+	 * nvme_keep_alive_work at twice the normal frequency, as one
+	 * command completion can postpone sending a keep alive command
+	 * by up to twice the delay between runs.
+	 */
+	if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
+		delay /= 2;
+	return delay;
+}
+
 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 {
-	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
+	queue_delayed_work(nvme_wq, &ctrl->ka_work,
+			   nvme_keep_alive_work_period(ctrl));
 }
 
 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
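The helper's arithmetic is easy to check by hand: with KATO = 10 seconds the work runs every 5 seconds, and with TBKAS on it runs every 2.5 seconds, so a completion that lands just after one run can no longer push the next keep alive past the timeout. A userspace sketch, where the HZ and kato values are assumptions for illustration:

/* Sketch of the period calculation; kato is in seconds, result in jiffies. */
#include <stdbool.h>
#include <stdio.h>

#define HZ 250	/* assumed tick rate */

static unsigned long keep_alive_work_period(unsigned int kato, bool tbkas)
{
	unsigned long delay = kato * HZ / 2;	/* half of KATO */

	if (tbkas)	/* Traffic Based Keep Alive: poll twice as often */
		delay /= 2;
	return delay;
}

int main(void)
{
	unsigned int kato = 10;	/* seconds */

	printf("plain KA: %lu jiffies (%.1fs)\n",
	       keep_alive_work_period(kato, false),
	       keep_alive_work_period(kato, false) / (double)HZ);	/* 5.0s */
	printf("TBKAS on: %lu jiffies (%.1fs)\n",
	       keep_alive_work_period(kato, true),
	       keep_alive_work_period(kato, true) / (double)HZ);	/* 2.5s */
	return 0;
}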
@@ -1172,6 +1199,20 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
 	struct nvme_ctrl *ctrl = rq->end_io_data;
 	unsigned long flags;
 	bool startka = false;
+	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+	unsigned long delay = nvme_keep_alive_work_period(ctrl);
+
+	/*
+	 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
+	 * at the desired frequency.
+	 */
+	if (rtt <= delay) {
+		delay -= rtt;
+	} else {
+		dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
+			 jiffies_to_msecs(rtt));
+		delay = 0;
+	}
 
 	blk_mq_free_request(rq);
 
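Here the completion handler measures how long the keep alive itself took (again via deadline minus timeout, its submission time) and shortens the next delay by that round trip, so the effective cadence stays fixed instead of stretching by the RTT each cycle. A sketch of just that compensation, with jiffy values invented for illustration:

/* Sketch of the RTT compensation in the keep-alive completion path. */
#include <stdio.h>

static unsigned long next_ka_delay(unsigned long period, unsigned long rtt)
{
	if (rtt <= period)
		return period - rtt;	/* keep the overall cadence */
	/* the kernel warns here: "long keepalive RTT (%u ms)" */
	return 0;	/* KA took longer than a whole period: send again now */
}

int main(void)
{
	unsigned long period = 1250;	/* e.g. kato=10s at HZ=250, no TBKAS */

	printf("fast KA (rtt=10):   next in %lu jiffies\n",
	       next_ka_delay(period, 10));
	printf("slow KA (rtt=2000): next in %lu jiffies\n",
	       next_ka_delay(period, 2000));
	return 0;
}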
@@ -1182,6 +1223,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
 		return RQ_END_IO_NONE;
 	}
 
+	ctrl->ka_last_check_time = jiffies;
 	ctrl->comp_seen = false;
 	spin_lock_irqsave(&ctrl->lock, flags);
 	if (ctrl->state == NVME_CTRL_LIVE ||
@@ -1189,7 +1231,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
 		startka = true;
 	spin_unlock_irqrestore(&ctrl->lock, flags);
 	if (startka)
-		nvme_queue_keep_alive_work(ctrl);
+		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
 	return RQ_END_IO_NONE;
 }
 
@@ -1200,6 +1242,8 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	bool comp_seen = ctrl->comp_seen;
 	struct request *rq;
 
+	ctrl->ka_last_check_time = jiffies;
+
 	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
 		dev_dbg(ctrl->device,
 			"reschedule traffic based keep-alive timer\n");
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
@@ -254,7 +254,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	blk_mq_free_request(req);
 
 	if (effects)
-		nvme_passthru_end(ctrl, effects, cmd, ret);
+		nvme_passthru_end(ctrl, ns, effects, cmd, ret);
 
 	return ret;
 }
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
@@ -328,6 +328,7 @@ struct nvme_ctrl {
 	struct delayed_work ka_work;
 	struct delayed_work failfast_work;
 	struct nvme_command ka_cmd;
+	unsigned long ka_last_check_time;
 	struct work_struct fw_act_work;
 	unsigned long events;
@@ -1077,7 +1078,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		u8 opcode);
 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
 int nvme_execute_rq(struct request *rq, bool at_head);
-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
 		struct nvme_command *cmd, int status);
 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
@@ -243,7 +243,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 	blk_mq_free_request(rq);
 
 	if (effects)
-		nvme_passthru_end(ctrl, effects, req->cmd, status);
+		nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
 }
 
 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,