From: Mike Snitzer <snitzer(a)redhat.com>
nvme: update failover handling to work with REQ_FAILFAST_TRANSPORT
BZ: 1948690
Upstream Status: RHEL-only
Signed-off-by: Mike Snitzer <snitzer(a)redhat.com>
rhel-8.git commit f8fb6ea1226e2abc525c88da13b346118d548eea
Author: Mike Snitzer <snitzer(a)redhat.com>
Date: Tue Aug 25 21:52:46 2020 -0400
[nvme] nvme: update failover handling to work with REQ_FAILFAST_TRANSPORT
Message-id: <20200825215248.2291-9-snitzer(a)redhat.com>
Patchwork-id: 325177
Patchwork-instance: patchwork
O-Subject: [RHEL8.3 PATCH 08/10] nvme: update failover handling to work with
REQ_FAILFAST_TRANSPORT
Bugzilla: 1843515
RH-Acked-by: David Milburn <dmilburn(a)redhat.com>
RH-Acked-by: Gopal Tiwari <gtiwari(a)redhat.com>
RH-Acked-by: Ewan Milne <emilne(a)redhat.com>
BZ: 1843515
Upstream Status: RHEL-only
If REQ_FAILFAST_TRANSPORT is set it means the driver should not retry
IO that completed with transport errors. REQ_FAILFAST_TRANSPORT is
set by multipathing software (e.g. dm-multipath) before it issues IO.
Update NVMe to prepare for failover of requests marked with either
REQ_NVME_MPATH or REQ_FAILFAST_TRANSPORT. This allows such requests
to be given a disposition of FAILOVER.
Introduce nvme_end_req_with_failover() for use in nvme_complete_rq()
if REQ_NVME_MPATH isn't set. nvme_end_req_with_failover() ensures
the request is completed with a retryable IO error when appropriate.
__nvme_end_req() was factored out for use by both nvme_end_req() and
nvme_end_req_with_failover().
Signed-off-by: Mike Snitzer <snitzer(a)redhat.com>
Signed-off-by: Frantisek Hrbata <fhrbata(a)redhat.com>
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -311,7 +311,7 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
nvme_req(req)->retries >= nvme_max_retries)
return COMPLETE;
- if (req->cmd_flags & REQ_NVME_MPATH) {
+ if (req->cmd_flags & (REQ_NVME_MPATH | REQ_FAILFAST_TRANSPORT)) {
if (nvme_is_path_error(nvme_req(req)->status) ||
blk_queue_dying(req->q))
return FAILOVER;
@@ -323,10 +323,8 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
return RETRY;
}
-static inline void nvme_end_req(struct request *req)
+static inline void __nvme_end_req(struct request *req, blk_status_t status)
{
- blk_status_t status = nvme_error_status(nvme_req(req)->status);
-
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
@@ -336,6 +334,28 @@ static inline void nvme_end_req(struct request *req)
blk_mq_end_request(req, status);
}
+static inline void nvme_end_req(struct request *req)
+{
+ __nvme_end_req(req, nvme_error_status(nvme_req(req)->status));
+}
+
+static inline void nvme_end_req_with_failover(struct request *req)
+{
+ u16 nvme_status = nvme_req(req)->status;
+ blk_status_t status = nvme_error_status(nvme_status);
+
+ if (unlikely(nvme_status & NVME_SC_DNR))
+ goto out;
+
+ if (!blk_path_error(status)) {
+		pr_debug("Request meant for failover but blk_status_t (errno=%d) was not retryable.\n",
+			 blk_status_to_errno(status));
+ status = BLK_STS_IOERR;
+ }
+out:
+ __nvme_end_req(req, status);
+}
+
void nvme_complete_rq(struct request *req)
{
trace_nvme_complete_rq(req);
@@ -352,7 +372,10 @@ void nvme_complete_rq(struct request *req)
nvme_retry_req(req);
return;
case FAILOVER:
- nvme_failover_req(req);
+ if (req->cmd_flags & REQ_NVME_MPATH)
+ nvme_failover_req(req);
+ else
+ nvme_end_req_with_failover(req);
return;
}
}
--
https://gitlab.com/cki-project/kernel-ark/-/merge_requests/1024