Diffstat (limited to 'drivers/nvme/host/core.c')
 drivers/nvme/host/core.c | 490
 1 file changed, 351 insertions(+), 139 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2533b88e66d5..2429b11eb9a8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -24,12 +24,22 @@
#include "nvme.h"
#include "fabrics.h"
+#include <linux/nvme-auth.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#define NVME_MINORS (1U << MINORBITS)
+struct nvme_ns_info {
+ struct nvme_ns_ids ids;
+ u32 nsid;
+ __le32 anagrpid;
+ bool is_shared;
+ bool is_readonly;
+ bool is_ready;
+};
+
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
@@ -330,6 +340,7 @@ enum nvme_disposition {
COMPLETE,
RETRY,
FAILOVER,
+ AUTHENTICATE,
};
static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
@@ -337,6 +348,9 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
if (likely(nvme_req(req)->status == 0))
return COMPLETE;
+ if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+ return AUTHENTICATE;
+
if (blk_noretry_request(req) ||
(nvme_req(req)->status & NVME_SC_DNR) ||
nvme_req(req)->retries >= nvme_max_retries)
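
For reference, a minimal sketch of the status check added above: nvme_req(req)->status holds the completion status code, while the CRD/MORE/DNR hint bits live above bit 10, so masking with 0x7ff compares the status code alone. The constant below is an assumption based on include/linux/nvme.h:

	#include <stdbool.h>
	#include <stdint.h>

	#define NVME_SC_AUTH_REQUIRED	0x191	/* assumed value */

	/* True when a completion asks for (re-)authentication, independent
	 * of the retry-delay/more/do-not-retry hint bits. */
	static bool nvme_status_needs_auth(uint16_t status)
	{
		return (status & 0x7ff) == NVME_SC_AUTH_REQUIRED;
	}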
@@ -375,11 +389,13 @@ static inline void nvme_end_req(struct request *req)
void nvme_complete_rq(struct request *req)
{
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
- if (nvme_req(req)->ctrl->kas)
- nvme_req(req)->ctrl->comp_seen = true;
+ if (ctrl->kas)
+ ctrl->comp_seen = true;
switch (nvme_decide_disposition(req)) {
case COMPLETE:
@@ -391,6 +407,14 @@ void nvme_complete_rq(struct request *req)
case FAILOVER:
nvme_failover_req(req);
return;
+ case AUTHENTICATE:
+#ifdef CONFIG_NVME_AUTH
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+ nvme_retry_req(req);
+#else
+ nvme_end_req(req);
+#endif
+ return;
}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -702,7 +726,9 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
switch (ctrl->state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
- req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
+ (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
return true;
break;
default:
@@ -990,8 +1016,7 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags)
+ int qid, int at_head, blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
@@ -1000,15 +1025,12 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
else
req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
- qid ? qid - 1 : 0);
+ qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
nvme_init_request(req, cmd);
- if (timeout)
- req->timeout = timeout;
-
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
if (ret)
@@ -1028,7 +1050,7 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
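
A hedged sketch of the updated calling convention: the per-call timeout argument is gone, so callers rely on the queue's default timeout and pass only qid/at_head/flags (ctrl is assumed to be in scope):

	struct nvme_command c = { };
	union nvme_result res;
	int ret;

	c.features.opcode = nvme_admin_get_features;
	/* NVME_QID_ANY routes through blk_mq_alloc_request(); submit at the
	 * tail (at_head = 0) with no blk_mq_req_flags_t flags. */
	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &c, &res, NULL, 0,
				     NVME_QID_ANY, 0, 0);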
@@ -1329,8 +1351,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
}
}
-static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
struct nvme_command c = { };
bool csi_seen = false;
@@ -1343,7 +1365,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
return 0;
c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.nsid = cpu_to_le32(info->nsid);
c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
@@ -1355,7 +1377,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (status) {
dev_warn(ctrl->device,
"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
- nsid, status);
+ info->nsid, status);
goto free_data;
}
@@ -1365,7 +1387,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (cur->nidl == 0)
break;
- len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
+ len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
if (len < 0)
break;
@@ -1374,7 +1396,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (nvme_multi_css(ctrl) && !csi_seen) {
dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
- nsid);
+ info->nsid);
status = -EINVAL;
}
@@ -1384,7 +1406,7 @@ free_data:
}
static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids, struct nvme_id_ns **id)
+ struct nvme_id_ns **id)
{
struct nvme_command c = { };
int error;
@@ -1407,51 +1429,66 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
error = NVME_SC_INVALID_NS | NVME_SC_DNR;
if ((*id)->ncap == 0) /* namespace not allocated or attached */
goto out_free_id;
+ return 0;
+out_free_id:
+ kfree(*id);
+ return error;
+}
+static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_ns_ids *ids = &info->ids;
+ struct nvme_id_ns *id;
+ int ret;
+
+ ret = nvme_identify_ns(ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = true;
if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
dev_info(ctrl->device,
"Ignoring bogus Namespace Identifiers\n");
} else {
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0) &&
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+ memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
}
-
+ kfree(id);
return 0;
-
-out_free_id:
- kfree(*id);
- return error;
}
-static int nvme_identify_ns_cs_indep(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_id_ns_cs_indep **id)
+static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
+ struct nvme_id_ns_cs_indep *id;
struct nvme_command c = {
.identify.opcode = nvme_admin_identify,
- .identify.nsid = cpu_to_le32(nsid),
+ .identify.nsid = cpu_to_le32(info->nsid),
.identify.cns = NVME_ID_CNS_NS_CS_INDEP,
};
int ret;
- *id = kmalloc(sizeof(**id), GFP_KERNEL);
- if (!*id)
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
return -ENOMEM;
- ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
- if (ret) {
- dev_warn(ctrl->device,
- "Identify namespace (CS independent) failed (%d)\n",
- ret);
- kfree(*id);
- return ret;
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (!ret) {
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = id->nstat & NVME_NSTAT_NRDY;
}
-
- return 0;
+ kfree(id);
+ return ret;
}
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
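
Taken together, the two helpers above fill a struct nvme_ns_info from whichever Identify data structure applies; a minimal sketch of the intended call pattern (mirroring nvme_scan_ns() later in this patch, with ctrl and nsid assumed in scope):

	struct nvme_ns_info info = { .nsid = nsid };
	int ret;

	if (ctrl->cap & NVME_CAP_CRMS_CRIMS)
		ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
	else
		ret = nvme_ns_info_from_identify(ctrl, &info);
	if (ret || !info.is_ready)
		return;	/* not ready: wait for the AEN and rescan */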
@@ -1466,7 +1503,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, NVME_QID_ANY, 0, 0);
+ buffer, buflen, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1875,6 +1912,11 @@ static void nvme_update_disk_info(struct gendisk *disk,
ns->ctrl->max_zeroes_sectors);
}
+static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
+}
+
static inline bool nvme_first_scan(struct gendisk *disk)
{
/* nvme_alloc_ns() scans the disk prior to adding it */
@@ -1912,12 +1954,44 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
blk_queue_chunk_sectors(ns->queue, iob);
}
-static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_update_ns_info_generic(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
{
- unsigned lbaf = nvme_lbaf_index(id->flbas);
+ blk_mq_freeze_queue(ns->disk->queue);
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ blk_mq_unfreeze_queue(ns->disk->queue);
+
+ if (nvme_ns_head_multipath(ns->head)) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+
+ /* Hide the block-interface for these devices */
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+
+ return 0;
+}
+
+static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
+{
+ struct nvme_id_ns *id;
+ unsigned lbaf;
int ret;
+ ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+
blk_mq_freeze_queue(ns->disk->queue);
+ lbaf = nvme_lbaf_index(id->flbas);
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
@@ -1927,36 +2001,35 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (ns->head->ids.csi == NVME_CSI_ZNS) {
ret = nvme_update_zone_info(ns, lbaf);
- if (ret)
- goto out_unfreeze;
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
}
- set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
ret = nvme_revalidate_zones(ns);
if (ret && !nvme_first_scan(ns->disk))
- return ret;
+ goto out;
}
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
- set_disk_ro(ns->head->disk,
- (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
disk_update_readahead(ns->head->disk);
blk_mq_unfreeze_queue(ns->head->disk->queue);
}
- return 0;
-out_unfreeze:
+ ret = 0;
+out:
/*
* If probing fails due to an unsupported feature, hide the block device,
* but still allow other access.
@@ -1966,10 +2039,31 @@ out_unfreeze:
set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
- blk_mq_unfreeze_queue(ns->disk->queue);
+ kfree(id);
return ret;
}
+static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ switch (info->ids.csi) {
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
+ info->nsid);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+ return nvme_update_ns_info_block(ns, info);
+ case NVME_CSI_NVM:
+ return nvme_update_ns_info_block(ns, info);
+ default:
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported (csi %u)\n",
+ info->nsid, info->ids.csi);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+}
+
static char nvme_pr_type(enum pr_type type)
{
switch (type) {
@@ -2103,7 +2197,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
cmd.common.cdw11 = cpu_to_le32(len);
- return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
+ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
@@ -2123,6 +2217,7 @@ static int nvme_report_zones(struct gendisk *disk, sector_t sector,
static const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
@@ -3613,6 +3708,108 @@ static ssize_t dctype_show(struct device *dev,
}
static DEVICE_ATTR_RO(dctype);
+#ifdef CONFIG_NVME_AUTH
+static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = dhchap_secret;
+ /* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_ctrl_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_ctrl_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = dhchap_secret;
+ /* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
+#endif
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3636,6 +3833,10 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_kato.attr,
&dev_attr_cntrltype.attr,
&dev_attr_dctype.attr,
+#ifdef CONFIG_NVME_AUTH
+ &dev_attr_dhchap_secret.attr,
+ &dev_attr_dhchap_ctrl_secret.attr,
+#endif
NULL
};
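
A hypothetical user-space sketch of driving the dhchap_secret attribute registered above; the controller name (nvme0) is illustrative, and the secret must carry the "DHHC-1:" prefix checked by the store routine:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Write a TP 8006-formatted secret to trigger re-authentication. */
	static int nvme_rekey(const char *secret)
	{
		int fd = open("/sys/class/nvme/nvme0/dhchap_secret", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, secret, strlen(secret));
		close(fd);
		return n < 0 ? -1 : 0;
	}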
@@ -3659,6 +3860,12 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
return 0;
+#ifdef CONFIG_NVME_AUTH
+ if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
+ return 0;
+#endif
return a->mode;
}
@@ -3786,7 +3993,7 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_ns_ids *ids, bool is_shared)
+ struct nvme_ns_info *info)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3808,9 +4015,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
if (ret)
goto out_ida_remove;
head->subsys = ctrl->subsys;
- head->ns_id = nsid;
- head->ids = *ids;
- head->shared = is_shared;
+ head->ns_id = info->nsid;
+ head->ids = info->ids;
+ head->shared = info->is_shared;
kref_init(&head->ref);
if (head->ids.csi) {
@@ -3867,54 +4074,54 @@ static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
return ret;
}
-static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
- struct nvme_ns_ids *ids, bool is_shared)
+static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
{
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_ns_head *head = NULL;
int ret;
- ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
+ ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
- "globally duplicate IDs for nsid %d\n", nsid);
+ "globally duplicate IDs for nsid %d\n", info->nsid);
nvme_print_device_info(ctrl);
return ret;
}
mutex_lock(&ctrl->subsys->lock);
- head = nvme_find_ns_head(ctrl, nsid);
+ head = nvme_find_ns_head(ctrl, info->nsid);
if (!head) {
- ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
+ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
"duplicate IDs in subsystem for nsid %d\n",
- nsid);
+ info->nsid);
goto out_unlock;
}
- head = nvme_alloc_ns_head(ctrl, nsid, ids, is_shared);
+ head = nvme_alloc_ns_head(ctrl, info);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
} else {
ret = -EINVAL;
- if (!is_shared || !head->shared) {
+ if (!info->is_shared || !head->shared) {
dev_err(ctrl->device,
- "Duplicate unshared namespace %d\n", nsid);
+ "Duplicate unshared namespace %d\n",
+ info->nsid);
goto out_put_ns_head;
}
- if (!nvme_ns_ids_equal(&head->ids, ids)) {
+ if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
- nsid);
+ info->nsid);
goto out_put_ns_head;
}
if (!multipath && !list_empty(&head->list)) {
dev_warn(ctrl->device,
"Found shared namespace %d, but multipathing not supported.\n",
- nsid);
+ info->nsid);
dev_warn_once(ctrl->device,
"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
}
@@ -3968,20 +4175,15 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
list_add(&ns->list, &ns->ctrl->namespaces);
}
-static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
struct nvme_ns *ns;
struct gendisk *disk;
- struct nvme_id_ns *id;
int node = ctrl->numa_node;
- if (nvme_identify_ns(ctrl, nsid, ids, &id))
- return;
-
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
- goto out_free_id;
+ return;
disk = blk_mq_alloc_disk(ctrl->tagset, ns);
if (IS_ERR(disk))
@@ -4002,7 +4204,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
ns->ctrl = ctrl;
kref_init(&ns->kref);
- if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
+ if (nvme_init_ns_head(ns, info))
goto out_cleanup_disk;
/*
@@ -4028,7 +4230,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
ns->head->instance);
}
- if (nvme_update_ns_info(ns, id))
+ if (nvme_update_ns_info(ns, info))
goto out_unlink_ns;
down_write(&ctrl->namespaces_rwsem);
@@ -4042,9 +4244,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (!nvme_ns_head_multipath(ns->head))
nvme_add_ns_cdev(ns);
- nvme_mpath_add_disk(ns, id);
+ nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
- kfree(id);
return;
@@ -4064,8 +4265,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
put_disk(disk);
out_free_ns:
kfree(ns);
- out_free_id:
- kfree(id);
}
static void nvme_ns_remove(struct nvme_ns *ns)
@@ -4123,29 +4322,21 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
}
}
-static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
+static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
{
- struct nvme_id_ns *id;
int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
if (test_bit(NVME_NS_DEAD, &ns->flags))
goto out;
- ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
- if (ret)
- goto out;
-
ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
- if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
+ if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
dev_err(ns->ctrl->device,
"identifiers changed for nsid %d\n", ns->head->ns_id);
- goto out_free_id;
+ goto out;
}
- ret = nvme_update_ns_info(ns, id);
-
-out_free_id:
- kfree(id);
+ ret = nvme_update_ns_info(ns, info);
out:
/*
* Only remove the namespace if we got a fatal error back from the
@@ -4157,59 +4348,47 @@ out:
nvme_ns_remove(ns);
}
-static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
- struct nvme_ns_ids ids = { };
- struct nvme_id_ns_cs_indep *id;
+ struct nvme_ns_info info = { .nsid = nsid };
struct nvme_ns *ns;
- bool ready = true;
- if (nvme_identify_ns_descs(ctrl, nsid, &ids))
+ if (nvme_identify_ns_descs(ctrl, &info))
return;
+ if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
+ dev_warn(ctrl->device,
+ "command set not reported for nsid: %d\n", nsid);
+ return;
+ }
+
/*
- * Check if the namespace is ready. If not ignore it, we will get an
- * AEN once it becomes ready and restart the scan.
+ * If available, try to use the Command Set Independent Identify Namespace
+ * data structure to find all the generic information that is needed to
+ * set up a namespace. If not, fall back to the legacy version.
*/
- if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) &&
- !nvme_identify_ns_cs_indep(ctrl, nsid, &id)) {
- ready = id->nstat & NVME_NSTAT_NRDY;
- kfree(id);
+ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
+ (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) {
+ if (nvme_ns_info_from_id_cs_indep(ctrl, &info))
+ return;
+ } else {
+ if (nvme_ns_info_from_identify(ctrl, &info))
+ return;
}
- if (!ready)
+ /*
+ * Ignore the namespace if it is not ready. We will get an AEN once it
+ * becomes ready and restart the scan.
+ */
+ if (!info.is_ready)
return;
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- nvme_validate_ns(ns, &ids);
+ nvme_validate_ns(ns, &info);
nvme_put_ns(ns);
- return;
- }
-
- switch (ids.csi) {
- case NVME_CSI_NVM:
- nvme_alloc_ns(ctrl, nsid, &ids);
- break;
- case NVME_CSI_ZNS:
- if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
- dev_warn(ctrl->device,
- "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
- nsid);
- break;
- }
- if (!nvme_multi_css(ctrl)) {
- dev_warn(ctrl->device,
- "command set not reported for nsid: %d\n",
- nsid);
- break;
- }
- nvme_alloc_ns(ctrl, nsid, &ids);
- break;
- default:
- dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
- ids.csi, nsid);
- break;
+ } else {
+ nvme_alloc_ns(ctrl, &info);
}
}
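
A compact restatement of the Identify-path selection above, under the same assumptions: the CS-independent data structure is used when the controller advertises CRIMS, or when the CSI is neither NVM nor ZNS (where the NVM-specific Identify would not apply):

	static bool use_cs_indep_identify(struct nvme_ctrl *ctrl, u8 csi)
	{
		return (ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
		       (csi != NVME_CSI_NVM && csi != NVME_CSI_ZNS);
	}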
@@ -4265,7 +4444,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
if (!nsid) /* end of the list? */
goto out;
- nvme_validate_or_alloc_ns(ctrl, nsid);
+ nvme_scan_ns(ctrl, nsid);
while (++prev < nsid)
nvme_ns_remove_by_nsid(ctrl, prev);
}
@@ -4288,7 +4467,7 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
kfree(id);
for (i = 1; i <= nn; i++)
- nvme_validate_or_alloc_ns(ctrl, i);
+ nvme_scan_ns(ctrl, i);
nvme_remove_invalid_namespaces(ctrl, nn);
}
@@ -4525,9 +4704,19 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_get_fw_slot_info(ctrl);
}
+static u32 nvme_aer_type(u32 result)
+{
+ return result & 0x7;
+}
+
+static u32 nvme_aer_subtype(u32 result)
+{
+ return (result & 0xff00) >> 8;
+}
+
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
- u32 aer_notice_type = (result & 0xff00) >> 8;
+ u32 aer_notice_type = nvme_aer_subtype(result);
trace_nvme_async_event(ctrl, aer_notice_type);
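
A worked example of the decoding helpers introduced above, assuming the usual AER completion layout (event type in bits 2:0, event information in bits 15:8 of Dword 0):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t aer_type(uint32_t result)    { return result & 0x7; }
	static uint32_t aer_subtype(uint32_t result) { return (result & 0xff00) >> 8; }

	int main(void)
	{
		uint32_t result = 0x0102;	/* hypothetical AER result dword */

		/* prints "type=2 subtype=1", i.e. a Notice-class event */
		printf("type=%u subtype=%u\n", aer_type(result), aer_subtype(result));
		return 0;
	}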
@@ -4542,8 +4731,10 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
* recovery actions from interfering with the controller's
* firmware activation.
*/
- if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+ nvme_auth_stop(ctrl);
queue_work(nvme_wq, &ctrl->fw_act_work);
+ }
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
@@ -4560,11 +4751,19 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
}
}
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+ trace_nvme_async_event(ctrl, NVME_AER_ERROR);
+ dev_warn(ctrl->device, "resetting controller due to AER\n");
+ nvme_reset_ctrl(ctrl);
+}
+
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
- u32 aer_type = result & 0x07;
+ u32 aer_type = nvme_aer_type(result);
+ u32 aer_subtype = nvme_aer_subtype(result);
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
@@ -4574,6 +4773,15 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
+ /*
+ * For a persistent internal error, don't run async_event_work
+ * to submit a new AER. The controller reset will do it.
+ */
+ if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+ nvme_handle_aer_persistent_error(ctrl);
+ return;
+ }
+ fallthrough;
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
@@ -4590,6 +4798,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
@@ -4649,6 +4858,8 @@ static void nvme_free_ctrl(struct device *dev)
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
+ nvme_auth_stop(ctrl);
+ nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
if (subsys) {
@@ -4739,6 +4950,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
nvme_mpath_init_ctrl(ctrl);
+ nvme_auth_init_ctrl(ctrl);
return 0;
out_free_name: