Diffstat (limited to 'drivers/nvme/host/pci.c')
-rw-r--r-- | drivers/nvme/host/pci.c | 35 |
1 files changed, 21 insertions, 14 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 295fbec1e5f2..fbc71fac6f1e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -84,6 +84,7 @@ struct nvme_dev {
 	struct dma_pool *prp_small_pool;
 	unsigned online_queues;
 	unsigned max_qid;
+	unsigned int num_vecs;
 	int q_depth;
 	u32 db_stride;
 	void __iomem *bar;
@@ -414,7 +415,8 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_dev *dev = set->driver_data;
 
-	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev), 0);
+	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev),
+			dev->num_vecs > 1 ? 1 /* admin queue */ : 0);
 }
 
 /**
@@ -1380,8 +1382,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 	return 0;
 }
 
-static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
-		int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 {
 	struct nvme_queue *nvmeq = &dev->queues[qid];
 
@@ -1457,7 +1458,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 		nvmeq->sq_cmds_io = dev->cmb + offset;
 	}
 
-	nvmeq->cq_vector = qid - 1;
+	/*
+	 * A queue's vector matches the queue identifier unless the controller
+	 * has only one vector available.
+	 */
+	nvmeq->cq_vector = dev->num_vecs == 1 ? 0 : qid;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
 	if (result < 0)
 		goto release_vector;
@@ -1596,8 +1601,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
-			dev_to_node(dev->dev));
+	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
 	if (result)
 		return result;
 
@@ -1630,9 +1634,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 	int ret = 0;
 
 	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
-		/* vector == qid - 1, match nvme_create_queue */
-		if (nvme_alloc_queue(dev, i, dev->q_depth,
-				pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
+		if (nvme_alloc_queue(dev, i, dev->q_depth)) {
 			ret = -ENOMEM;
 			break;
 		}
@@ -1914,6 +1916,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	int result, nr_io_queues;
 	unsigned long size;
 
+	struct irq_affinity affd = {
+		.pre_vectors = 1
+	};
+
 	nr_io_queues = num_possible_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
@@ -1949,11 +1955,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	 * setting up the full range we need.
 	 */
 	pci_free_irq_vectors(pdev);
-	nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
-			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
-	if (nr_io_queues <= 0)
+	result = pci_alloc_irq_vectors_affinity(pdev, 1, nr_io_queues + 1,
+			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+	if (result <= 0)
 		return -EIO;
-	dev->max_qid = nr_io_queues;
+	dev->num_vecs = result;
+	dev->max_qid = max(result - 1, 1);
 
 	/*
 	 * Should investigate if there's a performance win from allocating
@@ -2201,7 +2208,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 
 	nvme_stop_queues(&dev->ctrl);
 
-	if (!dead) {
+	if (!dead && dev->ctrl.queue_count > 0) {
 		/*
 		 * If the controller is still alive tell it to stop using the
 		 * host memory buffer.  In theory the shutdown / reset should
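
The core of the change is that pci_alloc_irq_vectors_affinity() is now asked for
nr_io_queues + 1 vectors with .pre_vectors = 1, so vector 0 stays dedicated to
the admin queue; each I/O queue qid then maps directly to vector qid, falling
back to vector 0 only when a single vector could be allocated, and
blk_mq_pci_map_queues() is given an offset of 1 whenever more than one vector
exists. The standalone sketch below (not kernel code; the alloc_results array,
the max_u() helper and the printed layout are made up for illustration) simply
replays that arithmetic for a few hypothetical vector counts:

	/*
	 * Userspace sketch of the post-patch vector assignment.
	 * num_vecs and max_qid mirror the driver fields of the same name.
	 */
	#include <stdio.h>

	static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

	int main(void)
	{
		/* Pretend pci_alloc_irq_vectors_affinity() returned these counts. */
		unsigned alloc_results[] = { 1, 2, 5 };

		for (unsigned i = 0; i < 3; i++) {
			unsigned num_vecs = alloc_results[i];
			/* Keep at least one I/O queue even with a single vector. */
			unsigned max_qid = max_u(num_vecs - 1, 1);

			printf("vectors=%u io_queues=%u\n", num_vecs, max_qid);
			for (unsigned qid = 1; qid <= max_qid; qid++) {
				/* Same expression as nvme_create_queue() above. */
				unsigned cq_vector = (num_vecs == 1) ? 0 : qid;
				printf("  qid %u -> vector %u\n", qid, cq_vector);
			}
		}
		return 0;
	}

With two or more vectors this prints qid 1 -> vector 1, qid 2 -> vector 2, and
so on, which is why the admin queue no longer shares an interrupt (or an
affinity mask) with the first I/O queue; only in the single-vector case does
everything still funnel through vector 0.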