Diffstat (limited to 'drivers/cxl')
-rw-r--r--  drivers/cxl/core/core.h   |   5
-rw-r--r--  drivers/cxl/core/memdev.c |  13
-rw-r--r--  drivers/cxl/core/region.c | 124
3 files changed, 141 insertions(+), 1 deletion(-)
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index e888e293943e..deb5f87d6d0a 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -25,7 +25,12 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
#define CXL_DAX_REGION_TYPE(x) (&cxl_dax_region_type)
int cxl_region_init(void);
void cxl_region_exit(void);
+int cxl_get_poison_by_endpoint(struct cxl_port *port);
#else
+static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
+{
+ return 0;
+}
static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
}
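
As an aside, the hunk above follows the usual kernel CONFIG-stub pattern: the real declaration is compiled when the region support is built in, and the static inline no-op lets callers compile unchanged otherwise. A minimal sketch of that pattern follows; the config symbol, type, and function names are invented for illustration and are not CXL driver symbols.

/*
 * Illustrative only: CONFIG_EXAMPLE_FEATURE, struct example_port and
 * example_get_poison() are made-up names, not part of this patch.
 */
struct example_port;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_get_poison(struct example_port *port);	/* real implementation elsewhere */
#else
static inline int example_get_poison(struct example_port *port)
{
	return 0;	/* feature compiled out: succeed without doing anything */
}
#endif

/* Callers can then invoke example_get_poison() unconditionally, with no #ifdef. */
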
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 8be01479d40b..185b6d27b698 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -136,13 +136,24 @@ static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
{
+ struct cxl_port *port;
int rc;
+ port = dev_get_drvdata(&cxlmd->dev);
+ if (!port || !is_cxl_endpoint(port))
+ return -EINVAL;
+
rc = down_read_interruptible(&cxl_dpa_rwsem);
if (rc)
return rc;
- rc = cxl_get_poison_by_memdev(cxlmd);
+ if (port->commit_end == -1) {
+ /* No regions mapped to this memdev */
+ rc = cxl_get_poison_by_memdev(cxlmd);
+ } else {
+ /* Regions mapped, collect poison by endpoint */
+ rc = cxl_get_poison_by_endpoint(port);
+ }
up_read(&cxl_dpa_rwsem);
return rc;
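
The hunk above dispatches on port->commit_end: with no committed decoders the whole device is read via cxl_get_poison_by_memdev(), otherwise the per-endpoint walk added in region.c below is used. For orientation, here is a minimal, self-contained sketch of the DPA ranges that walk covers: each committed decoder contributes its skipped range and then its mapped range, and the unmapped handler finishes with the rest of the RAM range followed by any PMEM range. The extents, values, and helper below are illustrative assumptions, not the driver's data structures.

/*
 * Illustrative model only: mirrors the traversal order of the region.c
 * additions below using made-up extents, not real CXL structures.
 */
#include <stdio.h>
#include <stdint.h>

struct extent { uint64_t start, len; };

/* Assumed example layout: 1G of RAM DPA followed by 1G of PMEM DPA */
static const struct extent ram_res  = { 0x00000000, 0x40000000 };
static const struct extent pmem_res = { 0x40000000, 0x40000000 };

/* A committed endpoint decoder: a skipped hole, then its mapped range */
struct decoder { struct extent skip, mapped; };

static void read_poison(const char *what, uint64_t off, uint64_t len)
{
	printf("poison read: %-22s offset=%#010llx length=%#010llx\n",
	       what, (unsigned long long)off, (unsigned long long)len);
}

int main(void)
{
	/* One RAM-mode decoder that skipped 256M and mapped the next 256M */
	struct decoder d = {
		.skip   = { 0x00000000, 0x10000000 },
		.mapped = { 0x10000000, 0x10000000 },
	};

	/* poison_by_decoder(): skipped DPA first, then the mapped range */
	read_poison("decoder skip", d.skip.start, d.skip.len);
	read_poison("decoder mapped (ram)", d.mapped.start, d.mapped.len);

	/*
	 * cxl_get_poison_unmapped(): remainder of the RAM range (the driver
	 * computes this as resource_size(ram_res) - offset, i.e. RAM DPA is
	 * assumed to start at 0), then the whole PMEM range.
	 */
	uint64_t tail = d.mapped.start + d.mapped.len;
	read_poison("unmapped ram tail", tail, ram_res.len - tail);
	read_poison("unmapped pmem", pmem_res.start, pmem_res.len);
	return 0;
}
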
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index b2fd67fcebfb..f822de44bee0 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2238,6 +2238,130 @@ struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
+struct cxl_poison_context {
+ struct cxl_port *port;
+ enum cxl_decoder_mode mode;
+ u64 offset;
+};
+
+static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
+ struct cxl_poison_context *ctx)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ u64 offset, length;
+ int rc = 0;
+
+ /*
+ * Collect poison for the remaining unmapped resources
+ * after poison is collected by committed endpoints.
+ *
+ * Knowing that PMEM must always follow RAM, get poison
+ * for unmapped resources based on the last decoder's mode:
+ * ram: scan remains of ram range, then any pmem range
+ * pmem: scan remains of pmem range
+ */
+
+ if (ctx->mode == CXL_DECODER_RAM) {
+ offset = ctx->offset;
+ length = resource_size(&cxlds->ram_res) - offset;
+ rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
+ if (rc == -EFAULT)
+ rc = 0;
+ if (rc)
+ return rc;
+ }
+ if (ctx->mode == CXL_DECODER_PMEM) {
+ offset = ctx->offset;
+ length = resource_size(&cxlds->dpa_res) - offset;
+ if (!length)
+ return 0;
+ } else if (resource_size(&cxlds->pmem_res)) {
+ offset = cxlds->pmem_res.start;
+ length = resource_size(&cxlds->pmem_res);
+ } else {
+ return 0;
+ }
+
+ return cxl_mem_get_poison(cxlmd, offset, length, NULL);
+}
+
+static int poison_by_decoder(struct device *dev, void *arg)
+{
+ struct cxl_poison_context *ctx = arg;
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_memdev *cxlmd;
+ u64 offset, length;
+ int rc = 0;
+
+ if (!is_endpoint_decoder(dev))
+ return rc;
+
+ cxled = to_cxl_endpoint_decoder(dev);
+ if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+ return rc;
+
+ /*
+ * Regions are only created with single mode decoders: pmem or ram.
+ * Linux does not support mixed mode decoders. This means that
+ * reading poison per endpoint decoder adheres to the requirement
+ * that poison reads of pmem and ram must be separated.
+ * CXL 3.0 Spec 8.2.9.8.4.1
+ */
+ if (cxled->mode == CXL_DECODER_MIXED) {
+ dev_dbg(dev, "poison list read unsupported in mixed mode\n");
+ return rc;
+ }
+
+ cxlmd = cxled_to_memdev(cxled);
+ if (cxled->skip) {
+ offset = cxled->dpa_res->start - cxled->skip;
+ length = cxled->skip;
+ rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
+ if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
+ rc = 0;
+ if (rc)
+ return rc;
+ }
+
+ offset = cxled->dpa_res->start;
+ length = cxled->dpa_res->end - offset + 1;
+ rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
+ if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
+ rc = 0;
+ if (rc)
+ return rc;
+
+ /* Iterate until commit_end is reached */
+ if (cxled->cxld.id == ctx->port->commit_end) {
+ ctx->offset = cxled->dpa_res->end + 1;
+ ctx->mode = cxled->mode;
+ return 1;
+ }
+
+ return 0;
+}
+
+int cxl_get_poison_by_endpoint(struct cxl_port *port)
+{
+ struct cxl_poison_context ctx;
+ int rc = 0;
+
+ rc = down_read_interruptible(&cxl_region_rwsem);
+ if (rc)
+ return rc;
+
+ ctx = (struct cxl_poison_context) {
+ .port = port
+ };
+
+ rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
+ if (rc == 1)
+ rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport), &ctx);
+
+ up_read(&cxl_region_rwsem);
+ return rc;
+}
+
static struct lock_class_key cxl_pmem_region_key;
static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)