Diffstat (limited to 'drivers/misc')
-rw-r--r--   drivers/misc/cxl/api.c     | 28
-rw-r--r--   drivers/misc/cxl/context.c |  3
-rw-r--r--   drivers/misc/cxl/cxl.h     | 15
-rw-r--r--   drivers/misc/cxl/fault.c   | 10
-rw-r--r--   drivers/misc/cxl/guest.c   | 78
-rw-r--r--   drivers/misc/cxl/native.c  | 29
-rw-r--r--   drivers/misc/cxl/pci.c     | 64
-rw-r--r--   drivers/misc/cxl/sysfs.c   | 10
8 files changed, 170 insertions, 67 deletions
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 2107c948406d..6d228ccd884d 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -68,15 +68,6 @@ struct cxl_context *cxl_get_context(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(cxl_get_context);
 
-struct device *cxl_get_phys_dev(struct pci_dev *dev)
-{
-	struct cxl_afu *afu;
-
-	afu = cxl_pci_to_afu(dev);
-
-	return afu->adapter->dev.parent;
-}
-
 int cxl_release_context(struct cxl_context *ctx)
 {
 	if (ctx->status >= STARTED)
@@ -192,6 +183,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
 		ctx->pid = get_task_pid(task, PIDTYPE_PID);
 		ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
 		kernel = false;
+		ctx->real_mode = false;
 	}
 
 	cxl_ctx_get();
@@ -228,6 +220,24 @@ void cxl_set_master(struct cxl_context *ctx)
 }
 EXPORT_SYMBOL_GPL(cxl_set_master);
 
+int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode)
+{
+	if (ctx->status == STARTED) {
+		/*
+		 * We could potentially update the PE and issue an update LLCMD
+		 * to support this, but it doesn't seem to have a good use case
+		 * since it's trivial to just create a second kernel context
+		 * with different translation modes, so until someone convinces
+		 * me otherwise:
+		 */
+		return -EBUSY;
+	}
+
+	ctx->real_mode = real_mode;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_set_translation_mode);
+
 /* wrappers around afu_* file ops which are EXPORTED */
 int cxl_fd_open(struct inode *inode, struct file *file)
 {
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 7edea9c19199..26d206b1d08c 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -297,8 +297,7 @@ static void reclaim_ctx(struct rcu_head *rcu)
 	if (ctx->kernelapi)
 		kfree(ctx->mapping);
 
-	if (ctx->irq_bitmap)
-		kfree(ctx->irq_bitmap);
+	kfree(ctx->irq_bitmap);
 
 	/* Drop ref to the afu device taken during cxl_context_init */
 	cxl_afu_put(ctx->afu);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 73dc2a33da74..4fe50788ff45 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -178,15 +178,6 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
 #define CXL_PSL_SR_An_MP  (1ull << (63-62)) /* Master Process */
 #define CXL_PSL_SR_An_LE  (1ull << (63-63)) /* Little Endian */
 
-/****** CXL_PSL_LLCMD_An ****************************************************/
-#define CXL_LLCMD_TERMINATE   0x0001000000000000ULL
-#define CXL_LLCMD_REMOVE      0x0002000000000000ULL
-#define CXL_LLCMD_SUSPEND     0x0003000000000000ULL
-#define CXL_LLCMD_RESUME      0x0004000000000000ULL
-#define CXL_LLCMD_ADD         0x0005000000000000ULL
-#define CXL_LLCMD_UPDATE      0x0006000000000000ULL
-#define CXL_LLCMD_HANDLE_MASK 0x000000000000ffffULL
-
 /****** CXL_PSL_ID_An ****************************************************/
 #define CXL_PSL_ID_An_F	(1ull << (63-31))
 #define CXL_PSL_ID_An_L	(1ull << (63-30))
@@ -376,11 +367,13 @@ struct cxl_afu_native {
 };
 
 struct cxl_afu_guest {
+	struct cxl_afu *parent;
 	u64 handle;
 	phys_addr_t p2n_phys;
 	u64 p2n_size;
 	int max_ints;
-	struct mutex recovery_lock;
+	bool handle_err;
+	struct delayed_work work_err;
 	int previous_state;
 };
 
@@ -524,6 +517,7 @@ struct cxl_context {
 	bool pe_inserted;
 	bool master;
 	bool kernel;
+	bool real_mode;
 	bool pending_irq;
 	bool pending_fault;
 	bool pending_afu_err;
@@ -580,6 +574,7 @@ struct cxl {
 	bool perst_loads_image;
 	bool perst_select_user;
 	bool perst_same_image;
+	bool psl_timebase_synced;
 };
 
 int cxl_pci_alloc_one_irq(struct cxl *adapter);
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 9a8650bcb042..377e650a2a1d 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -149,11 +149,13 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 	 * update_mmu_cache() will not have loaded the hash since current->trap
 	 * is not a 0x400 or 0x300, so just call hash_page_mm() here.
 	 */
-	access = _PAGE_PRESENT;
+	access = _PAGE_PRESENT | _PAGE_READ;
 	if (dsisr & CXL_PSL_DSISR_An_S)
-		access |= _PAGE_RW;
-	if ((!ctx->kernel) || ~(dar & (1ULL << 63)))
-		access |= _PAGE_USER;
+		access |= _PAGE_WRITE;
+
+	access |= _PAGE_PRIVILEGED;
+	if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
+		access &= ~_PAGE_PRIVILEGED;
 
 	if (dsisr & DSISR_NOHPTE)
 		inv_flags |= HPTE_NOHPTE_UPDATE;
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 8213372de2b7..bc8d0b9870eb 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -178,6 +178,9 @@ static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
 	u64 state;
 	int rc = 0;
 
+	if (!afu)
+		return -EIO;
+
 	rc = cxl_h_read_error_state(afu->guest->handle, &state);
 	if (!rc) {
 		WARN_ON(state != H_STATE_NORMAL &&
@@ -552,6 +555,17 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
 
 	elem->common.sstp0  = cpu_to_be64(ctx->sstp0);
 	elem->common.sstp1  = cpu_to_be64(ctx->sstp1);
+
+	/*
+	 * Ensure we have at least one interrupt allocated to take faults for
+	 * kernel contexts that may not have allocated any AFU IRQs at all:
+	 */
+	if (ctx->irqs.range[0] == 0) {
+		rc = afu_register_irqs(ctx, 0);
+		if (rc)
+			goto out_free;
+	}
+
 	for (r = 0; r < CXL_IRQ_RANGES; r++) {
 		for (i = 0; i < ctx->irqs.range[r]; i++) {
 			if (r == 0 && i == 0) {
@@ -597,6 +611,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
 		enable_afu_irqs(ctx);
 	}
 
+out_free:
 	free_page((u64)elem);
 	return rc;
 }
@@ -605,6 +620,9 @@ static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u
 {
 	pr_devel("in %s\n", __func__);
 
+	if (ctx->real_mode)
+		return -EPERM;
+
 	ctx->kernel = kernel;
 	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
 		return attach_afu_directed(ctx, wed, amr);
@@ -818,7 +836,6 @@ static int afu_update_state(struct cxl_afu *afu)
 	switch (cur_state) {
 	case H_STATE_NORMAL:
 		afu->guest->previous_state = cur_state;
-		rc = 1;
 		break;
 
 	case H_STATE_DISABLE:
@@ -834,7 +851,6 @@ static int afu_update_state(struct cxl_afu *afu)
 			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
 					pci_channel_io_normal);
 			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
-			rc = 1;
 		}
 		afu->guest->previous_state = 0;
 		break;
@@ -859,39 +875,30 @@ static int afu_update_state(struct cxl_afu *afu)
 	return rc;
 }
 
-static int afu_do_recovery(struct cxl_afu *afu)
+static void afu_handle_errstate(struct work_struct *work)
 {
-	int rc;
+	struct cxl_afu_guest *afu_guest =
+		container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);
 
-	/* many threads can arrive here, in case of detach_all for example.
-	 * Only one needs to drive the recovery
-	 */
-	if (mutex_trylock(&afu->guest->recovery_lock)) {
-		rc = afu_update_state(afu);
-		mutex_unlock(&afu->guest->recovery_lock);
-		return rc;
-	}
-	return 0;
+	if (!afu_update_state(afu_guest->parent) &&
+	    afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
+		return;
+
+	if (afu_guest->handle_err == true)
+		schedule_delayed_work(&afu_guest->work_err,
+				      msecs_to_jiffies(3000));
 }
 
 static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
 {
 	int state;
 
-	if (afu) {
-		if (afu_read_error_state(afu, &state) ||
-		    state != H_STATE_NORMAL) {
-			if (afu_do_recovery(afu) > 0) {
-				/* check again in case we've just fixed it */
-				if (!afu_read_error_state(afu, &state) &&
-				    state == H_STATE_NORMAL)
-					return true;
-			}
-			return false;
-		}
+	if (afu && (!afu_read_error_state(afu, &state))) {
+		if (state == H_STATE_NORMAL)
+			return true;
 	}
 
-	return true;
+	return false;
 }
 
 static int afu_properties_look_ok(struct cxl_afu *afu)
@@ -929,8 +936,6 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
 		return -ENOMEM;
 	}
 
-	mutex_init(&afu->guest->recovery_lock);
-
 	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
 			       adapter->adapter_num,
 			       slice)))
@@ -986,6 +991,15 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
 
 	afu->enabled = true;
 
+	/*
+	 * wake up the cpu periodically to check the state
+	 * of the AFU using "afu" stored in the guest structure.
+	 */
+	afu->guest->parent = afu;
+	afu->guest->handle_err = true;
+	INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
+	schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));
+
 	if ((rc = cxl_pci_vphb_add(afu)))
 		dev_info(&afu->dev, "Can't register vPHB\n");
 
@@ -1014,6 +1028,10 @@ void cxl_guest_remove_afu(struct cxl_afu *afu)
 	if (!afu)
 		return;
 
+	/* flush and stop pending job */
+	afu->guest->handle_err = false;
+	flush_delayed_work(&afu->guest->work_err);
+
 	cxl_pci_vphb_remove(afu);
 	cxl_sysfs_afu_remove(afu);
 
@@ -1101,6 +1119,12 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
 	adapter->dev.release = release_adapter;
 	dev_set_drvdata(&pdev->dev, adapter);
 
+	/*
+	 * Hypervisor controls PSL timebase initialization (p1 register).
+	 * On FW840, PSL is initialized.
+	 */
+	adapter->psl_timebase_synced = true;
+
 	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
 		goto err1;
 
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index ecf7557cd657..55d8a1459f28 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -186,16 +186,25 @@ static int spa_max_procs(int spa_size)
 
 int cxl_alloc_spa(struct cxl_afu *afu)
 {
+	unsigned spa_size;
+
 	/* Work out how many pages to allocate */
 	afu->native->spa_order = 0;
 	do {
 		afu->native->spa_order++;
-		afu->native->spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
+		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
+
+		if (spa_size > 0x100000) {
+			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
+					afu->native->spa_max_procs, afu->native->spa_size);
+			afu->num_procs = afu->native->spa_max_procs;
+			break;
+		}
+
+		afu->native->spa_size = spa_size;
 		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
 	} while (afu->native->spa_max_procs < afu->num_procs);
 
-	WARN_ON(afu->native->spa_size > 0x100000); /* Max size supported by the hardware */
-
 	if (!(afu->native->spa = (struct cxl_process_element *)
 	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
 		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
@@ -486,8 +495,9 @@ static u64 calculate_sr(struct cxl_context *ctx)
 	if (mfspr(SPRN_LPCR) & LPCR_TC)
 		sr |= CXL_PSL_SR_An_TC;
 	if (ctx->kernel) {
-		sr |= CXL_PSL_SR_An_R | (mfmsr() & MSR_SF);
-		sr |= CXL_PSL_SR_An_HV;
+		if (!ctx->real_mode)
+			sr |= CXL_PSL_SR_An_R;
+		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
 	} else {
 		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
 		sr &= ~(CXL_PSL_SR_An_HV);
@@ -526,6 +536,15 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
 	ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
 	ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
 
+	/*
+	 * Ensure we have the multiplexed PSL interrupt set up to take faults
+	 * for kernel contexts that may not have allocated any AFU IRQs at all:
+	 */
+	if (ctx->irqs.range[0] == 0) {
+		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
+		ctx->irqs.range[0] = 1;
+	}
+
 	for (r = 0; r < CXL_IRQ_RANGES; r++) {
 		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
 		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 2844e975bf79..a08fcc888a71 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -21,6 +21,7 @@
 #include <asm/msi_bitmap.h>
 #include <asm/pnv-pci.h>
 #include <asm/io.h>
+#include <asm/reg.h>
 
 #include "cxl.h"
 #include <misc/cxl.h>
@@ -321,12 +322,43 @@ static void dump_afu_descriptor(struct cxl_afu *afu)
 #undef show_reg
 }
 
+#define CAPP_UNIT0_ID 0xBA
+#define CAPP_UNIT1_ID 0XBE
+
+static u64 get_capp_unit_id(struct device_node *np)
+{
+	u32 phb_index;
+
+	/*
+	 * For chips other than POWER8NVL, we only have CAPP 0,
+	 * irrespective of which PHB is used.
+	 */
+	if (!pvr_version_is(PVR_POWER8NVL))
+		return CAPP_UNIT0_ID;
+
+	/*
+	 * For POWER8NVL, assume CAPP 0 is attached to PHB0 and
+	 * CAPP 1 is attached to PHB1.
+	 */
+	if (of_property_read_u32(np, "ibm,phb-index", &phb_index))
+		return 0;
+
+	if (phb_index == 0)
+		return CAPP_UNIT0_ID;
+
+	if (phb_index == 1)
+		return CAPP_UNIT1_ID;
+
+	return 0;
+}
+
 static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
 {
 	struct device_node *np;
 	const __be32 *prop;
 	u64 psl_dsnctl;
 	u64 chipid;
+	u64 capp_unit_id;
 
 	if (!(np = pnv_pci_get_phb_node(dev)))
 		return -ENODEV;
@@ -336,10 +368,19 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
 	if (!np)
 		return -ENODEV;
 	chipid = be32_to_cpup(prop);
+	capp_unit_id = get_capp_unit_id(np);
 	of_node_put(np);
+	if (!capp_unit_id) {
+		pr_err("cxl: invalid capp unit id\n");
+		return -ENODEV;
+	}
 
+	psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
+	psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
 	/* Tell PSL where to route data to */
-	psl_dsnctl = 0x02E8900002000000ULL | (chipid << (63-5));
+	psl_dsnctl |= (chipid << (63-5));
+	psl_dsnctl |= (capp_unit_id << (63-13));
+
 	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
 	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL); /* snoop write mask */
 
@@ -355,22 +396,24 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
 #define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
 #define _2048_250MHZ_CYCLES 1
 
-static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
+static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
 {
 	u64 psl_tb;
 	int delta;
 	unsigned int retry = 0;
 	struct device_node *np;
 
+	adapter->psl_timebase_synced = false;
+
 	if (!(np = pnv_pci_get_phb_node(dev)))
-		return -ENODEV;
+		return;
 
 	/* Do not fail when CAPP timebase sync is not supported by OPAL */
 	of_node_get(np);
 	if (! of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
 		of_node_put(np);
-		pr_err("PSL: Timebase sync: OPAL support missing\n");
-		return 0;
+		dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
+		return;
 	}
 	of_node_put(np);
 
@@ -389,8 +432,8 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
 	do {
 		msleep(1);
 		if (retry++ > 5) {
-			pr_err("PSL: Timebase sync: giving up!\n");
-			return -EIO;
+			dev_info(&dev->dev, "PSL timebase can't synchronize\n");
+			return;
 		}
 		psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
 		delta = mftb() - psl_tb;
@@ -398,7 +441,8 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
 			delta = -delta;
 	} while (tb_to_ns(delta) > 16000);
 
-	return 0;
+	adapter->psl_timebase_synced = true;
+	return;
 }
 
 static int init_implementation_afu_regs(struct cxl_afu *afu)
@@ -1144,8 +1188,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
 	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
 		goto err;
 
-	if ((rc = cxl_setup_psl_timebase(adapter, dev)))
-		goto err;
+	/* Ignore error, adapter init is not dependant on timebase sync */
+	cxl_setup_psl_timebase(adapter, dev);
 
 	if ((rc = cxl_native_register_psl_err_irq(adapter)))
 		goto err;
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 25913c08794c..b043c20f158f 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -57,6 +57,15 @@ static ssize_t image_loaded_show(struct device *device,
 	return scnprintf(buf, PAGE_SIZE, "factory\n");
 }
 
+static ssize_t psl_timebase_synced_show(struct device *device,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct cxl *adapter = to_cxl_adapter(device);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
+}
+
 static ssize_t reset_adapter_store(struct device *device,
 				   struct device_attribute *attr,
 				   const char *buf, size_t count)
@@ -142,6 +151,7 @@ static struct device_attribute adapter_attrs[] = {
 	__ATTR_RO(psl_revision),
 	__ATTR_RO(base_image),
 	__ATTR_RO(image_loaded),
+	__ATTR_RO(psl_timebase_synced),
 	__ATTR_RW(load_image_on_perst),
 	__ATTR_RW(perst_reloads_same_image),
 	__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),