| author | Alex Deucher <alexander.deucher@amd.com> | 2016-03-29 18:28:50 -0400 |
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2017-03-29 23:53:37 -0400 |
| commit | d766e6a393383c60a55bdcc72586f21a1ff12509 (patch) | |
| tree | 2850bdf21e89a7bd48c8d08cd7ff6463ad955214 /drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | |
| parent | 832be4041d4999e008839d12d1efe118da27bd99 (diff) | |
drm/amdgpu: switch ih handling to two levels (v3)
Newer asics have two levels of irq ids now:
client id - the IP
src id - the interrupt src within the IP
v2: integrated Christian's comments.
v3: fix rebase fail in SI and CIK
Signed-off-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
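Before reading the diff below, it may help to see a minimal sketch of the two-level lookup the commit message describes: interrupt sources are now indexed first by client id (the IP block reported in the IH vector) and then by src id within that IP, via `adev->irq.client[client_id].sources[src_id]`. Only that indexing scheme and the `AMDGPU_IH_CLIENTID_MAX` / `AMDGPU_MAX_IRQ_SRC_ID` bounds mirror the patch; the type names, function name, and numeric limits in this sketch are illustrative stand-ins, not driver code.

```c
/*
 * Illustrative model of the two-level IRQ source table (not the kernel
 * code): the first index is the client (IP block) id, the second is the
 * source id within that IP.  The *_MAX values are placeholders.
 */
#include <stddef.h>

#define IH_CLIENTID_MAX  32    /* stands in for AMDGPU_IH_CLIENTID_MAX */
#define MAX_IRQ_SRC_ID   256   /* stands in for AMDGPU_MAX_IRQ_SRC_ID */

struct irq_source {
        int num_types;                /* per-source interrupt types, as in amdgpu_irq_src */
};

struct irq_client {
        struct irq_source **sources;  /* allocated only if this client registers sources */
};

static struct irq_client clients[IH_CLIENTID_MAX];

/* Two-level lookup, mirroring what amdgpu_irq_dispatch() does after this patch. */
static struct irq_source *lookup_source(unsigned client_id, unsigned src_id)
{
        if (client_id >= IH_CLIENTID_MAX || src_id >= MAX_IRQ_SRC_ID)
                return NULL;          /* malformed IH vector entry */
        if (!clients[client_id].sources)
                return NULL;          /* no sources registered for this client */
        return clients[client_id].sources[src_id];
}
```

The second level stays NULL until an IP block actually registers a source for that client, which is why the dispatch, teardown, and reset-resume paths in the diff all check `adev->irq.client[i].sources` before walking the per-source array.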
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 111 |
1 file changed, 76 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index e63ece049b05..7e7acd47ec78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -89,23 +89,28 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
 static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 {
         unsigned long irqflags;
-        unsigned i, j;
+        unsigned i, j, k;
         int r;
 
         spin_lock_irqsave(&adev->irq.lock, irqflags);
-        for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
-                struct amdgpu_irq_src *src = adev->irq.sources[i];
-
-                if (!src || !src->funcs->set || !src->num_types)
+        for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+                if (!adev->irq.client[i].sources)
                         continue;
 
-                for (j = 0; j < src->num_types; ++j) {
-                        atomic_set(&src->enabled_types[j], 0);
-                        r = src->funcs->set(adev, src, j,
-                                            AMDGPU_IRQ_STATE_DISABLE);
-                        if (r)
-                                DRM_ERROR("error disabling interrupt (%d)\n",
-                                          r);
+                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+                        if (!src || !src->funcs->set || !src->num_types)
+                                continue;
+
+                        for (k = 0; k < src->num_types; ++k) {
+                                atomic_set(&src->enabled_types[k], 0);
+                                r = src->funcs->set(adev, src, k,
+                                                    AMDGPU_IRQ_STATE_DISABLE);
+                                if (r)
+                                        DRM_ERROR("error disabling interrupt (%d)\n",
+                                                  r);
+                        }
                 }
         }
         spin_unlock_irqrestore(&adev->irq.lock, irqflags);
@@ -254,7 +259,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
  */
 void amdgpu_irq_fini(struct amdgpu_device *adev)
 {
-        unsigned i;
+        unsigned i, j;
 
         drm_vblank_cleanup(adev->ddev);
         if (adev->irq.installed) {
@@ -266,19 +271,25 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
                 cancel_work_sync(&adev->reset_work);
         }
 
-        for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
-                struct amdgpu_irq_src *src = adev->irq.sources[i];
-
-                if (!src)
+        for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+                if (!adev->irq.client[i].sources)
                         continue;
 
-                kfree(src->enabled_types);
-                src->enabled_types = NULL;
-                if (src->data) {
-                        kfree(src->data);
-                        kfree(src);
-                        adev->irq.sources[i] = NULL;
+                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+                        if (!src)
+                                continue;
+
+                        kfree(src->enabled_types);
+                        src->enabled_types = NULL;
+                        if (src->data) {
+                                kfree(src->data);
+                                kfree(src);
+                                adev->irq.client[i].sources[j] = NULL;
+                        }
                 }
+                kfree(adev->irq.client[i].sources);
         }
 }
 
@@ -290,18 +301,30 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
  * @source: irq source
  *
  */
-int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+int amdgpu_irq_add_id(struct amdgpu_device *adev,
+                      unsigned client_id, unsigned src_id,
                       struct amdgpu_irq_src *source)
 {
-        if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
+        if (client_id >= AMDGPU_IH_CLIENTID_MAX)
                 return -EINVAL;
 
-        if (adev->irq.sources[src_id] != NULL)
+        if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
                 return -EINVAL;
 
         if (!source->funcs)
                 return -EINVAL;
 
+        if (!adev->irq.client[client_id].sources) {
+                adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
+                                                              sizeof(struct amdgpu_irq_src),
+                                                              GFP_KERNEL);
+                if (!adev->irq.client[client_id].sources)
+                        return -ENOMEM;
+        }
+
+        if (adev->irq.client[client_id].sources[src_id] != NULL)
+                return -EINVAL;
+
         if (source->num_types && !source->enabled_types) {
                 atomic_t *types;
 
@@ -313,8 +336,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
                 source->enabled_types = types;
         }
 
-        adev->irq.sources[src_id] = source;
-
+        adev->irq.client[client_id].sources[src_id] = source;
         return 0;
 }
 
@@ -329,10 +351,16 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
                          struct amdgpu_iv_entry *entry)
 {
+        unsigned client_id = entry->client_id;
         unsigned src_id = entry->src_id;
         struct amdgpu_irq_src *src;
         int r;
 
+        if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
+                DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+                return;
+        }
+
         if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
                 DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
                 return;
@@ -341,7 +369,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
         if (adev->irq.virq[src_id]) {
                 generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
         } else {
-                src = adev->irq.sources[src_id];
+                if (!adev->irq.client[client_id].sources) {
+                        DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+                                  client_id, src_id);
+                        return;
+                }
+
+                src = adev->irq.client[client_id].sources[src_id];
                 if (!src) {
                         DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
                         return;
@@ -385,13 +419,20 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
 
 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
-        int i, j;
-        for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
-                struct amdgpu_irq_src *src = adev->irq.sources[i];
-                if (!src)
+        int i, j, k;
+
+        for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+                if (!adev->irq.client[i].sources)
                         continue;
-                for (j = 0; j < src->num_types; j++)
-                        amdgpu_irq_update(adev, src, j);
+
+                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+                        if (!src)
+                                continue;
+                        for (k = 0; k < src->num_types; k++)
+                                amdgpu_irq_update(adev, src, k);
+                }
         }
 }
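As a usage-side note, the other visible change in the amdgpu_irq_add_id() hunk is that the per-client sources[] array is allocated lazily on first registration for that client and a duplicate (client_id, src_id) registration is rejected. The standalone sketch below models just that registration path; the names and limits (register_irq_source, CLIENTID_MAX, SRC_ID_MAX) are made up for the example, and plain calloc stands in for the kernel's kcalloc(..., GFP_KERNEL).

```c
/*
 * Simplified model of the lazy per-client allocation performed by the
 * new amdgpu_irq_add_id(adev, client_id, src_id, source).  All names
 * and limits here are illustrative, not the kernel's.
 */
#include <errno.h>
#include <stdlib.h>

#define CLIENTID_MAX  32
#define SRC_ID_MAX    256

struct irq_source { int num_types; };
struct irq_client { struct irq_source **sources; };

static struct irq_client clients[CLIENTID_MAX];

static int register_irq_source(unsigned client_id, unsigned src_id,
                               struct irq_source *source)
{
        if (client_id >= CLIENTID_MAX || src_id >= SRC_ID_MAX || !source)
                return -EINVAL;

        /* First source for this client: allocate its per-source table. */
        if (!clients[client_id].sources) {
                clients[client_id].sources =
                        calloc(SRC_ID_MAX, sizeof(*clients[client_id].sources));
                if (!clients[client_id].sources)
                        return -ENOMEM;
        }

        /* Reject double registration of the same (client, source) slot. */
        if (clients[client_id].sources[src_id])
                return -EINVAL;

        clients[client_id].sources[src_id] = source;
        return 0;
}
```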