field | value | date
---|---|---
author | Christian König <christian.koenig@amd.com> | 2018-09-16 20:13:21 +0200
committer | Alex Deucher <alexander.deucher@amd.com> | 2018-09-26 21:09:21 -0500
commit | 425c31437f26436e17bdb3041a26e9864d18ba13 |
tree | e58d951076960cd2ab75d9fe9fbf1270c1d604e3 /drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c |
parent | f54b30d70bc606f7a154edba5883c7fa23838e9f |
drm/amdgpu: cleanup amdgpu_ih.c
Cleanup amdgpu_ih.c to be able to handle multiple interrupt rings.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
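
As context for the change, here is a minimal sketch of how an IP block might drive the reworked per-ring API. `example_ih_sw_init`/`example_ih_sw_fini`, the 64 KiB size, and the `use_bus_addr` value are illustrative assumptions; only the `amdgpu_ih_ring_init()`/`amdgpu_ih_ring_fini()` signatures come from the patch itself:

```c
/* Sketch only: exercises the new per-ring entry points from this patch.
 * The callers, ring size, and use_bus_addr choice are hypothetical.
 */
static int example_ih_sw_init(struct amdgpu_device *adev)
{
	/* The ring is now passed in explicitly instead of amdgpu_ih.c
	 * reaching for adev->irq.ih itself.
	 */
	return amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true);
}

static void example_ih_sw_fini(struct amdgpu_device *adev)
{
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
}
```

A second interrupt ring would simply be another `struct amdgpu_ih_ring` passed through the same entry points, which is what this cleanup enables.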
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 152
1 file changed, 67 insertions(+), 85 deletions(-)
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 4ed86218cef3..15fb0f9738ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -27,43 +27,19 @@
 #include "amdgpu_amdkfd.h"
 
 /**
- * amdgpu_ih_ring_alloc - allocate memory for the IH ring
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate a ring buffer for the interrupt controller.
- * Returns 0 for success, errors for failure.
- */
-static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
-{
-	int r;
-
-	/* Allocate ring buffer */
-	if (adev->irq.ih.ring_obj == NULL) {
-		r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
-					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
-					    &adev->irq.ih.ring_obj,
-					    &adev->irq.ih.gpu_addr,
-					    (void **)&adev->irq.ih.ring);
-		if (r) {
-			DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
-			return r;
-		}
-	}
-	return 0;
-}
-
-/**
  * amdgpu_ih_ring_init - initialize the IH state
  *
  * @adev: amdgpu_device pointer
+ * @ih: ih ring to initialize
+ * @ring_size: ring size to allocate
+ * @use_bus_addr: true when we can use dma_alloc_coherent
  *
  * Initializes the IH state and allocates a buffer
  * for the IH ring buffer.
  * Returns 0 for success, errors for failure.
  */
-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
-			bool use_bus_addr)
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+			unsigned ring_size, bool use_bus_addr)
 {
 	u32 rb_bufsz;
 	int r;
@@ -71,70 +47,76 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
 	/* Align ring size */
 	rb_bufsz = order_base_2(ring_size / 4);
 	ring_size = (1 << rb_bufsz) * 4;
-	adev->irq.ih.ring_size = ring_size;
-	adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
-	adev->irq.ih.rptr = 0;
-	adev->irq.ih.use_bus_addr = use_bus_addr;
-
-	if (adev->irq.ih.use_bus_addr) {
-		if (!adev->irq.ih.ring) {
-			/* add 8 bytes for the rptr/wptr shadows and
-			 * add them to the end of the ring allocation.
-			 */
-			adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
-								 adev->irq.ih.ring_size + 8,
-								 &adev->irq.ih.rb_dma_addr);
-			if (adev->irq.ih.ring == NULL)
-				return -ENOMEM;
-			memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
-			adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
-			adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
-		}
-		return 0;
+	ih->ring_size = ring_size;
+	ih->ptr_mask = ih->ring_size - 1;
+	ih->rptr = 0;
+	ih->use_bus_addr = use_bus_addr;
+
+	if (use_bus_addr) {
+		if (ih->ring)
+			return 0;
+
+		/* add 8 bytes for the rptr/wptr shadows and
+		 * add them to the end of the ring allocation.
+		 */
+		ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
+					      &ih->rb_dma_addr, GFP_KERNEL);
+		if (ih->ring == NULL)
+			return -ENOMEM;
+
+		memset((void *)ih->ring, 0, ih->ring_size + 8);
+		ih->wptr_offs = (ih->ring_size / 4) + 0;
+		ih->rptr_offs = (ih->ring_size / 4) + 1;
 	} else {
-		r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
+		r = amdgpu_device_wb_get(adev, &ih->wptr_offs);
+		if (r)
+			return r;
+
+		r = amdgpu_device_wb_get(adev, &ih->rptr_offs);
 		if (r) {
-			dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
+			amdgpu_device_wb_free(adev, ih->wptr_offs);
 			return r;
 		}
 
-		r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
+		r = amdgpu_bo_create_kernel(adev, ih->ring_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_GTT,
+					    &ih->ring_obj, &ih->gpu_addr,
+					    (void **)&ih->ring);
 		if (r) {
-			amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
-			dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
+			amdgpu_device_wb_free(adev, ih->rptr_offs);
+			amdgpu_device_wb_free(adev, ih->wptr_offs);
 			return r;
 		}
-
-		return amdgpu_ih_ring_alloc(adev);
 	}
+	return 0;
 }
 
 /**
  * amdgpu_ih_ring_fini - tear down the IH state
  *
  * @adev: amdgpu_device pointer
+ * @ih: ih ring to tear down
  *
  * Tears down the IH state and frees buffer
  * used for the IH ring buffer.
  */
-void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 {
-	if (adev->irq.ih.use_bus_addr) {
-		if (adev->irq.ih.ring) {
-			/* add 8 bytes for the rptr/wptr shadows and
-			 * add them to the end of the ring allocation.
-			 */
-			pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
-					    (void *)adev->irq.ih.ring,
-					    adev->irq.ih.rb_dma_addr);
-			adev->irq.ih.ring = NULL;
-		}
+	if (ih->use_bus_addr) {
+		if (!ih->ring)
+			return;
+
+		/* add 8 bytes for the rptr/wptr shadows and
+		 * add them to the end of the ring allocation.
+		 */
+		dma_free_coherent(adev->dev, ih->ring_size + 8,
+				  (void *)ih->ring, ih->rb_dma_addr);
+		ih->ring = NULL;
 	} else {
-		amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
-				      &adev->irq.ih.gpu_addr,
-				      (void **)&adev->irq.ih.ring);
-		amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
-		amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
+		amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
+				      (void **)&ih->ring);
+		amdgpu_device_wb_free(adev, ih->wptr_offs);
+		amdgpu_device_wb_free(adev, ih->rptr_offs);
 	}
 }
 
@@ -142,56 +124,56 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
  * amdgpu_ih_process - interrupt handler
  *
  * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
  *
  * Interrupt hander (VI), walk the IH ring.
  * Returns irq process return code.
  */
-int amdgpu_ih_process(struct amdgpu_device *adev)
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 {
 	struct amdgpu_iv_entry entry;
 	u32 wptr;
 
-	if (!adev->irq.ih.enabled || adev->shutdown)
+	if (!ih->enabled || adev->shutdown)
 		return IRQ_NONE;
 
 	wptr = amdgpu_ih_get_wptr(adev);
 
restart_ih:
 	/* is somebody else already processing irqs? */
-	if (atomic_xchg(&adev->irq.ih.lock, 1))
+	if (atomic_xchg(&ih->lock, 1))
 		return IRQ_NONE;
 
-	DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);
+	DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
 
 	/* Order reading of wptr vs. reading of IH ring data */
 	rmb();
 
-	while (adev->irq.ih.rptr != wptr) {
-		u32 ring_index = adev->irq.ih.rptr >> 2;
+	while (ih->rptr != wptr) {
+		u32 ring_index = ih->rptr >> 2;
 
 		/* Prescreening of high-frequency interrupts */
 		if (!amdgpu_ih_prescreen_iv(adev)) {
-			adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
+			ih->rptr &= ih->ptr_mask;
 			continue;
 		}
 
 		/* Before dispatching irq to IP blocks, send it to amdkfd */
 		amdgpu_amdkfd_interrupt(adev,
-				(const void *) &adev->irq.ih.ring[ring_index]);
+				(const void *) &ih->ring[ring_index]);
 
-		entry.iv_entry = (const uint32_t *)
-			&adev->irq.ih.ring[ring_index];
+		entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
 		amdgpu_ih_decode_iv(adev, &entry);
-		adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
+		ih->rptr &= ih->ptr_mask;
 
 		amdgpu_irq_dispatch(adev, &entry);
 	}
 	amdgpu_ih_set_rptr(adev);
-	atomic_set(&adev->irq.ih.lock, 0);
+	atomic_set(&ih->lock, 0);
 
 	/* make sure wptr hasn't changed while processing */
 	wptr = amdgpu_ih_get_wptr(adev);
-	if (wptr != adev->irq.ih.rptr)
+	if (wptr != ih->rptr)
 		goto restart_ih;
 
 	return IRQ_HANDLED;
```
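
A note on the size handling at the top of `amdgpu_ih_ring_init()`: rounding the ring to a power of two is what lets `ptr_mask = ring_size - 1` wrap the read pointer with a single AND. Below is a standalone sketch of that arithmetic in plain C; the loop-based `order_base_2()` is an illustrative stand-in for the kernel helper, not its actual implementation:

```c
#include <stdio.h>

/* Illustrative stand-in for the kernel's order_base_2(): smallest n
 * such that (1u << n) >= x. An assumption for this sketch only.
 */
static unsigned int order_base_2(unsigned int x)
{
	unsigned int n = 0;

	while ((1u << n) < x)
		n++;
	return n;
}

int main(void)
{
	unsigned int ring_size = 5000;	/* arbitrary requested size */
	unsigned int rb_bufsz = order_base_2(ring_size / 4);
	unsigned int ptr_mask;

	/* Same two lines as amdgpu_ih_ring_init(): round up to a power of two. */
	ring_size = (1u << rb_bufsz) * 4;
	ptr_mask = ring_size - 1;

	/* Wrap-around is now a single AND instead of a compare and subtract. */
	printf("ring_size=%u ptr_mask=0x%x wrapped(8196)=%u\n",
	       ring_size, ptr_mask, 8196u & ptr_mask);
	return 0;
}
```

For a requested 5000 bytes this prints `ring_size=8192 ptr_mask=0x1fff wrapped(8196)=4`, i.e. a pointer just past the end lands back at byte 4.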
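Similarly, the `use_bus_addr` path sizes its coherent allocation as `ring_size + 8` so that the two 4-byte wptr/rptr shadows sit directly behind the ring data, which is why `wptr_offs` and `rptr_offs` are expressed in u32 units (`ring_size / 4`). A userspace sketch of that layout, with a hypothetical ring size standing in for whatever the caller requests:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t ring_size = 4096;	/* hypothetical, already a power of two */

	/* The patch allocates ring_size + 8: the ring itself plus two u32
	 * shadow slots (wptr, then rptr) directly behind it.
	 */
	uint32_t *ring = calloc(1, ring_size + 8);
	size_t wptr_offs = (ring_size / 4) + 0;	/* offsets in u32 units */
	size_t rptr_offs = (ring_size / 4) + 1;

	if (!ring)
		return 1;

	ring[wptr_offs] = 0x40;		/* the hardware would write this */
	printf("wptr shadow at byte %zu, rptr shadow at byte %zu, wptr=0x%x\n",
	       wptr_offs * 4, rptr_offs * 4, ring[wptr_offs]);
	free(ring);
	return 0;
}
```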
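Finally, the control flow of `amdgpu_ih_process()` is untouched by the patch but worth spelling out: `atomic_xchg()` acts as a try-lock, so a concurrent caller bails out instead of blocking, and re-reading the write pointer after dropping the lock closes the window where new entries arrive unnoticed. A userspace sketch of the same idiom with C11 atomics; all names here are hypothetical, and the fixed 16-byte stride stands in for the hardware-specific advance done by `amdgpu_ih_decode_iv()`:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PTR_MASK 0x1fffu	/* ring_size - 1, power of two */

static atomic_int ih_lock;	/* 0 = free, 1 = held */
static unsigned int rptr;
static unsigned int hw_wptr;	/* stand-in for the hardware write pointer */

/* Hypothetical stand-in for amdgpu_ih_get_wptr(). */
static unsigned int example_get_wptr(void)
{
	return hw_wptr & PTR_MASK;
}

/* Hypothetical stand-in for decoding and dispatching one IV entry. */
static void example_handle_entry(unsigned int ring_index)
{
	printf("handling entry at dword %u\n", ring_index);
}

static bool example_ih_process(void)
{
	unsigned int wptr = example_get_wptr();

restart_ih:
	/* is somebody else already processing irqs? (try-lock) */
	if (atomic_exchange(&ih_lock, 1))
		return false;

	while (rptr != wptr) {
		example_handle_entry(rptr >> 2);
		rptr = (rptr + 16) & PTR_MASK;	/* 16-byte IV entries */
	}
	atomic_store(&ih_lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = example_get_wptr();
	if (wptr != rptr)
		goto restart_ih;

	return true;
}

int main(void)
{
	hw_wptr = 48;	/* pretend three entries arrived */
	return example_ih_process() ? 0 : 1;
}
```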