author		Alex Deucher <alexander.deucher@amd.com>	2012-03-20 17:18:22 -0400
committer	Dave Airlie <airlied@redhat.com>	2012-03-21 06:55:54 +0000
commit		25a857fbe973bdcc7df0df2e0c8f9c6e1ab0e475 (patch)
tree		48299c76543e234ef10ad746dc9d4e0904f472ad /drivers/gpu/drm/radeon
parent		347e7592beb0abd56a11ec16ca8aba9f60681f13 (diff)
drm/radeon/kms: add support for interrupts on SI
This is mostly identical to evergreen/ni; however,
there are some additional fields in the IV vector
for RINGID and VMID.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	|   4
-rw-r--r--	drivers/gpu/drm/radeon/si.c	| 721
-rw-r--r--	drivers/gpu/drm/radeon/sid.h	| 158
3 files changed, 881 insertions, 2 deletions
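The IV vector layout the commit message calls out (RINGID and VMID are new relative to evergreen/ni) is documented in the comment block above si_irq_process() in the diff below. As a quick orientation, here is a minimal standalone sketch of decoding one 128-bit IV entry; it is not driver code, the struct and function names are illustrative, and le32_to_cpu() byte-swapping is omitted (a little-endian host is assumed):

#include <stdint.h>
#include <stdio.h>

/* One SI IV ring entry is four 32-bit dwords (16 bytes); rptr/wptr
 * are byte offsets, which is why the driver advances rptr by 16. */
struct iv_entry {
	uint32_t src_id;   /* dw0 bits [7:0]  - interrupt source id */
	uint32_t src_data; /* dw1 bits [27:0] - interrupt source data */
	uint32_t ring_id;  /* dw2 bits [7:0]  - RINGID (new on SI) */
	uint32_t vm_id;    /* dw2 bits [15:8] - VMID   (new on SI) */
};

static struct iv_entry decode_iv(const uint32_t *ring, uint32_t rptr)
{
	uint32_t i = rptr / 4;	/* byte offset -> dword index */
	struct iv_entry e;

	e.src_id   = ring[i] & 0xff;
	e.src_data = ring[i + 1] & 0xfffffff;
	e.ring_id  = ring[i + 2] & 0xff;
	e.vm_id    = (ring[i + 2] >> 8) & 0xff;
	return e;
}

int main(void)
{
	/* fabricated example: CP EOP event (src_id 181) on ring 1 */
	uint32_t ring[4] = { 181, 0, 1, 0 };
	struct iv_entry e = decode_iv(ring, 0);

	printf("src_id=%u src_data=%u ring_id=%u vm_id=%u\n",
	       e.src_id, e.src_data, e.ring_id, e.vm_id);
	return 0;
}

The ring_id field is what lets the single case-181 handler in si_irq_process() route a CP end-of-pipe event to the correct fence ring; this patch does not yet consume vm_id.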
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 5eb23829353f..924b68718b82 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2778,7 +2778,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rdev->ih.rptr = 0;
 }

-static int r600_ih_ring_alloc(struct radeon_device *rdev)
+int r600_ih_ring_alloc(struct radeon_device *rdev)
 {
 	int r;

@@ -2814,7 +2814,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
 	return 0;
 }

-static void r600_ih_ring_fini(struct radeon_device *rdev)
+void r600_ih_ring_fini(struct radeon_device *rdev)
 {
 	int r;
 	if (rdev->ih.ring_obj) {
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 4252cd0ab64b..6aecbf54cd5d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -55,6 +55,8 @@ MODULE_FIRMWARE("radeon/VERDE_ce.bin");
 MODULE_FIRMWARE("radeon/VERDE_mc.bin");
 MODULE_FIRMWARE("radeon/VERDE_rlc.bin");

+extern int r600_ih_ring_alloc(struct radeon_device *rdev);
+extern void r600_ih_ring_fini(struct radeon_device *rdev);
 extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
 extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
 extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
@@ -3072,3 +3074,722 @@ static int si_rlc_resume(struct radeon_device *rdev)

 	return 0;
 }
+
+static void si_enable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	rdev->ih.enabled = true;
+}
+
+static void si_disable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	rdev->ih.enabled = false;
+	rdev->ih.wptr = 0;
+	rdev->ih.rptr = 0;
+}
+
+static void si_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING1, 0);
+	WREG32(CP_INT_CNTL_RING2, 0);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	if (rdev->num_crtc >= 4) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+
+	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD1_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD2_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD3_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD4_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD5_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD6_INT_CONTROL, tmp);
+
+}
+
+static int si_irq_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+	/* allocate ring */
+	ret = r600_ih_ring_alloc(rdev);
+	if (ret)
+		return ret;
+
+	/* disable irqs */
+	si_disable_interrupts(rdev);
+
+	/* init rlc */
+	ret = si_rlc_resume(rdev);
+	if (ret) {
+		r600_ih_ring_fini(rdev);
+		return ret;
+	}
+
+	/* setup interrupt control */
+	/* set dummy read address to ring address */
+	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+		      IH_WPTR_OVERFLOW_CLEAR |
+		      (rb_bufsz << 1));
+
+	if (rdev->wb.enabled)
+		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+	/* set the writeback address whether it's enabled or not */
+	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
+
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+	/* RPTR_REARM only works if msi's are enabled */
+	if (rdev->msi_enabled)
+		ih_cntl |= RPTR_REARM;
+	WREG32(IH_CNTL, ih_cntl);
+
+	/* force the active interrupt state to all disabled */
+	si_disable_interrupt_state(rdev);
+
+	/* enable irqs */
+	si_enable_interrupts(rdev);
+
+	return ret;
+}
+
+int si_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
+	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+	u32 grbm_int_cntl = 0;
+	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		si_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		si_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+	/* enable CP interrupts on all rings */
+	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+		DRM_DEBUG("si_irq_set: sw int gfx\n");
+		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+	}
+	if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+		DRM_DEBUG("si_irq_set: sw int cp1\n");
+		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+	}
+	if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+		DRM_DEBUG("si_irq_set: sw int cp2\n");
+		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+	}
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
+		DRM_DEBUG("si_irq_set: vblank 0\n");
+		crtc1 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
+		DRM_DEBUG("si_irq_set: vblank 1\n");
+		crtc2 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[2] ||
+	    rdev->irq.pflip[2]) {
+		DRM_DEBUG("si_irq_set: vblank 2\n");
+		crtc3 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[3] ||
+	    rdev->irq.pflip[3]) {
+		DRM_DEBUG("si_irq_set: vblank 3\n");
+		crtc4 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[4] ||
+	    rdev->irq.pflip[4]) {
+		DRM_DEBUG("si_irq_set: vblank 4\n");
+		crtc5 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[5] ||
+	    rdev->irq.pflip[5]) {
+		DRM_DEBUG("si_irq_set: vblank 5\n");
+		crtc6 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("si_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("si_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("si_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("si_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("si_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("si_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.gui_idle) {
+		DRM_DEBUG("gui idle\n");
+		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+	}
+
+	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
+	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
+
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+	if (rdev->num_crtc >= 4) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+	}
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+	}
+
+	WREG32(DC_HPD1_INT_CONTROL, hpd1);
+	WREG32(DC_HPD2_INT_CONTROL, hpd2);
+	WREG32(DC_HPD3_INT_CONTROL, hpd3);
+	WREG32(DC_HPD4_INT_CONTROL, hpd4);
+	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
+	return 0;
+}
+
+static inline void si_irq_ack(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	if (rdev->num_crtc >= 4) {
+		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	}
+	if (rdev->num_crtc >= 6) {
+		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	}
+
+	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+	if (rdev->num_crtc >= 4) {
+		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->num_crtc >= 6) {
+		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
+}
+
+static void si_irq_disable(struct radeon_device *rdev)
+{
+	si_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	si_irq_ack(rdev);
+	si_disable_interrupt_state(rdev);
+}
+
+static void si_irq_suspend(struct radeon_device *rdev)
+{
+	si_irq_disable(rdev);
+	si_rlc_stop(rdev);
+}
+
+static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last not overwritten vector (wptr + 16). Hopefully
+		 * this should allow us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
+/* SI IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0]    - interrupt source id
+ * [31:8]   - reserved
+ * [59:32]  - interrupt source data
+ * [63:60]  - reserved
+ * [71:64]  - RINGID
+ * [79:72]  - VMID
+ * [127:80] - reserved
+ */
+int si_irq_process(struct radeon_device *rdev)
+{
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data, ring_id;
+	u32 ring_index;
+	unsigned long flags;
+	bool queue_hotplug = false;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	wptr = si_get_ih_wptr(rdev);
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	spin_lock_irqsave(&rdev->ih.lock, flags);
+	if (rptr == wptr) {
+		spin_unlock_irqrestore(&rdev->ih.lock, flags);
+		return IRQ_NONE;
+	}
+restart_ih:
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	si_irq_ack(rdev);
+
+	rdev->ih.wptr = wptr;
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[0])
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D1 vblank\n");
+				}
+				break;
+			case 1: /* D1 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D1 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 2: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[1])
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D2 vblank\n");
+				}
+				break;
+			case 1: /* D2 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D2 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 3: /* D3 vblank/vline */
+			switch (src_data) {
+			case 0: /* D3 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[2]) {
+						drm_handle_vblank(rdev->ddev, 2);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[2])
+						radeon_crtc_handle_flip(rdev, 2);
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D3 vblank\n");
+				}
+				break;
+			case 1: /* D3 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D3 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 4: /* D4 vblank/vline */
+			switch (src_data) {
+			case 0: /* D4 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[3]) {
+						drm_handle_vblank(rdev->ddev, 3);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[3])
+						radeon_crtc_handle_flip(rdev, 3);
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D4 vblank\n");
+				}
+				break;
+			case 1: /* D4 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D4 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D5 vblank/vline */
+			switch (src_data) {
+			case 0: /* D5 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[4]) {
+						drm_handle_vblank(rdev->ddev, 4);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[4])
+						radeon_crtc_handle_flip(rdev, 4);
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D5 vblank\n");
+				}
+				break;
+			case 1: /* D5 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D5 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 6: /* D6 vblank/vline */
+			switch (src_data) {
+			case 0: /* D6 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[5]) {
+						drm_handle_vblank(rdev->ddev, 5);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[5])
+						radeon_crtc_handle_flip(rdev, 5);
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D6 vblank\n");
+				}
+				break;
+			case 1: /* D6 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D6 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 42: /* HPD hotplug */
+			switch (src_data) {
+			case 0:
+				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD1\n");
+				}
+				break;
+			case 1:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD2\n");
+				}
+				break;
+			case 2:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD3\n");
+				}
+				break;
+			case 3:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD4\n");
+				}
+				break;
+			case 4:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD5\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD6\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 176: /* RINGID0 CP_INT */
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 177: /* RINGID1 CP_INT */
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+			break;
+		case 178: /* RINGID2 CP_INT */
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			switch (ring_id) {
+			case 0:
+				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+				break;
+			case 1:
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+				break;
+			case 2:
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+				break;
+			}
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			rdev->pm.gui_idle = true;
+			wake_up(&rdev->irq.idle_queue);
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+	}
+	/* make sure wptr hasn't changed while processing */
+	wptr = si_get_ih_wptr(rdev);
+	if (wptr != rdev->ih.wptr)
+		goto restart_ih;
+	if (queue_hotplug)
+		schedule_work(&rdev->hotplug_work);
+	rdev->ih.rptr = rptr;
+	WREG32(IH_RB_RPTR, rdev->ih.rptr);
+	spin_unlock_irqrestore(&rdev->ih.lock, flags);
+	return IRQ_HANDLED;
+}
+
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 78b5649aeceb..53ea2c42dbd6 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -182,8 +182,42 @@
 #define HDP_MISC_CNTL 0x2F4C
 #define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)

+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 1)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+# define MC_VMID(x) ((x) << 25)
+
 #define CONFIG_MEMSIZE 0x5428

+#define INTERRUPT_CNTL 0x5468
+# define IH_DUMMY_RD_OVERRIDE (1 << 0)
+# define IH_DUMMY_RD_EN (1 << 1)
+# define IH_REQ_NONSNOOP_EN (1 << 3)
+# define GEN_IH_INT_EN (1 << 8)
+#define INTERRUPT_CNTL2 0x546c
+
 #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480

 #define BIF_FB_EN 0x5490
@@ -207,6 +241,108 @@
 # define LATENCY_LOW_WATERMARK(x) ((x) << 0)
 # define LATENCY_HIGH_WATERMARK(x) ((x) << 16)

+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS 0x6bb8
+# define VLINE_OCCURRED (1 << 0)
+# define VLINE_ACK (1 << 4)
+# define VLINE_STAT (1 << 12)
+# define VLINE_INTERRUPT (1 << 16)
+# define VLINE_INTERRUPT_TYPE (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS 0x6bbc
+# define VBLANK_OCCURRED (1 << 0)
+# define VBLANK_ACK (1 << 4)
+# define VBLANK_STAT (1 << 12)
+# define VBLANK_INTERRUPT (1 << 16)
+# define VBLANK_INTERRUPT_TYPE (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK 0x6b40
+# define VBLANK_INT_MASK (1 << 0)
+# define VLINE_INT_MASK (1 << 4)
+
+#define DISP_INTERRUPT_STATUS 0x60f4
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D1_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD1_INTERRUPT (1 << 17)
+# define DC_HPD1_RX_INTERRUPT (1 << 18)
+# define DACA_AUTODETECT_INTERRUPT (1 << 22)
+# define DACB_AUTODETECT_INTERRUPT (1 << 23)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
+# define LB_D2_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD2_INTERRUPT (1 << 17)
+# define DC_HPD2_RX_INTERRUPT (1 << 18)
+# define DISP_TIMER_INTERRUPT (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
+# define LB_D3_VLINE_INTERRUPT (1 << 2)
+# define LB_D3_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD3_INTERRUPT (1 << 17)
+# define DC_HPD3_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
+# define LB_D4_VLINE_INTERRUPT (1 << 2)
+# define LB_D4_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD4_INTERRUPT (1 << 17)
+# define DC_HPD4_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
+# define LB_D5_VLINE_INTERRUPT (1 << 2)
+# define LB_D5_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD5_INTERRUPT (1 << 17)
+# define DC_HPD5_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
+# define LB_D6_VLINE_INTERRUPT (1 << 2)
+# define LB_D6_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD6_INTERRUPT (1 << 17)
+# define DC_HPD6_RX_INTERRUPT (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x6858
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x685c
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+
+#define DC_HPD1_INT_STATUS 0x601c
+#define DC_HPD2_INT_STATUS 0x6028
+#define DC_HPD3_INT_STATUS 0x6034
+#define DC_HPD4_INT_STATUS 0x6040
+#define DC_HPD5_INT_STATUS 0x604c
+#define DC_HPD6_INT_STATUS 0x6058
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HPD1_INT_CONTROL 0x6020
+#define DC_HPD2_INT_CONTROL 0x602c
+#define DC_HPD3_INT_CONTROL 0x6038
+#define DC_HPD4_INT_CONTROL 0x6044
+#define DC_HPD5_INT_CONTROL 0x6050
+#define DC_HPD6_INT_CONTROL 0x605c
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+#define DC_HPD1_CONTROL 0x6024
+#define DC_HPD2_CONTROL 0x6030
+#define DC_HPD3_CONTROL 0x603c
+#define DC_HPD4_CONTROL 0x6048
+#define DC_HPD5_CONTROL 0x6054
+#define DC_HPD6_CONTROL 0x6060
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+# define DC_HPDx_EN (1 << 28)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT 0x6e98
+
 #define GRBM_CNTL 0x8000
 #define GRBM_READ_TIMEOUT(x) ((x) << 0)

@@ -273,6 +409,10 @@

 #define GRBM_GFX_INDEX 0x802C

+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
 #define SCRATCH_REG0 0x8500
 #define SCRATCH_REG1 0x8504
 #define SCRATCH_REG2 0x8508
@@ -510,6 +650,24 @@
 #define CP_RB2_RPTR_ADDR 0xC19C
 #define CP_RB2_RPTR_ADDR_HI 0xC1A0
 #define CP_RB2_WPTR 0xC1A4
+#define CP_INT_CNTL_RING0 0xC1A8
+#define CP_INT_CNTL_RING1 0xC1AC
+#define CP_INT_CNTL_RING2 0xC1B0
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define CP_RINGID2_INT_ENABLE (1 << 29)
+# define CP_RINGID1_INT_ENABLE (1 << 30)
+# define CP_RINGID0_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS_RING0 0xC1B4
+#define CP_INT_STATUS_RING1 0xC1B8
+#define CP_INT_STATUS_RING2 0xC1BC
+# define WAIT_MEM_SEM_INT_STAT (1 << 21)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define CP_RINGID2_INT_STAT (1 << 29)
+# define CP_RINGID1_INT_STAT (1 << 30)
+# define CP_RINGID0_INT_STAT (1 << 31)
 #define CP_DEBUG 0xC1FC
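A note on the pointer arithmetic used by si_irq_init() and si_get_ih_wptr() above: the IH ring size is a power of two in bytes, IH_RB_CNTL takes the log2 of the size in dwords in its IH_IB_SIZE field (hence the "(rb_bufsz << 1)" term, since the field starts at bit 1), and ih.ptr_mask wraps byte offsets. Below is a minimal standalone sketch, assuming the usual radeon convention ptr_mask = ring_size - 1; variable names are illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ring_size = 64 * 1024;		/* bytes, power of two */
	uint32_t ptr_mask = ring_size - 1;	/* wraps byte offsets */

	/* drm_order() equivalent: log2 of the ring size in dwords */
	uint32_t rb_bufsz = 0;
	while ((1u << rb_bufsz) < ring_size / 4)
		rb_bufsz++;
	printf("rb_bufsz=%u, IH_IB_SIZE field=0x%x\n", rb_bufsz, rb_bufsz << 1);

	/* consuming entries: 16 bytes each, the offset wraps at the ring end */
	uint32_t rptr = ring_size - 16;
	rptr = (rptr + 16) & ptr_mask;
	printf("rptr after wrap = %u\n", rptr);	/* prints 0 */

	/* overflow recovery as in si_get_ih_wptr(): resume parsing just
	 * past the oldest vector that may have been overwritten */
	uint32_t wptr = 0x120;			/* sampled write pointer */
	printf("resync rptr = 0x%x\n", (wptr + 16) & ptr_mask);
	return 0;
}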