Diffstat (limited to 'arch/sh/drivers/dma/dma-sh.c')
-rw-r--r-- | arch/sh/drivers/dma/dma-sh.c | 290
1 file changed, 177 insertions, 113 deletions
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index a60da6dd4d17..4c171f13b0e8 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -14,35 +14,72 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/io.h>
 #include <mach-dreamcast/mach/dma.h>
 #include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/dma-sh.h>
+#include <asm/dma-register.h>
+#include <cpu/dma-register.h>
+#include <cpu/dma.h>

-#if defined(DMAE1_IRQ)
-#define NR_DMAE		2
-#else
-#define NR_DMAE		1
+/*
+ * Define the default configuration for dual address memory-memory transfer.
+ * The 0x400 value represents auto-request, external->external.
+ */
+#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT))
+
+static unsigned long dma_find_base(unsigned int chan)
+{
+	unsigned long base = SH_DMAC_BASE0;
+
+#ifdef SH_DMAC_BASE1
+	if (chan >= 6)
+		base = SH_DMAC_BASE1;
 #endif

-static const char *dmae_name[] = {
-	"DMAC Address Error0", "DMAC Address Error1"
-};
+	return base;
+}
+
+static unsigned long dma_base_addr(unsigned int chan)
+{
+	unsigned long base = dma_find_base(chan);
+
+	/* Normalize offset calculation */
+	if (chan >= 9)
+		chan -= 6;
+	if (chan >= 4)
+		base += 0x10;
+
+	return base + (chan * 0x10);
+}

+#ifdef CONFIG_SH_DMA_IRQ_MULTI
 static inline unsigned int get_dmte_irq(unsigned int chan)
 {
-	unsigned int irq = 0;
-	if (chan < ARRAY_SIZE(dmte_irq_map))
-		irq = dmte_irq_map[chan];
-
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-	if (irq > DMTE6_IRQ)
-		return DMTE6_IRQ;
-	return DMTE0_IRQ;
+	return chan >= 6 ? DMTE6_IRQ : DMTE0_IRQ;
+}
 #else
-	return irq;
+
+static unsigned int dmte_irq_map[] = {
+	DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3,
+
+#ifdef DMTE4_IRQ
+	DMTE4_IRQ, DMTE4_IRQ + 1,
+#endif
+
+#ifdef DMTE6_IRQ
+	DMTE6_IRQ, DMTE6_IRQ + 1,
+#endif
+
+#ifdef DMTE8_IRQ
+	DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, DMTE11_IRQ,
 #endif
+};
+
+static inline unsigned int get_dmte_irq(unsigned int chan)
+{
+	return dmte_irq_map[chan];
 }
+#endif

 /*
  * We determine the correct shift size based off of the CHCR transmit size
@@ -53,9 +90,10 @@ static inline unsigned int get_dmte_irq(unsigned int chan)
  * iterations to complete the transfer.
  */
 static unsigned int ts_shift[] = TS_SHIFT;
+
 static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
 {
-	u32 chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	u32 chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
 		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);

@@ -73,13 +111,13 @@ static irqreturn_t dma_tei(int irq, void *dev_id)
 	struct dma_channel *chan = dev_id;
 	u32 chcr;

-	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);

 	if (!(chcr & CHCR_TE))
 		return IRQ_NONE;

 	chcr &= ~(CHCR_IE | CHCR_DE);
-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

 	wake_up(&chan->wait_queue);

@@ -91,13 +129,8 @@ static int sh_dmac_request_dma(struct dma_channel *chan)
 	if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
 		return 0;

-	return request_irq(get_dmte_irq(chan->chan), dma_tei,
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-			   IRQF_SHARED,
-#else
-			   0,
-#endif
-			   chan->dev_id, chan);
+	return request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED,
+			   chan->dev_id, chan);
 }

 static void sh_dmac_free_dma(struct dma_channel *chan)
@@ -118,7 +151,7 @@ sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
 		chan->flags &= ~DMA_TEI_CAPABLE;
 	}

-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

 	chan->flags |= DMA_CONFIGURED;
 	return 0;
@@ -129,13 +162,13 @@ static void sh_dmac_enable_dma(struct dma_channel *chan)
 	int irq;
 	u32 chcr;

-	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 	chcr |= CHCR_DE;

 	if (chan->flags & DMA_TEI_CAPABLE)
 		chcr |= CHCR_IE;

-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));

 	if (chan->flags & DMA_TEI_CAPABLE) {
 		irq = get_dmte_irq(chan->chan);
@@ -153,9 +186,9 @@ static void sh_dmac_disable_dma(struct dma_channel *chan)
 		disable_irq(irq);
 	}

-	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
+	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
 	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
-	__raw_writel(chcr, (dma_base_addr[chan->chan] + CHCR));
+	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
 }

 static int sh_dmac_xfer_dma(struct dma_channel *chan)
@@ -186,13 +219,13 @@ static int sh_dmac_xfer_dma(struct dma_channel *chan)
 	 */
 	if (chan->sar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
-		__raw_writel(chan->sar, (dma_base_addr[chan->chan]+SAR));
+		__raw_writel(chan->sar, (dma_base_addr(chan->chan) + SAR));
 	if (chan->dar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
-		__raw_writel(chan->dar, (dma_base_addr[chan->chan] + DAR));
+		__raw_writel(chan->dar, (dma_base_addr(chan->chan) + DAR));

 	__raw_writel(chan->count >> calc_xmit_shift(chan),
-		(dma_base_addr[chan->chan] + TCR));
+		(dma_base_addr(chan->chan) + TCR));

 	sh_dmac_enable_dma(chan);

@@ -201,13 +234,32 @@ static int sh_dmac_xfer_dma(struct dma_channel *chan)

 static int sh_dmac_get_dma_residue(struct dma_channel *chan)
 {
-	if (!(__raw_readl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE))
+	if (!(__raw_readl(dma_base_addr(chan->chan) + CHCR) & CHCR_DE))
 		return 0;

-	return __raw_readl(dma_base_addr[chan->chan] + TCR)
+	return __raw_readl(dma_base_addr(chan->chan) + TCR)
 		<< calc_xmit_shift(chan);
 }

+/*
+ * DMAOR handling
+ */
+#if defined(CONFIG_CPU_SUBTYPE_SH7723)	|| \
+    defined(CONFIG_CPU_SUBTYPE_SH7724)	|| \
+    defined(CONFIG_CPU_SUBTYPE_SH7780)	|| \
+    defined(CONFIG_CPU_SUBTYPE_SH7785)
+#define NR_DMAOR	2
+#else
+#define NR_DMAOR	1
+#endif
+
+/*
+ * DMAOR bases are broken out amongst channel groups. DMAOR0 manages
+ * channels 0 - 5, DMAOR1 6 - 11 (optional).
+ */
+#define dmaor_read_reg(n)		__raw_readw(dma_find_base((n)*6))
+#define dmaor_write_reg(n, data)	__raw_writew(data, dma_find_base((n)*6))
+
 static inline int dmaor_reset(int no)
 {
 	unsigned long dmaor = dmaor_read_reg(no);
@@ -228,36 +280,86 @@ static inline int dmaor_reset(int no)
 	return 0;
 }

-#if defined(CONFIG_CPU_SH4)
-static irqreturn_t dma_err(int irq, void *dummy)
-{
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-	int cnt = 0;
-	switch (irq) {
-#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
-	case DMTE6_IRQ:
-		cnt++;
+/*
+ * DMAE handling
+ */
+#ifdef CONFIG_CPU_SH4
+
+#if defined(DMAE1_IRQ)
+#define NR_DMAE		2
+#else
+#define NR_DMAE		1
 #endif
-	case DMTE0_IRQ:
-		if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
-			disable_irq(irq);
-			/* DMA multi and error IRQ */
-			return IRQ_HANDLED;
-		}
-	default:
-		return IRQ_NONE;
-	}
+
+static const char *dmae_name[] = {
+	"DMAC Address Error0",
+	"DMAC Address Error1"
+};
+
+#ifdef CONFIG_SH_DMA_IRQ_MULTI
+static inline unsigned int get_dma_error_irq(int n)
+{
+	return get_dmte_irq(n * 6);
+}
 #else
-	dmaor_reset(0);
-#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7785)
-	dmaor_reset(1);
+
+static unsigned int dmae_irq_map[] = {
+	DMAE0_IRQ,
+
+#ifdef DMAE1_IRQ
+	DMAE1_IRQ,
+#endif
+};
+
+static inline unsigned int get_dma_error_irq(int n)
+{
+	return dmae_irq_map[n];
+}
 #endif
+
+static irqreturn_t dma_err(int irq, void *dummy)
+{
+	int i;
+
+	for (i = 0; i < NR_DMAOR; i++)
+		dmaor_reset(i);
+
+	disable_irq(irq);
+
 	return IRQ_HANDLED;
-#endif
+}
+
+static int dmae_irq_init(void)
+{
+	int n;
+
+	for (n = 0; n < NR_DMAE; n++) {
+		int i = request_irq(get_dma_error_irq(n), dma_err,
+				    IRQF_SHARED, dmae_name[n], NULL);
+		if (unlikely(i < 0)) {
+			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
+			return i;
+		}
+	}
+
+	return 0;
+}
+
+static void dmae_irq_free(void)
+{
+	int n;
+
+	for (n = 0; n < NR_DMAE; n++)
+		free_irq(get_dma_error_irq(n), NULL);
+}
+#else
+static inline int dmae_irq_init(void)
+{
+	return 0;
+}
+
+static void dmae_irq_free(void)
+{
 }
 #endif

@@ -276,72 +378,34 @@ static struct dma_info sh_dmac_info = {
 	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
 };

-#ifdef CONFIG_CPU_SH4
-static unsigned int get_dma_error_irq(int n)
-{
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-	return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6);
-#else
-	return (n == 0) ? DMAE0_IRQ :
-#if defined(DMAE1_IRQ)
-				      DMAE1_IRQ;
-#else
-				      -1;
-#endif
-#endif
-}
-#endif
-
 static int __init sh_dmac_init(void)
 {
 	struct dma_info *info = &sh_dmac_info;
-	int i;
-
-#ifdef CONFIG_CPU_SH4
-	int n;
+	int i, rc;

-	for (n = 0; n < NR_DMAE; n++) {
-		i = request_irq(get_dma_error_irq(n), dma_err,
-#if defined(CONFIG_SH_DMA_IRQ_MULTI)
-				IRQF_SHARED,
-#else
-				0,
-#endif
-				dmae_name[n], (void *)dmae_name[n]);
-		if (unlikely(i < 0)) {
-			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
-			return i;
-		}
-	}
-#endif /* CONFIG_CPU_SH4 */
+	/*
+	 * Initialize DMAE, for parts that support it.
+	 */
+	rc = dmae_irq_init();
+	if (unlikely(rc != 0))
+		return rc;

 	/*
 	 * Initialize DMAOR, and clean up any error flags that may have
 	 * been set.
 	 */
-	i = dmaor_reset(0);
-	if (unlikely(i != 0))
-		return i;
-#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
-	defined(CONFIG_CPU_SUBTYPE_SH7785)
-	i = dmaor_reset(1);
-	if (unlikely(i != 0))
-		return i;
-#endif
+	for (i = 0; i < NR_DMAOR; i++) {
+		rc = dmaor_reset(i);
+		if (unlikely(rc != 0))
+			return rc;
+	}

 	return register_dmac(info);
 }

 static void __exit sh_dmac_exit(void)
 {
-#ifdef CONFIG_CPU_SH4
-	int n;
-
-	for (n = 0; n < NR_DMAE; n++) {
-		free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
-	}
-#endif /* CONFIG_CPU_SH4 */
+	dmae_irq_free();

 	unregister_dmac(&sh_dmac_info);
 }
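
A standalone sketch (not part of the patch) of the channel-to-register mapping that the new dma_find_base()/dma_base_addr() helpers implement: the EXAMPLE_DMAC_BASE* values below are assumed placeholders standing in for SH_DMAC_BASE0/SH_DMAC_BASE1 from <cpu/dma.h>, and example_base_addr() simply repeats the patch's arithmetic so the mapping can be printed and checked.

#include <stdio.h>

/* Assumed example controller bases; real parts define SH_DMAC_BASE0/1. */
#define EXAMPLE_DMAC_BASE0	0xffa00000UL	/* channels 0-5  */
#define EXAMPLE_DMAC_BASE1	0xffa00070UL	/* channels 6-11 */

/* Mirrors dma_find_base() + dma_base_addr() from the patch above. */
static unsigned long example_base_addr(unsigned int chan)
{
	unsigned long base = (chan >= 6) ? EXAMPLE_DMAC_BASE1
					 : EXAMPLE_DMAC_BASE0;

	/* Same offset normalization as dma_base_addr() */
	if (chan >= 9)
		chan -= 6;
	if (chan >= 4)
		base += 0x10;

	return base + (chan * 0x10);
}

int main(void)
{
	unsigned int chan;

	for (chan = 0; chan < 12; chan++)
		printf("channel %2u -> register base 0x%08lx\n",
		       chan, example_base_addr(chan));

	return 0;
}

The same dma_find_base() lookup is what the new dmaor_read_reg(n)/dmaor_write_reg(n, data) macros lean on: passing (n) * 6 selects either channel 0 or channel 6, and with it the DMAOR of the first or second channel group.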