author      Frank Li <Frank.Li@freescale.com>        2013-01-03 16:04:23 +0000
committer   David S. Miller <davem@davemloft.net>    2013-01-04 15:15:39 -0800
commit      ff43da86c69d76a726ffe7d1666148960dc1d108
tree        cb043ff99ad1a7df3b51e79907734173ea93d817 /drivers/net/ethernet/freescale/fec.c
parent      579e1d816260d9f66cb63e4c2911794c9970c293
NET: FEC: dynamically check DMA desc buff type
The i.MX6 and i.MX28 support enhanced DMA descriptor buffers for IEEE 1588
PTP, but the i.MX25, i.MX3x and i.MX5x do not. Check the FEC type at runtime
and choose the correct DMA descriptor buffer type accordingly.

Remove the static CONFIG_FEC_PTP option; the PTP function is now detected
automatically.
Signed-off-by: Frank Li <Frank.Li@freescale.com>
Acked-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
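
Editor's note: the reason the ring can no longer be walked with plain pointer
arithmetic is that the enhanced (1588-capable) descriptor is larger than the
legacy one. The stand-alone sketch below illustrates the idea; the struct
layouts are simplified stand-ins, not the driver's exact bufdesc/bufdesc_ex
definitions, and next_desc() only mirrors the role of the patch's
fec_enet_get_nextdesc().

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the two FEC descriptor layouts; the real kernel
 * structs carry more detail, but the size relationship is the point: the
 * enhanced descriptor is larger than the legacy one.
 */
struct bufdesc {                        /* legacy descriptor */
        uint16_t cbd_datlen;
        uint16_t cbd_sc;
        uint32_t cbd_bufaddr;
};

struct bufdesc_ex {                     /* enhanced (1588-capable) descriptor */
        struct bufdesc desc;
        uint32_t cbd_esc;
        uint32_t cbd_prot;
        uint32_t cbd_bdu;
        uint32_t ts;                    /* hardware timestamp */
        uint16_t res[4];
};

/* Mirrors the role of fec_enet_get_nextdesc() in the patch: the step through
 * the ring depends on which descriptor type the ring actually holds.
 */
static struct bufdesc *next_desc(struct bufdesc *bdp, int is_ex)
{
        if (is_ex)
                return (struct bufdesc *)((struct bufdesc_ex *)bdp + 1);
        return bdp + 1;
}

int main(void)
{
        struct bufdesc_ex ring[4];
        struct bufdesc *bdp = (struct bufdesc *)ring;

        printf("legacy desc: %zu bytes, enhanced desc: %zu bytes\n",
               sizeof(struct bufdesc), sizeof(struct bufdesc_ex));

        /* Stepping with the wrong stride would land in the middle of an
         * enhanced entry; passing is_ex selects the right increment.
         */
        bdp = next_desc(bdp, 1);
        printf("second ring entry starts at byte offset %td\n",
               (char *)bdp - (char *)ring);
        return 0;
}

With these (assumed) fields the enhanced descriptor works out to 32 bytes
against 8 for the legacy one, which lines up with the updated descriptor-ring
size check in the patch.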
Diffstat (limited to 'drivers/net/ethernet/freescale/fec.c')
-rw-r--r--   drivers/net/ethernet/freescale/fec.c   |   175
1 file changed, 108 insertions(+), 67 deletions(-)
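
Editor's note: instead of a compile-time CONFIG_FEC_PTP switch, the capability
is keyed off a per-SoC quirk bit carried in the device-id table and evaluated
at probe time. A small stand-alone sketch of that pattern follows; the table
and probe() below are simplified stand-ins for the driver's platform_device_id
matching, not the kernel API, and only the quirk-flag name mirrors the patch.

#include <stdio.h>
#include <string.h>

/* Quirk bit named after FEC_QUIRK_HAS_BUFDESC_EX from the patch. */
#define QUIRK_HAS_BUFDESC_EX    (1 << 4)

struct fec_devtype {
        const char *name;
        unsigned long driver_data;
};

/* Simplified stand-in for the driver's device-id table: only the i.MX6Q
 * entry carries the enhanced-descriptor quirk, matching the hunk that
 * extends the "imx6q-fec" entry.
 */
static const struct fec_devtype devtypes[] = {
        { "imx25-fec", 0 },
        { "imx6q-fec", QUIRK_HAS_BUFDESC_EX },
};

/* At probe time the matched entry's flags decide whether extended
 * descriptors (and therefore PTP) are used; no config option is needed.
 */
static int probe(const char *name)
{
        for (size_t i = 0; i < sizeof(devtypes) / sizeof(devtypes[0]); i++) {
                if (strcmp(devtypes[i].name, name))
                        continue;
                int bufdesc_ex =
                        !!(devtypes[i].driver_data & QUIRK_HAS_BUFDESC_EX);
                printf("%s: bufdesc_ex=%d, PTP %s\n", name, bufdesc_ex,
                       bufdesc_ex ? "auto-enabled" : "unavailable");
                return 0;
        }
        return -1;
}

int main(void)
{
        probe("imx25-fec");
        probe("imx6q-fec");
        return 0;
}

This is the same shape as the probe-time check in the patch, where
fep->bufdesc_ex is set from pdev->id_entry->driver_data &
FEC_QUIRK_HAS_BUFDESC_EX and everything PTP-related is gated on it.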
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 0704bcab178a..a3793190437f 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -76,6 +76,8 @@
 #define FEC_QUIRK_USE_GASKET            (1 << 2)
 /* Controller has GBIT support */
 #define FEC_QUIRK_HAS_GBIT              (1 << 3)
+/* Controller has extend desc buffer */
+#define FEC_QUIRK_HAS_BUFDESC_EX        (1 << 4)
 
 static struct platform_device_id fec_devtype[] = {
         {
@@ -93,7 +95,8 @@ static struct platform_device_id fec_devtype[] = {
                 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
         }, {
                 .name = "imx6q-fec",
-                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT,
+                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+                                FEC_QUIRK_HAS_BUFDESC_EX,
         }, {
                 /* sentinel */
         }
@@ -140,7 +143,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #endif
 #endif /* CONFIG_M5272 */
 
-#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
+#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
 #error "FEC: descriptor ring size constants too large"
 #endif
 
@@ -192,6 +195,24 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 
 static int mii_cnt;
 
+static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+{
+        struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
+        if (is_ex)
+                return (struct bufdesc *)(ex + 1);
+        else
+                return bdp + 1;
+}
+
+static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+{
+        struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
+        if (is_ex)
+                return (struct bufdesc *)(ex - 1);
+        else
+                return bdp - 1;
+}
+
 static void *swap_buffer(void *bufaddr, int len)
 {
         int i;
@@ -248,7 +269,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
          */
         if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
                 unsigned int index;
-                index = bdp - fep->tx_bd_base;
+                if (fep->bufdesc_ex)
+                        index = (struct bufdesc_ex *)bdp -
+                                (struct bufdesc_ex *)fep->tx_bd_base;
+                else
+                        index = bdp - fep->tx_bd_base;
                 memcpy(fep->tx_bounce[index], skb->data, skb->len);
                 bufaddr = fep->tx_bounce[index];
         }
@@ -280,17 +305,19 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                         | BD_ENET_TX_LAST | BD_ENET_TX_TC);
         bdp->cbd_sc = status;
 
-#ifdef CONFIG_FEC_PTP
-        bdp->cbd_bdu = 0;
-        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+        if (fep->bufdesc_ex) {
+
+                struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+                ebdp->cbd_bdu = 0;
+                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
                         fep->hwts_tx_en)) {
-                        bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+                        ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-        } else {
+                } else {
 
-                bdp->cbd_esc = BD_ENET_TX_INT;
+                        ebdp->cbd_esc = BD_ENET_TX_INT;
+                }
         }
-#endif
 
         /* Trigger transmission start */
         writel(0, fep->hwp + FEC_X_DES_ACTIVE);
@@ -298,7 +325,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         if (status & BD_ENET_TX_WRAP)
                 bdp = fep->tx_bd_base;
         else
-                bdp++;
+                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
         if (bdp == fep->dirty_tx) {
                 fep->tx_full = 1;
@@ -359,8 +386,12 @@ fec_restart(struct net_device *ndev, int duplex)
 
         /* Set receive and transmit descriptor base. */
         writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
-        writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
-                        fep->hwp + FEC_X_DES_START);
+        if (fep->bufdesc_ex)
+                writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
+                        * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+        else
+                writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
+                        * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
         fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
         fep->cur_rx = fep->rx_bd_base;
@@ -448,17 +479,16 @@ fec_restart(struct net_device *ndev, int duplex)
                 writel(1 << 8, fep->hwp + FEC_X_WMRK);
         }
 
-#ifdef CONFIG_FEC_PTP
-        ecntl |= (1 << 4);
-#endif
+        if (fep->bufdesc_ex)
+                ecntl |= (1 << 4);
 
         /* And last, enable the transmit and receive processing */
         writel(ecntl, fep->hwp + FEC_ECNTRL);
         writel(0, fep->hwp + FEC_R_DES_ACTIVE);
 
-#ifdef CONFIG_FEC_PTP
-        fec_ptp_start_cyclecounter(ndev);
-#endif
+        if (fep->bufdesc_ex)
+                fec_ptp_start_cyclecounter(ndev);
+
         /* Enable interrupts we wish to service */
         writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
@@ -544,19 +574,20 @@ fec_enet_tx(struct net_device *ndev)
                         ndev->stats.tx_packets++;
                 }
 
-#ifdef CONFIG_FEC_PTP
-                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+                        fep->bufdesc_ex) {
                         struct skb_shared_hwtstamps shhwtstamps;
                         unsigned long flags;
+                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
                         memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                         spin_lock_irqsave(&fep->tmreg_lock, flags);
                         shhwtstamps.hwtstamp = ns_to_ktime(
-                                timecounter_cyc2time(&fep->tc, bdp->ts));
+                                timecounter_cyc2time(&fep->tc, ebdp->ts));
                         spin_unlock_irqrestore(&fep->tmreg_lock, flags);
                         skb_tstamp_tx(skb, &shhwtstamps);
                 }
-#endif
+
                 if (status & BD_ENET_TX_READY)
                         printk("HEY! Enet xmit interrupt and TX_READY.\n");
 
@@ -575,7 +606,7 @@ fec_enet_tx(struct net_device *ndev)
                 if (status & BD_ENET_TX_WRAP)
                         bdp = fep->tx_bd_base;
                 else
-                        bdp++;
+                        bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
                 /* Since we have freed up a buffer, the ring is no longer full
                  */
@@ -683,21 +714,23 @@ fec_enet_rx(struct net_device *ndev)
                         skb_put(skb, pkt_len - 4);      /* Make room */
                         skb_copy_to_linear_data(skb, data, pkt_len - 4);
                         skb->protocol = eth_type_trans(skb, ndev);
-#ifdef CONFIG_FEC_PTP
+
                         /* Get receive timestamp from the skb */
-                        if (fep->hwts_rx_en) {
+                        if (fep->hwts_rx_en && fep->bufdesc_ex) {
                                 struct skb_shared_hwtstamps *shhwtstamps =
                                                             skb_hwtstamps(skb);
                                 unsigned long flags;
+                                struct bufdesc_ex *ebdp =
+                                        (struct bufdesc_ex *)bdp;
 
                                 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 
                                 spin_lock_irqsave(&fep->tmreg_lock, flags);
                                 shhwtstamps->hwtstamp = ns_to_ktime(
-                                    timecounter_cyc2time(&fep->tc, bdp->ts));
+                                    timecounter_cyc2time(&fep->tc, ebdp->ts));
                                 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
                         }
-#endif
+
                         if (!skb_defer_rx_timestamp(skb))
                                 netif_rx(skb);
                 }
@@ -712,17 +745,19 @@ rx_processing_done:
                 status |= BD_ENET_RX_EMPTY;
                 bdp->cbd_sc = status;
 
-#ifdef CONFIG_FEC_PTP
-                bdp->cbd_esc = BD_ENET_RX_INT;
-                bdp->cbd_prot = 0;
-                bdp->cbd_bdu = 0;
-#endif
+                if (fep->bufdesc_ex) {
+                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+                        ebdp->cbd_esc = BD_ENET_RX_INT;
+                        ebdp->cbd_prot = 0;
+                        ebdp->cbd_bdu = 0;
+                }
                 /* Update BD pointer to next entry */
                 if (status & BD_ENET_RX_WRAP)
                         bdp = fep->rx_bd_base;
                 else
-                        bdp++;
+                        bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
                 /* Doing this here will keep the FEC running while we process
                  * incoming frames.  On a heavily loaded network, we should be
                  * able to keep up at the expense of system resources.
@@ -1157,10 +1192,9 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
         if (!phydev)
                 return -ENODEV;
 
-#ifdef CONFIG_FEC_PTP
-        if (cmd == SIOCSHWTSTAMP)
+        if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
                 return fec_ptp_ioctl(ndev, rq, cmd);
-#endif
+
         return phy_mii_ioctl(phydev, rq, cmd);
 }
 
@@ -1180,7 +1214,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
                                         FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
                 if (skb)
                         dev_kfree_skb(skb);
-                bdp++;
+                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
         }
 
         bdp = fep->tx_bd_base;
@@ -1207,14 +1241,17 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
                 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
                                 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
                 bdp->cbd_sc = BD_ENET_RX_EMPTY;
-#ifdef CONFIG_FEC_PTP
-                bdp->cbd_esc = BD_ENET_RX_INT;
-#endif
-                bdp++;
+
+                if (fep->bufdesc_ex) {
+                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+                        ebdp->cbd_esc = BD_ENET_RX_INT;
+                }
+
+                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
         }
 
         /* Set the last buffer to wrap. */
-        bdp--;
+        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
         bdp->cbd_sc |= BD_SC_WRAP;
 
         bdp = fep->tx_bd_base;
@@ -1224,14 +1261,16 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
                 bdp->cbd_sc = 0;
                 bdp->cbd_bufaddr = 0;
 
-#ifdef CONFIG_FEC_PTP
-                bdp->cbd_esc = BD_ENET_RX_INT;
-#endif
-                bdp++;
+                if (fep->bufdesc_ex) {
+                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+                        ebdp->cbd_esc = BD_ENET_RX_INT;
+                }
+
+                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
         }
 
         /* Set the last buffer to wrap. */
-        bdp--;
+        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
         bdp->cbd_sc |= BD_SC_WRAP;
 
         return 0;
@@ -1444,7 +1483,11 @@ static int fec_enet_init(struct net_device *ndev)
 
         /* Set receive and transmit descriptor base. */
         fep->rx_bd_base = cbd_base;
-        fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+        if (fep->bufdesc_ex)
+                fep->tx_bd_base = (struct bufdesc *)
+                        (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+        else
+                fep->tx_bd_base = cbd_base + RX_RING_SIZE;
 
         /* The FEC Ethernet specific entries in the device structure */
         ndev->watchdog_timeo = TX_TIMEOUT;
@@ -1457,11 +1500,11 @@ static int fec_enet_init(struct net_device *ndev)
 
                 /* Initialize the BD for every fragment in the page. */
                 bdp->cbd_sc = 0;
-                bdp++;
+                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
         }
 
         /* Set the last buffer to wrap */
-        bdp--;
+        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
         bdp->cbd_sc |= BD_SC_WRAP;
 
         /* ...and the same for transmit */
@@ -1471,11 +1514,11 @@ static int fec_enet_init(struct net_device *ndev)
                 /* Initialize the BD for every fragment in the page. */
                 bdp->cbd_sc = 0;
                 bdp->cbd_bufaddr = 0;
-                bdp++;
+                bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
         }
 
         /* Set the last buffer to wrap */
-        bdp--;
+        bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
         bdp->cbd_sc |= BD_SC_WRAP;
 
         fec_restart(ndev, 0);
@@ -1574,6 +1617,8 @@ fec_probe(struct platform_device *pdev)
         fep->pdev = pdev;
         fep->dev_id = dev_id++;
 
+        fep->bufdesc_ex = 0;
+
         if (!fep->hwp) {
                 ret = -ENOMEM;
                 goto failed_ioremap;
@@ -1628,19 +1673,19 @@ fec_probe(struct platform_device *pdev)
                 goto failed_clk;
         }
 
-#ifdef CONFIG_FEC_PTP
         fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+        fep->bufdesc_ex =
+                pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
         if (IS_ERR(fep->clk_ptp)) {
                 ret = PTR_ERR(fep->clk_ptp);
-                goto failed_clk;
+                fep->bufdesc_ex = 0;
         }
-#endif
 
         clk_prepare_enable(fep->clk_ahb);
         clk_prepare_enable(fep->clk_ipg);
-#ifdef CONFIG_FEC_PTP
-        clk_prepare_enable(fep->clk_ptp);
-#endif
+        if (!IS_ERR(fep->clk_ptp))
+                clk_prepare_enable(fep->clk_ptp);
+
         reg_phy = devm_regulator_get(&pdev->dev, "phy");
         if (!IS_ERR(reg_phy)) {
                 ret = regulator_enable(reg_phy);
@@ -1668,9 +1713,8 @@ fec_probe(struct platform_device *pdev)
         if (ret)
                 goto failed_register;
 
-#ifdef CONFIG_FEC_PTP
-        fec_ptp_init(ndev, pdev);
-#endif
+        if (fep->bufdesc_ex)
+                fec_ptp_init(ndev, pdev);
 
         return 0;
 
@@ -1681,9 +1725,8 @@ failed_init:
 failed_regulator:
         clk_disable_unprepare(fep->clk_ahb);
         clk_disable_unprepare(fep->clk_ipg);
-#ifdef CONFIG_FEC_PTP
-        clk_disable_unprepare(fep->clk_ptp);
-#endif
+        if (!IS_ERR(fep->clk_ptp))
+                clk_disable_unprepare(fep->clk_ptp);
 failed_pin:
 failed_clk:
         for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1716,12 +1759,10 @@ fec_drv_remove(struct platform_device *pdev)
                 if (irq > 0)
                         free_irq(irq, ndev);
         }
-#ifdef CONFIG_FEC_PTP
         del_timer_sync(&fep->time_keep);
         clk_disable_unprepare(fep->clk_ptp);
         if (fep->ptp_clock)
                 ptp_clock_unregister(fep->ptp_clock);
-#endif
         clk_disable_unprepare(fep->clk_ahb);
         clk_disable_unprepare(fep->clk_ipg);
         iounmap(fep->hwp);