author		Stephen Hemminger <shemminger@osdl.org>	2006-04-25 10:58:50 -0700
committer	Jeff Garzik <jeff@garzik.org>			2006-04-26 06:19:45 -0400
commit		734cbc363b159caee158d5a83408c72d98bcacf0
tree		14d903eaf2b7580f791af9fd0d2800f1eb91723f
parent		3b908870b8332dfd40be0e919e187aa4991536fb
[PATCH] sky2: reschedule if irq still pending
This is a workaround for the case of edge-triggered IRQs. Several users
seem to have broken configurations sharing edge-triggered IRQs. To avoid
losing IRQs, reschedule if more work arrives.
The changes to netdevice.h extract the part that puts the device back on
the poll list into a separate inline.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
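
For illustration only, here is a minimal sketch of the poll-loop shape this
patch produces, written against the dev->poll()/quota NAPI interface used
here; struct example_hw, example_rx() and example_irq_status() are
hypothetical stand-ins for the sky2-specific pieces and are not part of the
patch:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct example_hw;					/* hypothetical device state */
u32 example_irq_status(struct example_hw *hw);		/* hypothetical: read chip IRQ status */
int example_rx(struct example_hw *hw, int to_do);	/* hypothetical: handle up to to_do frames */

static int example_poll(struct net_device *dev, int *budget)
{
	struct example_hw *hw = netdev_priv(dev);
	int work_limit = min(dev->quota, *budget);
	int work_done = 0;

restart_poll:
	work_done += example_rx(hw, work_limit - work_done);

	if (work_done >= work_limit) {
		*budget -= work_done;
		dev->quota -= work_done;
		return 1;		/* quota exhausted, stay on the poll list */
	}

	/* Leave polling mode with local interrupts off so an edge-triggered
	 * interrupt cannot be lost between the complete and the re-check. */
	local_irq_disable();
	__netif_rx_complete(dev);

	/* Hardware still signals pending work: put the device back on the
	 * poll list and keep going rather than wait for an interrupt that
	 * may never arrive on a broken shared edge-triggered line. */
	if (unlikely(example_irq_status(hw)) && __netif_rx_schedule_prep(dev)) {
		__netif_rx_reschedule(dev, 0);
		local_irq_enable();
		goto restart_poll;
	}

	local_irq_enable();
	*budget -= work_done;
	dev->quota -= work_done;
	return 0;
}

In this NAPI model the routine would be hooked up as dev->poll with an
appropriate dev->weight; __netif_rx_reschedule() is the helper added to
netdevice.h in the patch below.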
-rw-r--r--	drivers/net/sky2.c		20
-rw-r--r--	include/linux/netdevice.h	18
2 files changed, 26 insertions, 12 deletions
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 67b0eab16589..618fde8622ca 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2093,6 +2093,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	int work_done = 0;
 	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
 
+ restart_poll:
 	if (unlikely(status & ~Y2_IS_STAT_BMU)) {
 		if (status & Y2_IS_HW_ERR)
 			sky2_hw_intr(hw);
@@ -2123,7 +2124,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	}
 
 	if (status & Y2_IS_STAT_BMU) {
-		work_done = sky2_status_intr(hw, work_limit);
+		work_done += sky2_status_intr(hw, work_limit - work_done);
 
 		*budget -= work_done;
 		dev0->quota -= work_done;
@@ -2133,9 +2134,22 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
 	}
 
-	netif_rx_complete(dev0);
+	local_irq_disable();
+	__netif_rx_complete(dev0);
 
 	status = sky2_read32(hw, B0_Y2_SP_LISR);
+
+	if (unlikely(status)) {
+		/* More work pending, try and keep going */
+		if (__netif_rx_schedule_prep(dev0)) {
+			__netif_rx_reschedule(dev0, work_done);
+			status = sky2_read32(hw, B0_Y2_SP_EISR);
+			local_irq_enable();
+			goto restart_poll;
+		}
+	}
+
+	local_irq_enable();
 
 	return 0;
 }
@@ -2153,8 +2167,6 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
 	prefetch(&hw->st_le[hw->st_idx]);
 	if (likely(__netif_rx_schedule_prep(dev0)))
 		__netif_rx_schedule(dev0);
-	else
-		printk(KERN_DEBUG PFX "irq race detected\n");
 
 	return IRQ_HANDLED;
 }
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 40ccf8cc4239..01db7b88a2b1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -829,19 +829,21 @@ static inline void netif_rx_schedule(struct net_device *dev)
 		__netif_rx_schedule(dev);
 }
 
-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
- * Do not inline this?
- */
+
+static inline void __netif_rx_reschedule(struct net_device *dev, int undo)
+{
+	dev->quota += undo;
+	list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
+	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+}
+
+/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
 static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 {
 	if (netif_rx_schedule_prep(dev)) {
 		unsigned long flags;
-
-		dev->quota += undo;
-
 		local_irq_save(flags);
-		list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+		__netif_rx_reschedule(dev, undo);
 		local_irq_restore(flags);
 		return 1;
 	}
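
Finally, a brief sketch of the calling convention the refactored netdevice.h
helpers assume; example_finish_poll() and example_finish_poll_wrapped() are
hypothetical callers used only to contrast the two entry points, not code
from this patch:

#include <linux/netdevice.h>

/* __netif_rx_reschedule() touches per-CPU softnet state and raises the
 * softirq with __raise_softirq_irqoff(), so its caller must already have
 * local interrupts disabled, exactly as sky2_poll() does above.
 * (The check that the hardware really has more work is elided here.) */
static void example_finish_poll(struct net_device *dev, int work_done)
{
	local_irq_disable();
	__netif_rx_complete(dev);
	if (__netif_rx_schedule_prep(dev))
		__netif_rx_reschedule(dev, work_done);
	local_irq_enable();
}

/* netif_rx_reschedule() keeps its old behaviour and performs the
 * local_irq_save()/restore() itself, so existing callers need no change. */
static void example_finish_poll_wrapped(struct net_device *dev, int work_done)
{
	netif_rx_complete(dev);
	netif_rx_reschedule(dev, work_done);
}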