Merge branch 'master' of code.vuplus.com:/opt/repository/openvuplus_3.0
[vuplus_openvuplus_3.0] / meta-bsp / recipes-kernel / linux / linux-vuplus-4.1.20 / bcmgenet-recovery-fix.patch
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3d5c251..152774f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -129,7 +129,10 @@ static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
 #define GENET_VER_FMT  "%1d.%1d EPHY: 0x%04x"
 
 #define GENET_MSG_DEFAULT      (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
-                               NETIF_MSG_LINK)
+                               NETIF_MSG_LINK | NETIF_MSG_INTR | \
+                               NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS | \
+                               NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
+                               NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
 
 static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
 {
@@ -3255,24 +3258,59 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
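+/* Delayed work that recovers from an RBUF overflow by taking the interface down and back up */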
+static struct rbuf_ovfl_work_struct {
+       struct delayed_work queue;
+       struct bcmgenet_priv *priv;
+} rbuf_ovfl_work;
+
+static void rbuf_ovfl_wq_func(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct rbuf_ovfl_work_struct *rbo_work = container_of(dwork, struct rbuf_ovfl_work_struct, queue);
+       struct bcmgenet_priv *priv = rbo_work->priv;
+       unsigned int flags = dev_get_flags(priv->dev);
+
+       /* Take the interface down, wait a second, then bring it back up */
+       rtnl_lock();
+       netif_carrier_off(priv->dev);
+       dev_change_flags(priv->dev, flags & ~IFF_UP);
+       rtnl_unlock();
+
+       msleep(1000);
+
+       rtnl_lock();
+       dev_change_flags(priv->dev, flags | IFF_UP);
+       rtnl_unlock();
+}
+
 /* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 {
        struct bcmgenet_priv *priv = dev_id;
        struct bcmgenet_rx_ring *rx_ring;
        struct bcmgenet_tx_ring *tx_ring;
-       unsigned int status;
+       unsigned int status, status_unmask;
        unsigned long flags;
 
        /* Read irq status */
-       status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
-               ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+       status_unmask = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
+       status = status_unmask & ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+
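+       /* On RBUF overflow, log the overflow count and schedule the recovery work */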
+       if (status_unmask & UMAC_IRQ_RBUF_OVERFLOW) {
+               netdev_info(priv->dev,
+                           "%s: %d pkts\n", __func__, bcmgenet_rbuf_readl(priv, RBUF_OVFL_CNT_V3PLUS));
+               status |= UMAC_IRQ_RBUF_OVERFLOW;
+               schedule_delayed_work(&rbuf_ovfl_work.queue, HZ);
+       }
 
        /* clear interrupts */
        bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
 
-       netif_dbg(priv, intr, priv->dev,
-                 "IRQ=0x%x\n", status);
+       if (status & ~UMAC_IRQ_MDIO_DONE) /* except for mdio event */
+               netif_dbg(priv, intr, priv->dev,
+                         "%s: IRQ=0x%x\n", __func__, status);
 
        if (status & UMAC_IRQ_RXDMA_DONE) {
                rx_ring = &priv->rx_rings[DESC_INDEX];
@@ -3281,6 +3319,10 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
                        rx_ring->int_disable(rx_ring);
                        __napi_schedule(&rx_ring->napi);
                }
+               else {
+                       netif_dbg(priv, intr, priv->dev,
+                                 "%s: rx not sched. state=0x%08lx\n", __func__, rx_ring->napi.state);
+               }
        }
 
        if (status & UMAC_IRQ_TXDMA_DONE) {
@@ -3306,6 +3348,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
                spin_unlock_irqrestore(&priv->lock, flags);
 
                schedule_work(&priv->bcmgenet_irq_work);
+               netif_dbg(priv, intr, priv->dev,
+                         "%s: link event. status=0x%08x\n", __func__, status);
        }
 
        return IRQ_HANDLED;
@@ -3351,6 +3395,21 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
        udelay(10);
 }
 
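+/* Reset the RBUF by pulsing BIT(0) of the RBUF control register */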
+static void bcmgenet_rbuf_reset(struct bcmgenet_priv *priv)
+{
+       u32 reg;
+
+       reg = bcmgenet_rbuf_ctrl_get(priv);
+       reg |= BIT(0);
+       bcmgenet_rbuf_ctrl_set(priv, reg);
+       udelay(100);
+
+       reg &= ~BIT(0);
+       bcmgenet_rbuf_ctrl_set(priv, reg);
+       udelay(100);
+}
+
 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
                                 unsigned char *addr)
 {
@@ -3434,6 +3493,7 @@ static int bcmgenet_open(struct net_device *dev)
 
        /* take MAC out of reset */
        bcmgenet_umac_reset(priv);
+       bcmgenet_rbuf_reset(priv);
 
        ret = init_umac(priv);
        if (ret)
@@ -4070,6 +4130,9 @@ static int bcmgenet_probe(struct platform_device *pdev)
        /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
        priv->rx_buf_len = RX_BUF_LENGTH;
        INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
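+       /* Bind the overflow-recovery work to this device and prepare it for scheduling */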
+       rbuf_ovfl_work.priv = priv;
+       INIT_DELAYED_WORK(&rbuf_ovfl_work.queue, rbuf_ovfl_wq_func);
 
        priv->clk_wol = devm_clk_get(&priv->pdev->dev, "sw_genetwol");
        if (IS_ERR(priv->clk_wol)) {