BNX2X: spatch netif_addr_lock
author		Barret Rhoden <brho@cs.berkeley.edu>
		Mon, 9 Feb 2015 20:22:12 +0000 (15:22 -0500)
committer	Barret Rhoden <brho@cs.berkeley.edu>
		Mon, 2 Mar 2015 16:59:09 +0000 (11:59 -0500)
It's not clear that the Plan 9 qlock is the right thing to grab here.
It's the equivalent pattern, I think, but it's a qlock where Linux uses
a spinlock.  There could be problems with grabbing a qlock in the wrong
context (IRQ, etc.).

An alternative is to put a spinlock in the Plan 9 ether/netif struct and
use that.  Maybe that code doesn't need a qlock at all.
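
If we do switch to a spinlock later, the spatch rules below would just get
retargeted.  Rough sketch only: 'addr_lock' is a made-up name for a spinlock
we'd have to add to the ether struct, and plain spin_lock/spin_unlock stand
in for whichever irqsave variant turns out to be appropriate (the non-_bh
rules would change the same way):

@@
expression DEV;
@@
-netif_addr_lock_bh(DEV)
+spin_lock(&DEV->addr_lock)

@@
expression DEV;
@@
-netif_addr_unlock_bh(DEV)
+spin_unlock(&DEV->addr_lock)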

kern/drivers/net/bnx2x/bnx2x_cmn.c
kern/drivers/net/bnx2x/bnx2x_cmn.h
kern/drivers/net/bnx2x/bnx2x_main.c
scripts/spatch/linux/sync.cocci

diff --git a/kern/drivers/net/bnx2x/bnx2x_cmn.c b/kern/drivers/net/bnx2x/bnx2x_cmn.c
index 0408058..e655a54 100644
@@ -2159,7 +2159,7 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
         * we take a lock surrounding both the initial send and the CONTs,
         * as we don't want a true completion to disrupt us in the middle.
         */
-       netif_addr_lock_bh(bp->dev);
+       qlock(&bp->dev->qlock);
        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
        if (rc < 0)
                BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
@@ -2171,13 +2171,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
                if (rc < 0) {
                        BNX2X_ERR("Failed to clean multi-cast object: %d\n",
                                  rc);
-                       netif_addr_unlock_bh(bp->dev);
+                       qunlock(&bp->dev->qlock);
                        return;
                }
 
                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
        }
-       netif_addr_unlock_bh(bp->dev);
+       qunlock(&bp->dev->qlock);
 }
 
 #ifndef BNX2X_STOP_ON_ERROR
diff --git a/kern/drivers/net/bnx2x/bnx2x_cmn.h b/kern/drivers/net/bnx2x/bnx2x_cmn.h
index a9d2dcd..4fd359c 100644
@@ -1181,26 +1181,26 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
 
        while (tout--) {
                mb();
-               netif_addr_lock_bh(bp->dev);
+               qlock(&bp->dev->qlock);
                if (!(bp->sp_state & mask)) {
-                       netif_addr_unlock_bh(bp->dev);
+                       qunlock(&bp->dev->qlock);
                        return true;
                }
-               netif_addr_unlock_bh(bp->dev);
+               qunlock(&bp->dev->qlock);
 
                kthread_usleep(1000);
        }
 
        mb();
 
-       netif_addr_lock_bh(bp->dev);
+       qlock(&bp->dev->qlock);
        if (bp->sp_state & mask) {
                BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
                          bp->sp_state, mask);
-               netif_addr_unlock_bh(bp->dev);
+               qunlock(&bp->dev->qlock);
                return false;
        }
-       netif_addr_unlock_bh(bp->dev);
+       qunlock(&bp->dev->qlock);
 
        return true;
 }
diff --git a/kern/drivers/net/bnx2x/bnx2x_main.c b/kern/drivers/net/bnx2x/bnx2x_main.c
index 9c45106..3227fbe 100644
@@ -5197,7 +5197,7 @@ static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
 
        rparam.mcast_obj = &bp->mcast_obj;
 
-       netif_addr_lock_bh(bp->dev);
+       qlock(&bp->dev->qlock);
 
        /* Clear pending state for the last command */
        bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
@@ -5210,7 +5210,7 @@ static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
                                  rc);
        }
 
-       netif_addr_unlock_bh(bp->dev);
+       qunlock(&bp->dev->qlock);
 }
 
 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
@@ -5259,7 +5259,7 @@ static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
 
 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
 {
-       netif_addr_lock_bh(bp->dev);
+       qlock(&bp->dev->qlock);
 
        clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
 
@@ -5273,7 +5273,7 @@ static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
                                    &bp->sp_state))
                bnx2x_set_iscsi_eth_rx_mode(bp, false);
 
-       netif_addr_unlock_bh(bp->dev);
+       qunlock(&bp->dev->qlock);
 }
 
 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
@@ -9201,7 +9201,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
         * We need to take a netif_addr_lock() here in order to prevent
         * a race between the completion code and this code.
         */
-       netif_addr_lock_bh(bp->dev);
+       qlock(&bp->dev->qlock);
        /* Schedule the rx_mode command */
        if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
                set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
@@ -9214,7 +9214,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
        if (rc < 0)
                BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
 
-       netif_addr_unlock_bh(bp->dev);
+       qunlock(&bp->dev->qlock);
 
        bnx2x_iov_chip_cleanup(bp);
 
@@ -12368,7 +12368,7 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
 
        DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
 
-       netif_addr_lock_bh(bp->dev);
+       qlock(&bp->dev->qlock);
 
        if (bp->dev->flags & IFF_PROMISC) {
                rx_mode = BNX2X_RX_MODE_PROMISC;
@@ -12383,10 +12383,10 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
                                rx_mode = BNX2X_RX_MODE_ALLMULTI;
 
                        /* release bh lock, as bnx2x_set_uc_list might sleep */
-                       netif_addr_unlock_bh(bp->dev);
+                       qunlock(&bp->dev->qlock);
                        if (bnx2x_set_uc_list(bp) < 0)
                                rx_mode = BNX2X_RX_MODE_PROMISC;
-                       netif_addr_lock_bh(bp->dev);
+                       qlock(&bp->dev->qlock);
                } else {
                        /* configuring mcast to a vf involves sleeping (when we
                         * wait for the pf's response).
@@ -12404,19 +12404,19 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
        /* Schedule the rx_mode command */
        if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
                set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
-               netif_addr_unlock_bh(bp->dev);
+               qunlock(&bp->dev->qlock);
                return;
        }
 
        if (IS_PF(bp)) {
                bnx2x_set_storm_rx_mode(bp);
-               netif_addr_unlock_bh(bp->dev);
+               qunlock(&bp->dev->qlock);
        } else {
                /* VF will need to request the PF to make this change, and so
                 * the VF needs to release the bottom-half lock prior to the
                 * request (as it will likely require sleep on the VF side)
                 */
-               netif_addr_unlock_bh(bp->dev);
+               qunlock(&bp->dev->qlock);
                bnx2x_vfpf_storm_rx_mode(bp);
        }
 }
@@ -14196,9 +14196,9 @@ static int bnx2x_drv_ctl(struct ether *dev, struct drv_ctl_info *ctl)
 
                /* Start accepting on iSCSI L2 ring */
 
-               netif_addr_lock_bh(dev);
+               qlock(&dev->qlock);
                bnx2x_set_iscsi_eth_rx_mode(bp, true);
-               netif_addr_unlock_bh(dev);
+               qunlock(&dev->qlock);
 
                /* bits to wait on */
                __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
@@ -14215,9 +14215,9 @@ static int bnx2x_drv_ctl(struct ether *dev, struct drv_ctl_info *ctl)
                unsigned long sp_bits = 0;
 
                /* Stop accepting on iSCSI L2 ring */
-               netif_addr_lock_bh(dev);
+               qlock(&dev->qlock);
                bnx2x_set_iscsi_eth_rx_mode(bp, false);
-               netif_addr_unlock_bh(dev);
+               qunlock(&dev->qlock);
 
                /* bits to wait on */
                __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
diff --git a/scripts/spatch/linux/sync.cocci b/scripts/spatch/linux/sync.cocci
index eb77c77..595f68b 100644
@@ -17,3 +17,29 @@ expression E;
 -mutex_unlock(
 +qunlock(
  E)
+
+// the netif_addr_lock is a spinlock in linux, but it seems to protect the list
+// of addresses.  That's the 'qlock' (great name) in plan 9
+@@
+expression DEV;
+@@
+-netif_addr_lock(DEV)
++qlock(&DEV->qlock)
+
+@@
+expression DEV;
+@@
+-netif_addr_unlock(DEV)
++qunlock(&DEV->qlock)
+
+@@
+expression DEV;
+@@
+-netif_addr_lock_bh(DEV)
++qlock(&DEV->qlock)
+
+@@
+expression DEV;
+@@
+-netif_addr_unlock_bh(DEV)
++qunlock(&DEV->qlock)