BNX2X: spatch spinlock conversions
author     Barret Rhoden <brho@cs.berkeley.edu>
           Fri, 6 Feb 2015 16:57:17 +0000 (11:57 -0500)
committer  Barret Rhoden <brho@cs.berkeley.edu>
           Mon, 2 Mar 2015 16:59:08 +0000 (11:59 -0500)
The _bh lock variants disable softirqs (bottom halves).  RKMs can't interfere
with kthreads the way a softirq could, so we don't need that protection; the
plain spin_lock/spin_unlock variants are enough.

kern/drivers/net/bnx2x/bnx2x.h
kern/drivers/net/bnx2x/bnx2x_main.c
kern/drivers/net/bnx2x/bnx2x_sp.c
kern/drivers/net/bnx2x/bnx2x_stats.c
scripts/spatch/linux/funcs.cocci

diff --git a/kern/drivers/net/bnx2x/bnx2x.h b/kern/drivers/net/bnx2x/bnx2x.h
index 8e18b27..7c1b749 100644
@@ -609,7 +609,7 @@ struct bnx2x_fastpath {
 #ifdef CONFIG_NET_RX_BUSY_POLL
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
 {
-       spin_lock_init(&fp->lock);
+       spinlock_init_irqsave(&fp->lock);
        fp->state = BNX2X_FP_STATE_IDLE;
 }
 
@@ -618,7 +618,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
        bool rc = true;
 
-       spin_lock_bh(&fp->lock);
+       spin_lock(&fp->lock);
        if (fp->state & BNX2X_FP_LOCKED) {
                WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
                fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -627,7 +627,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
                /* we don't care if someone yielded */
                fp->state = BNX2X_FP_STATE_NAPI;
        }
-       spin_unlock_bh(&fp->lock);
+       spin_unlock(&fp->lock);
        return rc;
 }
 
@@ -636,7 +636,7 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
        bool rc = false;
 
-       spin_lock_bh(&fp->lock);
+       spin_lock(&fp->lock);
        WARN_ON(fp->state &
                (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
 
@@ -645,7 +645,7 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 
        /* state ==> idle, unless currently disabled */
        fp->state &= BNX2X_FP_STATE_DISABLED;
-       spin_unlock_bh(&fp->lock);
+       spin_unlock(&fp->lock);
        return rc;
 }
 
@@ -654,7 +654,7 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
 {
        bool rc = true;
 
-       spin_lock_bh(&fp->lock);
+       spin_lock(&fp->lock);
        if ((fp->state & BNX2X_FP_LOCKED)) {
                fp->state |= BNX2X_FP_STATE_POLL_YIELD;
                rc = false;
@@ -662,7 +662,7 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
                /* preserve yield marks */
                fp->state |= BNX2X_FP_STATE_POLL;
        }
-       spin_unlock_bh(&fp->lock);
+       spin_unlock(&fp->lock);
        return rc;
 }
 
@@ -671,7 +671,7 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 {
        bool rc = false;
 
-       spin_lock_bh(&fp->lock);
+       spin_lock(&fp->lock);
        WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
 
        if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
@@ -679,7 +679,7 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 
        /* state ==> idle, unless currently disabled */
        fp->state &= BNX2X_FP_STATE_DISABLED;
-       spin_unlock_bh(&fp->lock);
+       spin_unlock(&fp->lock);
        return rc;
 }
 
@@ -695,11 +695,11 @@ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
 {
        int rc = true;
 
-       spin_lock_bh(&fp->lock);
+       spin_lock(&fp->lock);
        if (fp->state & BNX2X_FP_OWNED)
                rc = false;
        fp->state |= BNX2X_FP_STATE_DISABLED;
-       spin_unlock_bh(&fp->lock);
+       spin_unlock(&fp->lock);
 
        return rc;
 }
diff --git a/kern/drivers/net/bnx2x/bnx2x_main.c b/kern/drivers/net/bnx2x/bnx2x_main.c
index 426124f..b7fcd8a 100644
@@ -485,7 +485,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
         * from ndo_set_rx_mode() flow that may be called from BH.
         */
 
-       spin_lock_bh(&bp->dmae_lock);
+       spin_lock(&bp->dmae_lock);
 
        /* reset completion */
        *comp = 0;
@@ -514,7 +514,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
 
 unlock:
 
-       spin_unlock_bh(&bp->dmae_lock);
+       spin_unlock(&bp->dmae_lock);
 
        return rc;
 }
@@ -3810,18 +3810,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
        }
 #endif
 
-       spin_lock_bh(&bp->spq_lock);
+       spin_lock(&bp->spq_lock);
 
        if (common) {
                if (!atomic_read(&bp->eq_spq_left)) {
                        BNX2X_ERR("BUG! EQ ring full!\n");
-                       spin_unlock_bh(&bp->spq_lock);
+                       spin_unlock(&bp->spq_lock);
                        bnx2x_panic();
                        return -EBUSY;
                }
        } else if (!atomic_read(&bp->cq_spq_left)) {
                        BNX2X_ERR("BUG! SPQ ring full!\n");
-                       spin_unlock_bh(&bp->spq_lock);
+                       spin_unlock(&bp->spq_lock);
                        bnx2x_panic();
                        return -EBUSY;
        }
@@ -3870,7 +3870,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
           atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
 
        bnx2x_sp_prod_update(bp);
-       spin_unlock_bh(&bp->spq_lock);
+       spin_unlock(&bp->spq_lock);
        return 0;
 }
 
@@ -6035,7 +6035,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
 
 static void bnx2x_init_sp_ring(struct bnx2x *bp)
 {
-       spin_lock_init(&bp->spq_lock);
+       spinlock_init_irqsave(&bp->spq_lock);
        atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
 
        bp->spq_prod_idx = 0;
@@ -6815,7 +6815,7 @@ static void bnx2x_reset_common(struct bnx2x *bp)
 static void bnx2x_setup_dmae(struct bnx2x *bp)
 {
        bp->dmae_ready = 0;
-       spin_lock_init(&bp->dmae_lock);
+       spinlock_init_irqsave(&bp->dmae_lock);
 }
 
 static void bnx2x_init_pxp(struct bnx2x *bp)
@@ -12009,7 +12009,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->fw_mb_mutex);
        mutex_init(&bp->drv_info_mutex);
        bp->drv_info_mng_owner = false;
-       spin_lock_init(&bp->stats_lock);
+       spinlock_init_irqsave(&bp->stats_lock);
        sema_init(&bp->stats_sema, 1);
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
@@ -13644,9 +13644,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        cancel_delayed_work_sync(&bp->sp_task);
        cancel_delayed_work_sync(&bp->period_task);
 
-       spin_lock_bh(&bp->stats_lock);
+       spin_lock(&bp->stats_lock);
        bp->stats_state = STATS_STATE_DISABLED;
-       spin_unlock_bh(&bp->stats_lock);
+       spin_unlock(&bp->stats_lock);
 
        bnx2x_save_statistics(bp);
 
@@ -13932,7 +13932,7 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
                return;
 #endif
 
-       spin_lock_bh(&bp->spq_lock);
+       spin_lock(&bp->spq_lock);
        BUG_ON(bp->cnic_spq_pending < count);
        bp->cnic_spq_pending -= count;
 
@@ -14000,7 +14000,7 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
-       spin_unlock_bh(&bp->spq_lock);
+       spin_unlock(&bp->spq_lock);
 }
 
 static int bnx2x_cnic_sp_queue(struct ether *dev,
@@ -14022,7 +14022,7 @@ static int bnx2x_cnic_sp_queue(struct ether *dev,
                return -EAGAIN;
        }
 
-       spin_lock_bh(&bp->spq_lock);
+       spin_lock(&bp->spq_lock);
 
        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];
@@ -14046,7 +14046,7 @@ static int bnx2x_cnic_sp_queue(struct ether *dev,
                        bp->cnic_kwq_prod++;
        }
 
-       spin_unlock_bh(&bp->spq_lock);
+       spin_unlock(&bp->spq_lock);
 
        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);
diff --git a/kern/drivers/net/bnx2x/bnx2x_sp.c b/kern/drivers/net/bnx2x/bnx2x_sp.c
index 46ef4db..347705e 100644
@@ -53,7 +53,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
        INIT_LIST_HEAD(&o->exe_queue);
        INIT_LIST_HEAD(&o->pending_comp);
 
-       spin_lock_init(&o->lock);
+       spinlock_init_irqsave(&o->lock);
 
        o->exe_chunk_len = exe_len;
        o->owner         = owner;
@@ -81,12 +81,12 @@ static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
        struct bnx2x_exeq_elem *elem;
        int cnt = 0;
 
-       spin_lock_bh(&o->lock);
+       spin_lock(&o->lock);
 
        list_for_each_entry(elem, &o->exe_queue, link)
                cnt++;
 
-       spin_unlock_bh(&o->lock);
+       spin_unlock(&o->lock);
 
        return cnt;
 }
@@ -108,7 +108,7 @@ static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
 {
        int rc;
 
-       spin_lock_bh(&o->lock);
+       spin_lock(&o->lock);
 
        if (!restore) {
                /* Try to cancel this element queue */
@@ -127,14 +127,14 @@ static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
        /* If so, add it to the execution queue */
        list_add_tail(&elem->link, &o->exe_queue);
 
-       spin_unlock_bh(&o->lock);
+       spin_unlock(&o->lock);
 
        return 0;
 
 free_and_exit:
        bnx2x_exe_queue_free_elem(bp, elem);
 
-       spin_unlock_bh(&o->lock);
+       spin_unlock(&o->lock);
 
        return rc;
 }
@@ -503,9 +503,9 @@ int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
 {
        int rc;
 
-       spin_lock_bh(&o->exe_queue.lock);
+       spin_lock(&o->exe_queue.lock);
        rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
-       spin_unlock_bh(&o->exe_queue.lock);
+       spin_unlock(&o->exe_queue.lock);
 
        return rc;
 }
@@ -558,9 +558,9 @@ static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o)
 {
-       spin_lock_bh(&o->exe_queue.lock);
+       spin_lock(&o->exe_queue.lock);
        __bnx2x_vlan_mac_h_read_unlock(bp, o);
-       spin_unlock_bh(&o->exe_queue.lock);
+       spin_unlock(&o->exe_queue.lock);
 }
 
 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
@@ -1395,7 +1395,7 @@ static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
 {
        int rc = 0;
 
-       spin_lock_bh(&o->exe_queue.lock);
+       spin_lock(&o->exe_queue.lock);
 
        DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
        rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
@@ -1410,7 +1410,7 @@ static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
        } else {
                rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
        }
-       spin_unlock_bh(&o->exe_queue.lock);
+       spin_unlock(&o->exe_queue.lock);
 
        return rc;
 }
@@ -1435,7 +1435,7 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
        /* Clearing the pending list & raw state should be made
         * atomically (as execution flow assumes they represent the same).
         */
-       spin_lock_bh(&o->exe_queue.lock);
+       spin_lock(&o->exe_queue.lock);
 
        /* Reset pending list */
        __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
@@ -1443,7 +1443,7 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
        /* Clear pending */
        r->clear_pending(r);
 
-       spin_unlock_bh(&o->exe_queue.lock);
+       spin_unlock(&o->exe_queue.lock);
 
        /* If ramrod failed this is most likely a SW bug */
        if (cqe->message.error)
@@ -1842,7 +1842,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 
        /* Clear pending commands first */
 
-       spin_lock_bh(&exeq->lock);
+       spin_lock(&exeq->lock);
 
        list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
                flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
@@ -1851,7 +1851,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
                        rc = exeq->remove(bp, exeq->owner, exeq_pos);
                        if (rc) {
                                BNX2X_ERR("Failed to remove command\n");
-                               spin_unlock_bh(&exeq->lock);
+                               spin_unlock(&exeq->lock);
                                return rc;
                        }
                        list_del(&exeq_pos->link);
@@ -1859,7 +1859,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
                }
        }
 
-       spin_unlock_bh(&exeq->lock);
+       spin_unlock(&exeq->lock);
 
        /* Prepare a command request */
        memset(&p, 0, sizeof(p));
diff --git a/kern/drivers/net/bnx2x/bnx2x_stats.c b/kern/drivers/net/bnx2x/bnx2x_stats.c
index 20f4c02..292170e 100644
@@ -126,10 +126,10 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
        if (!bp->stats_pending) {
                int rc;
 
-               spin_lock_bh(&bp->stats_lock);
+               spin_lock(&bp->stats_lock);
 
                if (bp->stats_pending) {
-                       spin_unlock_bh(&bp->stats_lock);
+                       spin_unlock(&bp->stats_lock);
                        return;
                }
 
@@ -151,7 +151,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
                if (rc == 0)
                        bp->stats_pending = 1;
 
-               spin_unlock_bh(&bp->stats_lock);
+               spin_unlock(&bp->stats_lock);
        }
 }
 
@@ -1415,11 +1415,11 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
        if (unlikely(bp->panic))
                return;
 
-       spin_lock_bh(&bp->stats_lock);
+       spin_lock(&bp->stats_lock);
        state = bp->stats_state;
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
        action = bnx2x_stats_stm[state][event].action;
-       spin_unlock_bh(&bp->stats_lock);
+       spin_unlock(&bp->stats_lock);
 
        action(bp);
 
diff --git a/scripts/spatch/linux/funcs.cocci b/scripts/spatch/linux/funcs.cocci
index e502079..7d3983f 100644
@@ -90,3 +90,36 @@ type T;
 @@
 -clamp_t(T, V, LO, HI)
 +CLAMP_T(T, V, LO, HI)
+
+
+// locking
+// being conservative: they might not need irqsave
+@@
+expression E;
+@@
+-spin_lock_init(E)
++spinlock_init_irqsave(E)
+
+@@
+expression E;
+@@
+-spin_lock_bh(E)
++spin_lock(E)
+
+@@
+expression E;
+@@
+-spin_unlock_bh(E)
++spin_unlock(E)
+
+@@
+expression E;
+@@
+-spin_lock_irq(E)
++spin_lock_irqsave(E)
+
+@@
+expression E;
+@@
+-spin_unlock_irq(E)
++spin_unlock_irqsave(E)