BNX2X: spatch memory barriers
author: Barret Rhoden <brho@cs.berkeley.edu>
Thu, 5 Feb 2015 21:32:23 +0000 (16:32 -0500)
committer: Barret Rhoden <brho@cs.berkeley.edu>
Mon, 2 Mar 2015 16:59:08 +0000 (11:59 -0500)
kern/drivers/net/bnx2x/bnx2x_cmn.c
kern/drivers/net/bnx2x/bnx2x_cmn.h
kern/drivers/net/bnx2x/bnx2x_ethtool.c
kern/drivers/net/bnx2x/bnx2x_main.c
kern/drivers/net/bnx2x/bnx2x_sp.c
kern/drivers/net/bnx2x/bnx2x_sriov.c
scripts/spatch/linux/funcs.cocci

index 0b7ca70..b9f80fb 100644 (file)
@@ -305,7 +305,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
         * ordering of bit testing in the following
         * netif_tx_queue_stopped(txq) call.
         */
-       smp_mb();
+       mb();
 
        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent re-enabling the queue
@@ -2400,7 +2400,7 @@ static void bnx2x_nic_load_pmf(struct bnx2x *bp, uint32_t load_code)
                 * writing to bp->port.pmf here and reading it from the
                 * bnx2x_periodic_task().
                 */
-               smp_mb();
+               mb();
        } else {
                bp->port.pmf = 0;
        }
@@ -2816,7 +2816,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        case LOAD_OPEN:
                netif_tx_start_all_queues(bp->dev);
-               smp_mb__after_atomic();
+               cmb();
                break;
 
        case LOAD_DIAG:
@@ -2950,7 +2950,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
                bp->recovery_state = BNX2X_RECOVERY_DONE;
                bp->is_leader = 0;
                bnx2x_release_leader_lock(bp);
-               smp_mb();
+               mb();
 
                DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
                BNX2X_ERR("Can't unload in closed or error state\n");
@@ -2971,7 +2971,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
         * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
         */
        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
-       smp_mb();
+       mb();
 
        /* indicate to VFs that the PF is going down */
        bnx2x_iov_channel_down(bp);
@@ -3046,7 +3046,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 
        /* clear pending work in rtnl task */
        bp->sp_rtnl_state = 0;
-       smp_mb();
+       mb();
 
        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
@@ -4127,7 +4127,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct ether *dev)
        wmb();
 
        txdata->tx_db.data.prod += nbd;
-       barrier();
+       cmb();
 
        DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
 
@@ -4141,7 +4141,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct ether *dev)
                /* paired memory barrier is in bnx2x_tx_int(), we have to keep
                 * ordering of set_bit() in netif_tx_stop_queue() and read of
                 * fp->bd_tx_cons */
-               smp_mb();
+               mb();
 
                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
                if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
@@ -5011,9 +5011,9 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, uint8_t fw_sb_id,
 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
                            uint32_t verbose)
 {
-       smp_mb__before_atomic();
+       cmb();
        set_bit(flag, &bp->sp_rtnl_state);
-       smp_mb__after_atomic();
+       cmb();
        DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
           flag);
        schedule_delayed_work(&bp->sp_rtnl_task, 0);
index 07009ad..6dd9977 100644 (file)
@@ -622,7 +622,7 @@ void bnx2x_tx_timeout(struct ether *dev);
 /*********************** Fast path ********************************/
 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 {
-       barrier(); /* status block is written to by the chip */
+       cmb(); /* status block is written to by the chip */
        fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
 }
 
@@ -645,7 +645,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, uint8_t igu_sb_id,
 
        /* Make sure that ACK is written */
        bus_wmb();
-       barrier();
+       cmb();
 }
 
 static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, uint8_t sb_id,
@@ -667,7 +667,7 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, uint8_t sb_id,
 
        /* Make sure that ACK is written */
        bus_wmb();
-       barrier();
+       cmb();
 }
 
 static inline void bnx2x_ack_sb(struct bnx2x *bp, uint8_t igu_sb_id,
@@ -697,7 +697,7 @@ static inline uint16_t bnx2x_hc_ack_int(struct bnx2x *bp)
                       COMMAND_REG_SIMD_MASK);
        uint32_t result = REG_RD(bp, hc_addr);
 
-       barrier();
+       cmb();
        return result;
 }
 
@@ -709,13 +709,13 @@ static inline uint16_t bnx2x_igu_ack_int(struct bnx2x *bp)
        DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, igu_addr);
 
-       barrier();
+       cmb();
        return result;
 }
 
 static inline uint16_t bnx2x_ack_int(struct bnx2x *bp)
 {
-       barrier();
+       cmb();
        if (bp->common.int_block == INT_BLOCK_HC)
                return bnx2x_hc_ack_int(bp);
        else
@@ -725,7 +725,7 @@ static inline uint16_t bnx2x_ack_int(struct bnx2x *bp)
 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
 {
        /* Tell compiler that consumer and producer can change */
-       barrier();
+       cmb();
        return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
 }
 
@@ -755,7 +755,7 @@ static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
        uint16_t hw_cons;
 
        /* Tell compiler that status block fields can change */
-       barrier();
+       cmb();
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        return hw_cons != txdata->tx_pkt_cons;
 }
@@ -1180,7 +1180,7 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
        int tout = 5000; /* Wait for 5 secs tops */
 
        while (tout--) {
-               smp_mb();
+               mb();
                netif_addr_lock_bh(bp->dev);
                if (!(bp->sp_state & mask)) {
                        netif_addr_unlock_bh(bp->dev);
@@ -1191,7 +1191,7 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
                kthread_usleep(1000);
        }
 
-       smp_mb();
+       mb();
 
        netif_addr_lock_bh(bp->dev);
        if (bp->sp_state & mask) {
index c4c52e5..6abf12c 100644 (file)
@@ -2514,11 +2514,11 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        wmb();
 
        txdata->tx_db.data.prod += 2;
-       barrier();
+       cmb();
        DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
 
        bus_wmb();
-       barrier();
+       cmb();
 
        num_pkts++;
        txdata->tx_bd_prod += 2; /* start + pbd */
index 7be287b..cf2ae41 100644 (file)
@@ -1547,7 +1547,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        bus_wmb();
-       barrier();
+       cmb();
 
        if (!CHIP_IS_E1(bp)) {
                /* init leading/trailing edge */
@@ -1612,7 +1612,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
        if (val & IGU_PF_CONF_INT_LINE_EN)
                pci_intx(bp->pdev, true);
 
-       barrier();
+       cmb();
 
        /* init leading/trailing edge */
        if (IS_MF(bp)) {
@@ -1749,7 +1749,7 @@ static int bnx2x_schedule_sp_task(struct bnx2x *bp)
         * is set, otherwise we will get out of sync and miss all
         * further interrupts. Hence, the barrier.
         */
-       smp_wmb();
+       wmb();
 
        /* schedule sp_task to workqueue */
        return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@ -1832,10 +1832,10 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
                return;
 #endif
 
-       smp_mb__before_atomic();
+       cmb();
        atomic_inc(&bp->cq_spq_left);
        /* push the change in bp->spq_left and towards the memory */
-       smp_mb__after_atomic();
+       cmb();
 
        DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 
@@ -1850,11 +1850,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
                 * sp_state is cleared, and this order prevents
                 * races
                 */
-               smp_mb__before_atomic();
+               cmb();
                set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
                wmb();
                clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
-               smp_mb__after_atomic();
+               cmb();
 
                /* schedule the sp task as mcp ack is required */
                bnx2x_schedule_sp_task(bp);
@@ -2934,7 +2934,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
         * We need the mb() to ensure the ordering between the writing to
         * bp->port.pmf here and reading it from the bnx2x_periodic_task().
         */
-       smp_mb();
+       mb();
 
        /* queue a periodic task */
        queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
@@ -3911,7 +3911,7 @@ static uint16_t bnx2x_update_dsb_idx(struct bnx2x *bp)
        struct host_sp_status_block *def_sb = bp->def_status_blk;
        uint16_t rc = 0;
 
-       barrier(); /* status block is written to by the chip */
+       cmb(); /* status block is written to by the chip */
        if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
                bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
                rc |= BNX2X_DEF_SB_ATT_IDX;
@@ -3923,7 +3923,7 @@ static uint16_t bnx2x_update_dsb_idx(struct bnx2x *bp)
        }
 
        /* Do not reorder: indices reading should complete before handling */
-       barrier();
+       cmb();
        return rc;
 }
 
@@ -4046,7 +4046,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, uint32_t asserted)
                        if (!igu_acked)
                                DP(NETIF_MSG_HW,
                                   "Failed to verify IGU ack on time\n");
-                       barrier();
+                       cmb();
                }
                REG_WR(bp, nig_int_mask_addr, nig_mask);
                bnx2x_release_phy_lock(bp);
@@ -5339,9 +5339,9 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
                __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
 
                /* mark latest Q bit */
-               smp_mb__before_atomic();
+               cmb();
                set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
-               smp_mb__after_atomic();
+               cmb();
 
                /* send Q update ramrod for FCoE Q */
                rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -5575,13 +5575,13 @@ next_spqe:
                spqe_cnt++;
        } /* for */
 
-       smp_mb__before_atomic();
+       cmb();
        atomic_add(spqe_cnt, &bp->eq_spq_left);
 
        bp->eq_cons = sw_cons;
        bp->eq_prod = sw_prod;
        /* Make sure that above mem writes were issued towards the memory */
-       smp_wmb();
+       wmb();
 
        /* update producer */
        bnx2x_update_eq_prod(bp, bp->eq_prod);
@@ -5594,7 +5594,7 @@ static void bnx2x_sp_task(struct work_struct *work)
        DP(BNX2X_MSG_SP, "sp task invoked\n");
 
        /* make sure the atomic interrupt_occurred has been written */
-       smp_rmb();
+       rmb();
        if (atomic_read(&bp->interrupt_occurred)) {
 
                /* what work needs to be performed? */
@@ -7678,12 +7678,12 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, uint8_t func,
                         data, igu_addr_data);
        REG_WR(bp, igu_addr_data, data);
        bus_wmb();
-       barrier();
+       cmb();
        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
                          ctl, igu_addr_ctl);
        REG_WR(bp, igu_addr_ctl, ctl);
        bus_wmb();
-       barrier();
+       cmb();
 
        /* wait for clean up to finish */
        while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
@@ -9572,13 +9572,13 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
               reset_mask1 & (~not_reset_mask1));
 
-       barrier();
+       cmb();
        bus_wmb();
 
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
               reset_mask2 & (~stay_reset2));
 
-       barrier();
+       cmb();
        bus_wmb();
 
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
@@ -9651,7 +9651,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
                return -EAGAIN;
        }
 
-       barrier();
+       cmb();
 
        /* Close gates #2, #3 and #4 */
        bnx2x_set_234_gates(bp, true);
@@ -9664,7 +9664,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
 
        /* Clear "unprepared" bit */
        REG_WR(bp, MISC_REG_UNPREPARED, 0);
-       barrier();
+       cmb();
 
        /* Make sure all is written to the chip before the reset */
        bus_wmb();
@@ -9681,11 +9681,11 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
 
        /* PXP */
        bnx2x_pxp_prep(bp);
-       barrier();
+       cmb();
 
        /* reset the chip */
        bnx2x_process_kill_chip_reset(bp, global);
-       barrier();
+       cmb();
 
        /* clear errors in PGB */
        if (!CHIP_IS_E1x(bp))
@@ -9763,7 +9763,7 @@ exit_leader_reset2:
 exit_leader_reset:
        bp->is_leader = 0;
        bnx2x_release_leader_lock(bp);
-       smp_mb();
+       mb();
        return rc;
 }
 
@@ -9785,7 +9785,7 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
 
        bp->recovery_state = BNX2X_RECOVERY_FAILED;
 
-       smp_mb();
+       mb();
 }
 
 /*
@@ -9833,7 +9833,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
                         * "recovery_state" update values are seen on other
                         * CPUs.
                         */
-                       smp_mb();
+                       mb();
                        break;
 
                case BNX2X_RECOVERY_WAIT:
@@ -9927,12 +9927,12 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
                                                /* Shut down the power */
                                                bnx2x_set_power_state(
                                                        bp, PCI_D3hot);
-                                               smp_mb();
+                                               mb();
                                        } else {
                                                bp->recovery_state =
                                                        BNX2X_RECOVERY_DONE;
                                                error_recovered++;
-                                               smp_mb();
+                                               mb();
                                        }
                                        bp->eth_stats.recoverable_error =
                                                error_recovered;
@@ -9975,7 +9975,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
                 * function anyway.
                 */
                bp->sp_rtnl_state = 0;
-               smp_mb();
+               mb();
 
                bnx2x_parity_recover(bp);
 
@@ -9995,7 +9995,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
                 * function anyway.
                 */
                bp->sp_rtnl_state = 0;
-               smp_mb();
+               mb();
 
                bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
                bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -10085,7 +10085,7 @@ static void bnx2x_period_task(struct work_struct *work)
         * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
         * the reading here.
         */
-       smp_mb();
+       mb();
        if (bp->port.pmf) {
                bnx2x_period_func(&bp->link_params, &bp->link_vars);
 
@@ -14190,7 +14190,7 @@ static int bnx2x_drv_ctl(struct ether *dev, struct drv_ctl_info *ctl)
                        break;
 
                bus_wmb();
-               barrier();
+               cmb();
 
                /* Start accepting on iSCSI L2 ring */
 
@@ -14225,7 +14225,7 @@ static int bnx2x_drv_ctl(struct ether *dev, struct drv_ctl_info *ctl)
                        BNX2X_ERR("rx_mode completion timed out!\n");
 
                bus_wmb();
-               barrier();
+               cmb();
 
                /* Unset iSCSI L2 MAC */
                rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
@@ -14235,9 +14235,9 @@ static int bnx2x_drv_ctl(struct ether *dev, struct drv_ctl_info *ctl)
        case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
                int count = ctl->data.credit.credit_count;
 
-               smp_mb__before_atomic();
+               cmb();
                atomic_add(count, &bp->cq_spq_left);
-               smp_mb__after_atomic();
+               cmb();
                break;
        }
        case DRV_CTL_ULP_REGISTER_CMD: {
index 02faeb1..46ef4db 100644 (file)
@@ -253,16 +253,16 @@ static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
 
 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
 {
-       smp_mb__before_atomic();
+       cmb();
        clear_bit(o->state, o->pstate);
-       smp_mb__after_atomic();
+       cmb();
 }
 
 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
 {
-       smp_mb__before_atomic();
+       cmb();
        set_bit(o->state, o->pstate);
-       smp_mb__after_atomic();
+       cmb();
 }
 
 /**
@@ -2134,7 +2134,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
 
        /* The operation is completed */
        clear_bit(p->state, p->pstate);
-       smp_mb__after_atomic();
+       cmb();
 
        return 0;
 }
@@ -3579,16 +3579,16 @@ error_exit1:
 
 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
 {
-       smp_mb__before_atomic();
+       cmb();
        clear_bit(o->sched_state, o->raw.pstate);
-       smp_mb__after_atomic();
+       cmb();
 }
 
 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
 {
-       smp_mb__before_atomic();
+       cmb();
        set_bit(o->sched_state, o->raw.pstate);
-       smp_mb__after_atomic();
+       cmb();
 }
 
 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
@@ -3749,9 +3749,9 @@ static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
 {
        bool rc;
 
-       smp_mb();
+       mb();
        rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
-       smp_mb();
+       mb();
 
        return rc;
 }
@@ -3760,12 +3760,12 @@ static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
 {
        bool rc;
 
-       smp_mb();
+       mb();
 
        /* Don't let to refill if credit + cnt > pool_sz */
        rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
 
-       smp_mb();
+       mb();
 
        return rc;
 }
@@ -3774,7 +3774,7 @@ static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
 {
        int cur_credit;
 
-       smp_mb();
+       mb();
        cur_credit = atomic_read(&o->credit);
 
        return cur_credit;
@@ -3877,7 +3877,7 @@ static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
        p->base_pool_offset = base;
 
        /* Commit the change */
-       smp_mb();
+       mb();
 
        p->check = bnx2x_credit_pool_check;
 
@@ -4207,7 +4207,7 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
                if (rc) {
                        o->next_state = BNX2X_Q_STATE_MAX;
                        clear_bit(pending_bit, pending);
-                       smp_mb__after_atomic();
+                       cmb();
                        return rc;
                }
 
@@ -4295,7 +4295,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
        wmb();
 
        clear_bit(cmd, &o->pending);
-       smp_mb__after_atomic();
+       cmb();
 
        return 0;
 }
@@ -4567,7 +4567,7 @@ static inline int bnx2x_q_init(struct bnx2x *bp,
        o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
 
        bus_wmb();
-       smp_mb();
+       mb();
 
        return 0;
 }
@@ -5296,7 +5296,7 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
        wmb();
 
        clear_bit(cmd, &o->pending);
-       smp_mb__after_atomic();
+       cmb();
 
        return 0;
 }
@@ -6051,7 +6051,7 @@ int bnx2x_func_state_change(struct bnx2x *bp,
                if (rc) {
                        o->next_state = BNX2X_F_STATE_MAX;
                        clear_bit(cmd, pending);
-                       smp_mb__after_atomic();
+                       cmb();
                        return rc;
                }
 
index 6f94f77..9ab0da4 100644 (file)
@@ -100,13 +100,13 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
           cmd_data.sb_id_and_flags, igu_addr_data);
        REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
        bus_wmb();
-       barrier();
+       cmb();
 
        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
           ctl, igu_addr_ctl);
        REG_WR(bp, igu_addr_ctl, ctl);
        bus_wmb();
-       barrier();
+       cmb();
 }
 
 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
@@ -1762,9 +1762,9 @@ static
 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
                                 struct bnx2x_virtf *vf)
 {
-       smp_mb__before_atomic();
+       cmb();
        clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
-       smp_mb__after_atomic();
+       cmb();
 }
 
 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
@@ -3136,9 +3136,9 @@ void bnx2x_iov_task(struct work_struct *work)
 
 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
 {
-       smp_mb__before_atomic();
+       cmb();
        set_bit(flag, &bp->iov_task_state);
-       smp_mb__after_atomic();
+       cmb();
        DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
        queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
 }
index ebc26db..27a1ca7 100644 (file)
@@ -10,3 +10,36 @@ expression TMAX;
 @@
 -usleep_range(TMIN, TMAX);
 +kthread_usleep(TMIN);
+
+// barriers
+@@
+@@
+-barrier();
++cmb();
+
+// On Akaros, RMW and locking atomics already provide HW memory barriers
+// (excluding atomic set, init, and read), so before/after_atomic is a cmb.
+@@
+@@
+-smp_mb__before_atomic();
++cmb();
+
+@@
+@@
+-smp_mb__after_atomic();
++cmb();
+
+@@
+@@
+-smp_mb();
++mb();
+
+@@
+@@
+-smp_rmb();
++rmb();
+
+@@
+@@
+-smp_wmb();
++wmb();