akaros/kern/drivers/net/bnx2x/bnx2x_cmn.c
   1/* bnx2x_cmn.c: Broadcom Everest network driver.
   2 *
   3 * Copyright (c) 2007-2013 Broadcom Corporation
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation.
   8 *
   9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  10 * Written by: Eliezer Tamir
  11 * Based on code from Michael Chan's bnx2 driver
  12 * UDP CSUM errata workaround by Arik Gendelman
  13 * Slowpath and fastpath rework by Vladislav Zolotarov
  14 * Statistics and Link management by Yitchak Gertner
  15 *
  16 */
  17
  18#include <linux_compat.h>
  19
  20#include "bnx2x_cmn.h"
  21#include "bnx2x_init.h"
  22#include "bnx2x_sp.h"
  23
  24static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
  25static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
  26static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
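     /* AKAROS_PORT: bnx2x_poll uses the kernel-message handler signature
      * (srcid, a0-a2); the MSI-X fastpath handler below posts it with
      * send_kernel_message(), passing the struct bnx2x_fastpath in a0. */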
  27static void bnx2x_poll(uint32_t srcid, long a0, long a1, long a2);
  28
  29static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
  30{
  31        int i;
  32
  33        /* Add NAPI objects */
  34        for_each_rx_queue_cnic(bp, i) {
  35                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  36                               bnx2x_poll, NAPI_POLL_WEIGHT);
  37                napi_hash_add(&bnx2x_fp(bp, i, napi));
  38        }
  39}
  40
  41static void bnx2x_add_all_napi(struct bnx2x *bp)
  42{
  43        int i;
  44
  45        /* Add NAPI objects */
  46        for_each_eth_queue(bp, i) {
  47                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  48                               bnx2x_poll, NAPI_POLL_WEIGHT);
  49                napi_hash_add(&bnx2x_fp(bp, i, napi));
  50        }
  51}
  52
  53static int bnx2x_calc_num_queues(struct bnx2x *bp)
  54{
  55        /* default is min(8, num_cores) in Linux.  we'll set it elsewhere */
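             /* GNU "?:" extension: use bnx2x_num_queues if non-zero, else 8 */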
  56        int nq = bnx2x_num_queues ? : 8;
  57
  58        /* Reduce memory usage in kdump environment by using only one queue */
  59        if (is_kdump_kernel())
  60                nq = 1;
  61
  62        nq = CLAMP(nq, 1, BNX2X_MAX_QUEUES(bp));
  63        return nq;
  64}
  65
  66/**
  67 * bnx2x_move_fp - move content of the fastpath structure.
  68 *
  69 * @bp:         driver handle
  70 * @from:       source FP index
  71 * @to:         destination FP index
  72 *
   73 * Makes sure the contents of bp->fp[to].napi are kept
   74 * intact. This is done by first copying the napi struct from
   75 * the target to the source, and then memcpying the entire
   76 * source onto the target. Also updates txdata pointers and
   77 * related content.
  78 */
  79static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
  80{
  81        struct bnx2x_fastpath *from_fp = &bp->fp[from];
  82        struct bnx2x_fastpath *to_fp = &bp->fp[to];
  83        struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
  84        struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
  85        struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
  86        struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
  87        int old_max_eth_txqs, new_max_eth_txqs;
  88        int old_txdata_index = 0, new_txdata_index = 0;
  89        struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
  90
  91        /* Copy the NAPI object as it has been already initialized */
  92        from_fp->napi = to_fp->napi;
  93
  94        /* Move bnx2x_fastpath contents */
  95        memcpy(to_fp, from_fp, sizeof(*to_fp));
  96        to_fp->index = to;
  97
  98        /* Retain the tpa_info of the original `to' version as we don't want
  99         * 2 FPs to contain the same tpa_info pointer.
 100         */
 101        to_fp->tpa_info = old_tpa_info;
 102
 103        /* move sp_objs contents as well, as their indices match fp ones */
 104        memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
 105
 106        /* move fp_stats contents as well, as their indices match fp ones */
 107        memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
 108
 109        /* Update txdata pointers in fp and move txdata content accordingly:
 110         * Each fp consumes 'max_cos' txdata structures, so the index should be
 111         * decremented by max_cos x delta.
 112         */
 113
 114        old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
 115        new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
 116                                (bp)->max_cos;
 117        if (from == FCOE_IDX(bp)) {
 118                old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
 119                new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
 120        }
 121
 122        memcpy(&bp->bnx2x_txq[new_txdata_index],
 123               &bp->bnx2x_txq[old_txdata_index],
 124               sizeof(struct bnx2x_fp_txdata));
 125        to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
 126}
 127
 128/**
 129 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 130 *
 131 * @bp:        driver handle
 132 * @buf:       character buffer to fill with the fw name
 133 * @buf_len:   length of the above buffer
 134 *
 135 */
 136void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
 137{
 138        if (IS_PF(bp)) {
 139                uint8_t phy_fw_ver[PHY_FW_VER_LEN];
 140
 141                phy_fw_ver[0] = '\0';
 142                bnx2x_get_ext_phy_fw_version(&bp->link_params,
 143                                             phy_fw_ver, PHY_FW_VER_LEN);
 144                strlcpy(buf, bp->fw_ver, buf_len);
  145         snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
 146                         "bc %d.%d.%d%s%s",
 147                         (bp->common.bc_ver & 0xff0000) >> 16,
 148                         (bp->common.bc_ver & 0xff00) >> 8,
 149                         (bp->common.bc_ver & 0xff),
 150                         ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
 151        } else {
 152                bnx2x_vf_fill_fw_str(bp, buf, buf_len);
 153        }
 154}
 155
 156/**
 157 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 158 *
 159 * @bp: driver handle
 160 * @delta:      number of eth queues which were not allocated
 161 */
 162static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
 163{
 164        int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
 165
  166        /* Queue pointer cannot be re-set on an fp basis, as moving the pointer
  167         * backward along the array could cause memory to be overwritten
  168         */
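             /* bnx2x_txq is laid out cos-major: txdata index =
              * cos * num_eth_queues + queue.  After the shrink there are
              * (old_eth_num - delta) eth queues, so each cos > 0 block must be
              * repacked to the smaller stride. */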
 169        for (cos = 1; cos < bp->max_cos; cos++) {
 170                for (i = 0; i < old_eth_num - delta; i++) {
 171                        struct bnx2x_fastpath *fp = &bp->fp[i];
 172                        int new_idx = cos * (old_eth_num - delta) + i;
 173
 174                        memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
 175                               sizeof(struct bnx2x_fp_txdata));
 176                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
 177                }
 178        }
 179}
 180
 181int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 182
 183/* free skb in the packet ring at pos idx
 184 * return idx of last bd freed
 185 */
 186static uint16_t bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 187                             uint16_t idx, unsigned int *pkts_compl,
 188                             unsigned int *bytes_compl)
 189{
 190        struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
 191        struct eth_tx_start_bd *tx_start_bd;
 192        struct eth_tx_bd *tx_data_bd;
 193        struct block *block = tx_buf->block;
 194        uint16_t bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 195        int nbd;
 196        uint16_t split_bd_len = 0;
 197
 198        /* prefetch skb end pointer to speedup dev_kfree_skb() */
 199        //prefetch(&skb->end); // AKAROS_PORT
 200
 201        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->block %p\n",
 202           txdata->txq_index, idx, tx_buf, block);
 203
 204        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
 205
 206        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 207#ifdef BNX2X_STOP_ON_ERROR
 208        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
 209                BNX2X_ERR("BAD nbd!\n");
 210                bnx2x_panic();
 211        }
 212#endif
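             /* new_cons is the tx BD consumer index just past this packet's
              * BDs; the caller (bnx2x_tx_int) stores it back into
              * txdata->tx_bd_cons. */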
 213        new_cons = nbd + tx_buf->first_bd;
 214
 215        /* Get the next bd */
 216        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 217
 218        /* Skip a parse bd... */
 219        --nbd;
 220        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 221
 222        if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
 223                /* Skip second parse bd... */
 224                --nbd;
 225                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 226        }
 227
 228        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 229        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 230                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 231                split_bd_len = BD_UNMAP_LEN(tx_data_bd);
 232                --nbd;
 233                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 234        }
 235
 236        /* unmap first bd */
 237        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 238                         BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
 239                         DMA_TO_DEVICE);
 240
 241        /* now free frags */
 242        while (nbd > 0) {
 243
 244                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 245                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
 246                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 247                if (--nbd)
 248                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 249        }
 250
 251        /* release block */
 252        warn_on(!block);
 253        if (likely(block)) {
 254                (*pkts_compl)++;
 255                (*bytes_compl) += BLEN(block);
 256        }
 257
 258        freeb(block);
 259        tx_buf->first_bd = 0;
 260        tx_buf->block = NULL;
 261
 262        return new_cons;
 263}
 264
 265int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 266{
 267        uint16_t hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
 268        unsigned int pkts_compl = 0, bytes_compl = 0;
 269
 270#ifdef BNX2X_STOP_ON_ERROR
 271        if (unlikely(bp->panic))
 272                return -1;
 273#endif
 274
 275        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
 276        sw_cons = txdata->tx_pkt_cons;
 277
 278        while (sw_cons != hw_cons) {
 279                uint16_t pkt_cons;
 280
 281                pkt_cons = TX_BD(sw_cons);
 282
 283                DP(NETIF_MSG_TX_DONE,
 284                   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
 285                   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 286
 287                bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
 288                                            &pkts_compl, &bytes_compl);
 289
 290                sw_cons++;
 291        }
 292
 293        txdata->tx_pkt_cons = sw_cons;
 294        txdata->tx_bd_cons = bd_cons;
 295
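             /* AKAROS_PORT: kick the poke tracker so the transmit path
              * re-checks the ring now that descriptors were freed (roughly the
              * netif_tx_wake_queue logic in the Linux driver). */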
 296        poke(&txdata->poker, txdata);
 297        return 0;
 298}
 299
 300static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
 301                                             uint16_t idx)
 302{
 303        uint16_t last_max = fp->last_max_sge;
 304
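             /* Signed 16-bit compare so index wrap-around is handled
              * correctly. */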
 305        if (SUB_S16(idx, last_max) > 0)
 306                fp->last_max_sge = idx;
 307}
 308
 309static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 310                                         uint16_t sge_len,
 311                                         struct eth_end_agg_rx_cqe *cqe)
 312{
 313        struct bnx2x *bp = fp->bp;
 314        uint16_t last_max, last_elem, first_elem;
 315        uint16_t delta = 0;
 316        uint16_t i;
 317
 318        if (!sge_len)
 319                return;
 320
 321        /* First mark all used pages */
 322        for (i = 0; i < sge_len; i++)
 323                BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 324                        RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
 325
 326        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 327           sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 328
 329        /* Here we assume that the last SGE index is the biggest */
 330        prefetch((void *)(fp->sge_mask));
 331        bnx2x_update_last_max_sge(fp,
 332                le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 333
 334        last_max = RX_SGE(fp->last_max_sge);
 335        last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
 336        first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
 337
 338        /* If ring is not full */
 339        if (last_elem + 1 != first_elem)
 340                last_elem++;
 341
 342        /* Now update the prod */
 343        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
 344                if (likely(fp->sge_mask[i]))
 345                        break;
 346
 347                fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
 348                delta += BIT_VEC64_ELEM_SZ;
 349        }
 350
 351        if (delta > 0) {
 352                fp->rx_sge_prod += delta;
 353                /* clear page-end entries */
 354                bnx2x_clear_sge_mask_next_elems(fp);
 355        }
 356
 357        DP(NETIF_MSG_RX_STATUS,
 358           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
 359           fp->last_max_sge, fp->rx_sge_prod);
 360}
 361
 362/* Get Toeplitz hash value in the skb using the value from the
 363 * CQE (calculated by HW).
 364 */
 365static uint32_t bnx2x_get_rxhash(const struct bnx2x *bp,
 366                            const struct eth_fast_path_rx_cqe *cqe,
 367                            enum pkt_hash_types *rxhash_type)
 368{
 369panic("Not implemented");
 370#if 0 // AKAROS_PORT
 371        /* Get Toeplitz hash from CQE */
 372        if ((bp->dev->feat & NETIF_F_RXHASH) &&
 373            (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
 374                enum eth_rss_hash_type htype;
 375
 376                htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
 377                *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
 378                                (htype == TCP_IPV6_HASH_TYPE)) ?
 379                               PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
 380
 381                return le32_to_cpu(cqe->rss_hash_result);
 382        }
 383        *rxhash_type = PKT_HASH_TYPE_NONE;
 384        return 0;
 385#endif
 386}
 387
 388static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, uint16_t queue,
 389                            uint16_t cons, uint16_t prod,
 390                            struct eth_fast_path_rx_cqe *cqe)
 391{
 392panic("Not implemented");
 393#if 0 // AKAROS_PORT
 394        struct bnx2x *bp = fp->bp;
 395        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 396        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 397        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 398        dma_addr_t mapping;
 399        struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 400        struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 401
 402        /* print error if current state != stop */
 403        if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 404                BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 405
 406        /* Try to map an empty data buffer from the aggregation info  */
 407        mapping = dma_map_single(&bp->pdev->dev,
 408                                 first_buf->data + NET_SKB_PAD,
 409                                 fp->rx_buf_size, DMA_FROM_DEVICE);
 410        /*
 411         *  ...if it fails - move the skb from the consumer to the producer
 412         *  and set the current aggregation state as ERROR to drop it
 413         *  when TPA_STOP arrives.
 414         */
 415
 416        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 417                /* Move the BD from the consumer to the producer */
 418                bnx2x_reuse_rx_data(fp, cons, prod);
 419                tpa_info->tpa_state = BNX2X_TPA_ERROR;
 420                return;
 421        }
 422
 423        /* move empty data from pool to prod */
 424        prod_rx_buf->data = first_buf->data;
 425        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 426        /* point prod_bd to new data */
 427        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 428        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 429
 430        /* move partial skb from cons to pool (don't unmap yet) */
 431        *first_buf = *cons_rx_buf;
 432
 433        /* mark bin state as START */
 434        tpa_info->parsing_flags =
 435                le16_to_cpu(cqe->pars_flags.flags);
 436        tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
 437        tpa_info->tpa_state = BNX2X_TPA_START;
 438        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 439        tpa_info->placement_offset = cqe->placement_offset;
 440        tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
 441        if (fp->mode == TPA_MODE_GRO) {
 442                uint16_t gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
 443                tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
 444                tpa_info->gro_size = gro_size;
 445        }
 446
 447#ifdef BNX2X_STOP_ON_ERROR
 448        fp->tpa_queue_used |= (1 << queue);
 449        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
 450           fp->tpa_queue_used);
 451#endif
 452#endif
 453}
 454
 455/* Timestamp option length allowed for TPA aggregation:
 456 *
 457 *              nop nop kind length echo val
 458 */
 459#define TPA_TSTAMP_OPT_LEN      12
 460/**
 461 * bnx2x_set_gro_params - compute GRO values
 462 *
 463 * @skb:                packet skb
 464 * @parsing_flags:      parsing flags from the START CQE
 465 * @len_on_bd:          total length of the first packet for the
 466 *                      aggregation.
 467 * @pkt_len:            length of all segments
 468 *
  469 * Approximates the MSS for this aggregation using its first
  470 * packet, and computes the number of aggregated segments and
  471 * the gso_type.
 472 */
 473static void bnx2x_set_gro_params(struct sk_buff *skb, uint16_t parsing_flags,
 474                                 uint16_t len_on_bd, unsigned int pkt_len,
 475                                 uint16_t num_of_coalesced_segs)
 476{
 477panic("Not implemented");
 478#if 0 // AKAROS_PORT
 479        /* TPA aggregation won't have either IP options or TCP options
 480         * other than timestamp or IPv6 extension headers.
 481         */
 482        uint16_t hdrs_len = ETHERHDRSIZE + sizeof(struct tcphdr);
 483
 484        if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
 485            PRS_FLAG_OVERETH_IPV6) {
 486                hdrs_len += sizeof(struct ipv6hdr);
 487                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 488        } else {
 489                hdrs_len += sizeof(struct iphdr);
 490                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 491        }
 492
  493        /* Check if there was a TCP timestamp; if there is, it will
  494         * always be 12 bytes long: nop nop kind length echo val.
 495         *
 496         * Otherwise FW would close the aggregation.
 497         */
 498        if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 499                hdrs_len += TPA_TSTAMP_OPT_LEN;
 500
 501        skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
 502
 503        /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
 504         * to skb_shinfo(skb)->gso_segs
 505         */
 506        NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
 507#endif
 508}
 509
 510static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 511                              uint16_t index, gfp_t gfp_mask)
 512{
 513        /* AKAROS_PORT: our get_cont_pages returns KVAs, not struct page * */
 514        struct page *page = kva2page(get_cont_pages(PAGES_PER_SGE_SHIFT, gfp_mask));
 515        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
 516        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
 517        dma_addr_t mapping;
 518
 519        if (unlikely(page == NULL)) {
 520                BNX2X_ERR("Can't alloc sge\n");
 521                return -ENOMEM;
 522        }
 523
 524        mapping = dma_map_page(&bp->pdev->dev, page, 0,
 525                               SGE_PAGES, DMA_FROM_DEVICE);
 526        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 527                free_cont_pages(page2kva(page), PAGES_PER_SGE_SHIFT);
 528                BNX2X_ERR("Can't map sge\n");
 529                return -ENOMEM;
 530        }
 531
 532        sw_buf->page = page;
 533        dma_unmap_addr_set(sw_buf, mapping, mapping);
 534
 535        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 536        sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 537
 538        return 0;
 539}
 540
 541static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 542                               struct bnx2x_agg_info *tpa_info,
 543                               uint16_t pages,
 544                               struct sk_buff *skb,
 545                               struct eth_end_agg_rx_cqe *cqe,
 546                               uint16_t cqe_idx)
 547{
 548panic("Not implemented");
 549#if 0 // AKAROS_PORT
 550        struct sw_rx_page *rx_pg, old_rx_pg;
 551        uint32_t i, frag_len, frag_size;
 552        int err, j, frag_id = 0;
 553        uint16_t len_on_bd = tpa_info->len_on_bd;
 554        uint16_t full_page = 0, gro_size = 0;
 555
 556        frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 557
 558        if (fp->mode == TPA_MODE_GRO) {
 559                gro_size = tpa_info->gro_size;
 560                full_page = tpa_info->full_page;
 561        }
 562
 563        /* This is needed in order to enable forwarding support */
 564        if (frag_size)
 565                bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
 566                                     le16_to_cpu(cqe->pkt_len),
 567                                     le16_to_cpu(cqe->num_of_coalesced_segs));
 568
 569#ifdef BNX2X_STOP_ON_ERROR
 570        if (pages > MIN_T(uint32_t, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
 571                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 572                          pages, cqe_idx);
 573                BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 574                bnx2x_panic();
 575                return -EINVAL;
 576        }
 577#endif
 578
 579        /* Run through the SGL and compose the fragmented skb */
 580        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
 581                uint16_t sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 582
 583                /* FW gives the indices of the SGE as if the ring is an array
 584                   (meaning that "next" element will consume 2 indices) */
 585                if (fp->mode == TPA_MODE_GRO)
 586                        frag_len = MIN_T(uint32_t, frag_size,
 587                                         (uint32_t)full_page);
 588                else /* LRO */
 589                        frag_len = MIN_T(uint32_t, frag_size,
 590                                         (uint32_t)SGE_PAGES);
 591
 592                rx_pg = &fp->rx_page_ring[sge_idx];
 593                old_rx_pg = *rx_pg;
 594
 595                /* If we fail to allocate a substitute page, we simply stop
 596                   where we are and drop the whole packet */
 597                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, 0);
 598                if (unlikely(err)) {
 599                        bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 600                        return err;
 601                }
 602
 603                /* Unmap the page as we're going to pass it to the stack */
 604                dma_unmap_page(&bp->pdev->dev,
 605                               dma_unmap_addr(&old_rx_pg, mapping),
 606                               SGE_PAGES, DMA_FROM_DEVICE);
 607                /* Add one frag and update the appropriate fields in the skb */
 608                if (fp->mode == TPA_MODE_LRO)
 609                        skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
 610                else { /* GRO */
 611                        int rem;
 612                        int offset = 0;
 613                        for (rem = frag_len; rem > 0; rem -= gro_size) {
 614                                int len = rem > gro_size ? gro_size : rem;
 615                                skb_fill_page_desc(skb, frag_id++,
 616                                                   old_rx_pg.page, offset, len);
 617                                /* TODO: if this is pinning for I/O, we need to change to a
 618                                 * device-ownership / mmap model. */
 619                                if (offset)
 620                                        page_incref(old_rx_pg.page);
 621                                offset += len;
 622                        }
 623                }
 624
 625                skb->data_len += frag_len;
 626                skb->truesize += SGE_PAGES;
 627                skb->len += frag_len;
 628
 629                frag_size -= frag_len;
 630        }
 631
 632        return 0;
 633#endif
 634}
 635
 636static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 637{
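             /* AKAROS_PORT: when rx_frag_size is set, the buffer is a whole
              * kernel page from kpage_alloc_addr() (see bnx2x_frag_alloc()
              * below), so drop the page reference; otherwise it came from
              * kmalloc(). */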
 638        if (fp->rx_frag_size)
 639                page_decref(kva2page(data));
 640        else
 641                kfree(data);
 642}
 643
 644static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
 645{
 646        if (fp->rx_frag_size) {
  647                /* MEM_WAIT (GFP_KERNEL) allocations are used only during initialization */
 648                if (unlikely(gfp_mask & MEM_WAIT))
 649                        return (void *)kpage_alloc_addr();
 650
 651#if 0 // AKAROS_PORT
 652                return netdev_alloc_frag(fp->rx_frag_size);
 653#else
 654                return (void *)kpage_alloc_addr();
 655#endif
 656        }
 657
 658        return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
 659}
 660
 661#ifdef CONFIG_INET
 662static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
 663{
 664        const struct iphdr *iph = ip_hdr(skb);
 665        struct tcphdr *th;
 666
 667        skb_set_transport_header(skb, sizeof(struct iphdr));
 668        th = tcp_hdr(skb);
 669
 670        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
 671                                  iph->saddr, iph->daddr, 0);
 672}
 673
 674static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
 675{
 676        struct ipv6hdr *iph = ipv6_hdr(skb);
 677        struct tcphdr *th;
 678
 679        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 680        th = tcp_hdr(skb);
 681
 682        th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 683                                  &iph->saddr, &iph->daddr, 0);
 684}
 685
 686static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
 687                            void (*gro_func)(struct bnx2x*, struct sk_buff*))
 688{
 689        skb_set_network_header(skb, 0);
 690        gro_func(bp, skb);
 691        tcp_gro_complete(skb);
 692}
 693#endif
 694
 695static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 696                               struct sk_buff *skb)
 697{
 698panic("Not implemented");
 699#if 0 // AKAROS_PORT
 700#ifdef CONFIG_INET
 701        if (skb_shinfo(skb)->gso_size) {
 702                switch (be16_to_cpu(skb->protocol)) {
 703                case ETH_P_IP:
 704                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
 705                        break;
 706                case ETH_P_IPV6:
 707                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
 708                        break;
 709                default:
 710                        BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
 711                                  be16_to_cpu(skb->protocol));
 712                }
 713        }
 714#endif
 715        skb_record_rx_queue(skb, fp->rx_queue);
 716        napi_gro_receive(&fp->napi, skb);
 717#endif
 718}
 719
 720static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 721                           struct bnx2x_agg_info *tpa_info,
 722                           uint16_t pages,
 723                           struct eth_end_agg_rx_cqe *cqe,
 724                           uint16_t cqe_idx)
 725{
 726panic("Not implemented");
 727#if 0 // AKAROS_PORT
 728        struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 729        uint8_t pad = tpa_info->placement_offset;
 730        uint16_t len = tpa_info->len_on_bd;
 731        struct sk_buff *skb = NULL;
 732        uint8_t *new_data, *data = rx_buf->data;
 733        uint8_t old_tpa_state = tpa_info->tpa_state;
 734
 735        tpa_info->tpa_state = BNX2X_TPA_STOP;
 736
  737        /* If there was an error during the handling of the TPA_START -
 738         * drop this aggregation.
 739         */
 740        if (old_tpa_state == BNX2X_TPA_ERROR)
 741                goto drop;
 742
 743        /* Try to allocate the new data */
 744        new_data = bnx2x_frag_alloc(fp, 0);
 745        /* Unmap skb in the pool anyway, as we are going to change
 746           pool entry status to BNX2X_TPA_STOP even if new skb allocation
 747           fails. */
 748        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 749                         fp->rx_buf_size, DMA_FROM_DEVICE);
 750        if (likely(new_data))
 751                skb = build_skb(data, fp->rx_frag_size);
 752
 753        if (likely(skb)) {
 754#ifdef BNX2X_STOP_ON_ERROR
 755                if (pad + len > fp->rx_buf_size) {
 756                        BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
 757                                  pad, len, fp->rx_buf_size);
 758                        bnx2x_panic();
 759                        return;
 760                }
 761#endif
 762
 763                skb_reserve(skb, pad + NET_SKB_PAD);
 764                skb_put(skb, len);
 765                skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
 766
 767                skb->protocol = eth_type_trans(skb, bp->dev);
 768                skb->ip_summed = CHECKSUM_UNNECESSARY;
 769
 770                if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
 771                                         skb, cqe, cqe_idx)) {
 772                        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 773                                __vlan_hwaccel_put_tag(skb,
 774                                                       cpu_to_be16(ETH_P_8021Q),
 775                                                       tpa_info->vlan_tag);
 776                        bnx2x_gro_receive(bp, fp, skb);
 777                } else {
 778                        DP(NETIF_MSG_RX_STATUS,
 779                           "Failed to allocate new pages - dropping packet!\n");
 780                        dev_kfree_skb_any(skb);
 781                }
 782
 783                /* put new data in bin */
 784                rx_buf->data = new_data;
 785
 786                return;
 787        }
 788        if (new_data)
 789                bnx2x_frag_free(fp, new_data);
 790drop:
 791        /* drop the packet and keep the buffer in the bin */
 792        DP(NETIF_MSG_RX_STATUS,
 793           "Failed to allocate or map a new skb - dropping packet!\n");
 794        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
 795#endif
 796}
 797
 798static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 799                               uint16_t index, gfp_t gfp_mask)
 800{
 801        uint8_t *data;
 802        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
 803        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 804        dma_addr_t mapping;
 805
 806        data = bnx2x_frag_alloc(fp, gfp_mask);
 807        if (unlikely(data == NULL))
 808                return -ENOMEM;
 809
 810        mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
 811                                 fp->rx_buf_size,
 812                                 DMA_FROM_DEVICE);
 813        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 814                bnx2x_frag_free(fp, data);
 815                BNX2X_ERR("Can't map rx data\n");
 816                return -ENOMEM;
 817        }
 818
 819        rx_buf->data = data;
 820        dma_unmap_addr_set(rx_buf, mapping, mapping);
 821
 822        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 823        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 824
 825        return 0;
 826}
 827
 828static
 829void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
 830                                 struct bnx2x_fastpath *fp,
 831                                 struct bnx2x_eth_q_stats *qstats)
 832{
 833panic("Not implemented");
 834#if 0 // AKAROS_PORT
 835        /* Do nothing if no L4 csum validation was done.
 836         * We do not check whether IP csum was validated. For IPv4 we assume
 837         * that if the card got as far as validating the L4 csum, it also
 838         * validated the IP csum. IPv6 has no IP csum.
 839         */
 840        if (cqe->fast_path_cqe.status_flags &
 841            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
 842                return;
 843
 844        /* If L4 validation was done, check if an error was found. */
 845
 846        if (cqe->fast_path_cqe.type_error_flags &
 847            (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
 848             ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
 849                qstats->hw_csum_err++;
 850        else
 851                skb->ip_summed = CHECKSUM_UNNECESSARY;
 852#endif
 853}
 854
 855static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 856{
 857        struct bnx2x *bp = fp->bp;
 858        uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 859        uint16_t sw_comp_cons, sw_comp_prod;
 860        int rx_pkt = 0;
 861        union eth_rx_cqe *cqe;
 862        struct eth_fast_path_rx_cqe *cqe_fp;
 863
 864        struct block *block;
 865
 866#ifdef BNX2X_STOP_ON_ERROR
 867        if (unlikely(bp->panic))
 868                return 0;
 869#endif
 870        if (budget <= 0)
 871                return rx_pkt;
 872
 873        bd_cons = fp->rx_bd_cons;
 874        bd_prod = fp->rx_bd_prod;
 875        bd_prod_fw = bd_prod;
 876        sw_comp_cons = fp->rx_comp_cons;
 877        sw_comp_prod = fp->rx_comp_prod;
 878
 879        comp_ring_cons = RCQ_BD(sw_comp_cons);
 880        cqe = &fp->rx_comp_ring[comp_ring_cons];
 881        cqe_fp = &cqe->fast_path_cqe;
 882
 883        DP(NETIF_MSG_RX_STATUS,
 884           "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
 885
 886        while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
 887                struct sw_rx_bd *rx_buf = NULL;
 888                uint8_t cqe_fp_flags;
 889                enum eth_rx_cqe_type cqe_fp_type;
 890                uint16_t len, pad, queue;
 891                uint8_t *data;
 892                uint32_t rxhash;
 893
 894#ifdef BNX2X_STOP_ON_ERROR
 895                if (unlikely(bp->panic))
 896                        return 0;
 897#endif
 898
 899                bd_prod = RX_BD(bd_prod);
 900                bd_cons = RX_BD(bd_cons);
 901
 902                /* A rmb() is required to ensure that the CQE is not read
 903                 * before it is written by the adapter DMA.  PCI ordering
 904                 * rules will make sure the other fields are written before
 905                 * the marker at the end of struct eth_fast_path_rx_cqe
 906                 * but without rmb() a weakly ordered processor can process
 907                 * stale data.  Without the barrier TPA state-machine might
 908                 * enter inconsistent state and kernel stack might be
  909                 * provided with an incorrect packet description - these lead
  910                 * to various kernel crashes.
 911                 */
 912                rmb();
 913
 914                cqe_fp_flags = cqe_fp->type_error_flags;
 915                cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 916
 917                DP(NETIF_MSG_RX_STATUS,
 918                   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
 919                   CQE_TYPE(cqe_fp_flags),
 920                   cqe_fp_flags, cqe_fp->status_flags,
 921                   le32_to_cpu(cqe_fp->rss_hash_result),
 922                   le16_to_cpu(cqe_fp->vlan_tag),
 923                   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
 924
 925                /* is this a slowpath msg? */
 926                if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 927                        bnx2x_sp_event(fp, cqe);
 928                        goto next_cqe;
 929                }
 930
 931                rx_buf = &fp->rx_buf_ring[bd_cons];
 932                data = rx_buf->data;
 933
 934                if (!CQE_TYPE_FAST(cqe_fp_type)) {
 935                        struct bnx2x_agg_info *tpa_info;
 936                        uint16_t frag_size, pages;
 937#ifdef BNX2X_STOP_ON_ERROR
 938                        /* sanity check */
 939                        if (fp->disable_tpa &&
 940                            (CQE_TYPE_START(cqe_fp_type) ||
 941                             CQE_TYPE_STOP(cqe_fp_type)))
 942                                BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
 943                                          CQE_TYPE(cqe_fp_type));
 944#endif
 945
 946                        if (CQE_TYPE_START(cqe_fp_type)) {
 947                                uint16_t queue = cqe_fp->queue_index;
 948                                DP(NETIF_MSG_RX_STATUS,
 949                                   "calling tpa_start on queue %d\n",
 950                                   queue);
 951
 952                                bnx2x_tpa_start(fp, queue,
 953                                                bd_cons, bd_prod,
 954                                                cqe_fp);
 955
 956                                goto next_rx;
 957                        }
 958                        queue = cqe->end_agg_cqe.queue_index;
 959                        tpa_info = &fp->tpa_info[queue];
 960                        DP(NETIF_MSG_RX_STATUS,
 961                           "calling tpa_stop on queue %d\n",
 962                           queue);
 963
 964                        frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
 965                                    tpa_info->len_on_bd;
 966
 967                        if (fp->mode == TPA_MODE_GRO)
 968                                pages = (frag_size + tpa_info->full_page - 1) /
 969                                         tpa_info->full_page;
 970                        else
 971                                pages = SGE_PAGE_ALIGN(frag_size) >>
 972                                        SGE_PAGE_SHIFT;
 973
 974                        bnx2x_tpa_stop(bp, fp, tpa_info, pages,
 975                                       &cqe->end_agg_cqe, comp_ring_cons);
 976#ifdef BNX2X_STOP_ON_ERROR
 977                        if (bp->panic)
 978                                return 0;
 979#endif
 980
 981                        bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
 982                        goto next_cqe;
 983                }
 984                /* non TPA */
 985                len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
 986                pad = cqe_fp->placement_offset;
 987                dma_sync_single_for_cpu(&bp->pdev->dev,
 988                                        dma_unmap_addr(rx_buf, mapping),
 989                                        pad + RX_COPY_THRESH,
 990                                        DMA_FROM_DEVICE);
 991                pad += NET_SKB_PAD;
 992                prefetch(data + pad); /* speedup eth_type_trans() */
 993                /* is this an error packet? */
 994                if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
 995                        DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 996                           "ERROR  flags %x  rx packet %u\n",
 997                           cqe_fp_flags, sw_comp_cons);
 998                        bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
 999                        goto reuse_rx;
1000                }
1001
 1002                /* Since we don't have a jumbo ring,
 1003                 * copy small packets if mtu > 1500.
 1004                 */
1005                /* TODO: AKAROS_PORT always copy out the packet for now. */
1006                if (1) {
1007//              if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1008//                  (len <= RX_COPY_THRESH)) {
1009                        block = block_alloc(len, MEM_ATOMIC);
1010                        if (block == NULL) {
1011                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1012                                   "ERROR  packet dropped because of alloc failure\n");
1013                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1014                                goto reuse_rx;
1015                        }
1016                        memcpy(block->wp, data + pad, len);
1017                        block->wp += len;
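                             /* The payload was copied into the block, so the
                              * original rx buffer can be recycled straight back
                              * to the producer ring. */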
1018                        bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1019                } else {
1020                        if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1021                                                       0) == 0)) {
1022                                dma_unmap_single(&bp->pdev->dev,
1023                                                 dma_unmap_addr(rx_buf,
1024                                                                mapping),
1025                                                 fp->rx_buf_size,
1026                                                 DMA_FROM_DEVICE);
1027                                /* TODO: block extra data here */
1028                                panic("Extra-data not implemented");
1029                                #if 0 // AKAROS_PORT
1030                                skb = build_skb(data, fp->rx_frag_size);
1031                                if (unlikely(!skb)) {
1032                                        bnx2x_frag_free(fp, data);
1033                                        bnx2x_fp_qstats(bp, fp)->
1034                                                        rx_skb_alloc_failed++;
1035                                        goto next_rx;
1036                                }
1037                                skb_reserve(skb, pad);
1038                                #endif
1039                        } else {
1040                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1041                                   "ERROR  packet dropped because of alloc failure\n");
1042                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1043reuse_rx:
1044                                bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1045                                goto next_rx;
1046                        }
1047                }
1048
1049                // AKAROS_PORT TODO: set hash and checksum stuff
1050#if 0
1051                skb_put(skb, len);
1052                skb->protocol = eth_type_trans(skb, bp->dev);
1053
 1054                /* Set Toeplitz hash for a non-LRO skb */
1055                rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1056                skb_set_hash(skb, rxhash, rxhash_type);
1057
1058                skb_checksum_none_assert(skb);
1059
1060                if (bp->dev->feat & NETIF_F_RXCSUM)
1061                        bnx2x_csum_validate(skb, cqe, fp,
1062                                            bnx2x_fp_qstats(bp, fp));
1063
1064                skb_record_rx_queue(skb, fp->rx_queue);
1065
1066                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1067                    PARSING_FLAGS_VLAN)
1068                        __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1069                                               le16_to_cpu(cqe_fp->vlan_tag));
1070
1071                skb_mark_napi_id(skb, &fp->napi);
1072
1073                if (bnx2x_fp_ll_polling(fp))
1074                        netif_receive_skb(skb);
1075                else
1076                        napi_gro_receive(&fp->napi, skb);
1077#endif
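                     /* AKAROS_PORT: hand the completed block to the ethernet
                      * input queue (etheriq) in place of the Linux
                      * napi_gro_receive() path above. */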
1078                etheriq(bp->edev, block, TRUE);
1079next_rx:
1080                rx_buf->data = NULL;
1081
1082                bd_cons = NEXT_RX_IDX(bd_cons);
1083                bd_prod = NEXT_RX_IDX(bd_prod);
1084                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1085                rx_pkt++;
1086next_cqe:
1087                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1088                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1089
1090                /* mark CQE as free */
1091                BNX2X_SEED_CQE(cqe_fp);
1092
1093                if (rx_pkt == budget)
1094                        break;
1095
1096                comp_ring_cons = RCQ_BD(sw_comp_cons);
1097                cqe = &fp->rx_comp_ring[comp_ring_cons];
1098                cqe_fp = &cqe->fast_path_cqe;
1099        } /* while */
1100
1101        fp->rx_bd_cons = bd_cons;
1102        fp->rx_bd_prod = bd_prod_fw;
1103        fp->rx_comp_cons = sw_comp_cons;
1104        fp->rx_comp_prod = sw_comp_prod;
1105
1106        /* Update producers */
1107        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1108                             fp->rx_sge_prod);
1109
1110        fp->rx_pkt += rx_pkt;
1111        fp->rx_calls++;
1112
1113        return rx_pkt;
1114}
1115
1116static void bnx2x_msix_fp_int(struct hw_trapframe *hw_tf, void *fp_cookie)
1117{
1118        struct bnx2x_fastpath *fp = fp_cookie;
1119        struct bnx2x *bp = fp->bp;
1120        uint8_t cos;
1121
1122        DP(NETIF_MSG_INTR,
1123           "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1124           fp->index, fp->fw_sb_id, fp->igu_sb_id);
1125
1126        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1127
1128#ifdef BNX2X_STOP_ON_ERROR
1129        if (unlikely(bp->panic))
1130                return;
1131#endif
1132
1133        /* Handle Rx and Tx according to MSI-X vector */
1134        for_each_cos_in_tx_queue(fp, cos)
1135                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1136
1137        prefetch(&fp->sb_running_index[SM_RX_ID]);
1138        // AKAROS_PORT
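             /* Defer RX/TX processing to bnx2x_poll() via a routine kernel
              * message on this core rather than doing the work in IRQ
              * context. */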
1139        send_kernel_message(core_id(), bnx2x_poll, (long)fp, 0, 0,
1140                            KMSG_ROUTINE);
1141        napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1142
1143        return;
1144}
1145
1146/* HW Lock for shared dual port PHYs */
1147void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1148{
1149        qlock(&bp->port.phy_mutex);
1150
1151        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1152}
1153
1154void bnx2x_release_phy_lock(struct bnx2x *bp)
1155{
1156        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1157
1158        qunlock(&bp->port.phy_mutex);
1159}
1160
1161/* calculates MF speed according to current linespeed and MF configuration */
1162uint16_t bnx2x_get_mf_speed(struct bnx2x *bp)
1163{
1164        uint16_t line_speed = bp->link_vars.line_speed;
1165        if (IS_MF(bp)) {
1166                uint16_t maxCfg = bnx2x_extract_max_cfg(bp,
1167                                                   bp->mf_config[BP_VN(bp)]);
1168
1169                /* Calculate the current MAX line speed limit for the MF
1170                 * devices
1171                 */
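                     /* In SI mode maxCfg is a percentage of the line speed; in
                      * SD mode it is expressed in units of 100 Mbps. */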
1172                if (IS_MF_SI(bp))
1173                        line_speed = (line_speed * maxCfg) / 100;
1174                else { /* SD mode */
1175                        uint16_t vn_max_rate = maxCfg * 100;
1176
1177                        if (vn_max_rate < line_speed)
1178                                line_speed = vn_max_rate;
1179                }
1180        }
1181
1182        return line_speed;
1183}
1184
1185/**
1186 * bnx2x_fill_report_data - fill link report data to report
1187 *
1188 * @bp:         driver handle
1189 * @data:       link state to update
1190 *
 1191 * It uses non-atomic bit operations because it is called under the mutex.
1192 */
1193static void bnx2x_fill_report_data(struct bnx2x *bp,
1194                                   struct bnx2x_link_report_data *data)
1195{
1196        memset(data, 0, sizeof(*data));
1197
1198        if (IS_PF(bp)) {
1199                /* Fill the report data: effective line speed */
1200                data->line_speed = bnx2x_get_mf_speed(bp);
1201
1202                /* Link is down */
1203                if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1204                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1205                                  &data->link_report_flags);
1206
1207                if (!BNX2X_NUM_ETH_QUEUES(bp))
1208                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1209                                  &data->link_report_flags);
1210
1211                /* Full DUPLEX */
1212                if (bp->link_vars.duplex == DUPLEX_FULL)
1213                        __set_bit(BNX2X_LINK_REPORT_FD,
1214                                  &data->link_report_flags);
1215
1216                /* Rx Flow Control is ON */
1217                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1218                        __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1219                                  &data->link_report_flags);
1220
1221                /* Tx Flow Control is ON */
1222                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1223                        __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1224                                  &data->link_report_flags);
1225        } else { /* VF */
1226                *data = bp->vf_link_vars;
1227        }
1228}
1229
1230/**
1231 * bnx2x_link_report - report link status to OS.
1232 *
1233 * @bp:         driver handle
1234 *
 1235 * Calls __bnx2x_link_report() under the same locking scheme
 1236 * as the link/PHY state managing code to ensure consistent
 1237 * link reporting.
1238 */
1239
1240void bnx2x_link_report(struct bnx2x *bp)
1241{
1242        bnx2x_acquire_phy_lock(bp);
1243        __bnx2x_link_report(bp);
1244        bnx2x_release_phy_lock(bp);
1245}
1246
1247/**
1248 * __bnx2x_link_report - report link status to OS.
1249 *
1250 * @bp:         driver handle
1251 *
 1252 * Non-atomic implementation.
1253 * Should be called under the phy_lock.
1254 */
1255void __bnx2x_link_report(struct bnx2x *bp)
1256{
1257        struct bnx2x_link_report_data cur_data;
1258
1259        /* reread mf_cfg */
1260        if (IS_PF(bp) && !CHIP_IS_E1(bp))
1261                bnx2x_read_mf_cfg(bp);
1262
1263        /* Read the current link report info */
1264        bnx2x_fill_report_data(bp, &cur_data);
1265
1266        /* Don't report link down or exactly the same link status twice */
1267        if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1268            (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1269                      &bp->last_reported_link.link_report_flags) &&
1270             test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1271                      &cur_data.link_report_flags)))
1272                return;
1273
1274        bp->link_cnt++;
1275
 1276        /* We are going to report new link parameters now -
1277         * remember the current data for the next time.
1278         */
1279        memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1280
1281        /* propagate status to VFs */
1282        if (IS_PF(bp))
1283                bnx2x_iov_link_update(bp);
1284
1285        if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1286                     &cur_data.link_report_flags)) {
1287                netif_carrier_off(bp->dev);
1288                netdev_err(bp->dev, "NIC Link is Down\n");
1289                return;
1290        } else {
1291                const char *duplex;
1292                const char *flow;
1293
1294                netif_carrier_on(bp->dev);
1295
1296                if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1297                                       &cur_data.link_report_flags))
1298                        duplex = "full";
1299                else
1300                        duplex = "half";
1301
 1302                /* Handle the FC at the end so that only these flags could
 1303                 * possibly be set. This way we can easily check whether FC
 1304                 * is enabled.
1305                 */
1306                if (cur_data.link_report_flags) {
1307                        if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1308                                     &cur_data.link_report_flags)) {
1309                                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1310                                     &cur_data.link_report_flags))
1311                                        flow = "ON - receive & transmit";
1312                                else
1313                                        flow = "ON - receive";
1314                        } else {
1315                                flow = "ON - transmit";
1316                        }
1317                } else {
1318                        flow = "none";
1319                }
1320                netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1321                            cur_data.line_speed, duplex, flow);
1322        }
1323}
1324
1325static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1326{
1327        int i;
1328
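             /* The last slots of each SGE page are reserved as a "next page"
              * pointer that chains the pages into a ring; point each at the
              * DMA address of the following page (wrapping at
              * NUM_RX_SGE_PAGES). */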
1329        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1330                struct eth_rx_sge *sge;
1331
1332                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1333                sge->addr_hi =
1334                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1335                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1336
1337                sge->addr_lo =
1338                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1339                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1340        }
1341}
1342
1343static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1344                                struct bnx2x_fastpath *fp, int last)
1345{
1346        int i;
1347
1348        for (i = 0; i < last; i++) {
1349                struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1350                struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1351                uint8_t *data = first_buf->data;
1352
1353                if (data == NULL) {
1354                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1355                        continue;
1356                }
1357                if (tpa_info->tpa_state == BNX2X_TPA_START)
1358                        dma_unmap_single(&bp->pdev->dev,
1359                                         dma_unmap_addr(first_buf, mapping),
1360                                         fp->rx_buf_size, DMA_FROM_DEVICE);
1361                bnx2x_frag_free(fp, data);
1362                first_buf->data = NULL;
1363        }
1364}
1365
1366void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1367{
1368        int j;
1369
1370        for_each_rx_queue_cnic(bp, j) {
1371                struct bnx2x_fastpath *fp = &bp->fp[j];
1372
1373                fp->rx_bd_cons = 0;
1374
1375                /* Activate BD ring */
 1376                /* Warning!
 1377                 * This will generate an interrupt (to the TSTORM);
 1378                 * it must only be done after the chip is initialized.
1379                 */
1380                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1381                                     fp->rx_sge_prod);
1382        }
1383}
1384
1385void bnx2x_init_rx_rings(struct bnx2x *bp)
1386{
1387        int func = BP_FUNC(bp);
1388        uint16_t ring_prod;
1389        int i, j;
1390
1391        /* Allocate TPA resources */
1392        for_each_eth_queue(bp, j) {
1393                struct bnx2x_fastpath *fp = &bp->fp[j];
1394
1395                DP(NETIF_MSG_IFUP,
1396                   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1397
1398                if (!fp->disable_tpa) {
1399                        /* Fill the per-aggregation pool */
1400                        for (i = 0; i < MAX_AGG_QS(bp); i++) {
1401                                struct bnx2x_agg_info *tpa_info =
1402                                        &fp->tpa_info[i];
1403                                struct sw_rx_bd *first_buf =
1404                                        &tpa_info->first_buf;
1405
1406                                first_buf->data =
1407                                        bnx2x_frag_alloc(fp, MEM_WAIT);
1408                                if (!first_buf->data) {
1409                                        BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1410                                                  j);
1411                                        bnx2x_free_tpa_pool(bp, fp, i);
1412                                        fp->disable_tpa = 1;
1413                                        break;
1414                                }
1415                                dma_unmap_addr_set(first_buf, mapping, 0);
1416                                tpa_info->tpa_state = BNX2X_TPA_STOP;
1417                        }
1418
1419                        /* "next page" elements initialization */
1420                        bnx2x_set_next_page_sgl(fp);
1421
1422                        /* set SGEs bit mask */
1423                        bnx2x_init_sge_ring_bit_mask(fp);
1424
1425                        /* Allocate SGEs and initialize the ring elements */
1426                        for (i = 0, ring_prod = 0;
1427                             i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1428
1429                                if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1430                                                       MEM_WAIT) < 0) {
1431                                        BNX2X_ERR("was only able to allocate %d rx sges\n",
1432                                                  i);
1433                                        BNX2X_ERR("disabling TPA for queue[%d]\n",
1434                                                  j);
1435                                        /* Cleanup already allocated elements */
1436                                        bnx2x_free_rx_sge_range(bp, fp,
1437                                                                ring_prod);
1438                                        bnx2x_free_tpa_pool(bp, fp,
1439                                                            MAX_AGG_QS(bp));
1440                                        fp->disable_tpa = 1;
1441                                        ring_prod = 0;
1442                                        break;
1443                                }
1444                                ring_prod = NEXT_SGE_IDX(ring_prod);
1445                        }
1446
1447                        fp->rx_sge_prod = ring_prod;
1448                }
1449        }
1450
1451        for_each_eth_queue(bp, j) {
1452                struct bnx2x_fastpath *fp = &bp->fp[j];
1453
1454                fp->rx_bd_cons = 0;
1455
1456                /* Activate BD ring */
1457                /* Warning!
1458                 * This will generate an interrupt (to the TSTORM);
1459                 * it must only be done after the chip is initialized.
1460                 */
1461                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1462                                     fp->rx_sge_prod);
1463
1464                if (j != 0)
1465                        continue;
1466
1467                if (CHIP_IS_E1(bp)) {
1468                        REG_WR(bp, BAR_USTRORM_INTMEM +
1469                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1470                               U64_LO(fp->rx_comp_mapping));
1471                        REG_WR(bp, BAR_USTRORM_INTMEM +
1472                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1473                               U64_HI(fp->rx_comp_mapping));
1474                }
1475        }
1476}
1477
1478static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1479{
1480        panic("Not implemented");
1481#if 0 // AKAROS_PORT
1482        uint8_t cos;
1483        struct bnx2x *bp = fp->bp;
1484
1485        for_each_cos_in_tx_queue(fp, cos) {
1486                struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1487                unsigned pkts_compl = 0, bytes_compl = 0;
1488
1489                uint16_t sw_prod = txdata->tx_pkt_prod;
1490                uint16_t sw_cons = txdata->tx_pkt_cons;
1491
1492                while (sw_cons != sw_prod) {
1493                        bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1494                                          &pkts_compl, &bytes_compl);
1495                        sw_cons++;
1496                }
1497
1498                netdev_tx_reset_queue(
1499                        netdev_get_tx_queue(bp->dev,
1500                                            txdata->txq_index));
1501        }
1502#endif
1503}
1504
1505static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1506{
1507        int i;
1508
1509        for_each_tx_queue_cnic(bp, i) {
1510                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1511        }
1512}
1513
1514static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1515{
1516        int i;
1517
1518        for_each_eth_queue(bp, i) {
1519                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1520        }
1521}
1522
1523static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1524{
1525        struct bnx2x *bp = fp->bp;
1526        int i;
1527
1528        /* ring wasn't allocated */
1529        if (fp->rx_buf_ring == NULL)
1530                return;
1531
1532        for (i = 0; i < NUM_RX_BD; i++) {
1533                struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1534                uint8_t *data = rx_buf->data;
1535
1536                if (data == NULL)
1537                        continue;
1538                dma_unmap_single(&bp->pdev->dev,
1539                                 dma_unmap_addr(rx_buf, mapping),
1540                                 fp->rx_buf_size, DMA_FROM_DEVICE);
1541
1542                rx_buf->data = NULL;
1543                bnx2x_frag_free(fp, data);
1544        }
1545}
1546
1547static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1548{
1549        int j;
1550
1551        for_each_rx_queue_cnic(bp, j) {
1552                bnx2x_free_rx_bds(&bp->fp[j]);
1553        }
1554}
1555
1556static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1557{
1558        int j;
1559
1560        for_each_eth_queue(bp, j) {
1561                struct bnx2x_fastpath *fp = &bp->fp[j];
1562
1563                bnx2x_free_rx_bds(fp);
1564
1565                if (!fp->disable_tpa)
1566                        bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1567        }
1568}
1569
1570static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1571{
1572        bnx2x_free_tx_skbs_cnic(bp);
1573        bnx2x_free_rx_skbs_cnic(bp);
1574}
1575
1576void bnx2x_free_skbs(struct bnx2x *bp)
1577{
1578        bnx2x_free_tx_skbs(bp);
1579        bnx2x_free_rx_skbs(bp);
1580}
1581
1582void bnx2x_update_max_mf_config(struct bnx2x *bp, uint32_t value)
1583{
1584        /* load old values */
1585        uint32_t mf_cfg = bp->mf_config[BP_VN(bp)];
1586
1587        if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1588                /* leave all but MAX value */
1589                mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1590
1591                /* set new MAX value */
1592                mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1593                                & FUNC_MF_CFG_MAX_BW_MASK;
1594
1595                bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1596        }
1597}
1598
1599/**
1600 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1601 *
1602 * @bp:         driver handle
1603 * @nvecs:      number of vectors to be released
1604 */
1605static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1606{
1607        panic("Not implemented");
1608#if 0 // AKAROS_PORT
1609        int i, offset = 0;
1610
1611        if (nvecs == offset)
1612                return;
1613
1614        /* VFs don't have a default SB */
1615        if (IS_PF(bp)) {
1616                free_irq(bp->msix_table[offset].vector, bp->dev);
1617                DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1618                   bp->msix_table[offset].vector);
1619                offset++;
1620        }
1621
1622        if (CNIC_SUPPORT(bp)) {
1623                if (nvecs == offset)
1624                        return;
1625                offset++;
1626        }
1627
1628        for_each_eth_queue(bp, i) {
1629                if (nvecs == offset)
1630                        return;
1631                DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1632                   i, bp->msix_table[offset].vector);
1633
1634                free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1635        }
1636#endif
1637}
1638
1639void bnx2x_free_irq(struct bnx2x *bp)
1640{
1641        panic("Not implemented");
1642#if 0 // AKAROS_PORT
1643        if (bp->flags & USING_MSIX_FLAG &&
1644            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1645                int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1646
1647                /* vfs don't have a default status block */
1648                if (IS_PF(bp))
1649                        nvecs++;
1650
1651                bnx2x_free_msix_irqs(bp, nvecs);
1652        } else {
1653                free_irq(bp->dev->irq, bp->dev);
1654        }
1655#endif
1656}
1657
1658int bnx2x_enable_msix(struct bnx2x *bp)
1659{
1660        int msix_vec = 0, i, rc;
1661        panic("Not implemented");
1662#if 0 // AKAROS_PORT
1663        /* VFs don't have a default status block */
1664        if (IS_PF(bp)) {
1665                bp->msix_table[msix_vec].entry = msix_vec;
1666                BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1667                               bp->msix_table[0].entry);
1668                msix_vec++;
1669        }
1670
1671        /* Cnic requires an msix vector for itself */
1672        if (CNIC_SUPPORT(bp)) {
1673                bp->msix_table[msix_vec].entry = msix_vec;
1674                BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1675                               msix_vec, bp->msix_table[msix_vec].entry);
1676                msix_vec++;
1677        }
1678
1679        /* We need separate vectors for ETH queues only (not FCoE) */
1680        for_each_eth_queue(bp, i) {
1681                bp->msix_table[msix_vec].entry = msix_vec;
1682                BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1683                               msix_vec, msix_vec, i);
1684                msix_vec++;
1685        }
1686
1687        DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1688           msix_vec);
1689
1690        rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1691                                   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1692        /*
1693         * reconfigure number of tx/rx queues according to available
1694         * MSI-X vectors
1695         */
1696        if (rc == -ENOSPC) {
1697                /* Get by with single vector */
1698                rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1699                if (rc < 0) {
1700                        BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1701                                       rc);
1702                        goto no_msix;
1703                }
1704
1705                BNX2X_DEV_INFO("Using single MSI-X vector\n");
1706                bp->flags |= USING_SINGLE_MSIX_FLAG;
1707
1708                BNX2X_DEV_INFO("set number of queues to 1\n");
1709                bp->num_ethernet_queues = 1;
1710                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1711        } else if (rc < 0) {
1712                BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1713                goto no_msix;
1714        } else if (rc < msix_vec) {
1715                /* how many fewer vectors do we have? */
1716                int diff = msix_vec - rc;
1717
1718                BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1719
1720                /*
1721                 * decrease number of queues by number of unallocated entries
1722                 */
1723                bp->num_ethernet_queues -= diff;
1724                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1725
1726                BNX2X_DEV_INFO("New queue configuration set: %d\n",
1727                               bp->num_queues);
1728        }
1729
1730        bp->flags |= USING_MSIX_FLAG;
1731
1732        return 0;
1733
1734no_msix:
1735        /* fall back to INTx if there is not enough memory */
1736        if (rc == -ENOMEM)
1737                bp->flags |= DISABLE_MSI_FLAG;
1738
1739        return rc;
1740#endif
1741}
1742
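    /* AKAROS_PORT: CNIC is not wired up on Akaros.  bnx2x_req_msix_irqs()
     * below registers this stub for the CNIC MSI-X slot, so any stray CNIC
     * interrupt is simply logged as a likely bug.
     */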
1743static void bullshit_handler(struct hw_trapframe *hw_tf, void *cnic_turd)
1744{
1745        printk("bnx2x CNIC IRQ fired.  Probably a bug!\n");
1746}
1747
1748static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1749{
1750        int i, offset = 0;
1751        struct irq_handler *irq_h;
1752
1753        /* no default status block for vf */
1754        if (IS_PF(bp)) {
1755                irq_h = register_irq(bp->msix_table[offset++].vector,
1756                                     bnx2x_msix_sp_int, bp->dev,
1757                                     pci_to_tbdf(bp->pdev));
1758                if (!irq_h) {
1759                        BNX2X_ERR("request sp irq failed\n");
1760                        return -EBUSY;
1761                }
1762        }
1763
1764        if (CNIC_SUPPORT(bp)) {
1765                offset++;
1766                // AKAROS_PORT
1767                irq_h = register_irq(0, bullshit_handler, 0,
1768                                     pci_to_tbdf(bp->pdev));
1769                if (!irq_h) {
1770                        BNX2X_ERR("Fucked up getting a CNIC MSIX vector!");
1771                        return -EBUSY;
1772                }
1773        }
1774
1775        for_each_eth_queue(bp, i) {
1776                struct bnx2x_fastpath *fp = &bp->fp[i];
1777                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1778                         bp->dev->name, i);
1779
1780                irq_h = register_irq(bp->msix_table[offset].vector,
1781                                     bnx2x_msix_fp_int, fp,
1782                                     pci_to_tbdf(bp->pdev));
1783                if (!irq_h) {
1784                        BNX2X_ERR("request fp #%d irq (%d) failed\n", i,
1785                              bp->msix_table[offset].vector);
1786                        bnx2x_free_msix_irqs(bp, offset);
1787                        return -EBUSY;
1788                }
1789
1790                offset++;
1791        }
1792
1793        i = BNX2X_NUM_ETH_QUEUES(bp);
1794        if (IS_PF(bp)) {
1795                offset = 1 + CNIC_SUPPORT(bp);
1796                netdev_info(bp->dev,
1797                            "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1798                            bp->msix_table[0].vector,
1799                            0, bp->msix_table[offset].vector,
1800                            i - 1, bp->msix_table[offset + i - 1].vector);
1801        } else {
1802                offset = CNIC_SUPPORT(bp);
1803                netdev_info(bp->dev,
1804                            "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1805                            0, bp->msix_table[offset].vector,
1806                            i - 1, bp->msix_table[offset + i - 1].vector);
1807        }
1808        return 0;
1809}
1810
1811int bnx2x_enable_msi(struct bnx2x *bp)
1812{
1813        panic("Not implemented");
1814#if 0 // AKAROS_PORT
1815        int rc;
1816
1817        rc = pci_enable_msi(bp->pdev);
1818        if (rc) {
1819                BNX2X_DEV_INFO("MSI is not attainable\n");
1820                return -1;
1821        }
1822        bp->flags |= USING_MSI_FLAG;
1823
1824        return 0;
1825#endif
1826}
1827
1828static int bnx2x_req_irq(struct bnx2x *bp)
1829{
1830        unsigned long flags;
1831        panic("Not implemented");
1832#if 0 // AKAROS_PORT
1833        unsigned int irq;
1834
1835        if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1836                flags = 0;
1837        else
1838                flags = IRQF_SHARED;
1839
1840        if (bp->flags & USING_MSIX_FLAG)
1841                irq = bp->msix_table[0].vector;
1842        else
1843                irq = bp->pdev->irq;
1844
1845        return register_irq(irq, bnx2x_interrupt, bp->dev,
1846                            pci_to_tbdf(bp->pdev));
1847#endif
1848}
1849
1850static int bnx2x_setup_irqs(struct bnx2x *bp)
1851{
1852        return bnx2x_req_msix_irqs(bp);
1853#if 0 // AKAROS_PORT we just register_irq
1854        if (bp->flags & USING_MSIX_FLAG &&
1855            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1856                rc = bnx2x_req_msix_irqs(bp);
1857                if (rc)
1858                        return rc;
1859        } else {
1860                rc = bnx2x_req_irq(bp);
1861                if (rc) {
1862                        BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1863                        return rc;
1864                }
1865                if (bp->flags & USING_MSI_FLAG) {
1866                        bp->dev->irq = bp->pdev->irq;
1867                        netdev_info(bp->dev, "using MSI IRQ %d\n",
1868                                    bp->dev->irq);
1869                }
1870                if (bp->flags & USING_MSIX_FLAG) {
1871                        bp->dev->irq = bp->msix_table[0].vector;
1872                        netdev_info(bp->dev, "using MSIX IRQ %d\n",
1873                                    bp->dev->irq);
1874                }
1875        }
1876
1877        return 0;
1878#endif
1879}
1880
1881static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1882{
1883        int i;
1884
1885        for_each_rx_queue_cnic(bp, i) {
1886                bnx2x_fp_init_lock(&bp->fp[i]);
1887                napi_enable(&bnx2x_fp(bp, i, napi));
1888        }
1889}
1890
1891static void bnx2x_napi_enable(struct bnx2x *bp)
1892{
1893        int i;
1894
1895        for_each_eth_queue(bp, i) {
1896                bnx2x_fp_init_lock(&bp->fp[i]);
1897                napi_enable(&bnx2x_fp(bp, i, napi));
1898        }
1899}
1900
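    /* The disable paths below wait for any in-flight poller on each ring to
     * finish: after napi_disable(), they spin on bnx2x_fp_ll_disable(),
     * sleeping 1 ms between checks, before the ring can be torn down.
     */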
1901static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1902{
1903        int i;
1904
1905        for_each_rx_queue_cnic(bp, i) {
1906                napi_disable(&bnx2x_fp(bp, i, napi));
1907                while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1908                        kthread_usleep(1000);
1909        }
1910}
1911
1912static void bnx2x_napi_disable(struct bnx2x *bp)
1913{
1914        int i;
1915
1916        for_each_eth_queue(bp, i) {
1917                napi_disable(&bnx2x_fp(bp, i, napi));
1918                while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1919                        kthread_usleep(1000);
1920        }
1921}
1922
1923void bnx2x_netif_start(struct bnx2x *bp)
1924{
1925        panic("Not implemented");
1926#if 0 // AKAROS_PORT
1927        if (netif_running(bp->dev)) {
1928                bnx2x_napi_enable(bp);
1929                if (CNIC_LOADED(bp))
1930                        bnx2x_napi_enable_cnic(bp);
1931                bnx2x_int_enable(bp);
1932                if (bp->state == BNX2X_STATE_OPEN)
1933                        netif_tx_wake_all_queues(bp->dev);
1934        }
1935#endif
1936}
1937
1938void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1939{
1940        bnx2x_int_disable_sync(bp, disable_hw);
1941        bnx2x_napi_disable(bp);
1942        if (CNIC_LOADED(bp))
1943                bnx2x_napi_disable_cnic(bp);
1944}
1945
1946uint16_t bnx2x_select_queue(struct ether *dev, struct sk_buff *skb,
1947                       void *accel_priv, select_queue_fallback_t fallback)
1948{
1949        panic("Not implemented");
1950#if 0 // AKAROS_PORT
1951        struct bnx2x *bp = netdev_priv(dev);
1952
1953        if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1954                struct ethhdr *hdr = (struct ethhdr *)skb->data;
1955                uint16_t ether_type = be16_to_cpu(hdr->h_proto);
1956
1957                /* Skip VLAN tag if present */
1958                if (ether_type == ETH_P_8021Q) {
1959                        struct vlan_ethhdr *vhdr =
1960                                (struct vlan_ethhdr *)skb->data;
1961
1962                        ether_type = be16_to_cpu(vhdr->h_vlan_encapsulated_proto);
1963                }
1964
1965                /* If ethertype is FCoE or FIP - use FCoE ring */
1966                if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1967                        return bnx2x_fcoe_tx(bp, txq_index);
1968        }
1969
1970        /* select a non-FCoE queue */
1971        return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1972#endif
1973}
1974
1975void bnx2x_set_num_queues(struct bnx2x *bp)
1976{
1977        /* RSS queues */
1978        bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1979
1980        /* override in STORAGE SD modes */
1981        if (IS_MF_STORAGE_ONLY(bp))
1982                bp->num_ethernet_queues = 1;
1983
1984        /* Add special queues */
1985        bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1986        bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1987
1988        BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1989}
1990
1991/**
1992 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1993 *
1994 * @bp:         Driver handle
1995 *
1996 * We currently support at most 16 Tx queues for each CoS, so we will
1997 * allocate a multiple of 16 for the ETH L2 rings, according to the value of
1998 * bp->max_cos.
1999 *
2000 * If there is an FCoE L2 queue, the appropriate Tx queue will have the next
2001 * index after all ETH L2 indices.
2002 *
2003 * If the actual number of Tx queues (for each CoS) is less than 16, there
2004 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
2005 * 16..31, ...) with indices that are not coupled with any real Tx queue.
2006 *
2007 * The proper configuration of skb->queue_mapping is handled by
2008 * bnx2x_select_queue() and __skb_tx_hash().
2009 *
2010 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
2011 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
2012 */
2013static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
2014{
2015        int rc, tx, rx;
2016
2017        tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
2018        rx = BNX2X_NUM_ETH_QUEUES(bp);
2019
2020        /* account for fcoe queue */
2021        if (include_cnic && !NO_FCOE(bp)) {
2022                rx++;
2023                tx++;
2024        }
2025
2026#if 0 // AKAROS_PORT XME: set queues in ether
2027        rc = netif_set_real_num_tx_queues(bp->dev, tx);
2028        if (rc) {
2029                BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2030                return rc;
2031        }
2032        rc = netif_set_real_num_rx_queues(bp->dev, rx);
2033        if (rc) {
2034                BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2035                return rc;
2036        }
2037#else
2038        rc = 0;
2039#endif
2040
2041        DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2042                          tx, rx);
2043
2044        return rc;
2045}
2046
2047static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2048{
2049        int i;
2050
2051        for_each_queue(bp, i) {
2052                struct bnx2x_fastpath *fp = &bp->fp[i];
2053                uint32_t mtu;
2054
2055                /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2056                if (IS_FCOE_IDX(i))
2057                        /*
2058                         * Although no IP frames are expected to arrive on
2059                         * this ring, we still want to add an
2060                         * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2061                         * overrun attack.
2062                         */
2063                        mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2064                else
2065                        mtu = bp->dev->mtu;
2066                /* AKAROS_PORT XME struct block alignment and size issues? */
2067                fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2068                                  IP_HEADER_ALIGNMENT_PADDING +
2069                                  ETH_OVREHEAD +
2070                                  mtu +
2071                                  BNX2X_FW_RX_ALIGN_END;
2072                /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
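                /* rx_frag_size is consumed by bnx2x_frag_alloc(): a non-zero
                 * value presumably selects the page-frag style allocation,
                 * while 0 forces a plain allocation for buffers that would
                 * not fit in a single page.
                 */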
2073                if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2074                        fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2075                else
2076                        fp->rx_frag_size = 0;
2077        }
2078}
2079
2080static int bnx2x_init_rss(struct bnx2x *bp)
2081{
2082        int i;
2083        uint8_t num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2084
2085        /* Prepare the initial contents for the indirection table if RSS is
2086         * enabled
2087         */
2088        for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2089                bp->rss_conf_obj.ind_table[i] =
2090                        bp->fp->cl_id +
2091                        ethtool_rxfh_indir_default(i, num_eth_queues);
2092
2093        /*
2094         * For 57710 and 57711, the SEARCHER configuration (rss_keys) is
2095         * per-port, so if explicit configuration is needed, do it only
2096         * for a PMF.
2097         *
2098         * For 57712 and newer on the other hand it's a per-function
2099         * configuration.
2100         */
2101        return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2102}
2103
2104int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2105              bool config_hash, bool enable)
2106{
2107        struct bnx2x_config_rss_params params = {NULL};
2108
2109        /* Although RSS is meaningless when there is a single HW queue, we
2110         * still need it enabled in order to have HW Rx hash generated.
2111         *
2112         * if (!is_eth_multi(bp))
2113         *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2114         */
2115
2116        params.rss_obj = rss_obj;
2117
2118        __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2119
2120        if (enable) {
2121                __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2122
2123                /* RSS configuration */
2124                __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2125                __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2126                __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2127                __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2128                if (rss_obj->udp_rss_v4)
2129                        __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2130                if (rss_obj->udp_rss_v6)
2131                        __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2132
2133                if (!CHIP_IS_E1x(bp))
2134                        /* valid only for TUNN_MODE_GRE tunnel mode */
2135                        __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
2136        } else {
2137                __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2138        }
2139
2140        /* Hash bits */
2141        params.rss_result_mask = MULTI_MASK;
2142
2143        memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2144
2145        if (config_hash) {
2146                /* RSS keys */
2147                #if 0 // AKAROS_PORT
2148                netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2149                #else
2150                /* Linux picks a random key once and then reuses it here;
2151                 * a constant 0x5a pattern does the job for this port. */
2152                memset(params.rss_key, 0x5a, T_ETH_RSS_KEY * 4);
2153                #endif
2154                __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2155        }
2156
2157        if (IS_PF(bp))
2158                return bnx2x_config_rss(bp, &params);
2159        else
2160                return bnx2x_vfpf_config_rss(bp, &params);
2161}
2162
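    /* Drive the function state machine through the HW_INIT command for the
     * given load phase, waiting for the ramrod to complete (RAMROD_COMP_WAIT)
     * before returning.
     */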
2163static int bnx2x_init_hw(struct bnx2x *bp, uint32_t load_code)
2164{
2165        struct bnx2x_func_state_params func_params = {NULL};
2166
2167        /* Prepare parameters for function state transitions */
2168        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2169
2170        func_params.f_obj = &bp->func_obj;
2171        func_params.cmd = BNX2X_F_CMD_HW_INIT;
2172
2173        func_params.params.hw_init.load_phase = load_code;
2174
2175        return bnx2x_func_state_change(bp, &func_params);
2176}
2177
2178/*
2179 * Cleans the objects that have internal lists, without sending
2180 * ramrods. Should be run with interrupts disabled.
2181 */
2182void bnx2x_squeeze_objects(struct bnx2x *bp)
2183{
2184        int rc;
2185        unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2186        struct bnx2x_mcast_ramrod_params rparam = {NULL};
2187        struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2188
2189        /***************** Cleanup MACs' object first *************************/
2190
2191        /* Wait for completion of the requested commands */
2192        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2193        /* Perform a dry cleanup */
2194        __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2195
2196        /* Clean ETH primary MAC */
2197        __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2198        rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2199                                 &ramrod_flags);
2200        if (rc != 0)
2201                BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2202
2203        /* Cleanup UC list */
2204        vlan_mac_flags = 0;
2205        __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2206        rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2207                                 &ramrod_flags);
2208        if (rc != 0)
2209                BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2210
2211        /***************** Now clean mcast object *****************************/
2212        rparam.mcast_obj = &bp->mcast_obj;
2213        __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2214
2215        /* Add a DEL command... - Since we're doing a driver cleanup only,
2216         * we take a lock surrounding both the initial send and the CONTs,
2217         * as we don't want a true completion to disrupt us in the middle.
2218         */
2219        qlock(&bp->dev->qlock);
2220        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2221        if (rc < 0)
2222                BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2223                          rc);
2224
2225        /* ...and wait until all pending commands are cleared */
2226        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2227        while (rc != 0) {
2228                if (rc < 0) {
2229                        BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2230                                  rc);
2231                        qunlock(&bp->dev->qlock);
2232                        return;
2233                }
2234
2235                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2236        }
2237        qunlock(&bp->dev->qlock);
2238}
2239
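/* Error-unwind helpers for the load paths below: normally they record the
 * error state and jump to the given cleanup label; when BNX2X_STOP_ON_ERROR
 * is defined they flag a panic and bail out with -EBUSY instead.
 */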
2240#ifndef BNX2X_STOP_ON_ERROR
2241#define LOAD_ERROR_EXIT(bp, label) \
2242        do { \
2243                (bp)->state = BNX2X_STATE_ERROR; \
2244                goto label; \
2245        } while (0)
2246
2247#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2248        do { \
2249                bp->cnic_loaded = false; \
2250                goto label; \
2251        } while (0)
2252#else /*BNX2X_STOP_ON_ERROR*/
2253#define LOAD_ERROR_EXIT(bp, label) \
2254        do { \
2255                (bp)->state = BNX2X_STATE_ERROR; \
2256                (bp)->panic = 1; \
2257                return -EBUSY; \
2258        } while (0)
2259#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2260        do { \
2261                bp->cnic_loaded = false; \
2262                (bp)->panic = 1; \
2263                return -EBUSY; \
2264        } while (0)
2265#endif /*BNX2X_STOP_ON_ERROR*/
2266
2267static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2268{
2269        BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2270                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2271        return;
2272}
2273
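    /* The FW statistics request block and data block live in one DMA
     * allocation: the request header plus its command groups come first,
     * immediately followed by the data area.  The shortcut pointers and
     * mappings set near the end of this function are carved out of that
     * single buffer.
     */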
2274static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2275{
2276        int num_groups, vf_headroom = 0;
2277        int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2278
2279        /* number of queues for statistics is number of eth queues + FCoE */
2280        uint8_t num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2281
2282        /* Total number of FW statistics requests =
2283         * 1 for port stats + 1 for PF stats + 1 for FCoE proper stats (if
2284         * FCoE is enabled) + num of queue stats (which already includes one
2285         * for the FCoE L2 queue if applicable)
2286         */
2287        bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2288
2289        /* vf stats appear in the request list, but their data is allocated by
2290         * the VFs themselves. We don't include them in the bp->fw_stats_num as
2291         * it is used to determine where to place the vf stats queries in the
2292         * request struct
2293         */
2294        if (IS_SRIOV(bp))
2295                vf_headroom = bnx2x_vf_headroom(bp);
2296
2297        /* Request is built from stats_query_header and an array of
2298         * stats_query_cmd_group, each of which contains
2299         * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2300         * configured in the stats_query_header.
2301         */
2302        num_groups =
2303                (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2304                 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2305                 1 : 0));
2306
2307        DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2308           bp->fw_stats_num, vf_headroom, num_groups);
2309        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2310                num_groups * sizeof(struct stats_query_cmd_group);
2311
2312        /* Data for statistics requests + stats_counter
2313         * stats_counter holds per-STORM counters that are incremented
2314         * when STORM has finished with the current request.
2315         * Memory for FCoE offloaded statistics is counted anyway,
2316         * even if it will not be sent.
2317         * VF stats are not accounted for here as the data of VF stats is stored
2318         * in memory allocated by the VF, not here.
2319         */
2320        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2321                sizeof(struct per_pf_stats) +
2322                sizeof(struct fcoe_statistics_params) +
2323                sizeof(struct per_queue_stats) * num_queue_stats +
2324                sizeof(struct stats_counter);
2325
2326        bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2327                                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2328        if (!bp->fw_stats)
2329                goto alloc_mem_err;
2330
2331        /* Set shortcuts */
2332        bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2333        bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2334        bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2335                ((uint8_t *)bp->fw_stats + bp->fw_stats_req_sz);
2336        bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2337                bp->fw_stats_req_sz;
2338
2339        DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2340           U64_HI(bp->fw_stats_req_mapping),
2341           U64_LO(bp->fw_stats_req_mapping));
2342        DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2343           U64_HI(bp->fw_stats_data_mapping),
2344           U64_LO(bp->fw_stats_data_mapping));
2345        return 0;
2346
2347alloc_mem_err:
2348        bnx2x_free_fw_stats_mem(bp);
2349        BNX2X_ERR("Can't allocate FW stats memory\n");
2350        return -ENOMEM;
2351}
2352
2353/* send load request to mcp and analyze response */
2354static int bnx2x_nic_load_request(struct bnx2x *bp, uint32_t *load_code)
2355{
2356        uint32_t param;
2357
2358        /* init fw_seq */
2359        bp->fw_seq =
2360                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2361                 DRV_MSG_SEQ_NUMBER_MASK);
2362        BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2363
2364        /* Get current FW pulse sequence */
2365        bp->fw_drv_pulse_wr_seq =
2366                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2367                 DRV_PULSE_SEQ_MASK);
2368        BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2369
2370        param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2371
2372        if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2373                param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2374
2375        /* load request */
2376        (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2377
2378        /* if mcp fails to respond we must abort */
2379        if (!(*load_code)) {
2380                BNX2X_ERR("MCP response failure, aborting\n");
2381                return -EBUSY;
2382        }
2383
2384        /* If mcp refused (e.g. other port is in diagnostic mode) we
2385         * must abort
2386         */
2387        if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2388                BNX2X_ERR("MCP refused load request, aborting\n");
2389                return -EBUSY;
2390        }
2391        return 0;
2392}
2393
2394/* check whether another PF has already loaded FW to chip. In
2395 * virtualized environments a pf from another VM may have already
2396 * initialized the device including loading FW
2397 */
2398int bnx2x_compare_fw_ver(struct bnx2x *bp, uint32_t load_code,
2399                         bool print_err)
2400{
2401        /* is another pf loaded on this engine? */
2402        if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2403            load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2404                /* build my FW version dword */
2405                uint32_t my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2406                        (BCM_5710_FW_MINOR_VERSION << 8) +
2407                        (BCM_5710_FW_REVISION_VERSION << 16) +
2408                        (BCM_5710_FW_ENGINEERING_VERSION << 24);
2409
2410                /* read loaded FW from chip */
2411                uint32_t loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2412
2413                DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2414                   loaded_fw, my_fw);
2415
2416                /* abort nic load if version mismatch */
2417                if (my_fw != loaded_fw) {
2418                        if (print_err)
2419                                BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2420                                          loaded_fw, my_fw);
2421                        else
2422                                BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2423                                               loaded_fw, my_fw);
2424                        return -EBUSY;
2425                }
2426        }
2427        return 0;
2428}
2429
2430/* returns the "mcp load_code" according to global load_count array */
2431static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2432{
2433        int path = BP_PATH(bp);
2434
2435        DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2436           path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2437           bnx2x_load_count[path][2]);
2438        bnx2x_load_count[path][0]++;
2439        bnx2x_load_count[path][1 + port]++;
2440        DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2441           path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2442           bnx2x_load_count[path][2]);
2443        if (bnx2x_load_count[path][0] == 1)
2444                return FW_MSG_CODE_DRV_LOAD_COMMON;
2445        else if (bnx2x_load_count[path][1 + port] == 1)
2446                return FW_MSG_CODE_DRV_LOAD_PORT;
2447        else
2448                return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2449}
2450
2451/* mark PMF if applicable */
2452static void bnx2x_nic_load_pmf(struct bnx2x *bp, uint32_t load_code)
2453{
2454        if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2455            (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2456            (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2457                bp->port.pmf = 1;
2458                /* We need the barrier to ensure the ordering between the
2459                 * writing to bp->port.pmf here and reading it from the
2460                 * bnx2x_periodic_task().
2461                 */
2462                mb();
2463        } else {
2464                bp->port.pmf = 0;
2465        }
2466
2467        DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2468}
2469
2470static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2471{
2472        if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2473             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2474            (bp->common.shmem2_base)) {
2475                if (SHMEM2_HAS(bp, dcc_support))
2476                        SHMEM2_WR(bp, dcc_support,
2477                                  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2478                                   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2479                if (SHMEM2_HAS(bp, afex_driver_support))
2480                        SHMEM2_WR(bp, afex_driver_support,
2481                                  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2482        }
2483
2484        /* Set AFEX default VLAN tag to an invalid value */
2485        bp->afex_def_vlan_tag = -1;
2486}
2487
2488/**
2489 * bnx2x_bz_fp - zero content of the fastpath structure.
2490 *
2491 * @bp:         driver handle
2492 * @index:      fastpath index to be zeroed
2493 *
2494 * Makes sure the contents of the bp->fp[index].napi is kept
2495 * intact.
2496 */
2497static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2498{
2499        struct bnx2x_fastpath *fp = &bp->fp[index];
2500        int cos;
2501        struct napi_struct orig_napi = fp->napi;
2502        struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2503
2504        /* bzero bnx2x_fastpath contents */
2505        if (fp->tpa_info)
2506                memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2507                       sizeof(struct bnx2x_agg_info));
2508        memset(fp, 0, sizeof(*fp));
2509
2510        /* AKAROS_PORT: let the code set up whatever fake napi stuff it needs */
2511        /* Restore the NAPI object as it has been already initialized */
2512        fp->napi = orig_napi;
2513        fp->tpa_info = orig_tpa_info;
2514        fp->bp = bp;
2515        fp->index = index;
2516        if (IS_ETH_FP(fp))
2517                fp->max_cos = bp->max_cos;
2518        else
2519                /* Special queues support only one CoS */
2520                fp->max_cos = 1;
2521
2522        /* Init txdata pointers */
2523        if (IS_FCOE_FP(fp))
2524                fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2525        if (IS_ETH_FP(fp))
2526                for_each_cos_in_tx_queue(fp, cos)
2527                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2528                                BNX2X_NUM_ETH_QUEUES(bp) + index];
2529
2530        /* set the tpa flag for each queue. The tpa flag determines the queue
2531         * minimal size so it must be set prior to queue memory allocation
2532         */
2533        fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2534                                  (bp->flags & GRO_ENABLE_FLAG &&
2535                                   bnx2x_mtu_allows_gro(bp->dev->mtu)));
2536        if (bp->flags & TPA_ENABLE_FLAG)
2537                fp->mode = TPA_MODE_LRO;
2538        else if (bp->flags & GRO_ENABLE_FLAG)
2539                fp->mode = TPA_MODE_GRO;
2540
2541        /* We don't want TPA on an FCoE L2 ring */
2542        if (IS_FCOE_FP(fp))
2543                fp->disable_tpa = 1;
2544}
2545
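    /* CNIC (offload) load path: allocate the CNIC fastpath memory, account
     * for the extra queues, bring up their NAPI contexts, init the HW/SW
     * state for them, and finally notify the CNIC driver that it may start.
     */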
2546int bnx2x_load_cnic(struct bnx2x *bp)
2547{
2548        int i, rc, port = BP_PORT(bp);
2549
2550        DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2551
2552        qlock_init(&bp->cnic_mutex);
2553
2554        if (IS_PF(bp)) {
2555                rc = bnx2x_alloc_mem_cnic(bp);
2556                if (rc) {
2557                        BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2558                        LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2559                }
2560        }
2561
2562        rc = bnx2x_alloc_fp_mem_cnic(bp);
2563        if (rc) {
2564                BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2565                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2566        }
2567
2568        /* Update the number of queues with the cnic queues */
2569        rc = bnx2x_set_real_num_queues(bp, 1);
2570        if (rc) {
2571                BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2572                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2573        }
2574
2575        /* Add all CNIC NAPI objects */
2576        bnx2x_add_all_napi_cnic(bp);
2577        DP(NETIF_MSG_IFUP, "cnic napi added\n");
2578        bnx2x_napi_enable_cnic(bp);
2579
2580        rc = bnx2x_init_hw_func_cnic(bp);
2581        if (rc)
2582                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2583
2584        bnx2x_nic_init_cnic(bp);
2585
2586        if (IS_PF(bp)) {
2587                /* Enable Timer scan */
2588                REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2589
2590                /* setup cnic queues */
2591                for_each_cnic_queue(bp, i) {
2592                        rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2593                        if (rc) {
2594                                BNX2X_ERR("Queue setup failed\n");
2595                                LOAD_ERROR_EXIT(bp, load_error_cnic2);
2596                        }
2597                }
2598        }
2599
2600        /* Initialize Rx filter. */
2601        bnx2x_set_rx_mode_inner(bp);
2602
2603        /* re-read iscsi info */
2604        bnx2x_get_iscsi_info(bp);
2605        bnx2x_setup_cnic_irq_info(bp);
2606        bnx2x_setup_cnic_info(bp);
2607        bp->cnic_loaded = true;
2608        if (bp->state == BNX2X_STATE_OPEN)
2609                bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2610
2611        DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2612
2613        return 0;
2614
2615#ifndef BNX2X_STOP_ON_ERROR
2616load_error_cnic2:
2617        /* Disable Timer scan */
2618        REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2619
2620load_error_cnic1:
2621        bnx2x_napi_disable_cnic(bp);
2622        /* Update the number of queues without the cnic queues */
2623        if (bnx2x_set_real_num_queues(bp, 0))
2624                BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2625load_error_cnic0:
2626        BNX2X_ERR("CNIC-related load failed\n");
2627        bnx2x_free_fp_mem_cnic(bp);
2628        bnx2x_free_mem_cnic(bp);
2629        return rc;
2630#endif /* ! BNX2X_STOP_ON_ERROR */
2631}
2632
2633/* must be called with rtnl_lock */
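/* (AKAROS_PORT: there is no rtnl here; the caller presumably holds the
 * corresponding qlock on the ether device instead.)
 */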
2634int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2635{
2636        int port = BP_PORT(bp);
2637        int i, rc = 0;
2638        uint32_t load_code = 0;
2639
2640        DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2641        DP(NETIF_MSG_IFUP,
2642           "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2643
2644#ifdef BNX2X_STOP_ON_ERROR
2645        if (unlikely(bp->panic)) {
2646                BNX2X_ERR("Can't load NIC when there is panic\n");
2647                return -EPERM;
2648        }
2649#endif
2650
2651        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2652
2653        /* zero the structure w/o any lock, before SP handler is initialized */
2654        memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2655        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2656                &bp->last_reported_link.link_report_flags);
2657
2658        if (IS_PF(bp))
2659                /* must be called before memory allocation and HW init */
2660                bnx2x_ilt_set_info(bp);
2661
2662        /*
2663         * Zero fastpath structures preserving invariants like napi, which are
2664         * allocated only once, fp index, max_cos, bp pointer.
2665         * Also set fp->disable_tpa and txdata_ptr.
2666         */
2667        DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2668        for_each_queue(bp, i)
2669                bnx2x_bz_fp(bp, i);
2670        memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2671                                  bp->num_cnic_queues) *
2672                                  sizeof(struct bnx2x_fp_txdata));
2673
2674        bp->fcoe_init = false;
2675
2676        /* Set the receive queues buffer size */
2677        bnx2x_set_rx_buf_size(bp);
2678
2679        if (IS_PF(bp)) {
2680                rc = bnx2x_alloc_mem(bp);
2681                if (rc) {
2682                        BNX2X_ERR("Unable to allocate bp memory\n");
2683                        return rc;
2684                }
2685        }
2686
2687        /* needs to be done after alloc mem, since it self-adjusts to the
2688         * amount of memory available for RSS queues
2689         */
2690        rc = bnx2x_alloc_fp_mem(bp);
2691        if (rc) {
2692                BNX2X_ERR("Unable to allocate memory for fps\n");
2693                LOAD_ERROR_EXIT(bp, load_error0);
2694        }
2695
2696        /* Allocate memory for FW statistics */
2697        if (bnx2x_alloc_fw_stats_mem(bp))
2698                LOAD_ERROR_EXIT(bp, load_error0);
2699
2700        /* request pf to initialize status blocks */
2701        if (IS_VF(bp)) {
2702                rc = bnx2x_vfpf_init(bp);
2703                if (rc)
2704                        LOAD_ERROR_EXIT(bp, load_error0);
2705        }
2706
2707        /* Since bnx2x_alloc_mem() may update bp->num_queues,
2708         * bnx2x_set_real_num_queues() should always come after it.
2709         * At this stage cnic queues are not counted.
2710         */
2711        rc = bnx2x_set_real_num_queues(bp, 0);
2712        if (rc) {
2713                BNX2X_ERR("Unable to set real_num_queues\n");
2714                LOAD_ERROR_EXIT(bp, load_error0);
2715        }
2716
2717        /* Configure multi-CoS mappings in the kernel.  This configuration
2718         * may be overridden by a multi-class queue discipline or by a
2719         * DCBX negotiation result.
2720         */
2721        bnx2x_setup_tc(bp->dev, bp->max_cos);
2722
2723        /* Add all NAPI objects */
2724        bnx2x_add_all_napi(bp);
2725        DP(NETIF_MSG_IFUP, "napi added\n");
2726        bnx2x_napi_enable(bp);
2727
2728        if (IS_PF(bp)) {
2729                /* set pf load just before approaching the MCP */
2730                bnx2x_set_pf_load(bp);
2731
2732                /* if mcp exists send load request and analyze response */
2733                if (!BP_NOMCP(bp)) {
2734                        /* attempt to load pf */
2735                        rc = bnx2x_nic_load_request(bp, &load_code);
2736                        if (rc)
2737                                LOAD_ERROR_EXIT(bp, load_error1);
2738
2739                        /* what did mcp say? */
2740                        rc = bnx2x_compare_fw_ver(bp, load_code, true);
2741                        if (rc) {
2742                                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2743                                LOAD_ERROR_EXIT(bp, load_error2);
2744                        }
2745                } else {
2746                        load_code = bnx2x_nic_load_no_mcp(bp, port);
2747                }
2748
2749                /* mark pmf if applicable */
2750                bnx2x_nic_load_pmf(bp, load_code);
2751
2752                /* Init Function state controlling object */
2753                bnx2x__init_func_obj(bp);
2754
2755                /* Initialize HW */
2756                rc = bnx2x_init_hw(bp, load_code);
2757                if (rc) {
2758                        BNX2X_ERR("HW init failed, aborting\n");
2759                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2760                        LOAD_ERROR_EXIT(bp, load_error2);
2761                }
2762        }
2763
2764        bnx2x_pre_irq_nic_init(bp);
2765
2766        /* Connect to IRQs */
2767        rc = bnx2x_setup_irqs(bp);
2768        if (rc) {
2769                BNX2X_ERR("setup irqs failed\n");
2770                if (IS_PF(bp))
2771                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2772                LOAD_ERROR_EXIT(bp, load_error2);
2773        }
2774
2775        /* Init per-function objects */
2776        if (IS_PF(bp)) {
2777                /* Setup NIC internals and enable interrupts */
2778                bnx2x_post_irq_nic_init(bp, load_code);
2779
2780                bnx2x_init_bp_objs(bp);
2781                bnx2x_iov_nic_init(bp);
2782
2783                /* Set AFEX default VLAN tag to an invalid value */
2784                bp->afex_def_vlan_tag = -1;
2785                bnx2x_nic_load_afex_dcc(bp, load_code);
2786                bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2787                rc = bnx2x_func_start(bp);
2788                if (rc) {
2789                        BNX2X_ERR("Function start failed!\n");
2790                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2791
2792                        LOAD_ERROR_EXIT(bp, load_error3);
2793                }
2794
2795                /* Send LOAD_DONE command to MCP */
2796                if (!BP_NOMCP(bp)) {
2797                        load_code = bnx2x_fw_command(bp,
2798                                                     DRV_MSG_CODE_LOAD_DONE, 0);
2799                        if (!load_code) {
2800                                BNX2X_ERR("MCP response failure, aborting\n");
2801                                rc = -EBUSY;
2802                                LOAD_ERROR_EXIT(bp, load_error3);
2803                        }
2804                }
2805
2806                /* initialize FW coalescing state machines in RAM */
2807                bnx2x_update_coalesce(bp);
2808        }
2809
2810        /* setup the leading queue */
2811        rc = bnx2x_setup_leading(bp);
2812        if (rc) {
2813                BNX2X_ERR("Setup leading failed!\n");
2814                LOAD_ERROR_EXIT(bp, load_error3);
2815        }
2816
2817        /* set up the rest of the queues */
2818        for_each_nondefault_eth_queue(bp, i) {
2819                if (IS_PF(bp))
2820                        rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2821                else /* VF */
2822                        rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2823                if (rc) {
2824                        BNX2X_ERR("Queue %d setup failed\n", i);
2825                        LOAD_ERROR_EXIT(bp, load_error3);
2826                }
2827        }
2828
2829        /* setup rss */
2830        rc = bnx2x_init_rss(bp);
2831        if (rc) {
2832                BNX2X_ERR("PF RSS init failed\n");
2833                LOAD_ERROR_EXIT(bp, load_error3);
2834        }
2835
2836        /* Now that the clients are configured, we are ready to work */
2837        bp->state = BNX2X_STATE_OPEN;
2838
2839        /* Configure a ucast MAC */
2840        if (IS_PF(bp))
2841                rc = bnx2x_set_eth_mac(bp, true);
2842        else /* vf */
2843                rc = bnx2x_vfpf_config_mac(bp, bp->dev->ea, bp->fp->index,
2844                                           true);
2845        if (rc) {
2846                BNX2X_ERR("Setting Ethernet MAC failed\n");
2847                LOAD_ERROR_EXIT(bp, load_error3);
2848        }
2849
2850        if (IS_PF(bp) && bp->pending_max) {
2851                bnx2x_update_max_mf_config(bp, bp->pending_max);
2852                bp->pending_max = 0;
2853        }
2854
2855        if (bp->port.pmf) {
2856                rc = bnx2x_initial_phy_init(bp, load_mode);
2857                if (rc)
2858                        LOAD_ERROR_EXIT(bp, load_error3);
2859        }
2860        bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2861
2862        /* Start fast path */
2863
2864        /* Initialize Rx filter. */
2865        bnx2x_set_rx_mode_inner(bp);
2866
2867        /* Start Tx */
2868        switch (load_mode) {
2869        case LOAD_NORMAL:
2870                /* Tx queues should only be re-enabled */
2871                netif_tx_wake_all_queues(bp->dev);
2872                break;
2873
2874        case LOAD_OPEN:
2875                netif_tx_start_all_queues(bp->dev);
2876                cmb();
2877                break;
2878
2879        case LOAD_DIAG:
2880        case LOAD_LOOPBACK_EXT:
2881                bp->state = BNX2X_STATE_DIAG;
2882                break;
2883
2884        default:
2885                break;
2886        }
2887
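            /* The PMF refreshes the shared driver flags; non-PMF functions
             * only refresh their link status. */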
2888        if (bp->port.pmf)
2889                bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2890        else
2891                bnx2x__link_status_update(bp);
2892
2893        /* start the timer */
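            /* AKAROS_PORT: the driver timer is an alarm armed on core 0's
             * timer chain rather than a Linux kernel timer. */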
2894        set_awaiter_rel(&bp->timer, bp->current_interval * 1000); // fudge
2895        set_alarm(&per_cpu_info[0].tchain, &bp->timer);
2896
2897        if (CNIC_ENABLED(bp))
2898                bnx2x_load_cnic(bp);
2899
2900        if (IS_PF(bp))
2901                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2902
2903        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2904                /* mark driver is loaded in shmem2 */
2905                uint32_t val;
2906                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2907                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2908                          val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2909                          DRV_FLAGS_CAPABILITIES_LOADED_L2);
2910        }
2911
2912        /* Wait for all pending SP commands to complete */
2913        if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2914                BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2915                bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2916                return -EBUSY;
2917        }
2918
2919        /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2920        if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2921                bnx2x_dcbx_init(bp, false);
2922
2923        DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2924
2925        return 0;
2926
2927#ifndef BNX2X_STOP_ON_ERROR
2928load_error3:
2929        if (IS_PF(bp)) {
2930                bnx2x_int_disable_sync(bp, 1);
2931
2932                /* Clean queueable objects */
2933                bnx2x_squeeze_objects(bp);
2934        }
2935
2936        /* Free SKBs, SGEs, TPA pool and driver internals */
2937        bnx2x_free_skbs(bp);
2938        for_each_rx_queue(bp, i)
2939                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2940
2941        /* Release IRQs */
2942        bnx2x_free_irq(bp);
2943load_error2:
2944        if (IS_PF(bp) && !BP_NOMCP(bp)) {
2945                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2946                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2947        }
2948
2949        bp->port.pmf = 0;
2950load_error1:
2951        bnx2x_napi_disable(bp);
2952        bnx2x_del_all_napi(bp);
2953
2954        /* clear pf_load status, as it was already set */
2955        if (IS_PF(bp))
2956                bnx2x_clear_pf_load(bp);
2957load_error0:
2958        bnx2x_free_fw_stats_mem(bp);
2959        bnx2x_free_fp_mem(bp);
2960        bnx2x_free_mem(bp);
2961
2962        return rc;
2963#endif /* ! BNX2X_STOP_ON_ERROR */
2964}
2965
2966int bnx2x_drain_tx_queues(struct bnx2x *bp)
2967{
2968        uint8_t rc = 0, cos, i;
2969
2970        /* Wait until tx fastpath tasks complete */
2971        for_each_tx_queue(bp, i) {
2972                struct bnx2x_fastpath *fp = &bp->fp[i];
2973
2974                for_each_cos_in_tx_queue(fp, cos)
2975                        rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2976                if (rc)
2977                        return rc;
2978        }
2979        return 0;
2980}
2981
2982/* must be called with rtnl_lock */
2983int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2984{
2985        panic("Not implemented");
2986#if 0 // AKAROS_PORT
2987        int i;
2988        bool global = false;
2989
2990        DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2991
2992        /* mark driver is unloaded in shmem2 */
2993        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2994                uint32_t val;
2995                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2996                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2997                          val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2998        }
2999
3000        if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
3001            (bp->state == BNX2X_STATE_CLOSED ||
3002             bp->state == BNX2X_STATE_ERROR)) {
3003                /* We can get here if the driver has been unloaded
3004                 * during parity error recovery and is either waiting for a
3005                 * leader to complete or for other functions to unload and
3006                 * then ifdown has been issued. In this case we want to
3007                 * unload and let other functions complete the recovery
3008                 * process.
3009                 */
3010                bp->recovery_state = BNX2X_RECOVERY_DONE;
3011                bp->is_leader = 0;
3012                bnx2x_release_leader_lock(bp);
3013                mb();
3014
3015                DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3016                BNX2X_ERR("Can't unload in closed or error state\n");
3017                return -EINVAL;
3018        }
3019
3020        /* Nothing to do during unload if previous bnx2x_nic_load()
3021         * has not completed successfully - all resources are released.
3022         *
3023         * We can get here only after an unsuccessful ndo_* callback, during
3024         * which the dev->IFF_UP flag is still on.
3025         */
3026        if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3027                return 0;
3028
3029        /* It's important to set the bp->state to a value different from
3030         * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3031         * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3032         */
3033        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3034        mb();
3035
3036        /* indicate to VFs that the PF is going down */
3037        bnx2x_iov_channel_down(bp);
3038
3039        if (CNIC_LOADED(bp))
3040                bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3041
3042        /* Stop Tx */
3043        bnx2x_tx_disable(bp);
3044        netdev_reset_tc(bp->dev);
3045
3046        bp->rx_mode = BNX2X_RX_MODE_NONE;
3047
3048        del_timer_sync(&bp->timer);
3049
3050        if (IS_PF(bp)) {
3051                /* Set ALWAYS_ALIVE bit in shmem */
3052                bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3053                bnx2x_drv_pulse(bp);
3054                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3055                bnx2x_save_statistics(bp);
3056        }
3057
3058        /* wait till consumers catch up with producers in all queues */
3059        bnx2x_drain_tx_queues(bp);
3060
3061        /* if VF, indicate to PF that this function is going down (the PF
3062         * will delete sp elements and clear initializations)
3063         */
3064        if (IS_VF(bp))
3065                bnx2x_vfpf_close_vf(bp);
3066        else if (unload_mode != UNLOAD_RECOVERY)
3067                /* if this is a normal/close unload, we need to clean up the chip */
3068                bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3069        else {
3070                /* Send the UNLOAD_REQUEST to the MCP */
3071                bnx2x_send_unload_req(bp, unload_mode);
3072
3073                /* Prevent transactions to the host from the functions on the
3074                 * engine that doesn't reset global blocks in case of a global
3075                 * attention, once global blocks are reset and gates are opened
3076                 * (i.e. the engine whose leader will perform the recovery
3077                 * last).
3078                 */
3079                if (!CHIP_IS_E1x(bp))
3080                        bnx2x_pf_disable(bp);
3081
3082                /* Disable HW interrupts, NAPI */
3083                bnx2x_netif_stop(bp, 1);
3084                /* Delete all NAPI objects */
3085                bnx2x_del_all_napi(bp);
3086                if (CNIC_LOADED(bp))
3087                        bnx2x_del_all_napi_cnic(bp);
3088                /* Release IRQs */
3089                bnx2x_free_irq(bp);
3090
3091                /* Report UNLOAD_DONE to MCP */
3092                bnx2x_send_unload_done(bp, false);
3093        }
3094
3095        /*
3096         * At this stage no more interrupts will arrive so we may safely clean
3097         * the queueable objects here in case they failed to get cleaned so far.
3098         */
3099        if (IS_PF(bp))
3100                bnx2x_squeeze_objects(bp);
3101
3102        /* There should be no more pending SP commands at this stage */
3103        bp->sp_state = 0;
3104
3105        bp->port.pmf = 0;
3106
3107        /* clear pending work in rtnl task */
3108        bp->sp_rtnl_state = 0;
3109        mb();
3110
3111        /* Free SKBs, SGEs, TPA pool and driver internals */
3112        bnx2x_free_skbs(bp);
3113        if (CNIC_LOADED(bp))
3114                bnx2x_free_skbs_cnic(bp);
3115        for_each_rx_queue(bp, i)
3116                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3117
3118        bnx2x_free_fp_mem(bp);
3119        if (CNIC_LOADED(bp))
3120                bnx2x_free_fp_mem_cnic(bp);
3121
3122        if (IS_PF(bp)) {
3123                if (CNIC_LOADED(bp))
3124                        bnx2x_free_mem_cnic(bp);
3125        }
3126        bnx2x_free_mem(bp);
3127
3128        bp->state = BNX2X_STATE_CLOSED;
3129        bp->cnic_loaded = false;
3130
3131        /* Clear driver version indication in shmem */
3132        if (IS_PF(bp))
3133                bnx2x_update_mng_version(bp);
3134
3135        /* Check if there are pending parity attentions. If there are - set
3136         * RECOVERY_IN_PROGRESS.
3137         */
3138        if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3139                bnx2x_set_reset_in_progress(bp);
3140
3141                /* Set RESET_IS_GLOBAL if needed */
3142                if (global)
3143                        bnx2x_set_reset_global(bp);
3144        }
3145
3146        /* The last driver must disable "close the gate" if there is no
3147         * parity attention or "process kill" pending.
3148         */
3149        if (IS_PF(bp) &&
3150            !bnx2x_clear_pf_load(bp) &&
3151            bnx2x_reset_is_done(bp, BP_PATH(bp)))
3152                bnx2x_disable_close_the_gate(bp);
3153
3154        DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3155
3156        return 0;
3157#endif
3158}
3159
3160#if 0 // AKAROS_PORT
3161int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3162{
3163        uint16_t pmcsr;
3164
3165        /* If there is no power capability, silently succeed */
3166        if (!bp->pdev->pm_cap) {
3167                BNX2X_DEV_INFO("No power capability. Breaking.\n");
3168                return 0;
3169        }
3170
3171        pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3172
3173        switch (state) {
3174        case PCI_D0:
3175                pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3176                                      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3177                                       PCI_PM_CTRL_PME_STATUS));
3178
3179                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3180                        /* delay required during transition out of D3hot */
3181                        kthread_usleep(1000 * 20);
3182                break;
3183
3184        case PCI_D3hot:
3185                /* If there are other clients above, don't
3186                   shut down the power */
3187                if (atomic_read(&bp->pdev->enable_cnt) != 1)
3188                        return 0;
3189                /* Don't shut down the power for emulation and FPGA */
3190                if (CHIP_REV_IS_SLOW(bp))
3191                        return 0;
3192
3193                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3194                pmcsr |= 3;
3195
3196                if (bp->wol)
3197                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3198
3199                pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3200                                      pmcsr);
3201
3202                /* No more memory access after this point until
3203                 * device is brought back to D0.
3204                 */
3205                break;
3206
3207        default:
3208                dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3209                return -EINVAL;
3210        }
3211        return 0;
3212}
3213#endif
3214
3215/*
3216 * net_device service functions
3217 */
3218static void bnx2x_poll(uint32_t srcid, long a0, long a1, long a2)
3219{
3220        struct bnx2x_fastpath *fp = (struct bnx2x_fastpath*)a0;
3221        int work_done = 0;
3222        int budget = INT32_MAX; // AKAROS_PORT  comes from napi; just let it run
3223        uint8_t cos;
3224        struct bnx2x *bp = fp->bp;
3225
3226        while (1) {
3227#ifdef BNX2X_STOP_ON_ERROR
3228                if (unlikely(bp->panic)) {
3229                        napi_complete(napi);
3230                        return;
3231                }
3232#endif
3233                if (!bnx2x_fp_lock_napi(fp))
3234                        return;
3235
3236                for_each_cos_in_tx_queue(fp, cos)
3237                        if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3238                                bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3239
3240                if (bnx2x_has_rx_work(fp)) {
3241                        work_done += bnx2x_rx_int(fp, budget - work_done);
3242
3243                        /* must not complete if we consumed full budget */
3244                        if (work_done >= budget) {
3245                                bnx2x_fp_unlock_napi(fp);
3246                                break;
3247                        }
3248                }
3249
3250                /* Fall out from the NAPI loop if needed */
3251                if (!bnx2x_fp_unlock_napi(fp) &&
3252                    !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3253
3254                        /* No need to update SB for FCoE L2 ring as long as
3255                         * it's connected to the default SB and the SB
3256                         * has been updated when NAPI was scheduled.
3257                         */
3258                        if (IS_FCOE_FP(fp)) {
3259                                napi_complete(napi);
3260                                break;
3261                        }
3262                        bnx2x_update_fpsb_idx(fp);
3263                        /* bnx2x_has_rx_work() reads the status block,
3264                         * thus we need to ensure that status block indices
3265                         * have been actually read (bnx2x_update_fpsb_idx)
3266                         * prior to this check (bnx2x_has_rx_work) so that
3267                         * we won't write the "newer" value of the status block
3268                         * to IGU (if there was a DMA right after
3269                         * bnx2x_has_rx_work and if there is no rmb, the memory
3270                         * reading (bnx2x_update_fpsb_idx) may be postponed
3271                         * to right before bnx2x_ack_sb). In this case there
3272                         * will never be another interrupt until there is
3273                         * another update of the status block, while there
3274                         * is still unhandled work.
3275                         */
3276                        rmb();
3277
3278                        if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3279                                napi_complete(napi);
3280                                /* Re-enable interrupts */
3281                                DP(NETIF_MSG_RX_STATUS,
3282                                   "Update index to %d\n", fp->fp_hc_idx);
3283                                bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3284                                             le16_to_cpu(fp->fp_hc_idx),
3285                                             IGU_INT_ENABLE, 1);
3286                                break;
3287                        }
3288                }
3289        }
3290}
3291
3292#ifdef CONFIG_NET_RX_BUSY_POLL
3293/* must be called with local_bh_disable()d */
3294int bnx2x_low_latency_recv(struct napi_struct *napi)
3295{
3296        struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3297                                                 napi);
3298        struct bnx2x *bp = fp->bp;
3299        int found = 0;
3300
3301        if ((bp->state == BNX2X_STATE_CLOSED) ||
3302            (bp->state == BNX2X_STATE_ERROR) ||
3303            (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3304                return LL_FLUSH_FAILED;
3305
3306        if (!bnx2x_fp_lock_poll(fp))
3307                return LL_FLUSH_BUSY;
3308
3309        if (bnx2x_has_rx_work(fp))
3310                found = bnx2x_rx_int(fp, 4);
3311
3312        bnx2x_fp_unlock_poll(fp);
3313
3314        return found;
3315}
3316#endif
3317
3318/* we split the first BD into headers and data BDs
3319 * to ease the pain of our fellow microcode engineers;
3320 * we use one mapping for both BDs
3321 */
3322static uint16_t bnx2x_tx_split(struct bnx2x *bp,
3323                          struct bnx2x_fp_txdata *txdata,
3324                          struct sw_tx_bd *tx_buf,
3325                          struct eth_tx_start_bd **tx_bd, uint16_t hlen,
3326                          uint16_t bd_prod)
3327{
3328        struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3329        struct eth_tx_bd *d_tx_bd;
3330        dma_addr_t mapping;
3331        int old_len = le16_to_cpu(h_tx_bd->nbytes);
3332
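            /* The original start BD keeps only the first hlen bytes (the
             * headers); the new data BD below reuses the same DMA mapping at
             * offset hlen for the remaining old_len - hlen bytes. */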
3333        /* first fix first BD */
3334        h_tx_bd->nbytes = cpu_to_le16(hlen);
3335
3336        DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3337           h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3338
3339        /* now get a new data BD
3340         * (after the pbd) and fill it */
3341        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3342        d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3343
3344        mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3345                           le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3346
3347        d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3348        d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3349        d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3350
3351        /* this marks the BD as one that has no individual mapping */
3352        tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3353
3354        DP(NETIF_MSG_TX_QUEUED,
3355           "TSO split data size is %d (%x:%x)\n",
3356           d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3357
3358        /* update tx_bd */
3359        *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3360
3361        return bd_prod;
3362}
3363
3364#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3365#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3366static __le16 bnx2x_csum_fix(unsigned char *t_header, uint16_t csum,
3367                             int8_t fix)
3368{
3369        panic("Not implemented");
3370#if 0 // AKAROS_PORT
3371        __sum16 tsum = (__force __sum16) csum;
3372
3373        if (fix > 0)
3374                tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3375                                  csum_partial(t_header - fix, fix, 0)));
3376
3377        else if (fix < 0)
3378                tsum = ~csum_fold(csum_add((__force __wsum) csum,
3379                                  csum_partial(t_header, -fix, 0)));
3380
3381        return bswab16(tsum);
3382#endif
3383}
3384
3385static uint32_t bnx2x_xmit_type(struct bnx2x *bp, struct block *block)
3386{
3387        uint32_t rc;
3388        __u8 prot = 0;
3389        __be16 protocol;
3390
3391        /* TODO: AKAROS_PORT ask for checksums */
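            /* Until then every frame goes out as XMIT_PLAIN (no HW checksum
             * or GSO), and the Linux offload classification below stays
             * compiled out. */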
3392        return XMIT_PLAIN;
3393
3394#if 0 // AKAROS_PORT
3395        if (skb->ip_summed != CHECKSUM_PARTIAL)
3396                return XMIT_PLAIN;
3397
3398        protocol = vlan_get_protocol(skb);
3399        if (protocol == cpu_to_be16(ETH_P_IPV6)) {
3400                rc = XMIT_CSUM_V6;
3401                prot = ipv6_hdr(skb)->nexthdr;
3402        } else {
3403                rc = XMIT_CSUM_V4;
3404                prot = ip_hdr(skb)->protocol;
3405        }
3406
3407        if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3408                if (inner_ip_hdr(skb)->version == 6) {
3409                        rc |= XMIT_CSUM_ENC_V6;
3410                        if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3411                                rc |= XMIT_CSUM_TCP;
3412                } else {
3413                        rc |= XMIT_CSUM_ENC_V4;
3414                        if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3415                                rc |= XMIT_CSUM_TCP;
3416                }
3417        }
3418        if (prot == IPPROTO_TCP)
3419                rc |= XMIT_CSUM_TCP;
3420
3421        if (skb_is_gso(skb)) {
3422                if (skb_is_gso_v6(skb)) {
3423                        rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3424                        if (rc & XMIT_CSUM_ENC)
3425                                rc |= XMIT_GSO_ENC_V6;
3426                } else {
3427                        rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3428                        if (rc & XMIT_CSUM_ENC)
3429                                rc |= XMIT_GSO_ENC_V4;
3430                }
3431        }
3432
3433        return rc;
3434#endif
3435}
3436
3437#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3438/* check if packet requires linearization (packet is too fragmented)
3439   no need to check fragmentation if page size > 8K (there will be no
3440   violation of FW restrictions) */
3441static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3442                             uint32_t xmit_type)
3443{
3444        panic("Not implemented");
3445#if 0 // AKAROS_PORT
3446        int to_copy = 0;
3447        int hlen = 0;
3448        int first_bd_sz = 0;
3449
3450        /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3451        if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3452
3453                if (xmit_type & XMIT_GSO) {
3454                        unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3455                        /* Check if LSO packet needs to be copied:
3456                           3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3457                        int wnd_size = MAX_FETCH_BD - 3;
3458                        /* Number of windows to check */
3459                        int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3460                        int wnd_idx = 0;
3461                        int frag_idx = 0;
3462                        uint32_t wnd_sum = 0;
3463
3464                        /* Headers length */
3465                        hlen = (int)(skb_transport_header(skb) - skb->data) +
3466                                tcp_hdrlen(skb);
3467
3468                        /* Amount of data (w/o headers) on linear part of SKB*/
3469                        first_bd_sz = skb_headlen(skb) - hlen;
3470
3471                        wnd_sum  = first_bd_sz;
3472
3473                        /* Calculate the first sum - it's special */
3474                        for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3475                                wnd_sum +=
3476                                        skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3477
3478                        /* If there was data on linear skb data - check it */
3479                        if (first_bd_sz > 0) {
3480                                if (unlikely(wnd_sum < lso_mss)) {
3481                                        to_copy = 1;
3482                                        goto exit_lbl;
3483                                }
3484
3485                                wnd_sum -= first_bd_sz;
3486                        }
3487
3488                        /* Others are easier: run through the frag list and
3489                           check all windows */
3490                        for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3491                                wnd_sum +=
3492                          skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3493
3494                                if (unlikely(wnd_sum < lso_mss)) {
3495                                        to_copy = 1;
3496                                        break;
3497                                }
3498                                wnd_sum -=
3499                                        skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3500                        }
3501                } else {
3502                        /* in the non-LSO case, a too-fragmented packet
3503                           should always be linearized */
3504                        to_copy = 1;
3505                }
3506        }
3507
3508exit_lbl:
3509        if (unlikely(to_copy))
3510                DP(NETIF_MSG_TX_QUEUED,
3511                   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3512                   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3513                   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3514
3515        return to_copy;
3516#endif
3517}
3518#endif
3519
3520/**
3521 * bnx2x_set_pbd_gso - update PBD in GSO case.
3522 *
3523 * @skb:        packet skb
3524 * @pbd:        parse BD
3525 * @xmit_type:  xmit flags
3526 */
3527static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3528                              struct eth_tx_parse_bd_e1x *pbd,
3529                              uint32_t xmit_type)
3530{
3531        panic("Not implemented");
3532#if 0 // AKAROS_PORT
3533        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3534        pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3535        pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3536
3537        if (xmit_type & XMIT_GSO_V4) {
3538                pbd->ip_id = bswab16(ip_hdr(skb)->id);
3539                pbd->tcp_pseudo_csum =
3540                        bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3541                                                   ip_hdr(skb)->daddr,
3542                                                   0, IPPROTO_TCP, 0));
3543        } else {
3544                pbd->tcp_pseudo_csum =
3545                        bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3546                                                 &ipv6_hdr(skb)->daddr,
3547                                                 0, IPPROTO_TCP, 0));
3548        }
3549
3550        pbd->global_data |=
3551                cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3552#endif
3553}
3554
3555/**
3556 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3557 *
3558 * @bp:                 driver handle
3559 * @skb:                packet skb
3560 * @parsing_data:       data to be updated
3561 * @xmit_type:          xmit flags
3562 *
3563 * 57712/578xx related, when skb has encapsulation
3564 */
3565static uint8_t bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3566                                 uint32_t *parsing_data, uint32_t xmit_type)
3567{
3568        panic("Not implemented");
3569#if 0 // AKAROS_PORT
3570        *parsing_data |=
3571                ((((uint8_t *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3572                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3573                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3574
3575        if (xmit_type & XMIT_CSUM_TCP) {
3576                *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3577                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3578                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3579
3580                return skb_inner_transport_header(skb) +
3581                        inner_tcp_hdrlen(skb) - skb->data;
3582        }
3583
3584        /* We support checksum offload for TCP and UDP only.
3585         * No need to pass the UDP header length - it's a constant.
3586         */
3587        return skb_inner_transport_header(skb) +
3588                sizeof(struct udphdr) - skb->data;
3589#endif
3590}
3591
3592/**
3593 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3594 *
3595 * @bp:                 driver handle
3596 * @skb:                packet skb
3597 * @parsing_data:       data to be updated
3598 * @xmit_type:          xmit flags
3599 *
3600 * 57712/578xx related
3601 */
3602static uint8_t bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3603                                uint32_t *parsing_data, uint32_t xmit_type)
3604{
3605        panic("Not implemented");
3606#if 0 // AKAROS_PORT
3607        *parsing_data |=
3608                ((((uint8_t *)skb_transport_header(skb) - skb->data) >> 1) <<
3609                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3610                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3611
3612        if (xmit_type & XMIT_CSUM_TCP) {
3613                *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3614                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3615                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3616
3617                return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3618        }
3619        /* We support checksum offload for TCP and UDP only.
3620         * No need to pass the UDP header length - it's a constant.
3621         */
3622        return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3623#endif
3624}
3625
3626/* set FW indication according to inner or outer protocols if tunneled */
3627static void bnx2x_set_sbd_csum(struct bnx2x *bp, void *ignored_skb,
3628                               struct eth_tx_start_bd *tx_start_bd,
3629                               uint32_t xmit_type)
3630{
3631        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3632
3633        if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3634                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3635
3636        if (!(xmit_type & XMIT_CSUM_TCP))
3637                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3638}
3639
3640/**
3641 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3642 *
3643 * @bp:         driver handle
3644 * @skb:        packet skb
3645 * @pbd:        parse BD to be updated
3646 * @xmit_type:  xmit flags
3647 */
3648static uint8_t bnx2x_set_pbd_csum(struct bnx2x *bp, struct block *block,
3649                             struct eth_tx_parse_bd_e1x *pbd,
3650                             uint32_t xmit_type)
3651{
3652        panic("Not implemented");
3653#if 0 // AKAROS_PORT
3654        uint8_t hlen = (skb_network_header(skb) - skb->data) >> 1;
3655
3656        /* for now NS flag is not used in Linux */
3657        pbd->global_data =
3658                cpu_to_le16(hlen |
3659                            ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3660                             ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3661
3662        pbd->ip_hlen_w = (skb_transport_header(skb) -
3663                        skb_network_header(skb)) >> 1;
3664
3665        hlen += pbd->ip_hlen_w;
3666
3667        /* We support checksum offload for TCP and UDP only */
3668        if (xmit_type & XMIT_CSUM_TCP)
3669                hlen += tcp_hdrlen(skb) / 2;
3670        else
3671                hlen += sizeof(struct udphdr) / 2;
3672
3673        pbd->total_hlen_w = cpu_to_le16(hlen);
3674        hlen = hlen*2;
3675
3676        if (xmit_type & XMIT_CSUM_TCP) {
3677                pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3678
3679        } else {
3680                int8_t fix = SKB_CS_OFF(skb); /* signed! */
3681
3682                DP(NETIF_MSG_TX_QUEUED,
3683                   "hlen %d  fix %d  csum before fix %x\n",
3684                   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3685
3686                /* HW bug: fixup the CSUM */
3687                pbd->tcp_pseudo_csum =
3688                        bnx2x_csum_fix(skb_transport_header(skb),
3689                                       SKB_CS(skb), fix);
3690
3691                DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3692                   pbd->tcp_pseudo_csum);
3693        }
3694
3695        return hlen;
3696#endif
3697}
3698
3699static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3700                                      struct eth_tx_parse_bd_e2 *pbd_e2,
3701                                      struct eth_tx_parse_2nd_bd *pbd2,
3702                                      uint16_t *global_data,
3703                                      uint32_t xmit_type)
3704{
3705        panic("Not implemented");
3706#if 0 // AKAROS_PORT
3707        uint16_t hlen_w = 0;
3708        uint8_t outerip_off, outerip_len = 0;
3709
3710        /* from outer IP to transport */
3711        hlen_w = (skb_inner_transport_header(skb) -
3712                  skb_network_header(skb)) >> 1;
3713
3714        /* transport len */
3715        hlen_w += inner_tcp_hdrlen(skb) >> 1;
3716
3717        pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3718
3719        /* outer IP header info */
3720        if (xmit_type & XMIT_CSUM_V4) {
3721                struct iphdr *iph = ip_hdr(skb);
3722                uint32_t csum = (__force uint32_t)(~iph->check) -
3723                           (__force uint32_t)iph->tot_len -
3724                           (__force uint32_t)iph->frag_off;
3725
3726                outerip_len = iph->ihl << 1;
3727
3728                pbd2->fw_ip_csum_wo_len_flags_frag =
3729                        bswab16(csum_fold((__force __wsum)csum));
3730        } else {
3731                pbd2->fw_ip_hdr_to_payload_w =
3732                        hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3733                pbd_e2->data.tunnel_data.flags |=
3734                        ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
3735        }
3736
3737        pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3738
3739        pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3740
3741        /* inner IP header info */
3742        if (xmit_type & XMIT_CSUM_ENC_V4) {
3743                pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3744
3745                pbd_e2->data.tunnel_data.pseudo_csum =
3746                        bswab16(~csum_tcpudp_magic(
3747                                        inner_ip_hdr(skb)->saddr,
3748                                        inner_ip_hdr(skb)->daddr,
3749                                        0, IPPROTO_TCP, 0));
3750        } else {
3751                pbd_e2->data.tunnel_data.pseudo_csum =
3752                        bswab16(~csum_ipv6_magic(
3753                                        &inner_ipv6_hdr(skb)->saddr,
3754                                        &inner_ipv6_hdr(skb)->daddr,
3755                                        0, IPPROTO_TCP, 0));
3756        }
3757
3758        outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3759
3760        *global_data |=
3761                outerip_off |
3762                (outerip_len <<
3763                        ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3764                ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3765                        ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3766
3767        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3768                SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3769                pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3770        }
3771#endif
3772}
3773
3774static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb,
3775                                         uint32_t *parsing_data,
3776                                         uint32_t xmit_type)
3777{
3778        panic("Not implemented");
3779#if 0 // AKAROS_PORT
3780        struct ipv6hdr *ipv6;
3781
3782        if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3783                return;
3784
3785        if (xmit_type & XMIT_GSO_ENC_V6)
3786                ipv6 = inner_ipv6_hdr(skb);
3787        else /* XMIT_GSO_V6 */
3788                ipv6 = ipv6_hdr(skb);
3789
3790        if (ipv6->nexthdr == NEXTHDR_IPV6)
3791                *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3792#endif
3793}
3794
3795/* called with netif_tx_lock
3796 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3797 * netif_wake_queue()
3798 */
3799netdev_tx_t bnx2x_start_xmit(struct block *block,
3800                             struct bnx2x_fp_txdata *txdata)
3801{
3802        struct bnx2x *bp = txdata->parent_fp->bp;
3803
3804        struct sw_tx_bd *tx_buf;
3805        struct eth_tx_start_bd *tx_start_bd, *first_bd;
3806        struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3807        struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3808        struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3809        struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3810        uint32_t pbd_e2_parsing_data = 0;
3811        uint16_t pkt_prod, bd_prod;
3812        int nbd, txq_index;
3813        dma_addr_t mapping;
3814        uint32_t xmit_type = bnx2x_xmit_type(bp, block);
3815        int i;
3816        uint8_t hlen = 0;
3817        __le16 pkt_size = 0;
3818        struct etherpkt *eth;
3819        uint8_t mac_type = UNICAST_ADDRESS;
3820
3821#ifdef BNX2X_STOP_ON_ERROR
3822        if (unlikely(bp->panic))
3823                return NETDEV_TX_BUSY;
3824#endif
3825
3826        txq_index = txdata->txq_index;
3827        assert(txq_index == 0); // AKAROS_PORT til we get multi-queue working
3828        assert(txdata == &bp->bnx2x_txq[txq_index]);
3829
3830        assert(!(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)));
3831
3832
3833        /* enable this debug print to view the transmission queue being used
3834        DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3835           txq_index, fp_index, txdata_index); */
3836
3837        /* enable this debug print to view the transmission details
3838        DP(NETIF_MSG_TX_QUEUED,
3839           "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3840           txdata->cid, fp_index, txdata_index, txdata, fp); */
3841
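            /* AKAROS_PORT: the block is sent as a single linear buffer, so
             * only one data BD (the "1 +" below) is reserved instead of the
             * skb's nr_frags. */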
3842        if (unlikely(bnx2x_tx_avail(bp, txdata) <
3843#if 0 // AKAROS_PORT TODO: block extra
3844                        skb_shinfo(skb)->nr_frags +
3845#else
3846                        1 +
3847#endif
3848                        BDS_PER_TX_PKT +
3849                        NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3850                /* Handle special storage cases separately */
3851                if (txdata->tx_ring_size == 0) {
3852                        struct bnx2x_eth_q_stats *q_stats =
3853                                bnx2x_fp_qstats(bp, txdata->parent_fp);
3854                        q_stats->driver_filtered_tx_pkt++;
3855                        freeb(block);
3856                        return NETDEV_TX_OK;
3857                }
3858                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3859                netif_tx_stop_queue(txq);
3860                BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3861
3862                return NETDEV_TX_BUSY;
3863        }
3864
3865#if 0 // AKAROS_PORT
3866        DP(NETIF_MSG_TX_QUEUED,
3867           "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3868           txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3869           ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3870           skb->len);
3871#endif
3872
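            /* The block's read pointer sits at the Ethernet header; peek at
             * the destination address to classify the frame below. */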
3873        eth = (struct etherpkt *)block->rp;
3874
3875        /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3876        if (unlikely(is_multicast_ether_addr(eth->d))) {
3877                if (eaddrcmp(eth->d, bp->edev->bcast))
3878                        mac_type = BROADCAST_ADDRESS;
3879                else
3880                        mac_type = MULTICAST_ADDRESS;
3881        }
3882
3883#if 0 // AKAROS_PORT TODO block extra
3884#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3885        /* First, check if we need to linearize the skb (due to FW
3886           restrictions). No need to check fragmentation if page size > 8K
3887           (there will be no violation to FW restrictions) */
3888        if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3889                /* Statistics of linearization */
3890                bp->lin_cnt++;
3891                if (skb_linearize(skb) != 0) {
3892                        DP(NETIF_MSG_TX_QUEUED,
3893                           "SKB linearization failed - silently dropping this SKB\n");
3894                        dev_kfree_skb_any(skb);
3895                        return NETDEV_TX_OK;
3896                }
3897        }
3898#endif
3899#endif
3900        /* Map the block's linear data for DMA */
3901        mapping = dma_map_single(&bp->pdev->dev, block->rp,
3902                                 BLEN(block), DMA_TO_DEVICE);
3903        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3904                DP(NETIF_MSG_TX_QUEUED,
3905                   "SKB mapping failed - silently dropping this SKB\n");
3906                freeb(block);
3907                return NETDEV_TX_OK;
3908        }
3909        /*
3910         * Please read carefully. First we use one BD which we mark as start,
3911         * then we have a parsing info BD (used for TSO or xsum),
3912         * and only then we have the rest of the TSO BDs.
3913         * (don't forget to mark the last one as last,
3914         * and to unmap only AFTER you write to the BD ...)
3915         * And above all, all PBD sizes are in words - NOT DWORDS!
3916        */
3917
3918        /* get current pkt produced now - advance it just before sending packet
3919         * since mapping of pages may fail and cause packet to be dropped
3920         */
3921        pkt_prod = txdata->tx_pkt_prod;
3922        bd_prod = TX_BD(txdata->tx_bd_prod);
3923
3924        /* get a tx_buf and first BD
3925         * tx_start_bd may be changed during SPLIT,
3926         * but first_bd will always stay first
3927         */
3928        tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3929        tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3930        first_bd = tx_start_bd;
3931
3932        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3933
3934        /* header nbd: indirectly zero other flags! */
3935        tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3936
3937        /* remember the first BD of the packet */
3938        tx_buf->first_bd = txdata->tx_bd_prod;
3939        tx_buf->block = block;
3940        tx_buf->flags = 0;
3941
3942        DP(NETIF_MSG_TX_QUEUED,
3943           "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3944           pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3945
3946#if 0 // AKAROS_PORT skipping vlan stuff
3947        if (vlan_tx_tag_present(skb)) {
3948                tx_start_bd->vlan_or_ethertype =
3949                    cpu_to_le16(vlan_tx_tag_get(skb));
3950                tx_start_bd->bd_flags.as_bitfield |=
3951                    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3952        } else {
3953#else
3954        {
3955#endif
3956                /* when transmitting in a vf, start bd must hold the ethertype
3957                 * for fw to enforce it
3958                 */
3959                // AKAROS_PORT
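                    /* eth->type holds the on-wire (big-endian) ethertype;
                     * build the host-order value by hand before converting it
                     * to LE16 for the BD. */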
3960                uint16_t type_le16 = (eth->type[0] << 8) | eth->type[1];
3961#ifndef BNX2X_STOP_ON_ERROR
3962                if (IS_VF(bp))
3963#endif
3964                        tx_start_bd->vlan_or_ethertype =
3965                                cpu_to_le16(type_le16);
3966#ifndef BNX2X_STOP_ON_ERROR
3967                else
3968                        /* used by FW for packet accounting */
3969                        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3970#endif
3971        }
3972
3973        nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3974
3975        /* turn on parsing and get a BD */
3976        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3977
3978        if (xmit_type & XMIT_CSUM)
3979                bnx2x_set_sbd_csum(bp, block, tx_start_bd, xmit_type);
3980
3981        if (!CHIP_IS_E1x(bp)) {
3982                panic("Not implemented");
3983                #if 0 // AKAROS_PORT
3984                pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3985                memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3986
3987                if (xmit_type & XMIT_CSUM_ENC) {
3988                        uint16_t global_data = 0;
3989
3990                        /* Set PBD in enc checksum offload case */
3991                        hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3992                                                      &pbd_e2_parsing_data,
3993                                                      xmit_type);
3994
3995                        /* turn on 2nd parsing and get a BD */
3996                        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3997
3998                        pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3999
4000                        memset(pbd2, 0, sizeof(*pbd2));
4001
4002                        pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
4003                                (skb_inner_network_header(skb) -
4004                                 skb->data) >> 1;
4005
4006                        if (xmit_type & XMIT_GSO_ENC)
4007                                bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
4008                                                          &global_data,
4009                                                          xmit_type);
4010
4011                        pbd2->global_data = cpu_to_le16(global_data);
4012
4013                        /* add additional parse BD indication to start BD */
4014                        SET_FLAG(tx_start_bd->general_data,
4015                                 ETH_TX_START_BD_PARSE_NBDS, 1);
4016                        /* set encapsulation flag in start BD */
4017                        SET_FLAG(tx_start_bd->general_data,
4018                                 ETH_TX_START_BD_TUNNEL_EXIST, 1);
4019
4020                        tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
4021
4022                        nbd++;
4023                } else if (xmit_type & XMIT_CSUM) {
4024                        /* Set PBD in checksum offload case w/o encapsulation */
4025                        hlen = bnx2x_set_pbd_csum_e2(bp, skb,
4026                                                     &pbd_e2_parsing_data,
4027                                                     xmit_type);
4028                }
4029
4030                bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
4031                /* Add the macs to the parsing BD if this is a vf or if
4032                 * Tx Switching is enabled.
4033                 */
4034                if (IS_VF(bp)) {
4035                        /* override GRE parameters in BD */
4036                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4037                                              &pbd_e2->data.mac_addr.src_mid,
4038                                              &pbd_e2->data.mac_addr.src_lo,
4039                                              eth->h_source);
4040
4041                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
4042                                              &pbd_e2->data.mac_addr.dst_mid,
4043                                              &pbd_e2->data.mac_addr.dst_lo,
4044                                              eth->h_dest);
4045                } else {
4046                        if (bp->flags & TX_SWITCHING)
4047                                bnx2x_set_fw_mac_addr(
4048                                                &pbd_e2->data.mac_addr.dst_hi,
4049                                                &pbd_e2->data.mac_addr.dst_mid,
4050                                                &pbd_e2->data.mac_addr.dst_lo,
4051                                                eth->h_dest);
4052#ifdef BNX2X_STOP_ON_ERROR
4053                        /* Enforce security is always set in Stop on Error -
4054                         * source mac should be present in the parsing BD
4055                         */
4056                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4057                                              &pbd_e2->data.mac_addr.src_mid,
4058                                              &pbd_e2->data.mac_addr.src_lo,
4059                                              eth->h_source);
4060#endif
4061                }
4062
4063                SET_FLAG(pbd_e2_parsing_data,
4064                         ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4065                #endif
4066        } else {
4067                uint16_t global_data = 0;
4068                pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4069                memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4070                /* Set PBD in checksum offload case */
4071                if (xmit_type & XMIT_CSUM) {
4072                        panic("Not implemented");
4073                        #if 0 // AKAROS_PORT (xsum offload)
4074                        hlen = bnx2x_set_pbd_csum(bp, block, pbd_e1x, xmit_type);
4075                        #endif
4076                }
4077
4078                SET_FLAG(global_data,
4079                         ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4080                pbd_e1x->global_data |= cpu_to_le16(global_data);
4081        }
4082
4083        /* Setup the data pointer of the first BD of the packet */
4084        tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4085        tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4086        tx_start_bd->nbytes = cpu_to_le16(BLEN(block));
4087        pkt_size = tx_start_bd->nbytes;
4088
4089        DP(NETIF_MSG_TX_QUEUED,
4090           "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4091           tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4092           le16_to_cpu(tx_start_bd->nbytes),
4093           tx_start_bd->bd_flags.as_bitfield,
4094           le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4095
4096        if (xmit_type & XMIT_GSO) {
4097                panic("Not implemented");
4098                #if 0 // AKAROS_PORT
4099
4100                DP(NETIF_MSG_TX_QUEUED,
4101                   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4102                   skb->len, hlen, skb_headlen(skb),
4103                   skb_shinfo(skb)->gso_size);
4104
4105                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4106
4107                if (unlikely(skb_headlen(skb) > hlen)) {
4108                        nbd++;
4109                        bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4110                                                 &tx_start_bd, hlen,
4111                                                 bd_prod);
4112                }
4113                if (!CHIP_IS_E1x(bp))
4114                        pbd_e2_parsing_data |=
4115                                (skb_shinfo(skb)->gso_size <<
4116                                 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4117                                 ETH_TX_PARSE_BD_E2_LSO_MSS;
4118                else
4119                        bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4120                #endif
4121        }
4122
4123        /* Set the PBD's parsing_data field if not zero
4124         * (for the chips newer than 57711).
4125         */
4126        if (pbd_e2_parsing_data)
4127                pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4128
4129        tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4130
4131#if 0 // AKAROS_PORT TODO block extra
4132        /* Handle fragmented skb */
4133        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4134                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4135
4136                mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4137                                           skb_frag_size(frag), DMA_TO_DEVICE);
4138                if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4139                        unsigned int pkts_compl = 0, bytes_compl = 0;
4140
4141                        DP(NETIF_MSG_TX_QUEUED,
4142                           "Unable to map page - dropping packet...\n");
4143
4144                        /* we need unmap all buffers already mapped
4145                         * for this SKB;
4146                         * first_bd->nbd need to be properly updated
4147                         * before call to bnx2x_free_tx_pkt
4148                         */
4149                        first_bd->nbd = cpu_to_le16(nbd);
4150                        bnx2x_free_tx_pkt(bp, txdata,
4151                                          TX_BD(txdata->tx_pkt_prod),
4152                                          &pkts_compl, &bytes_compl);
4153                        return NETDEV_TX_OK;
4154                }
4155
4156                bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4157                tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4158                if (total_pkt_bd == NULL)
4159                        total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4160
4161                tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4162                tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4163                tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4164                le16_add_cpu(&pkt_size, skb_frag_size(frag));
4165                nbd++;
4166
4167                DP(NETIF_MSG_TX_QUEUED,
4168                   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4169                   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4170                   le16_to_cpu(tx_data_bd->nbytes));
4171        }
4172#endif
4173
4174        DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4175
4176        /* update with actual num BDs */
4177        first_bd->nbd = cpu_to_le16(nbd);
4178
4179        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4180
4181        /* now send a tx doorbell, counting the next BD
4182         * if the packet contains or ends with it
4183         */
4184        if (TX_BD_POFF(bd_prod) < nbd)
4185                nbd++;
4186
4187        /* total_pkt_bytes should be set on the first data BD if
4188         * it's not an LSO packet and there is more than one
4189         * data BD. In this case pkt_size is limited by an MTU value.
4190         * However we prefer to set it for an LSO packet (while we don't
4191         * have to) in order to save some CPU cycles in the non-LSO
4192         * case, where we care much more about them.
4193         */
4194        if (total_pkt_bd != NULL)
4195                total_pkt_bd->total_pkt_bytes = pkt_size;
4196
4197        if (pbd_e1x)
4198                DP(NETIF_MSG_TX_QUEUED,
4199                   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4200                   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4201                   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4202                   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4203                    le16_to_cpu(pbd_e1x->total_hlen_w));
4204        if (pbd_e2)
4205                DP(NETIF_MSG_TX_QUEUED,
4206                   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4207                   pbd_e2,
4208                   pbd_e2->data.mac_addr.dst_hi,
4209                   pbd_e2->data.mac_addr.dst_mid,
4210                   pbd_e2->data.mac_addr.dst_lo,
4211                   pbd_e2->data.mac_addr.src_hi,
4212                   pbd_e2->data.mac_addr.src_mid,
4213                   pbd_e2->data.mac_addr.src_lo,
4214                   pbd_e2->parsing_data);
4215        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4216
4217        netdev_tx_sent_queue(txq, skb->len);
4218
4219        skb_tx_timestamp(skb);
4220
4221        txdata->tx_pkt_prod++;
4222        /*
4223         * Make sure that the BD data is updated before updating the producer
4224         * since the FW might read the BD right after the producer is updated.
4225         * This only matters on weakly-ordered memory-model archs such as
4226         * IA-64. The barrier is also mandatory since the FW assumes that
4227         * packets always have BDs.
4228         */
4229        wmb();
4230
4231        txdata->tx_db.data.prod += nbd;
4232        cmb();
4233
4234        DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4235
4236        bus_wmb();
4237
4238        txdata->tx_bd_prod += nbd;
4239
4240        txdata->tx_pkt++;
4241
4242        return NETDEV_TX_OK;
4243}
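/* Informal sketch of the BD publication sequence above, as the comments
 * describe its intent:
 *
 *      fill BDs and PBDs for the skb
 *      wmb();                            BDs visible before the producer
 *      txdata->tx_db.data.prod += nbd;   advance the doorbell producer
 *      cmb();                            compiler barrier before the MMIO
 *      DOORBELL(bp, cid, raw);           ask the FW to fetch the new BDs
 *      bus_wmb();                        order the doorbell write on the bus
 */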
4244
4245/**
4246 * bnx2x_setup_tc - routine to configure net_device for multi tc
4247 *
4248 * @netdev: net device to configure
4249 * @tc: number of traffic classes to enable
4250 *
4251 * callback connected to the ndo_setup_tc function pointer
4252 */
4253int bnx2x_setup_tc(struct ether *dev, uint8_t num_tc)
4254{
4255        /* XME skipping traffic classes */
4256        return 0;
4257#if 0 // AKAROS_PORT
4258        int cos, prio, count, offset;
4259        struct bnx2x *bp = netdev_priv(dev);
4260
4261        /* setup tc must be called under rtnl lock */
4262        ASSERT_RTNL();
4263
4264        /* no traffic classes requested. Aborting */
4265        if (!num_tc) {
4266                netdev_reset_tc(dev);
4267                return 0;
4268        }
4269
4270        /* requested to support too many traffic classes */
4271        if (num_tc > bp->max_cos) {
4272                BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4273                          num_tc, bp->max_cos);
4274                return -EINVAL;
4275        }
4276
4277        /* declare amount of supported traffic classes */
4278        if (netdev_set_num_tc(dev, num_tc)) {
4279                BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4280                return -EINVAL;
4281        }
4282
4283        /* configure priority to traffic class mapping */
4284        for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4285                netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4286                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4287                   "mapping priority %d to tc %d\n",
4288                   prio, bp->prio_to_cos[prio]);
4289        }
4290
4291        /* Use this configuration to differentiate tc0 from other COSes.
4292           This can be used for ETS or PFC, and can save the effort of setting
4293           up a multi-class queueing discipline or negotiating DCBX with a switch
4294        netdev_set_prio_tc_map(dev, 0, 0);
4295        DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4296        for (prio = 1; prio < 16; prio++) {
4297                netdev_set_prio_tc_map(dev, prio, 1);
4298                DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4299        } */
4300
4301        /* configure traffic class to transmission queue mapping */
4302        for (cos = 0; cos < bp->max_cos; cos++) {
4303                count = BNX2X_NUM_ETH_QUEUES(bp);
4304                offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4305                netdev_set_tc_queue(dev, cos, count, offset);
4306                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4307                   "mapping tc %d to offset %d count %d\n",
4308                   cos, offset, count);
4309        }
4310
4311        return 0;
4312#endif
4313}
4314
4315/* called with rtnl_lock */
4316int bnx2x_change_mac_addr(struct ether *dev, void *p)
4317{
4318panic("Not implemented");
4319#if 0 // AKAROS_PORT
4320        struct sockaddr *addr = p;
4321        struct bnx2x *bp = netdev_priv(dev);
4322        int rc = 0;
4323
4324        if (!is_valid_ether_addr(addr->sa_data)) {
4325                BNX2X_ERR("Requested MAC address is not valid\n");
4326                return -EINVAL;
4327        }
4328
4329        if (IS_MF_STORAGE_ONLY(bp)) {
4330                BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4331                return -EINVAL;
4332        }
4333
4334        if (netif_running(dev))  {
4335                rc = bnx2x_set_eth_mac(bp, false);
4336                if (rc)
4337                        return rc;
4338        }
4339
4340        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4341
4342        if (netif_running(dev))
4343                rc = bnx2x_set_eth_mac(bp, true);
4344
4345        return rc;
4346#endif
4347}
4348
4349static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4350{
4351        union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4352        struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4353        uint8_t cos;
4354
4355        /* Common */
4356
4357        if (IS_FCOE_IDX(fp_index)) {
4358                memset(sb, 0, sizeof(union host_hc_status_block));
4359                fp->status_blk_mapping = 0;
4360        } else {
4361                /* status blocks */
4362                if (!CHIP_IS_E1x(bp))
4363                        BNX2X_PCI_FREE(sb->e2_sb,
4364                                       bnx2x_fp(bp, fp_index,
4365                                                status_blk_mapping),
4366                                       sizeof(struct host_hc_status_block_e2));
4367                else
4368                        BNX2X_PCI_FREE(sb->e1x_sb,
4369                                       bnx2x_fp(bp, fp_index,
4370                                                status_blk_mapping),
4371                                       sizeof(struct host_hc_status_block_e1x));
4372        }
4373
4374        /* Rx */
4375        if (!skip_rx_queue(bp, fp_index)) {
4376                bnx2x_free_rx_bds(fp);
4377
4378                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4379                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4380                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4381                               bnx2x_fp(bp, fp_index, rx_desc_mapping),
4382                               sizeof(struct eth_rx_bd) * NUM_RX_BD);
4383
4384                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4385                               bnx2x_fp(bp, fp_index, rx_comp_mapping),
4386                               sizeof(struct eth_fast_path_rx_cqe) *
4387                               NUM_RCQ_BD);
4388
4389                /* SGE ring */
4390                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4391                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4392                               bnx2x_fp(bp, fp_index, rx_sge_mapping),
4393                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4394        }
4395
4396        /* Tx */
4397        if (!skip_tx_queue(bp, fp_index)) {
4398                /* fastpath tx rings: tx_buf tx_desc */
4399                for_each_cos_in_tx_queue(fp, cos) {
4400                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4401
4402                        DP(NETIF_MSG_IFDOWN,
4403                           "freeing tx memory of fp %d cos %d cid %d\n",
4404                           fp_index, cos, txdata->cid);
4405
4406                        BNX2X_FREE(txdata->tx_buf_ring);
4407                        BNX2X_PCI_FREE(txdata->tx_desc_ring,
4408                                txdata->tx_desc_mapping,
4409                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4410                }
4411        }
4412        /* end of fastpath */
4413}
4414
4415static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4416{
4417        int i;
4418        for_each_cnic_queue(bp, i)
4419                bnx2x_free_fp_mem_at(bp, i);
4420}
4421
4422void bnx2x_free_fp_mem(struct bnx2x *bp)
4423{
4424        int i;
4425        for_each_eth_queue(bp, i)
4426                bnx2x_free_fp_mem_at(bp, i);
4427}
4428
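/* Set per-fastpath shortcut pointers into the chip-specific (E1x vs. E2)
 * status block layout, so sb_index_values and sb_running_index can be used
 * without re-checking the chip type on every access.
 */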
4429static void set_sb_shortcuts(struct bnx2x *bp, int index)
4430{
4431        union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4432        if (!CHIP_IS_E1x(bp)) {
4433                bnx2x_fp(bp, index, sb_index_values) =
4434                        (__le16 *)status_blk.e2_sb->sb.index_values;
4435                bnx2x_fp(bp, index, sb_running_index) =
4436                        (__le16 *)status_blk.e2_sb->sb.running_index;
4437        } else {
4438                bnx2x_fp(bp, index, sb_index_values) =
4439                        (__le16 *)status_blk.e1x_sb->sb.index_values;
4440                bnx2x_fp(bp, index, sb_running_index) =
4441                        (__le16 *)status_blk.e1x_sb->sb.running_index;
4442        }
4443}
4444
4445/* Returns the number of actually allocated BDs */
4446static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4447                              int rx_ring_size)
4448{
4449        struct bnx2x *bp = fp->bp;
4450        uint16_t ring_prod, cqe_ring_prod;
4451        int i, failure_cnt = 0;
4452
4453        fp->rx_comp_cons = 0;
4454        cqe_ring_prod = ring_prod = 0;
4455
4456        /* This routine is called only during init, so
4457         * fp->eth_q_stats.rx_skb_alloc_failed is still 0.
4458         */
4459        for (i = 0; i < rx_ring_size; i++) {
4460                if (bnx2x_alloc_rx_data(bp, fp, ring_prod, MEM_WAIT) < 0) {
4461                        failure_cnt++;
4462                        continue;
4463                }
4464                ring_prod = NEXT_RX_IDX(ring_prod);
4465                cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4466                warn_on(ring_prod <= (i - failure_cnt));
4467        }
4468
4469        if (failure_cnt)
4470                BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4471                          i - failure_cnt, fp->index);
4472
4473        fp->rx_bd_prod = ring_prod;
4474        /* Limit the CQE producer by the CQE ring size */
4475        fp->rx_comp_prod = MIN_T(uint16_t, NUM_RCQ_RINGS * RCQ_DESC_CNT,
4476                                 cqe_ring_prod);
4477        fp->rx_pkt = fp->rx_calls = 0;
4478
4479        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4480
4481        return i - failure_cnt;
4482}
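/* bnx2x_alloc_fp_mem_at() below compares the returned count with the
 * requested ring size; a short allocation goes to its low-memory path, which
 * keeps the queue only if the count still meets the FW minimum.
 */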
4483
4484static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4485{
4486        int i;
4487
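        /* The last CQE of each RCQ page is used as a "next page" element:
         * point it at the DMA address of the following page; the modulo
         * wraps the final page back to page 0, closing the ring.
         */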
4488        for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4489                struct eth_rx_cqe_next_page *nextpg;
4490
4491                nextpg = (struct eth_rx_cqe_next_page *)
4492                        &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4493                nextpg->addr_hi =
4494                        cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4495                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4496                nextpg->addr_lo =
4497                        cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4498                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4499        }
4500}
4501
4502static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4503{
4504        union host_hc_status_block *sb;
4505        struct bnx2x_fastpath *fp = &bp->fp[index];
4506        int ring_size = 0;
4507        uint8_t cos;
4508        int rx_ring_size = 0;
4509
4510        if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4511                rx_ring_size = MIN_RX_SIZE_NONTPA;
4512                bp->rx_ring_size = rx_ring_size;
4513        } else if (!bp->rx_ring_size) {
4514                rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4515
4516                if (CHIP_IS_E3(bp)) {
4517                        uint32_t cfg = SHMEM_RD(bp,
4518                                           dev_info.port_hw_config[BP_PORT(bp)].
4519                                           default_cfg);
4520
4521                        /* Decrease ring size for 1G functions */
4522                        if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4523                            PORT_HW_CFG_NET_SERDES_IF_SGMII)
4524                                rx_ring_size /= 10;
4525                }
4526
4527                /* allocate at least the number of buffers required by the FW */
4528                rx_ring_size = MAX_T(int,
4529                                     bp->disable_tpa ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA,
4530                                     rx_ring_size);
4531
4532                bp->rx_ring_size = rx_ring_size;
4533        } else /* if rx_ring_size specified - use it */
4534                rx_ring_size = bp->rx_ring_size;
4535
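        /* At this point rx_ring_size is either the configured value, the
         * storage-only minimum, or the computed one: an even share of
         * MAX_RX_AVAIL per RX queue, reduced to a tenth for 1G SerDes
         * functions, and raised to the FW minimum if it fell below it.
         */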
4536        DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4537
4538        /* Common */
4539        sb = &bnx2x_fp(bp, index, status_blk);
4540
4541        if (!IS_FCOE_IDX(index)) {
4542                /* status blocks */
4543                if (!CHIP_IS_E1x(bp)) {
4544                        sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4545                                                    sizeof(struct host_hc_status_block_e2));
4546                        if (!sb->e2_sb)
4547                                goto alloc_mem_err;
4548                } else {
4549                        sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4550                                                     sizeof(struct host_hc_status_block_e1x));
4551                        if (!sb->e1x_sb)
4552                                goto alloc_mem_err;
4553                }
4554        }
4555
4556        /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4557         * set shortcuts for it.
4558         */
4559        if (!IS_FCOE_IDX(index))
4560                set_sb_shortcuts(bp, index);
4561
4562        /* Tx */
4563        if (!skip_tx_queue(bp, index)) {
4564                /* fastpath tx rings: tx_buf tx_desc */
4565                for_each_cos_in_tx_queue(fp, cos) {
4566                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4567
4568                        DP(NETIF_MSG_IFUP,
4569                           "allocating tx memory of fp %d cos %d\n",
4570                           index, cos);
4571
4572                        txdata->tx_buf_ring = kzmalloc((NUM_TX_BD) * (sizeof(struct sw_tx_bd)),
4573                                                       MEM_WAIT);
4574                        if (!txdata->tx_buf_ring)
4575                                goto alloc_mem_err;
4576                        txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4577                                                               sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4578                        if (!txdata->tx_desc_ring)
4579                                goto alloc_mem_err;
4580                }
4581        }
4582
4583        /* Rx */
4584        if (!skip_rx_queue(bp, index)) {
4585                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4586                bnx2x_fp(bp, index, rx_buf_ring) =
4587                        kzmalloc((NUM_RX_BD) * (sizeof(struct sw_rx_bd)),
4588                                 MEM_WAIT);
4589                if (!bnx2x_fp(bp, index, rx_buf_ring))
4590                        goto alloc_mem_err;
4591                bnx2x_fp(bp, index, rx_desc_ring) =
4592                        BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4593                                        sizeof(struct eth_rx_bd) * NUM_RX_BD);
4594                if (!bnx2x_fp(bp, index, rx_desc_ring))
4595                        goto alloc_mem_err;
4596
4597                /* Seed all CQEs by 1s */
4598                bnx2x_fp(bp, index, rx_comp_ring) =
4599                        BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4600                                         sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4601                if (!bnx2x_fp(bp, index, rx_comp_ring))
4602                        goto alloc_mem_err;
4603
4604                /* SGE ring */
4605                bnx2x_fp(bp, index, rx_page_ring) =
4606                        kzmalloc((NUM_RX_SGE) * (sizeof(struct sw_rx_page)),
4607                                 MEM_WAIT);
4608                if (!bnx2x_fp(bp, index, rx_page_ring))
4609                        goto alloc_mem_err;
4610                bnx2x_fp(bp, index, rx_sge_ring) =
4611                        BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4612                                        BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4613                if (!bnx2x_fp(bp, index, rx_sge_ring))
4614                        goto alloc_mem_err;
4615                /* RX BD ring */
4616                bnx2x_set_next_page_rx_bd(fp);
4617
4618                /* CQ ring */
4619                bnx2x_set_next_page_rx_cq(fp);
4620
4621                /* BDs */
4622                ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4623                if (ring_size < rx_ring_size)
4624                        goto alloc_mem_err;
4625        }
4626
4627        return 0;
4628
4629/* handles low memory cases */
4630alloc_mem_err:
4631        BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4632                                                index, ring_size);
4633        /* The FW will drop all packets if the queue is not big enough;
4634         * in that case we disable the queue.
4635         * The minimum size is different for OOO, TPA and non-TPA queues.
4636         */
4637        if (ring_size < (fp->disable_tpa ?
4638                                MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4639                        /* release memory allocated for this queue */
4640                        bnx2x_free_fp_mem_at(bp, index);
4641                        return -ENOMEM;
4642        }
4643        return 0;
4644}
4645
4646static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4647{
4648        if (!NO_FCOE(bp))
4649                /* FCoE */
4650                if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4651                        /* we will fail the load process instead of
4652                         * marking NO_FCOE_FLAG
4653                         */
4654                        return -ENOMEM;
4655
4656        return 0;
4657}
4658
4659static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4660{
4661        int i;
4662
4663        /* 1. Allocate FP for leading - fatal if error
4664         * 2. Allocate RSS - fix number of queues if error
4665         */
4666
4667        /* leading */
4668        if (bnx2x_alloc_fp_mem_at(bp, 0))
4669                return -ENOMEM;
4670
4671        /* RSS */
4672        for_each_nondefault_eth_queue(bp, i)
4673                if (bnx2x_alloc_fp_mem_at(bp, i))
4674                        break;
4675
4676        /* handle memory failures */
4677        if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4678                int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4679
4680                warn_on(delta < 0);
4681                bnx2x_shrink_eth_fp(bp, delta);
4682                if (CNIC_SUPPORT(bp))
4683                        /* move non-eth FPs next to the last eth FP;
4684                         * must be done in this order:
4685                         * FCOE_IDX < FWD_IDX < OOO_IDX
4686                         */
4687
4688                        /* move the FCoE fp even if NO_FCOE_FLAG is on */
4689                        bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4690                bp->num_ethernet_queues -= delta;
4691                bp->num_queues = bp->num_ethernet_queues +
4692                                 bp->num_cnic_queues;
4693                BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4694                          bp->num_queues + delta, bp->num_queues);
4695        }
4696
4697        return 0;
4698}
4699
4700void bnx2x_free_mem_bp(struct bnx2x *bp)
4701{
4702        int i;
4703
4704        for (i = 0; i < bp->fp_array_size; i++)
4705                kfree(bp->fp[i].tpa_info);
4706        kfree(bp->fp);
4707        kfree(bp->sp_objs);
4708        kfree(bp->fp_stats);
4709        kfree(bp->bnx2x_txq);
4710        kfree(bp->msix_table);
4711        kfree(bp->ilt);
4712}
4713
4714int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4715{
4716        struct bnx2x_fastpath *fp;
4717        struct msix_entry *tbl;
4718        struct bnx2x_ilt *ilt;
4719        int msix_table_size = 0;
4720        int fp_array_size, txq_array_size;
4721        int i;
4722
4723        /*
4724         * The biggest MSI-X table we might need is the maximum number of
4725         * fast-path IGU SBs plus the default SB (for the PF only).
4726         */
4727        msix_table_size = bp->igu_sb_cnt;
4728        if (IS_PF(bp))
4729                msix_table_size++;
4730        BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4731
4732        /* fp array: RSS plus CNIC related L2 queues */
4733        fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4734        bp->fp_array_size = fp_array_size;
4735        BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4736
4737        fp = kzmalloc((bp->fp_array_size) * (sizeof(*fp)), MEM_WAIT);
4738        if (!fp)
4739                goto alloc_err;
4740        for (i = 0; i < bp->fp_array_size; i++) {
4741                fp[i].tpa_info =
4742                        kzmalloc((ETH_MAX_AGGREGATION_QUEUES_E1H_E2) * (sizeof(struct bnx2x_agg_info)),
4743                                 MEM_WAIT);
4744                if (!(fp[i].tpa_info))
4745                        goto alloc_err;
4746        }
4747
4748        bp->fp = fp;
4749
4750        /* allocate sp objs */
4751        bp->sp_objs = kzmalloc((bp->fp_array_size) * (sizeof(struct bnx2x_sp_objs)),
4752                               MEM_WAIT);
4753        if (!bp->sp_objs)
4754                goto alloc_err;
4755
4756        /* allocate fp_stats */
4757        bp->fp_stats = kzmalloc((bp->fp_array_size) * (sizeof(struct bnx2x_fp_stats)),
4758                                MEM_WAIT);
4759        if (!bp->fp_stats)
4760                goto alloc_err;
4761
4762        /* Allocate memory for the transmission queues array */
4763        txq_array_size =
4764                BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
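        /* i.e. one txdata slot per (RSS queue, CoS) pair, plus one slot for
         * the CNIC L2 queue when CNIC support is present.
         */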
4765        BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4766
4767        bp->bnx2x_txq = kzmalloc((txq_array_size) * (sizeof(struct bnx2x_fp_txdata)),
4768                                 MEM_WAIT);
4769        if (!bp->bnx2x_txq)
4770                goto alloc_err;
4771
4772        /* AKAROS_PORT: we probably won't use this table */
4773        /* msix table */
4774        tbl = kzmalloc((msix_table_size) * (sizeof(*tbl)), MEM_WAIT);
4775        if (!tbl)
4776                goto alloc_err;
4777        bp->msix_table = tbl;
4778
4779        /* ilt */
4780        ilt = kzmalloc(sizeof(*ilt), MEM_WAIT);
4781        if (!ilt)
4782                goto alloc_err;
4783        bp->ilt = ilt;
4784
4785        return 0;
4786alloc_err:
4787        bnx2x_free_mem_bp(bp);
4788        return -ENOMEM;
4789}
4790
4791int bnx2x_reload_if_running(struct ether *dev)
4792{
4793panic("Not implemented");
4794#if 0 // AKAROS_PORT
4795        struct bnx2x *bp = netdev_priv(dev);
4796
4797        if (unlikely(!netif_running(dev)))
4798                return 0;
4799
4800        bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4801        return bnx2x_nic_load(bp, LOAD_NORMAL);
4802#endif
4803}
4804
4805int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4806{
4807        uint32_t sel_phy_idx = 0;
4808        if (bp->link_params.num_phys <= 1)
4809                return INT_PHY;
4810
4811        if (bp->link_vars.link_up) {
4812                sel_phy_idx = EXT_PHY1;
4813                /* In case the link is SERDES, check whether EXT_PHY2 is the active one */
4814                if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4815                    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4816                        sel_phy_idx = EXT_PHY2;
4817        } else {
4818
4819                switch (bnx2x_phy_selection(&bp->link_params)) {
4820                case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4821                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4822                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4823                       sel_phy_idx = EXT_PHY1;
4824                       break;
4825                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4826                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4827                       sel_phy_idx = EXT_PHY2;
4828                       break;
4829                }
4830        }
4831
4832        return sel_phy_idx;
4833}
4834
4835int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4836{
4837        uint32_t sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4838        /*
4839         * The selected active PHY index is always the post-swap one (when PHY
4840         * swapping is enabled), so in that case we need to reverse the index
4841         * to get the matching link configuration.
4842         */
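        /* e.g. with swapping enabled and EXT_PHY2 currently active, the
         * corresponding link configuration lives at the EXT_PHY1 index.
         */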
4843
4844        if (bp->link_params.multi_phy_config &
4845            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4846                if (sel_phy_idx == EXT_PHY1)
4847                        sel_phy_idx = EXT_PHY2;
4848                else if (sel_phy_idx == EXT_PHY2)
4849                        sel_phy_idx = EXT_PHY1;
4850        }
4851        return LINK_CONFIG_IDX(sel_phy_idx);
4852}
4853
4854#ifdef NETDEV_FCOE_WWNN
4855int bnx2x_fcoe_get_wwn(struct ether *dev, uint64_t *wwn, int type)
4856{
4857        struct bnx2x *bp = netdev_priv(dev);
4858        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4859
4860        switch (type) {
4861        case NETDEV_FCOE_WWNN:
4862                *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4863                                cp->fcoe_wwn_node_name_lo);
4864                break;
4865        case NETDEV_FCOE_WWPN:
4866                *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4867                                cp->fcoe_wwn_port_name_lo);
4868                break;
4869        default:
4870                BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4871                return -EINVAL;
4872        }
4873
4874        return 0;
4875}
4876#endif
4877
4878/* called with rtnl_lock */
4879int bnx2x_change_mtu(struct ether *dev, int new_mtu)
4880{
4881        struct bnx2x *bp = netdev_priv(dev);
4882
4883        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4884                BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4885                return -EAGAIN;
4886        }
4887
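        /* Reject MTUs above the jumbo frame limit, and MTUs so small that
         * the frame (MTU plus Ethernet header) would fall below the minimum
         * packet size.
         */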
4888        if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4889            ((new_mtu + ETHERHDRSIZE) < ETH_MIN_PACKET_SIZE)) {
4890                BNX2X_ERR("Can't support requested MTU size\n");
4891                return -EINVAL;
4892        }
4893
4894        /* This does not race with packet allocation
4895         * because the actual alloc size is
4896         * only updated as part of load
4897         */
4898        dev->mtu = new_mtu;
4899
4900        return bnx2x_reload_if_running(dev);
4901}
4902
4903netdev_features_t bnx2x_fix_features(struct ether *dev,
4904                                     netdev_features_t features)
4905{
4906        struct bnx2x *bp = netdev_priv(dev);
4907
4908        /* TPA requires Rx CSUM offloading */
4909        if (!(features & NETIF_F_RXCSUM)) {
4910                features &= ~NETIF_F_LRO;
4911                features &= ~NETIF_F_GRO;
4912        }
4913
4914        /* Note: do not disable SW GRO in kernel when HW GRO is off */
4915        if (bp->disable_tpa)
4916                features &= ~NETIF_F_LRO;
4917
4918        return features;
4919}
4920
4921int bnx2x_set_features(struct ether *dev, netdev_features_t features)
4922{
4923        struct bnx2x *bp = netdev_priv(dev);
4924        uint32_t flags = bp->flags;
4925        uint32_t changes;
4926        bool bnx2x_reload = false;
4927
4928        if (features & NETIF_F_LRO)
4929                flags |= TPA_ENABLE_FLAG;
4930        else
4931                flags &= ~TPA_ENABLE_FLAG;
4932
4933        if (features & NETIF_F_GRO)
4934                flags |= GRO_ENABLE_FLAG;
4935        else
4936                flags &= ~GRO_ENABLE_FLAG;
4937
4938        if (features & NETIF_F_LOOPBACK) {
4939                if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4940                        bp->link_params.loopback_mode = LOOPBACK_BMAC;
4941                        bnx2x_reload = true;
4942                }
4943        } else {
4944                if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4945                        bp->link_params.loopback_mode = LOOPBACK_NONE;
4946                        bnx2x_reload = true;
4947                }
4948        }
4949
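        /* XOR against the current flags to find what actually changed; the
         * checks below then ignore GRO-only changes that do not require a
         * reload.
         */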
4950        changes = flags ^ bp->flags;
4951
4952        /* if GRO is changed while LRO is enabled, don't force a reload */
4953        if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4954                changes &= ~GRO_ENABLE_FLAG;
4955
4956        /* if GRO is changed while HW TPA is off, don't force a reload */
4957        if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
4958                changes &= ~GRO_ENABLE_FLAG;
4959
4960        if (changes)
4961                bnx2x_reload = true;
4962
4963        bp->flags = flags;
4964
4965        if (bnx2x_reload) {
4966                if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4967                        return bnx2x_reload_if_running(dev);
4968                /* else: bnx2x_nic_load() will be called at end of recovery */
4969        }
4970
4971        return 0;
4972}
4973
4974void bnx2x_tx_timeout(struct ether *dev)
4975{
4976        struct bnx2x *bp = netdev_priv(dev);
4977
4978#ifdef BNX2X_STOP_ON_ERROR
4979        if (!bp->panic)
4980                bnx2x_panic();
4981#endif
4982
4983        /* This allows the netif to be shut down gracefully before resetting */
4984        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4985}
4986
4987int bnx2x_suspend(struct pci_device *pdev, pm_message_t state)
4988{
4989panic("Not implemented");
4990#if 0 // AKAROS_PORT
4991        struct ether *dev = pci_get_drvdata(pdev);
4992        struct bnx2x *bp;
4993
4994        if (!dev) {
4995                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4996                return -ENODEV;
4997        }
4998        bp = netdev_priv(dev);
4999
5000        rtnl_lock();
5001
5002        pci_save_state(pdev);
5003
5004        if (!netif_running(dev)) {
5005                rtnl_unlock();
5006                return 0;
5007        }
5008
5009        netif_device_detach(dev);
5010
5011        bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5012
5013        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5014
5015        rtnl_unlock();
5016
5017        return 0;
5018#endif
5019}
5020
5021int bnx2x_resume(struct pci_device *pdev)
5022{
5023panic("Not implemented");
5024#if 0 // AKAROS_PORT
5025        struct ether *dev = pci_get_drvdata(pdev);
5026        struct bnx2x *bp;
5027        int rc;
5028
5029        if (!dev) {
5030                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5031                return -ENODEV;
5032        }
5033        bp = netdev_priv(dev);
5034
5035        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5036                BNX2X_ERR("Handling parity error recovery. Try again later\n");
5037                return -EAGAIN;
5038        }
5039
5040        rtnl_lock();
5041
5042        pci_restore_state(pdev);
5043
5044        if (!netif_running(dev)) {
5045                rtnl_unlock();
5046                return 0;
5047        }
5048
5049        bnx2x_set_power_state(bp, PCI_D0);
5050        netif_device_attach(dev);
5051
5052        rc = bnx2x_nic_load(bp, LOAD_OPEN);
5053
5054        rtnl_unlock();
5055
5056        return rc;
5057#endif
5058}
5059
5060void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5061                              uint32_t cid)
5062{
5063        if (!cxt) {
5064                BNX2X_ERR("bad context pointer %p\n", cxt);
5065                return;
5066        }
5067
5068        /* ustorm cxt validation */
5069        cxt->ustorm_ag_context.cdu_usage =
5070                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5071                        CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5072        /* xcontext validation */
5073        cxt->xstorm_ag_context.cdu_reserved =
5074                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5075                        CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5076}
5077
5078static void storm_memset_hc_timeout(struct bnx2x *bp, uint8_t port,
5079                                    uint8_t fw_sb_id, uint8_t sb_index,
5080                                    uint8_t ticks)
5081{
5082        uint32_t addr = BAR_CSTRORM_INTMEM +
5083                   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5084        REG_WR8(bp, addr, ticks);
5085        DP(NETIF_MSG_IFUP,
5086           "port %x fw_sb_id %d sb_index %d ticks %d\n",
5087           port, fw_sb_id, sb_index, ticks);
5088}
5089
5090static void storm_memset_hc_disable(struct bnx2x *bp, uint8_t port,
5091                                    uint16_t fw_sb_id, uint8_t sb_index,
5092                                    uint8_t disable)
5093{
5094        uint32_t enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5095        uint32_t addr = BAR_CSTRORM_INTMEM +
5096                   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5097        uint8_t flags = REG_RD8(bp, addr);
5098        /* clear and set */
5099        flags &= ~HC_INDEX_DATA_HC_ENABLED;
5100        flags |= enable_flag;
5101        REG_WR8(bp, addr, flags);
5102        DP(NETIF_MSG_IFUP,
5103           "port %x fw_sb_id %d sb_index %d disable %d\n",
5104           port, fw_sb_id, sb_index, disable);
5105}
5106
5107void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, uint8_t fw_sb_id,
5108                                    uint8_t sb_index, uint8_t disable,
5109                                    uint16_t usec)
5110{
5111        int port = BP_PORT(bp);
5112        uint8_t ticks = usec / BNX2X_BTR;
5113
5114        storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5115
5116        disable = disable ? 1 : (usec ? 0 : 1);
5117        storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5118}
5119
5120void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5121                            uint32_t verbose)
5122{
5123        cmb();
5124        set_bit(flag, &bp->sp_rtnl_state);
5125        cmb();
5126        DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5127           flag);
5128        schedule_delayed_work(&bp->sp_rtnl_task, 0);
5129}
5130EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
5131