akaros/kern/drivers/net/bnx2x/bnx2x_cmn.c
   1/* bnx2x_cmn.c: Broadcom Everest network driver.
   2 *
   3 * Copyright (c) 2007-2013 Broadcom Corporation
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation.
   8 *
   9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  10 * Written by: Eliezer Tamir
  11 * Based on code from Michael Chan's bnx2 driver
  12 * UDP CSUM errata workaround by Arik Gendelman
  13 * Slowpath and fastpath rework by Vladislav Zolotarov
  14 * Statistics and Link management by Yitchak Gertner
  15 *
  16 */
  17
  18#include <linux_compat.h>
  19
  20#include "bnx2x_cmn.h"
  21#include "bnx2x_init.h"
  22#include "bnx2x_sp.h"
  23
  24static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
  25static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
  26static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
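     /* AKAROS_PORT: bnx2x_poll runs as a kernel-message handler (kicked from
      * the MSI-X ISR below via send_kernel_message) rather than as a Linux
      * NAPI poll callback. */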
  27static void bnx2x_poll(uint32_t srcid, long a0, long a1, long a2);
  28
  29static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
  30{
  31        int i;
  32
  33        /* Add NAPI objects */
  34        for_each_rx_queue_cnic(bp, i) {
  35                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  36                               bnx2x_poll, NAPI_POLL_WEIGHT);
  37                napi_hash_add(&bnx2x_fp(bp, i, napi));
  38        }
  39}
  40
  41static void bnx2x_add_all_napi(struct bnx2x *bp)
  42{
  43        int i;
  44
  45        /* Add NAPI objects */
  46        for_each_eth_queue(bp, i) {
  47                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
  48                               bnx2x_poll, NAPI_POLL_WEIGHT);
  49                napi_hash_add(&bnx2x_fp(bp, i, napi));
  50        }
  51}
  52
  53static int bnx2x_calc_num_queues(struct bnx2x *bp)
  54{
   55        /* The default is min(8, num_cores) in Linux; we'll set it elsewhere. */
  56        int nq = bnx2x_num_queues ? : 8;
  57
  58        /* Reduce memory usage in kdump environment by using only one queue */
  59        if (is_kdump_kernel())
  60                nq = 1;
  61
  62        nq = CLAMP(nq, 1, BNX2X_MAX_QUEUES(bp));
  63        return nq;
  64}
  65
  66/**
  67 * bnx2x_move_fp - move content of the fastpath structure.
  68 *
  69 * @bp:         driver handle
  70 * @from:       source FP index
  71 * @to:         destination FP index
  72 *
   73 * Makes sure the contents of bp->fp[to].napi are kept
   74 * intact. This is done by first copying the napi struct from
   75 * the target to the source, and then memcpy'ing the entire
   76 * source onto the target. Txdata pointers and related content
   77 * are updated as well.
  78 */
  79static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
  80{
  81        struct bnx2x_fastpath *from_fp = &bp->fp[from];
  82        struct bnx2x_fastpath *to_fp = &bp->fp[to];
  83        struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
  84        struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
  85        struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
  86        struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
  87        int old_max_eth_txqs, new_max_eth_txqs;
  88        int old_txdata_index = 0, new_txdata_index = 0;
  89        struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
  90
  91        /* Copy the NAPI object as it has been already initialized */
  92        from_fp->napi = to_fp->napi;
  93
  94        /* Move bnx2x_fastpath contents */
  95        memcpy(to_fp, from_fp, sizeof(*to_fp));
  96        to_fp->index = to;
  97
  98        /* Retain the tpa_info of the original `to' version as we don't want
  99         * 2 FPs to contain the same tpa_info pointer.
 100         */
 101        to_fp->tpa_info = old_tpa_info;
 102
 103        /* move sp_objs contents as well, as their indices match fp ones */
 104        memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
 105
 106        /* move fp_stats contents as well, as their indices match fp ones */
 107        memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
 108
 109        /* Update txdata pointers in fp and move txdata content accordingly:
 110         * Each fp consumes 'max_cos' txdata structures, so the index should be
 111         * decremented by max_cos x delta.
 112         */
 113
 114        old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
 115        new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
 116                                (bp)->max_cos;
 117        if (from == FCOE_IDX(bp)) {
 118                old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
 119                new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
 120        }
 121
 122        memcpy(&bp->bnx2x_txq[new_txdata_index],
 123               &bp->bnx2x_txq[old_txdata_index],
 124               sizeof(struct bnx2x_fp_txdata));
 125        to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
 126}
 127
 128/**
 129 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 130 *
 131 * @bp:        driver handle
 132 * @buf:       character buffer to fill with the fw name
 133 * @buf_len:   length of the above buffer
 134 *
 135 */
 136void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
 137{
 138        if (IS_PF(bp)) {
 139                uint8_t phy_fw_ver[PHY_FW_VER_LEN];
 140
 141                phy_fw_ver[0] = '\0';
 142                bnx2x_get_ext_phy_fw_version(&bp->link_params,
 143                                             phy_fw_ver, PHY_FW_VER_LEN);
 144                strlcpy(buf, bp->fw_ver, buf_len);
  145        snprintf(buf + strlen(bp->fw_ver), buf_len - strlen(bp->fw_ver),
 146                         "bc %d.%d.%d%s%s",
 147                         (bp->common.bc_ver & 0xff0000) >> 16,
 148                         (bp->common.bc_ver & 0xff00) >> 8,
 149                         (bp->common.bc_ver & 0xff),
 150                         ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
 151        } else {
 152                bnx2x_vf_fill_fw_str(bp, buf, buf_len);
 153        }
 154}
 155
 156/**
 157 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 158 *
 159 * @bp: driver handle
 160 * @delta:      number of eth queues which were not allocated
 161 */
 162static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
 163{
 164        int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
 165
  166        /* Queue pointers cannot be re-set on a per-fp basis, as moving a
  167         * pointer backward along the array could cause memory to be overwritten
  168         */
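             /* The txdata array is laid out cos-major: entry (cos, queue) lives
              * at cos * num_eth_queues + queue, so shrinking the queue count
              * means re-packing every cos > 0 row into its new, smaller stride. */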
 169        for (cos = 1; cos < bp->max_cos; cos++) {
 170                for (i = 0; i < old_eth_num - delta; i++) {
 171                        struct bnx2x_fastpath *fp = &bp->fp[i];
 172                        int new_idx = cos * (old_eth_num - delta) + i;
 173
 174                        memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
 175                               sizeof(struct bnx2x_fp_txdata));
 176                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
 177                }
 178        }
 179}
 180
 181int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 182
  183/* free the buffer (an Akaros block, not an skb) in the packet ring at pos idx
  184 * return idx of last bd freed
  185 */
 186static uint16_t bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 187                             uint16_t idx, unsigned int *pkts_compl,
 188                             unsigned int *bytes_compl)
 189{
 190        struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
 191        struct eth_tx_start_bd *tx_start_bd;
 192        struct eth_tx_bd *tx_data_bd;
 193        struct block *block = tx_buf->block;
 194        uint16_t bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 195        int nbd;
 196        uint16_t split_bd_len = 0;
 197
 198        /* prefetch skb end pointer to speedup dev_kfree_skb() */
 199        //prefetch(&skb->end); // AKAROS_PORT
 200
 201        DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->block %p\n",
 202           txdata->txq_index, idx, tx_buf, block);
 203
 204        tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
 205
 206        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 207#ifdef BNX2X_STOP_ON_ERROR
 208        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
 209                BNX2X_ERR("BAD nbd!\n");
 210                bnx2x_panic();
 211        }
 212#endif
 213        new_cons = nbd + tx_buf->first_bd;
 214
 215        /* Get the next bd */
 216        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 217
 218        /* Skip a parse bd... */
 219        --nbd;
 220        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 221
 222        if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
 223                /* Skip second parse bd... */
 224                --nbd;
 225                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 226        }
 227
 228        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
 229        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 230                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 231                split_bd_len = BD_UNMAP_LEN(tx_data_bd);
 232                --nbd;
 233                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 234        }
 235
 236        /* unmap first bd */
 237        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 238                         BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
 239                         DMA_TO_DEVICE);
 240
 241        /* now free frags */
 242        while (nbd > 0) {
 243
 244                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 245                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
 246                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 247                if (--nbd)
 248                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 249        }
 250
 251        /* release block */
 252        warn_on(!block);
 253        if (likely(block)) {
 254                (*pkts_compl)++;
 255                (*bytes_compl) += BLEN(block);
 256        }
 257
 258        freeb(block);
 259        tx_buf->first_bd = 0;
 260        tx_buf->block = NULL;
 261
 262        return new_cons;
 263}
 264
 265int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 266{
 267        uint16_t hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
 268        unsigned int pkts_compl = 0, bytes_compl = 0;
 269
 270#ifdef BNX2X_STOP_ON_ERROR
 271        if (unlikely(bp->panic))
 272                return -1;
 273#endif
 274
 275        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
 276        sw_cons = txdata->tx_pkt_cons;
 277
 278        while (sw_cons != hw_cons) {
 279                uint16_t pkt_cons;
 280
 281                pkt_cons = TX_BD(sw_cons);
 282
 283                DP(NETIF_MSG_TX_DONE,
 284                   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
 285                   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 286
 287                bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
 288                                            &pkts_compl, &bytes_compl);
 289
 290                sw_cons++;
 291        }
 292
 293        txdata->tx_pkt_cons = sw_cons;
 294        txdata->tx_bd_cons = bd_cons;
 295
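             /* AKAROS_PORT: rather than waking a netif TX queue, kick the poke
              * tracker so the transmit routine runs again now that descriptors
              * were freed (the poker callback is assumed to be installed by the
              * Akaros-side device code, outside this file). */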
 296        poke(&txdata->poker, txdata);
 297        return 0;
 298}
 299
 300static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
 301                                             uint16_t idx)
 302{
 303        uint16_t last_max = fp->last_max_sge;
 304
 305        if (SUB_S16(idx, last_max) > 0)
 306                fp->last_max_sge = idx;
 307}
 308
 309static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 310                                         uint16_t sge_len,
 311                                         struct eth_end_agg_rx_cqe *cqe)
 312{
 313        struct bnx2x *bp = fp->bp;
 314        uint16_t last_max, last_elem, first_elem;
 315        uint16_t delta = 0;
 316        uint16_t i;
 317
 318        if (!sge_len)
 319                return;
 320
 321        /* First mark all used pages */
 322        for (i = 0; i < sge_len; i++)
 323                BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 324                        RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
 325
 326        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 327           sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 328
 329        /* Here we assume that the last SGE index is the biggest */
 330        prefetch((void *)(fp->sge_mask));
 331        bnx2x_update_last_max_sge(fp,
 332                le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 333
 334        last_max = RX_SGE(fp->last_max_sge);
 335        last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
 336        first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
 337
 338        /* If ring is not full */
 339        if (last_elem + 1 != first_elem)
 340                last_elem++;
 341
 342        /* Now update the prod */
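             /* Re-arm each fully-consumed 64-bit mask element (set it back to all
              * ones) and advance the producer by BIT_VEC64_ELEM_SZ SGEs per such
              * element, stopping at the first element that is still in use. */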
 343        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
 344                if (likely(fp->sge_mask[i]))
 345                        break;
 346
 347                fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
 348                delta += BIT_VEC64_ELEM_SZ;
 349        }
 350
 351        if (delta > 0) {
 352                fp->rx_sge_prod += delta;
 353                /* clear page-end entries */
 354                bnx2x_clear_sge_mask_next_elems(fp);
 355        }
 356
 357        DP(NETIF_MSG_RX_STATUS,
 358           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
 359           fp->last_max_sge, fp->rx_sge_prod);
 360}
 361
 362/* Get Toeplitz hash value in the skb using the value from the
 363 * CQE (calculated by HW).
 364 */
 365static uint32_t bnx2x_get_rxhash(const struct bnx2x *bp,
 366                            const struct eth_fast_path_rx_cqe *cqe,
 367                            enum pkt_hash_types *rxhash_type)
 368{
  369        panic("Not implemented");
 370#if 0 // AKAROS_PORT
 371        /* Get Toeplitz hash from CQE */
 372        if ((bp->dev->feat & NETIF_F_RXHASH) &&
 373            (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
 374                enum eth_rss_hash_type htype;
 375
 376                htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
 377                *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
 378                                (htype == TCP_IPV6_HASH_TYPE)) ?
 379                               PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
 380
 381                return le32_to_cpu(cqe->rss_hash_result);
 382        }
 383        *rxhash_type = PKT_HASH_TYPE_NONE;
 384        return 0;
 385#endif
 386}
 387
 388static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, uint16_t queue,
 389                            uint16_t cons, uint16_t prod,
 390                            struct eth_fast_path_rx_cqe *cqe)
 391{
  392        panic("Not implemented");
 393#if 0 // AKAROS_PORT
 394        struct bnx2x *bp = fp->bp;
 395        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 396        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 397        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 398        dma_addr_t mapping;
 399        struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 400        struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 401
 402        /* print error if current state != stop */
 403        if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 404                BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 405
 406        /* Try to map an empty data buffer from the aggregation info  */
 407        mapping = dma_map_single(&bp->pdev->dev,
 408                                 first_buf->data + NET_SKB_PAD,
 409                                 fp->rx_buf_size, DMA_FROM_DEVICE);
 410        /*
 411         *  ...if it fails - move the skb from the consumer to the producer
 412         *  and set the current aggregation state as ERROR to drop it
 413         *  when TPA_STOP arrives.
 414         */
 415
 416        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 417                /* Move the BD from the consumer to the producer */
 418                bnx2x_reuse_rx_data(fp, cons, prod);
 419                tpa_info->tpa_state = BNX2X_TPA_ERROR;
 420                return;
 421        }
 422
 423        /* move empty data from pool to prod */
 424        prod_rx_buf->data = first_buf->data;
 425        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 426        /* point prod_bd to new data */
 427        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 428        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 429
 430        /* move partial skb from cons to pool (don't unmap yet) */
 431        *first_buf = *cons_rx_buf;
 432
 433        /* mark bin state as START */
 434        tpa_info->parsing_flags =
 435                le16_to_cpu(cqe->pars_flags.flags);
 436        tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
 437        tpa_info->tpa_state = BNX2X_TPA_START;
 438        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 439        tpa_info->placement_offset = cqe->placement_offset;
 440        tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
 441        if (fp->mode == TPA_MODE_GRO) {
 442                uint16_t gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
 443                tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
 444                tpa_info->gro_size = gro_size;
 445        }
 446
 447#ifdef BNX2X_STOP_ON_ERROR
 448        fp->tpa_queue_used |= (1 << queue);
 449        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
 450           fp->tpa_queue_used);
 451#endif
 452#endif
 453}
 454
 455/* Timestamp option length allowed for TPA aggregation:
 456 *
 457 *              nop nop kind length echo val
 458 */
 459#define TPA_TSTAMP_OPT_LEN      12
 460/**
 461 * bnx2x_set_gro_params - compute GRO values
 462 *
 463 * @skb:                packet skb
 464 * @parsing_flags:      parsing flags from the START CQE
 465 * @len_on_bd:          total length of the first packet for the
 466 *                      aggregation.
 467 * @pkt_len:            length of all segments
 468 *
  469 * Approximates the MSS for this aggregation using its first
  470 * packet, and computes the number of aggregated segments and the
  471 * gso_type.
 472 */
 473static void bnx2x_set_gro_params(struct sk_buff *skb, uint16_t parsing_flags,
 474                                 uint16_t len_on_bd, unsigned int pkt_len,
 475                                 uint16_t num_of_coalesced_segs)
 476{
  477        panic("Not implemented");
 478#if 0 // AKAROS_PORT
 479        /* TPA aggregation won't have either IP options or TCP options
 480         * other than timestamp or IPv6 extension headers.
 481         */
 482        uint16_t hdrs_len = ETHERHDRSIZE + sizeof(struct tcphdr);
 483
 484        if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
 485            PRS_FLAG_OVERETH_IPV6) {
 486                hdrs_len += sizeof(struct ipv6hdr);
 487                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 488        } else {
 489                hdrs_len += sizeof(struct iphdr);
 490                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 491        }
 492
  493        /* Check if there was a TCP timestamp; if there is, it will
  494         * always be 12 bytes long: nop nop kind length echo val.
 495         *
 496         * Otherwise FW would close the aggregation.
 497         */
 498        if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 499                hdrs_len += TPA_TSTAMP_OPT_LEN;
 500
 501        skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
 502
 503        /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
 504         * to skb_shinfo(skb)->gso_segs
 505         */
 506        NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
 507#endif
 508}
 509
 510static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 511                              uint16_t index, gfp_t gfp_mask)
 512{
 513        /* AKAROS_PORT: our get_cont_pages returns KVAs, not struct page * */
 514        struct page *page = kva2page(get_cont_pages(PAGES_PER_SGE_SHIFT, gfp_mask));
 515        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
 516        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
 517        dma_addr_t mapping;
 518
 519        if (unlikely(page == NULL)) {
 520                BNX2X_ERR("Can't alloc sge\n");
 521                return -ENOMEM;
 522        }
 523
 524        mapping = dma_map_page(&bp->pdev->dev, page, 0,
 525                               SGE_PAGES, DMA_FROM_DEVICE);
 526        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 527                free_cont_pages(page2kva(page), PAGES_PER_SGE_SHIFT);
 528                BNX2X_ERR("Can't map sge\n");
 529                return -ENOMEM;
 530        }
 531
 532        sw_buf->page = page;
 533        dma_unmap_addr_set(sw_buf, mapping, mapping);
 534
 535        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 536        sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 537
 538        return 0;
 539}
 540
 541static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 542                               struct bnx2x_agg_info *tpa_info,
 543                               uint16_t pages,
 544                               struct sk_buff *skb,
 545                               struct eth_end_agg_rx_cqe *cqe,
 546                               uint16_t cqe_idx)
 547{
  548        panic("Not implemented");
 549#if 0 // AKAROS_PORT
 550        struct sw_rx_page *rx_pg, old_rx_pg;
 551        uint32_t i, frag_len, frag_size;
 552        int err, j, frag_id = 0;
 553        uint16_t len_on_bd = tpa_info->len_on_bd;
 554        uint16_t full_page = 0, gro_size = 0;
 555
 556        frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 557
 558        if (fp->mode == TPA_MODE_GRO) {
 559                gro_size = tpa_info->gro_size;
 560                full_page = tpa_info->full_page;
 561        }
 562
 563        /* This is needed in order to enable forwarding support */
 564        if (frag_size)
 565                bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
 566                                     le16_to_cpu(cqe->pkt_len),
 567                                     le16_to_cpu(cqe->num_of_coalesced_segs));
 568
 569#ifdef BNX2X_STOP_ON_ERROR
 570        if (pages > MIN_T(uint32_t, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
 571                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 572                          pages, cqe_idx);
 573                BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 574                bnx2x_panic();
 575                return -EINVAL;
 576        }
 577#endif
 578
 579        /* Run through the SGL and compose the fragmented skb */
 580        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
 581                uint16_t sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 582
 583                /* FW gives the indices of the SGE as if the ring is an array
 584                   (meaning that "next" element will consume 2 indices) */
 585                if (fp->mode == TPA_MODE_GRO)
 586                        frag_len = MIN_T(uint32_t, frag_size,
 587                                         (uint32_t)full_page);
 588                else /* LRO */
 589                        frag_len = MIN_T(uint32_t, frag_size,
 590                                         (uint32_t)SGE_PAGES);
 591
 592                rx_pg = &fp->rx_page_ring[sge_idx];
 593                old_rx_pg = *rx_pg;
 594
 595                /* If we fail to allocate a substitute page, we simply stop
 596                   where we are and drop the whole packet */
 597                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, 0);
 598                if (unlikely(err)) {
 599                        bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 600                        return err;
 601                }
 602
 603                /* Unmap the page as we're going to pass it to the stack */
 604                dma_unmap_page(&bp->pdev->dev,
 605                               dma_unmap_addr(&old_rx_pg, mapping),
 606                               SGE_PAGES, DMA_FROM_DEVICE);
 607                /* Add one frag and update the appropriate fields in the skb */
 608                if (fp->mode == TPA_MODE_LRO)
 609                        skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
 610                else { /* GRO */
 611                        int rem;
 612                        int offset = 0;
 613                        for (rem = frag_len; rem > 0; rem -= gro_size) {
 614                                int len = rem > gro_size ? gro_size : rem;
 615                                skb_fill_page_desc(skb, frag_id++,
 616                                                   old_rx_pg.page, offset, len);
 617                                /* TODO: if this is pinning for I/O, we need to change to a
 618                                 * device-ownership / mmap model. */
 619                                if (offset)
 620                                        page_incref(old_rx_pg.page);
 621                                offset += len;
 622                        }
 623                }
 624
 625                skb->data_len += frag_len;
 626                skb->truesize += SGE_PAGES;
 627                skb->len += frag_len;
 628
 629                frag_size -= frag_len;
 630        }
 631
 632        return 0;
 633#endif
 634}
 635
 636static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 637{
 638        if (fp->rx_frag_size)
 639                page_decref(kva2page(data));
 640        else
 641                kfree(data);
 642}
 643
 644static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
 645{
 646        if (fp->rx_frag_size) {
  647                /* MEM_WAIT (GFP_KERNEL) allocations are used only during initialization */
 648                if (unlikely(gfp_mask & MEM_WAIT))
 649                        return (void *)kpage_alloc_addr();
 650
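                     /* AKAROS_PORT: no netdev frag allocator here; a full kernel
                      * page is handed back in the non-blocking case as well. */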
 651#if 0 // AKAROS_PORT
 652                return netdev_alloc_frag(fp->rx_frag_size);
 653#else
 654                return (void *)kpage_alloc_addr();
 655#endif
 656        }
 657
 658        return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
 659}
 660
 661#ifdef CONFIG_INET
 662static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
 663{
 664        const struct iphdr *iph = ip_hdr(skb);
 665        struct tcphdr *th;
 666
 667        skb_set_transport_header(skb, sizeof(struct iphdr));
 668        th = tcp_hdr(skb);
 669
 670        th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
 671                                  iph->saddr, iph->daddr, 0);
 672}
 673
 674static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
 675{
 676        struct ipv6hdr *iph = ipv6_hdr(skb);
 677        struct tcphdr *th;
 678
 679        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 680        th = tcp_hdr(skb);
 681
 682        th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
 683                                  &iph->saddr, &iph->daddr, 0);
 684}
 685
 686static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
 687                            void (*gro_func)(struct bnx2x*, struct sk_buff*))
 688{
 689        skb_set_network_header(skb, 0);
 690        gro_func(bp, skb);
 691        tcp_gro_complete(skb);
 692}
 693#endif
 694
 695static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 696                               struct sk_buff *skb)
 697{
  698        panic("Not implemented");
 699#if 0 // AKAROS_PORT
 700#ifdef CONFIG_INET
 701        if (skb_shinfo(skb)->gso_size) {
 702                switch (be16_to_cpu(skb->protocol)) {
 703                case ETH_P_IP:
 704                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
 705                        break;
 706                case ETH_P_IPV6:
 707                        bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
 708                        break;
 709                default:
 710                        BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
 711                                  be16_to_cpu(skb->protocol));
 712                }
 713        }
 714#endif
 715        skb_record_rx_queue(skb, fp->rx_queue);
 716        napi_gro_receive(&fp->napi, skb);
 717#endif
 718}
 719
 720static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 721                           struct bnx2x_agg_info *tpa_info,
 722                           uint16_t pages,
 723                           struct eth_end_agg_rx_cqe *cqe,
 724                           uint16_t cqe_idx)
 725{
  726        panic("Not implemented");
 727#if 0 // AKAROS_PORT
 728        struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 729        uint8_t pad = tpa_info->placement_offset;
 730        uint16_t len = tpa_info->len_on_bd;
 731        struct sk_buff *skb = NULL;
 732        uint8_t *new_data, *data = rx_buf->data;
 733        uint8_t old_tpa_state = tpa_info->tpa_state;
 734
 735        tpa_info->tpa_state = BNX2X_TPA_STOP;
 736
  737        /* If there was an error during the handling of the TPA_START,
  738         * drop this aggregation.
 739         */
 740        if (old_tpa_state == BNX2X_TPA_ERROR)
 741                goto drop;
 742
 743        /* Try to allocate the new data */
 744        new_data = bnx2x_frag_alloc(fp, 0);
 745        /* Unmap skb in the pool anyway, as we are going to change
 746           pool entry status to BNX2X_TPA_STOP even if new skb allocation
 747           fails. */
 748        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 749                         fp->rx_buf_size, DMA_FROM_DEVICE);
 750        if (likely(new_data))
 751                skb = build_skb(data, fp->rx_frag_size);
 752
 753        if (likely(skb)) {
 754#ifdef BNX2X_STOP_ON_ERROR
 755                if (pad + len > fp->rx_buf_size) {
 756                        BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
 757                                  pad, len, fp->rx_buf_size);
 758                        bnx2x_panic();
 759                        return;
 760                }
 761#endif
 762
 763                skb_reserve(skb, pad + NET_SKB_PAD);
 764                skb_put(skb, len);
 765                skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
 766
 767                skb->protocol = eth_type_trans(skb, bp->dev);
 768                skb->ip_summed = CHECKSUM_UNNECESSARY;
 769
 770                if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
 771                                         skb, cqe, cqe_idx)) {
 772                        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 773                                __vlan_hwaccel_put_tag(skb,
 774                                                       cpu_to_be16(ETH_P_8021Q),
 775                                                       tpa_info->vlan_tag);
 776                        bnx2x_gro_receive(bp, fp, skb);
 777                } else {
 778                        DP(NETIF_MSG_RX_STATUS,
 779                           "Failed to allocate new pages - dropping packet!\n");
 780                        dev_kfree_skb_any(skb);
 781                }
 782
 783                /* put new data in bin */
 784                rx_buf->data = new_data;
 785
 786                return;
 787        }
 788        if (new_data)
 789                bnx2x_frag_free(fp, new_data);
 790drop:
 791        /* drop the packet and keep the buffer in the bin */
 792        DP(NETIF_MSG_RX_STATUS,
 793           "Failed to allocate or map a new skb - dropping packet!\n");
 794        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
 795#endif
 796}
 797
 798static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 799                               uint16_t index, gfp_t gfp_mask)
 800{
 801        uint8_t *data;
 802        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
 803        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 804        dma_addr_t mapping;
 805
 806        data = bnx2x_frag_alloc(fp, gfp_mask);
 807        if (unlikely(data == NULL))
 808                return -ENOMEM;
 809
 810        mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
 811                                 fp->rx_buf_size,
 812                                 DMA_FROM_DEVICE);
 813        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 814                bnx2x_frag_free(fp, data);
 815                BNX2X_ERR("Can't map rx data\n");
 816                return -ENOMEM;
 817        }
 818
 819        rx_buf->data = data;
 820        dma_unmap_addr_set(rx_buf, mapping, mapping);
 821
 822        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 823        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 824
 825        return 0;
 826}
 827
 828static
 829void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
 830                                 struct bnx2x_fastpath *fp,
 831                                 struct bnx2x_eth_q_stats *qstats)
 832{
  833        panic("Not implemented");
 834#if 0 // AKAROS_PORT
 835        /* Do nothing if no L4 csum validation was done.
 836         * We do not check whether IP csum was validated. For IPv4 we assume
 837         * that if the card got as far as validating the L4 csum, it also
 838         * validated the IP csum. IPv6 has no IP csum.
 839         */
 840        if (cqe->fast_path_cqe.status_flags &
 841            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
 842                return;
 843
 844        /* If L4 validation was done, check if an error was found. */
 845
 846        if (cqe->fast_path_cqe.type_error_flags &
 847            (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
 848             ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
 849                qstats->hw_csum_err++;
 850        else
 851                skb->ip_summed = CHECKSUM_UNNECESSARY;
 852#endif
 853}
 854
 855static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 856{
 857        struct bnx2x *bp = fp->bp;
 858        uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 859        uint16_t sw_comp_cons, sw_comp_prod;
 860        int rx_pkt = 0;
 861        union eth_rx_cqe *cqe;
 862        struct eth_fast_path_rx_cqe *cqe_fp;
 863
 864        struct block *block;
 865
 866#ifdef BNX2X_STOP_ON_ERROR
 867        if (unlikely(bp->panic))
 868                return 0;
 869#endif
 870        if (budget <= 0)
 871                return rx_pkt;
 872
 873        bd_cons = fp->rx_bd_cons;
 874        bd_prod = fp->rx_bd_prod;
 875        bd_prod_fw = bd_prod;
 876        sw_comp_cons = fp->rx_comp_cons;
 877        sw_comp_prod = fp->rx_comp_prod;
 878
 879        comp_ring_cons = RCQ_BD(sw_comp_cons);
 880        cqe = &fp->rx_comp_ring[comp_ring_cons];
 881        cqe_fp = &cqe->fast_path_cqe;
 882
 883        DP(NETIF_MSG_RX_STATUS,
 884           "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
 885
 886        while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
 887                struct sw_rx_bd *rx_buf = NULL;
 888                uint8_t cqe_fp_flags;
 889                enum eth_rx_cqe_type cqe_fp_type;
 890                uint16_t len, pad, queue;
 891                uint8_t *data;
 892                uint32_t rxhash;
 893
 894#ifdef BNX2X_STOP_ON_ERROR
 895                if (unlikely(bp->panic))
 896                        return 0;
 897#endif
 898
 899                bd_prod = RX_BD(bd_prod);
 900                bd_cons = RX_BD(bd_cons);
 901
 902                /* A rmb() is required to ensure that the CQE is not read
 903                 * before it is written by the adapter DMA.  PCI ordering
 904                 * rules will make sure the other fields are written before
 905                 * the marker at the end of struct eth_fast_path_rx_cqe
 906                 * but without rmb() a weakly ordered processor can process
 907                 * stale data.  Without the barrier TPA state-machine might
 908                 * enter inconsistent state and kernel stack might be
  909                 * provided with an incorrect packet description - these lead
  910                 * to various kernel crashes.
 911                 */
 912                rmb();
 913
 914                cqe_fp_flags = cqe_fp->type_error_flags;
 915                cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 916
 917                DP(NETIF_MSG_RX_STATUS,
 918                   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
 919                   CQE_TYPE(cqe_fp_flags),
 920                   cqe_fp_flags, cqe_fp->status_flags,
 921                   le32_to_cpu(cqe_fp->rss_hash_result),
 922                   le16_to_cpu(cqe_fp->vlan_tag),
 923                   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
 924
 925                /* is this a slowpath msg? */
 926                if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 927                        bnx2x_sp_event(fp, cqe);
 928                        goto next_cqe;
 929                }
 930
 931                rx_buf = &fp->rx_buf_ring[bd_cons];
 932                data = rx_buf->data;
 933
 934                if (!CQE_TYPE_FAST(cqe_fp_type)) {
 935                        struct bnx2x_agg_info *tpa_info;
 936                        uint16_t frag_size, pages;
 937#ifdef BNX2X_STOP_ON_ERROR
 938                        /* sanity check */
 939                        if (fp->disable_tpa &&
 940                            (CQE_TYPE_START(cqe_fp_type) ||
 941                             CQE_TYPE_STOP(cqe_fp_type)))
 942                                BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
 943                                          CQE_TYPE(cqe_fp_type));
 944#endif
 945
 946                        if (CQE_TYPE_START(cqe_fp_type)) {
 947                                uint16_t queue = cqe_fp->queue_index;
 948                                DP(NETIF_MSG_RX_STATUS,
 949                                   "calling tpa_start on queue %d\n",
 950                                   queue);
 951
 952                                bnx2x_tpa_start(fp, queue,
 953                                                bd_cons, bd_prod,
 954                                                cqe_fp);
 955
 956                                goto next_rx;
 957                        }
 958                        queue = cqe->end_agg_cqe.queue_index;
 959                        tpa_info = &fp->tpa_info[queue];
 960                        DP(NETIF_MSG_RX_STATUS,
 961                           "calling tpa_stop on queue %d\n",
 962                           queue);
 963
 964                        frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
 965                                    tpa_info->len_on_bd;
 966
 967                        if (fp->mode == TPA_MODE_GRO)
 968                                pages = (frag_size + tpa_info->full_page - 1) /
 969                                         tpa_info->full_page;
 970                        else
 971                                pages = SGE_PAGE_ALIGN(frag_size) >>
 972                                        SGE_PAGE_SHIFT;
 973
 974                        bnx2x_tpa_stop(bp, fp, tpa_info, pages,
 975                                       &cqe->end_agg_cqe, comp_ring_cons);
 976#ifdef BNX2X_STOP_ON_ERROR
 977                        if (bp->panic)
 978                                return 0;
 979#endif
 980
 981                        bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
 982                        goto next_cqe;
 983                }
 984                /* non TPA */
 985                len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
 986                pad = cqe_fp->placement_offset;
 987                dma_sync_single_for_cpu(&bp->pdev->dev,
 988                                        dma_unmap_addr(rx_buf, mapping),
 989                                        pad + RX_COPY_THRESH,
 990                                        DMA_FROM_DEVICE);
 991                pad += NET_SKB_PAD;
 992                prefetch(data + pad); /* speedup eth_type_trans() */
 993                /* is this an error packet? */
 994                if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
 995                        DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 996                           "ERROR  flags %x  rx packet %u\n",
 997                           cqe_fp_flags, sw_comp_cons);
 998                        bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
 999                        goto reuse_rx;
1000                }
1001
1002                /* Since we don't have a jumbo ring
1003                 * copy small packets if mtu > 1500
1004                 */
1005                /* TODO: AKAROS_PORT always copy out the packet for now. */
1006                if (1) {
1007//              if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1008//                  (len <= RX_COPY_THRESH)) {
1009                        block = block_alloc(len, MEM_ATOMIC);
1010                        if (block == NULL) {
1011                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1012                                   "ERROR  packet dropped because of alloc failure\n");
1013                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1014                                goto reuse_rx;
1015                        }
1016                        memcpy(block->wp, data + pad, len);
1017                        block->wp += len;
1018                        bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1019                } else {
1020                        if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1021                                                       0) == 0)) {
1022                                dma_unmap_single(&bp->pdev->dev,
1023                                                 dma_unmap_addr(rx_buf,
1024                                                                mapping),
1025                                                 fp->rx_buf_size,
1026                                                 DMA_FROM_DEVICE);
1027                                /* TODO: block extra data here */
1028                                panic("Extra-data not implemented");
1029                                #if 0 // AKAROS_PORT
1030                                skb = build_skb(data, fp->rx_frag_size);
1031                                if (unlikely(!skb)) {
1032                                        bnx2x_frag_free(fp, data);
1033                                        bnx2x_fp_qstats(bp, fp)->
1034                                                        rx_skb_alloc_failed++;
1035                                        goto next_rx;
1036                                }
1037                                skb_reserve(skb, pad);
1038                                #endif
1039                        } else {
1040                                DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1041                                   "ERROR  packet dropped because of alloc failure\n");
1042                                bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1043reuse_rx:
1044                                bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1045                                goto next_rx;
1046                        }
1047                }
1048
1049                // AKAROS_PORT TODO: set hash and checksum stuff
1050#if 0
1051                skb_put(skb, len);
1052                skb->protocol = eth_type_trans(skb, bp->dev);
1053
 1054                /* Set Toeplitz hash for a non-LRO skb */
1055                rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1056                skb_set_hash(skb, rxhash, rxhash_type);
1057
1058                skb_checksum_none_assert(skb);
1059
1060                if (bp->dev->feat & NETIF_F_RXCSUM)
1061                        bnx2x_csum_validate(skb, cqe, fp,
1062                                            bnx2x_fp_qstats(bp, fp));
1063
1064                skb_record_rx_queue(skb, fp->rx_queue);
1065
1066                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1067                    PARSING_FLAGS_VLAN)
1068                        __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1069                                               le16_to_cpu(cqe_fp->vlan_tag));
1070
1071                skb_mark_napi_id(skb, &fp->napi);
1072
1073                if (bnx2x_fp_ll_polling(fp))
1074                        netif_receive_skb(skb);
1075                else
1076                        napi_gro_receive(&fp->napi, skb);
1077#endif
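                     /* Hand the completed block to the Akaros ethernet input
                      * queue; TRUE marks it as arriving from the wire. */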
1078                etheriq(bp->edev, block, TRUE);
1079next_rx:
1080                rx_buf->data = NULL;
1081
1082                bd_cons = NEXT_RX_IDX(bd_cons);
1083                bd_prod = NEXT_RX_IDX(bd_prod);
1084                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1085                rx_pkt++;
1086next_cqe:
1087                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1088                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1089
1090                /* mark CQE as free */
1091                BNX2X_SEED_CQE(cqe_fp);
1092
1093                if (rx_pkt == budget)
1094                        break;
1095
1096                comp_ring_cons = RCQ_BD(sw_comp_cons);
1097                cqe = &fp->rx_comp_ring[comp_ring_cons];
1098                cqe_fp = &cqe->fast_path_cqe;
1099        } /* while */
1100
1101        fp->rx_bd_cons = bd_cons;
1102        fp->rx_bd_prod = bd_prod_fw;
1103        fp->rx_comp_cons = sw_comp_cons;
1104        fp->rx_comp_prod = sw_comp_prod;
1105
1106        /* Update producers */
1107        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1108                             fp->rx_sge_prod);
1109
1110        fp->rx_pkt += rx_pkt;
1111        fp->rx_calls++;
1112
1113        return rx_pkt;
1114}
1115
1116static void bnx2x_msix_fp_int(struct hw_trapframe *hw_tf, void *fp_cookie)
1117{
1118        struct bnx2x_fastpath *fp = fp_cookie;
1119        struct bnx2x *bp = fp->bp;
1120        uint8_t cos;
1121
1122        DP(NETIF_MSG_INTR,
1123           "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1124           fp->index, fp->fw_sb_id, fp->igu_sb_id);
1125
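             /* Ack the status block with the IGU interrupt left disabled; the
              * deferred poll path is expected to re-enable it when it finishes. */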
1126        bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1127
1128#ifdef BNX2X_STOP_ON_ERROR
1129        if (unlikely(bp->panic))
1130                return;
1131#endif
1132
1133        /* Handle Rx and Tx according to MSI-X vector */
1134        for_each_cos_in_tx_queue(fp, cos)
1135                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1136
1137        prefetch(&fp->sb_running_index[SM_RX_ID]);
1138        // AKAROS_PORT
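             /* Queue bnx2x_poll as a routine kernel message on this core to do
              * the actual RX/TX servicing. */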
1139        send_kernel_message(core_id(), bnx2x_poll, (long)fp, 0, 0,
1140                            KMSG_ROUTINE);
1141        napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1142
1143        return;
1144}
1145
1146/* HW Lock for shared dual port PHYs */
1147void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1148{
1149        qlock(&bp->port.phy_mutex);
1150
1151        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1152}
1153
1154void bnx2x_release_phy_lock(struct bnx2x *bp)
1155{
1156        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1157
1158        qunlock(&bp->port.phy_mutex);
1159}
1160
1161/* calculates MF speed according to current linespeed and MF configuration */
1162uint16_t bnx2x_get_mf_speed(struct bnx2x *bp)
1163{
1164        uint16_t line_speed = bp->link_vars.line_speed;
1165        if (IS_MF(bp)) {
1166                uint16_t maxCfg = bnx2x_extract_max_cfg(bp,
1167                                                   bp->mf_config[BP_VN(bp)]);
1168
1169                /* Calculate the current MAX line speed limit for the MF
1170                 * devices
1171                 */
1172                if (IS_MF_SI(bp))
1173                        line_speed = (line_speed * maxCfg) / 100;
1174                else { /* SD mode */
1175                        uint16_t vn_max_rate = maxCfg * 100;
1176
1177                        if (vn_max_rate < line_speed)
1178                                line_speed = vn_max_rate;
1179                }
1180        }
1181
1182        return line_speed;
1183}
1184
1185/**
1186 * bnx2x_fill_report_data - fill link report data to report
1187 *
1188 * @bp:         driver handle
1189 * @data:       link state to update
1190 *
 1191 * It uses non-atomic bit operations because it is called under the mutex.
1192 */
1193static void bnx2x_fill_report_data(struct bnx2x *bp,
1194                                   struct bnx2x_link_report_data *data)
1195{
1196        memset(data, 0, sizeof(*data));
1197
1198        if (IS_PF(bp)) {
1199                /* Fill the report data: effective line speed */
1200                data->line_speed = bnx2x_get_mf_speed(bp);
1201
1202                /* Link is down */
1203                if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1204                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1205                                  &data->link_report_flags);
1206
1207                if (!BNX2X_NUM_ETH_QUEUES(bp))
1208                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1209                                  &data->link_report_flags);
1210
1211                /* Full DUPLEX */
1212                if (bp->link_vars.duplex == DUPLEX_FULL)
1213                        __set_bit(BNX2X_LINK_REPORT_FD,
1214                                  &data->link_report_flags);
1215
1216                /* Rx Flow Control is ON */
1217                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1218                        __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1219                                  &data->link_report_flags);
1220
1221                /* Tx Flow Control is ON */
1222                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1223                        __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1224                                  &data->link_report_flags);
1225        } else { /* VF */
1226                *data = bp->vf_link_vars;
1227        }
1228}
1229
1230/**
1231 * bnx2x_link_report - report link status to OS.
1232 *
1233 * @bp:         driver handle
1234 *
1235 * Calls the __bnx2x_link_report() under the same locking scheme
1236 * as a link/PHY state managing code to ensure a consistent link
1237 * reporting.
1238 */
1239
1240void bnx2x_link_report(struct bnx2x *bp)
1241{
1242        bnx2x_acquire_phy_lock(bp);
1243        __bnx2x_link_report(bp);
1244        bnx2x_release_phy_lock(bp);
1245}
1246
1247/**
1248 * __bnx2x_link_report - report link status to OS.
1249 *
1250 * @bp:         driver handle
1251 *
 1252 * Non-atomic implementation.
1253 * Should be called under the phy_lock.
1254 */
1255void __bnx2x_link_report(struct bnx2x *bp)
1256{
1257        struct bnx2x_link_report_data cur_data;
1258
1259        /* reread mf_cfg */
1260        if (IS_PF(bp) && !CHIP_IS_E1(bp))
1261                bnx2x_read_mf_cfg(bp);
1262
1263        /* Read the current link report info */
1264        bnx2x_fill_report_data(bp, &cur_data);
1265
1266        /* Don't report link down or exactly the same link status twice */
1267        if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1268            (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1269                      &bp->last_reported_link.link_report_flags) &&
1270             test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1271                      &cur_data.link_report_flags)))
1272                return;
1273
1274        bp->link_cnt++;
1275
 1276        /* We are going to report new link parameters now -
 1277         * remember the current data for next time.
1278         */
1279        memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1280
1281        /* propagate status to VFs */
1282        if (IS_PF(bp))
1283                bnx2x_iov_link_update(bp);
1284
1285        if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1286                     &cur_data.link_report_flags)) {
1287                netif_carrier_off(bp->dev);
1288                netdev_err(bp->dev, "NIC Link is Down\n");
1289                return;
1290        } else {
1291                const char *duplex;
1292                const char *flow;
1293
1294                netif_carrier_on(bp->dev);
1295
1296                if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1297                                       &cur_data.link_report_flags))
1298                        duplex = "full";
1299                else
1300                        duplex = "half";
1301
 1302                /* Handle the FC flags last so that only they can still be
 1303                 * set; this way we can easily check whether any FC is
 1304                 * enabled.
 1305                 */
1306                if (cur_data.link_report_flags) {
1307                        if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1308                                     &cur_data.link_report_flags)) {
1309                                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1310                                     &cur_data.link_report_flags))
1311                                        flow = "ON - receive & transmit";
1312                                else
1313                                        flow = "ON - receive";
1314                        } else {
1315                                flow = "ON - transmit";
1316                        }
1317                } else {
1318                        flow = "none";
1319                }
1320                netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1321                            cur_data.line_speed, duplex, flow);
1322        }
1323}
1324
1325static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1326{
1327        int i;
1328
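             /* Chain the SGE ring pages: the reserved element at the end of each
              * page holds the DMA address of the next page, wrapping back to the
              * first page. */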
1329        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1330                struct eth_rx_sge *sge;
1331
1332                sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1333                sge->addr_hi =
1334                        cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1335                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1336
1337                sge->addr_lo =
1338                        cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1339                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1340        }
1341}
1342
1343static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1344                                struct bnx2x_fastpath *fp, int last)
1345{
1346        int i;
1347
1348        for (i = 0; i < last; i++) {
1349                struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1350                struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1351                uint8_t *data = first_buf->data;
1352
1353                if (data == NULL) {
1354                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1355                        continue;
1356                }
1357                if (tpa_info->tpa_state == BNX2X_TPA_START)
1358                        dma_unmap_single(&bp->pdev->dev,
1359                                         dma_unmap_addr(first_buf, mapping),
1360                                         fp->rx_buf_size, DMA_FROM_DEVICE);
1361                bnx2x_frag_free(fp, data);
1362                first_buf->data = NULL;
1363        }
1364}
1365
1366void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1367{
1368        int j;
1369
1370        for_each_rx_queue_cnic(bp, j) {
1371                struct bnx2x_fastpath *fp = &bp->fp[j];
1372
1373                fp->rx_bd_cons = 0;
1374
1375                /* Activate BD ring */
1376                /* Warning!
1377                 * this will generate an interrupt (to the TSTORM)
1378                 * must only be done after chip is initialized
1379                 */
1380                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1381                                     fp->rx_sge_prod);
1382        }
1383}
1384
1385void bnx2x_init_rx_rings(struct bnx2x *bp)
1386{
1387        int func = BP_FUNC(bp);
1388        uint16_t ring_prod;
1389        int i, j;
1390
1391        /* Allocate TPA resources */
1392        for_each_eth_queue(bp, j) {
1393                struct bnx2x_fastpath *fp = &bp->fp[j];
1394
1395                DP(NETIF_MSG_IFUP,
1396                   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1397
1398                if (!fp->disable_tpa) {
1399                        /* Fill the per-aggregation pool */
1400                        for (i = 0; i < MAX_AGG_QS(bp); i++) {
1401                                struct bnx2x_agg_info *tpa_info =
1402                                        &fp->tpa_info[i];
1403                                struct sw_rx_bd *first_buf =
1404                                        &tpa_info->first_buf;
1405
1406                                first_buf->data =
1407                                        bnx2x_frag_alloc(fp, MEM_WAIT);
1408                                if (!first_buf->data) {
1409                                        BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1410                                                  j);
1411                                        bnx2x_free_tpa_pool(bp, fp, i);
1412                                        fp->disable_tpa = 1;
1413                                        break;
1414                                }
1415                                dma_unmap_addr_set(first_buf, mapping, 0);
1416                                tpa_info->tpa_state = BNX2X_TPA_STOP;
1417                        }
1418
1419                        /* "next page" elements initialization */
1420                        bnx2x_set_next_page_sgl(fp);
1421
1422                        /* set SGEs bit mask */
1423                        bnx2x_init_sge_ring_bit_mask(fp);
1424
1425                        /* Allocate SGEs and initialize the ring elements */
1426                        for (i = 0, ring_prod = 0;
1427                             i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1428
1429                                if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1430                                                       MEM_WAIT) < 0) {
1431                                        BNX2X_ERR("was only able to allocate %d rx sges\n",
1432                                                  i);
1433                                        BNX2X_ERR("disabling TPA for queue[%d]\n",
1434                                                  j);
1435                                        /* Cleanup already allocated elements */
1436                                        bnx2x_free_rx_sge_range(bp, fp,
1437                                                                ring_prod);
1438                                        bnx2x_free_tpa_pool(bp, fp,
1439                                                            MAX_AGG_QS(bp));
1440                                        fp->disable_tpa = 1;
1441                                        ring_prod = 0;
1442                                        break;
1443                                }
1444                                ring_prod = NEXT_SGE_IDX(ring_prod);
1445                        }
1446
1447                        fp->rx_sge_prod = ring_prod;
1448                }
1449        }
1450
1451        for_each_eth_queue(bp, j) {
1452                struct bnx2x_fastpath *fp = &bp->fp[j];
1453
1454                fp->rx_bd_cons = 0;
1455
1456                /* Activate BD ring */
1457                /* Warning!
1458                 * this will generate an interrupt (to the TSTORM);
1459                 * it must only be done after the chip is initialized
1460                 */
1461                bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1462                                     fp->rx_sge_prod);
1463
1464                if (j != 0)
1465                        continue;
1466
1467                if (CHIP_IS_E1(bp)) {
1468                        REG_WR(bp, BAR_USTRORM_INTMEM +
1469                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1470                               U64_LO(fp->rx_comp_mapping));
1471                        REG_WR(bp, BAR_USTRORM_INTMEM +
1472                               USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1473                               U64_HI(fp->rx_comp_mapping));
1474                }
1475        }
1476}
1477
1478static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1479{
1480panic("Not implemented");
1481#if 0 // AKAROS_PORT
1482        uint8_t cos;
1483        struct bnx2x *bp = fp->bp;
1484
1485        for_each_cos_in_tx_queue(fp, cos) {
1486                struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1487                unsigned pkts_compl = 0, bytes_compl = 0;
1488
1489                uint16_t sw_prod = txdata->tx_pkt_prod;
1490                uint16_t sw_cons = txdata->tx_pkt_cons;
1491
1492                while (sw_cons != sw_prod) {
1493                        bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1494                                          &pkts_compl, &bytes_compl);
1495                        sw_cons++;
1496                }
1497
1498                netdev_tx_reset_queue(
1499                        netdev_get_tx_queue(bp->dev,
1500                                            txdata->txq_index));
1501        }
1502#endif
1503}
1504
1505static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1506{
1507        int i;
1508
1509        for_each_tx_queue_cnic(bp, i) {
1510                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1511        }
1512}
1513
1514static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1515{
1516        int i;
1517
1518        for_each_eth_queue(bp, i) {
1519                bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1520        }
1521}
1522
1523static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1524{
1525        struct bnx2x *bp = fp->bp;
1526        int i;
1527
1528        /* ring wasn't allocated */
1529        if (fp->rx_buf_ring == NULL)
1530                return;
1531
1532        for (i = 0; i < NUM_RX_BD; i++) {
1533                struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1534                uint8_t *data = rx_buf->data;
1535
1536                if (data == NULL)
1537                        continue;
1538                dma_unmap_single(&bp->pdev->dev,
1539                                 dma_unmap_addr(rx_buf, mapping),
1540                                 fp->rx_buf_size, DMA_FROM_DEVICE);
1541
1542                rx_buf->data = NULL;
1543                bnx2x_frag_free(fp, data);
1544        }
1545}
1546
1547static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1548{
1549        int j;
1550
1551        for_each_rx_queue_cnic(bp, j) {
1552                bnx2x_free_rx_bds(&bp->fp[j]);
1553        }
1554}
1555
1556static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1557{
1558        int j;
1559
1560        for_each_eth_queue(bp, j) {
1561                struct bnx2x_fastpath *fp = &bp->fp[j];
1562
1563                bnx2x_free_rx_bds(fp);
1564
1565                if (!fp->disable_tpa)
1566                        bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1567        }
1568}
1569
1570static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1571{
1572        bnx2x_free_tx_skbs_cnic(bp);
1573        bnx2x_free_rx_skbs_cnic(bp);
1574}
1575
1576void bnx2x_free_skbs(struct bnx2x *bp)
1577{
1578        bnx2x_free_tx_skbs(bp);
1579        bnx2x_free_rx_skbs(bp);
1580}
1581
1582void bnx2x_update_max_mf_config(struct bnx2x *bp, uint32_t value)
1583{
1584        /* load old values */
1585        uint32_t mf_cfg = bp->mf_config[BP_VN(bp)];
1586
1587        if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1588                /* leave all but MAX value */
1589                mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1590
1591                /* set new MAX value */
1592                mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1593                                & FUNC_MF_CFG_MAX_BW_MASK;
1594
1595                bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1596        }
1597}
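/*
 * The update above is a plain read-modify-write of one shmem field: the old
 * MAX_BW bits are cleared from mf_cfg, the new value is shifted into place and
 * masked, and DRV_MSG_CODE_SET_MF_BW hands the merged word to the MCP so
 * firmware applies the new limit.  For instance (leaving the field's unit
 * aside), value == 50 is merged in as
 * (50 << FUNC_MF_CFG_MAX_BW_SHIFT) & FUNC_MF_CFG_MAX_BW_MASK.
 */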
1598
1599/**
1600 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1601 *
1602 * @bp:         driver handle
1603 * @nvecs:      number of vectors to be released
1604 */
1605static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1606{
1607panic("Not implemented");
1608#if 0 // AKAROS_PORT
1609        int i, offset = 0;
1610
1611        if (nvecs == offset)
1612                return;
1613
1614        /* VFs don't have a default SB */
1615        if (IS_PF(bp)) {
1616                free_irq(bp->msix_table[offset].vector, bp->dev);
1617                DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1618                   bp->msix_table[offset].vector);
1619                offset++;
1620        }
1621
1622        if (CNIC_SUPPORT(bp)) {
1623                if (nvecs == offset)
1624                        return;
1625                offset++;
1626        }
1627
1628        for_each_eth_queue(bp, i) {
1629                if (nvecs == offset)
1630                        return;
1631                DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1632                   i, bp->msix_table[offset].vector);
1633
1634                free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1635        }
1636#endif
1637}
1638
1639void bnx2x_free_irq(struct bnx2x *bp)
1640{
1641panic("Not implemented");
1642#if 0 // AKAROS_PORT
1643        if (bp->flags & USING_MSIX_FLAG &&
1644            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1645                int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1646
1647                /* vfs don't have a default status block */
1648                if (IS_PF(bp))
1649                        nvecs++;
1650
1651                bnx2x_free_msix_irqs(bp, nvecs);
1652        } else {
1653                free_irq(bp->dev->irq, bp->dev);
1654        }
1655#endif
1656}
1657
1658int bnx2x_enable_msix(struct bnx2x *bp)
1659{
1660        int msix_vec = 0, i, rc;
1661panic("Not implemented");
1662#if 0 // AKAROS_PORT
1663        /* VFs don't have a default status block */
1664        if (IS_PF(bp)) {
1665                bp->msix_table[msix_vec].entry = msix_vec;
1666                BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1667                               bp->msix_table[0].entry);
1668                msix_vec++;
1669        }
1670
1671        /* Cnic requires an msix vector for itself */
1672        if (CNIC_SUPPORT(bp)) {
1673                bp->msix_table[msix_vec].entry = msix_vec;
1674                BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1675                               msix_vec, bp->msix_table[msix_vec].entry);
1676                msix_vec++;
1677        }
1678
1679        /* We need separate vectors for ETH queues only (not FCoE) */
1680        for_each_eth_queue(bp, i) {
1681                bp->msix_table[msix_vec].entry = msix_vec;
1682                BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1683                               msix_vec, msix_vec, i);
1684                msix_vec++;
1685        }
1686
1687        DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1688           msix_vec);
1689
1690        rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1691                                   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1692        /*
1693         * reconfigure number of tx/rx queues according to available
1694         * MSI-X vectors
1695         */
1696        if (rc == -ENOSPC) {
1697                /* Get by with single vector */
1698                rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1699                if (rc < 0) {
1700                        BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1701                                       rc);
1702                        goto no_msix;
1703                }
1704
1705                BNX2X_DEV_INFO("Using single MSI-X vector\n");
1706                bp->flags |= USING_SINGLE_MSIX_FLAG;
1707
1708                BNX2X_DEV_INFO("set number of queues to 1\n");
1709                bp->num_ethernet_queues = 1;
1710                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1711        } else if (rc < 0) {
1712                BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1713                goto no_msix;
1714        } else if (rc < msix_vec) {
1715                /* how many fewer vectors did we end up with? */
1716                int diff = msix_vec - rc;
1717
1718                BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1719
1720                /*
1721                 * decrease number of queues by number of unallocated entries
1722                 */
1723                bp->num_ethernet_queues -= diff;
1724                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1725
1726                BNX2X_DEV_INFO("New queue configuration set: %d\n",
1727                               bp->num_queues);
1728        }
1729
1730        bp->flags |= USING_MSIX_FLAG;
1731
1732        return 0;
1733
1734no_msix:
1735        /* fall to INTx if not enough memory */
1736        if (rc == -ENOMEM)
1737                bp->flags |= DISABLE_MSI_FLAG;
1738
1739        return rc;
1740#endif
1741}
1742
1743static void bullshit_handler(struct hw_trapframe *hw_tf, void *cnic_turd)
1744{
1745        printk("bnx2x CNIC IRQ fired.  Probably a bug!\n");
1746}
1747
1748static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1749{
1750        int i, rc, offset = 0;
1751
1752        /* no default status block for vf */
1753        if (IS_PF(bp)) {
1754                rc = register_irq(bp->msix_table[offset++].vector,
1755                                  bnx2x_msix_sp_int, bp->dev,
1756                                  pci_to_tbdf(bp->pdev));
1757                if (rc) {
1758                        BNX2X_ERR("request sp irq failed\n");
1759                        return -EBUSY;
1760                }
1761        }
1762
1763        if (CNIC_SUPPORT(bp)) {
1764                offset++;
1765                // AKAROS_PORT
1766                rc = register_irq(0, bullshit_handler, 0,
1767                                  pci_to_tbdf(bp->pdev));
1768                if (rc) {
1769                        BNX2X_ERR("Fucked up getting a CNIC MSIX vector!");
1770                        return -EBUSY;
1771                }
1772        }
1773
1774        for_each_eth_queue(bp, i) {
1775                struct bnx2x_fastpath *fp = &bp->fp[i];
1776                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1777                         bp->dev->name, i);
1778
1779                rc = register_irq(bp->msix_table[offset].vector,
1780                                  bnx2x_msix_fp_int, fp, pci_to_tbdf(bp->pdev));
1781                if (rc) {
1782                        BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1783                              bp->msix_table[offset].vector, rc);
1784                        bnx2x_free_msix_irqs(bp, offset);
1785                        return -EBUSY;
1786                }
1787
1788                offset++;
1789        }
1790
1791        i = BNX2X_NUM_ETH_QUEUES(bp);
1792        if (IS_PF(bp)) {
1793                offset = 1 + CNIC_SUPPORT(bp);
1794                netdev_info(bp->dev,
1795                            "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1796                            bp->msix_table[0].vector,
1797                            0, bp->msix_table[offset].vector,
1798                            i - 1, bp->msix_table[offset + i - 1].vector);
1799        } else {
1800                offset = CNIC_SUPPORT(bp);
1801                netdev_info(bp->dev,
1802                            "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1803                            0, bp->msix_table[offset].vector,
1804                            i - 1, bp->msix_table[offset + i - 1].vector);
1805        }
1806        return 0;
1807}
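/*
 * Illustrative vector layout for the registrations above (assuming a PF with
 * CNIC support and N ETH queues; the offsets shift down when either is
 * absent):
 *
 *   msix_table[0]        -> slowpath status block (bnx2x_msix_sp_int)
 *   msix_table[1]        -> reserved for CNIC (the Akaros port only hooks a
 *                           stub warning handler for it)
 *   msix_table[2..N+1]   -> fastpath queues fp[0]..fp[N-1] (bnx2x_msix_fp_int)
 *
 * A VF has no default status block, so its fastpath vectors start right after
 * the optional CNIC slot, which is what the two netdev_info() branches print.
 */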
1808
1809int bnx2x_enable_msi(struct bnx2x *bp)
1810{
1811panic("Not implemented");
1812#if 0 // AKAROS_PORT
1813        int rc;
1814
1815        rc = pci_enable_msi(bp->pdev);
1816        if (rc) {
1817                BNX2X_DEV_INFO("MSI is not attainable\n");
1818                return -1;
1819        }
1820        bp->flags |= USING_MSI_FLAG;
1821
1822        return 0;
1823#endif
1824}
1825
1826static int bnx2x_req_irq(struct bnx2x *bp)
1827{
1828        unsigned long flags;
1829panic("Not implemented");
1830#if 0 // AKAROS_PORT
1831        unsigned int irq;
1832
1833        if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1834                flags = 0;
1835        else
1836                flags = IRQF_SHARED;
1837
1838        if (bp->flags & USING_MSIX_FLAG)
1839                irq = bp->msix_table[0].vector;
1840        else
1841                irq = bp->pdev->irq;
1842
1843        return register_irq(irq, bnx2x_interrupt, bp->dev,
1844                            pci_to_tbdf(bp->pdev));
1845#endif
1846}
1847
1848static int bnx2x_setup_irqs(struct bnx2x *bp)
1849{
1850        return bnx2x_req_msix_irqs(bp);
1851#if 0 // AKAROS_PORT we just register_irq
1852        if (bp->flags & USING_MSIX_FLAG &&
1853            !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1854                rc = bnx2x_req_msix_irqs(bp);
1855                if (rc)
1856                        return rc;
1857        } else {
1858                rc = bnx2x_req_irq(bp);
1859                if (rc) {
1860                        BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1861                        return rc;
1862                }
1863                if (bp->flags & USING_MSI_FLAG) {
1864                        bp->dev->irq = bp->pdev->irq;
1865                        netdev_info(bp->dev, "using MSI IRQ %d\n",
1866                                    bp->dev->irq);
1867                }
1868                if (bp->flags & USING_MSIX_FLAG) {
1869                        bp->dev->irq = bp->msix_table[0].vector;
1870                        netdev_info(bp->dev, "using MSIX IRQ %d\n",
1871                                    bp->dev->irq);
1872                }
1873        }
1874
1875        return 0;
1876#endif
1877}
1878
1879static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1880{
1881        int i;
1882
1883        for_each_rx_queue_cnic(bp, i) {
1884                bnx2x_fp_init_lock(&bp->fp[i]);
1885                napi_enable(&bnx2x_fp(bp, i, napi));
1886        }
1887}
1888
1889static void bnx2x_napi_enable(struct bnx2x *bp)
1890{
1891        int i;
1892
1893        for_each_eth_queue(bp, i) {
1894                bnx2x_fp_init_lock(&bp->fp[i]);
1895                napi_enable(&bnx2x_fp(bp, i, napi));
1896        }
1897}
1898
1899static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1900{
1901        int i;
1902
1903        for_each_rx_queue_cnic(bp, i) {
1904                napi_disable(&bnx2x_fp(bp, i, napi));
1905                while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1906                        kthread_usleep(1000);
1907        }
1908}
1909
1910static void bnx2x_napi_disable(struct bnx2x *bp)
1911{
1912        int i;
1913
1914        for_each_eth_queue(bp, i) {
1915                napi_disable(&bnx2x_fp(bp, i, napi));
1916                while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1917                        kthread_usleep(1000);
1918        }
1919}
1920
1921void bnx2x_netif_start(struct bnx2x *bp)
1922{
1923panic("Not implemented");
1924#if 0 // AKAROS_PORT
1925        if (netif_running(bp->dev)) {
1926                bnx2x_napi_enable(bp);
1927                if (CNIC_LOADED(bp))
1928                        bnx2x_napi_enable_cnic(bp);
1929                bnx2x_int_enable(bp);
1930                if (bp->state == BNX2X_STATE_OPEN)
1931                        netif_tx_wake_all_queues(bp->dev);
1932        }
1933#endif
1934}
1935
1936void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1937{
1938        bnx2x_int_disable_sync(bp, disable_hw);
1939        bnx2x_napi_disable(bp);
1940        if (CNIC_LOADED(bp))
1941                bnx2x_napi_disable_cnic(bp);
1942}
1943
1944uint16_t bnx2x_select_queue(struct ether *dev, struct sk_buff *skb,
1945                       void *accel_priv, select_queue_fallback_t fallback)
1946{
1947panic("Not implemented");
1948#if 0 // AKAROS_PORT
1949        struct bnx2x *bp = netdev_priv(dev);
1950
1951        if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1952                struct ethhdr *hdr = (struct ethhdr *)skb->data;
1953                uint16_t ether_type = be16_to_cpu(hdr->h_proto);
1954
1955                /* Skip VLAN tag if present */
1956                if (ether_type == ETH_P_8021Q) {
1957                        struct vlan_ethhdr *vhdr =
1958                                (struct vlan_ethhdr *)skb->data;
1959
1960                        ether_type = be16_to_cpu(vhdr->h_vlan_encapsulated_proto);
1961                }
1962
1963                /* If ethertype is FCoE or FIP - use FCoE ring */
1964                if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1965                        return bnx2x_fcoe_tx(bp, txq_index);
1966        }
1967
1968        /* select a non-FCoE queue */
1969        return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1970#endif
1971}
1972
1973void bnx2x_set_num_queues(struct bnx2x *bp)
1974{
1975        /* RSS queues */
1976        bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1977
1978        /* override in STORAGE SD modes */
1979        if (IS_MF_STORAGE_ONLY(bp))
1980                bp->num_ethernet_queues = 1;
1981
1982        /* Add special queues */
1983        bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1984        bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1985
1986        BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1987}
1988
1989/**
1990 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1991 *
1992 * @bp:         Driver handle
1993 *
1994 * We currently support at most 16 Tx queues for each CoS, thus we will
1995 * allocate a multiple of 16 for ETH L2 rings according to the value of
1996 * bp->max_cos.
1997 *
1998 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1999 * index after all ETH L2 indices.
2000 *
2001 * If the actual number of Tx queues (for each CoS) is less than 16 then there
2002 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
2003 * 16..31, ...) with indices that are not coupled with any real Tx queue.
2004 *
2005 * The proper configuration of skb->queue_mapping is handled by
2006 * bnx2x_select_queue() and __skb_tx_hash().
2007 *
2008 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
2009 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
2010 */
2011static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
2012{
2013        int rc, tx, rx;
2014
2015        tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
2016        rx = BNX2X_NUM_ETH_QUEUES(bp);
2017
2018        /* account for fcoe queue */
2019        if (include_cnic && !NO_FCOE(bp)) {
2020                rx++;
2021                tx++;
2022        }
2023
2024#if 0 // AKAROS_PORT XME: set queues in ether
2025        rc = netif_set_real_num_tx_queues(bp->dev, tx);
2026        if (rc) {
2027                BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2028                return rc;
2029        }
2030        rc = netif_set_real_num_rx_queues(bp->dev, rx);
2031        if (rc) {
2032                BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2033                return rc;
2034        }
2035#else
2036        rc = 0;
2037#endif
2038
2039        DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2040                          tx, rx);
2041
2042        return rc;
2043}
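/*
 * Worked example for the tx/rx arithmetic above (queue counts are
 * configuration dependent, so the numbers are only illustrative): with
 * BNX2X_NUM_ETH_QUEUES(bp) == 8 and bp->max_cos == 3,
 *
 *   tx = 8 * 3 = 24;   rx = 8;
 *
 * and if a CNIC/FCoE L2 queue is included both counts grow by one, giving
 * (tx, rx) = (25, 9), with the FCoE Tx queue taking the next index after all
 * of the ETH Tx indices as the comment above describes.
 */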
2044
2045static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2046{
2047        int i;
2048
2049        for_each_queue(bp, i) {
2050                struct bnx2x_fastpath *fp = &bp->fp[i];
2051                uint32_t mtu;
2052
2053                /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2054                if (IS_FCOE_IDX(i))
2055                        /*
2056                         * Although no IP frames are expected to arrive on
2057                         * this ring, we still want to add an
2058                         * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2059                         * overrun attack.
2060                         */
2061                        mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2062                else
2063                        mtu = bp->dev->mtu;
2064                /* AKAROS_PORT XME struct block alignment and size issues? */
2065                fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2066                                  IP_HEADER_ALIGNMENT_PADDING +
2067                                  ETH_OVREHEAD +
2068                                  mtu +
2069                                  BNX2X_FW_RX_ALIGN_END;
2070                /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2071                if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2072                        fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2073                else
2074                        fp->rx_frag_size = 0;
2075        }
2076}
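/*
 * Rough sizing sketch for the computation above (the alignment and overhead
 * macros are chip/arch dependent, so exact byte counts are assumptions): with
 * a standard 1500-byte MTU the buffer is the MTU plus a few hundred bytes of
 * FW alignment, Ethernet overhead and IP header padding.  That total plus
 * NET_SKB_PAD still fits in one 4 KiB page, so rx_frag_size is non-zero and
 * bnx2x_frag_alloc() can take the page-frag path; with a 9000-byte jumbo MTU
 * the sum exceeds PAGE_SIZE, rx_frag_size stays 0, and the allocation
 * presumably falls back to a plain full-buffer allocation.
 */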
2077
2078static int bnx2x_init_rss(struct bnx2x *bp)
2079{
2080        int i;
2081        uint8_t num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2082
2083        /* Prepare the initial contents for the indirection table if RSS is
2084         * enabled
2085         */
2086        for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2087                bp->rss_conf_obj.ind_table[i] =
2088                        bp->fp->cl_id +
2089                        ethtool_rxfh_indir_default(i, num_eth_queues);
2090
2091        /*
2092         * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2093         * per-port, so if explicit configuration is needed, do it only
2094         * for a PMF.
2095         *
2096         * For 57712 and newer on the other hand it's a per-function
2097         * configuration.
2098         */
2099        return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2100}
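/*
 * Example of the indirection-table fill above (assuming 4 ETH queues and a
 * base cl_id of 0; both depend on the configuration).  In the Linux
 * definition ethtool_rxfh_indir_default(i, 4) is simply i % 4, so the table
 * becomes
 *
 *   ind_table[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
 *
 * i.e. RSS hash buckets are spread round-robin across the ETH client IDs.
 */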
2101
2102int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2103              bool config_hash, bool enable)
2104{
2105        struct bnx2x_config_rss_params params = {NULL};
2106
2107        /* Although RSS is meaningless when there is a single HW queue we
2108         * still need it enabled in order to have HW Rx hash generated.
2109         *
2110         * if (!is_eth_multi(bp))
2111         *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2112         */
2113
2114        params.rss_obj = rss_obj;
2115
2116        __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2117
2118        if (enable) {
2119                __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2120
2121                /* RSS configuration */
2122                __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2123                __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2124                __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2125                __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2126                if (rss_obj->udp_rss_v4)
2127                        __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2128                if (rss_obj->udp_rss_v6)
2129                        __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2130
2131                if (!CHIP_IS_E1x(bp))
2132                        /* valid only for TUNN_MODE_GRE tunnel mode */
2133                        __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
2134        } else {
2135                __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2136        }
2137
2138        /* Hash bits */
2139        params.rss_result_mask = MULTI_MASK;
2140
2141        memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2142
2143        if (config_hash) {
2144                /* RSS keys */
2145                #if 0 // AKAROS_PORT
2146                netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2147                #else
2148                /* Linux fills this with a random key generated once at
2149                 * boot; a constant 0x5a pattern serves here. */
2150                memset(params.rss_key, 0x5a, T_ETH_RSS_KEY * 4);
2151                #endif
2152                __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2153        }
2154
2155        if (IS_PF(bp))
2156                return bnx2x_config_rss(bp, &params);
2157        else
2158                return bnx2x_vfpf_config_rss(bp, &params);
2159}
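/*
 * Call-site sketch (parameter choices here are illustrative, not a new API):
 * the PF path above is typically reached through the bnx2x_config_rss_eth()
 * wrapper used in bnx2x_init_rss(), which amounts to something like
 *
 *   bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
 *
 * while passing enable == false keeps the same ramrod machinery but programs
 * BNX2X_RSS_MODE_DISABLED instead of the regular hash flags.
 */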
2160
2161static int bnx2x_init_hw(struct bnx2x *bp, uint32_t load_code)
2162{
2163        struct bnx2x_func_state_params func_params = {NULL};
2164
2165        /* Prepare parameters for function state transitions */
2166        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2167
2168        func_params.f_obj = &bp->func_obj;
2169        func_params.cmd = BNX2X_F_CMD_HW_INIT;
2170
2171        func_params.params.hw_init.load_phase = load_code;
2172
2173        return bnx2x_func_state_change(bp, &func_params);
2174}
2175
2176/*
2177 * Cleans the objects that have internal lists without sending
2178 * ramrods. Should be run when interrupts are disabled.
2179 */
2180void bnx2x_squeeze_objects(struct bnx2x *bp)
2181{
2182        int rc;
2183        unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2184        struct bnx2x_mcast_ramrod_params rparam = {NULL};
2185        struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2186
2187        /***************** Cleanup MACs' object first *************************/
2188
2189        /* Wait for completion of requested */
2190        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2191        /* Perform a dry cleanup */
2192        __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2193
2194        /* Clean ETH primary MAC */
2195        __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2196        rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2197                                 &ramrod_flags);
2198        if (rc != 0)
2199                BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2200
2201        /* Cleanup UC list */
2202        vlan_mac_flags = 0;
2203        __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2204        rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2205                                 &ramrod_flags);
2206        if (rc != 0)
2207                BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2208
2209        /***************** Now clean mcast object *****************************/
2210        rparam.mcast_obj = &bp->mcast_obj;
2211        __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2212
2213        /* Add a DEL command... - Since we're doing a driver cleanup only,
2214         * we take a lock surrounding both the initial send and the CONTs,
2215         * as we don't want a true completion to disrupt us in the middle.
2216         */
2217        qlock(&bp->dev->qlock);
2218        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2219        if (rc < 0)
2220                BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2221                          rc);
2222
2223        /* ...and wait until all pending commands are cleared */
2224        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2225        while (rc != 0) {
2226                if (rc < 0) {
2227                        BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2228                                  rc);
2229                        qunlock(&bp->dev->qlock);
2230                        return;
2231                }
2232
2233                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2234        }
2235        qunlock(&bp->dev->qlock);
2236}
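/*
 * Flag summary for the cleanup above (a restatement of the flags shown, not
 * new behaviour): RAMROD_DRV_CLR_ONLY turns each delete_all()/mcast request
 * into a driver-only "dry" cleanup that empties the internal lists without
 * posting ramrods to the chip, while RAMROD_COMP_WAIT still makes the calls
 * synchronous, which is why the sequence is safe with interrupts disabled.
 */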
2237
2238#ifndef BNX2X_STOP_ON_ERROR
2239#define LOAD_ERROR_EXIT(bp, label) \
2240        do { \
2241                (bp)->state = BNX2X_STATE_ERROR; \
2242                goto label; \
2243        } while (0)
2244
2245#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2246        do { \
2247                bp->cnic_loaded = false; \
2248                goto label; \
2249        } while (0)
2250#else /*BNX2X_STOP_ON_ERROR*/
2251#define LOAD_ERROR_EXIT(bp, label) \
2252        do { \
2253                (bp)->state = BNX2X_STATE_ERROR; \
2254                (bp)->panic = 1; \
2255                return -EBUSY; \
2256        } while (0)
2257#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2258        do { \
2259                bp->cnic_loaded = false; \
2260                (bp)->panic = 1; \
2261                return -EBUSY; \
2262        } while (0)
2263#endif /*BNX2X_STOP_ON_ERROR*/
2264
2265static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2266{
2267        BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2268                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2269        return;
2270}
2271
2272static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2273{
2274        int num_groups, vf_headroom = 0;
2275        int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2276
2277        /* number of queues for statistics is number of eth queues + FCoE */
2278        uint8_t num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2279
2280        /* Total number of FW statistics requests =
2281         * 1 for port stats + 1 for PF stats + potentially 2 for FCoE (fcoe proper
2282         * and fcoe l2 queue) stats + num of queues (which includes another 1
2283         * for fcoe l2 queue if applicable)
2284         */
2285        bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2286
2287        /* vf stats appear in the request list, but their data is allocated by
2288         * the VFs themselves. We don't include them in the bp->fw_stats_num as
2289         * it is used to determine where to place the vf stats queries in the
2290         * request struct
2291         */
2292        if (IS_SRIOV(bp))
2293                vf_headroom = bnx2x_vf_headroom(bp);
2294
2295        /* Request is built from stats_query_header and an array of
2296         * stats_query_cmd_group each of which contains
2297         * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2298         * configured in the stats_query_header.
2299         */
2300        num_groups =
2301                (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2302                 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2303                 1 : 0));
2304
2305        DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2306           bp->fw_stats_num, vf_headroom, num_groups);
2307        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2308                num_groups * sizeof(struct stats_query_cmd_group);
2309
2310        /* Data for statistics requests + stats_counter
2311         * stats_counter holds per-STORM counters that are incremented
2312         * when STORM has finished with the current request.
2313         * Memory for FCoE offloaded statistics is counted anyway,
2314         * even if they will not be sent.
2315         * VF stats are not accounted for here as the data of VF stats is stored
2316         * in memory allocated by the VF, not here.
2317         */
2318        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2319                sizeof(struct per_pf_stats) +
2320                sizeof(struct fcoe_statistics_params) +
2321                sizeof(struct per_queue_stats) * num_queue_stats +
2322                sizeof(struct stats_counter);
2323
2324        bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2325                                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2326        if (!bp->fw_stats)
2327                goto alloc_mem_err;
2328
2329        /* Set shortcuts */
2330        bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2331        bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2332        bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2333                ((uint8_t *)bp->fw_stats + bp->fw_stats_req_sz);
2334        bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2335                bp->fw_stats_req_sz;
2336
2337        DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2338           U64_HI(bp->fw_stats_req_mapping),
2339           U64_LO(bp->fw_stats_req_mapping));
2340        DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2341           U64_HI(bp->fw_stats_data_mapping),
2342           U64_LO(bp->fw_stats_data_mapping));
2343        return 0;
2344
2345alloc_mem_err:
2346        bnx2x_free_fw_stats_mem(bp);
2347        BNX2X_ERR("Can't allocate FW stats memory\n");
2348        return -ENOMEM;
2349}
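/*
 * Worked example for the sizing above (queue counts and STATS_QUERY_CMD_COUNT
 * are configuration/firmware dependent; the numbers are only illustrative):
 * with 8 ETH queues plus FCoE, num_queue_stats = 9 and
 * fw_stats_num = 2 + 1 + 9 = 12.  With no VF headroom and a command group
 * holding, say, 16 rules, num_groups = ceil(12 / 16) = 1, so the request area
 * is one stats_query_header plus one stats_query_cmd_group.  The single DMA
 * allocation then holds that request region followed by the data region, and
 * the "shortcut" pointers above are just offsets into it:
 *
 *   fw_stats_req  = fw_stats
 *   fw_stats_data = fw_stats + fw_stats_req_sz
 */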
2350
2351/* send load request to mcp and analyze response */
2352static int bnx2x_nic_load_request(struct bnx2x *bp, uint32_t *load_code)
2353{
2354        uint32_t param;
2355
2356        /* init fw_seq */
2357        bp->fw_seq =
2358                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2359                 DRV_MSG_SEQ_NUMBER_MASK);
2360        BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2361
2362        /* Get current FW pulse sequence */
2363        bp->fw_drv_pulse_wr_seq =
2364                (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2365                 DRV_PULSE_SEQ_MASK);
2366        BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2367
2368        param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2369
2370        if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2371                param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2372
2373        /* load request */
2374        (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2375
2376        /* if mcp fails to respond we must abort */
2377        if (!(*load_code)) {
2378                BNX2X_ERR("MCP response failure, aborting\n");
2379                return -EBUSY;
2380        }
2381
2382        /* If mcp refused (e.g. other port is in diagnostic mode) we
2383         * must abort
2384         */
2385        if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2386                BNX2X_ERR("MCP refused load request, aborting\n");
2387                return -EBUSY;
2388        }
2389        return 0;
2390}
2391
2392/* Check whether another PF has already loaded FW to the chip. In
2393 * virtualized environments a PF from another VM may have already
2394 * initialized the device, including loading FW.
2395 */
2396int bnx2x_compare_fw_ver(struct bnx2x *bp, uint32_t load_code,
2397                         bool print_err)
2398{
2399        /* is another pf loaded on this engine? */
2400        if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2401            load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2402                /* build my FW version dword */
2403                uint32_t my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2404                        (BCM_5710_FW_MINOR_VERSION << 8) +
2405                        (BCM_5710_FW_REVISION_VERSION << 16) +
2406                        (BCM_5710_FW_ENGINEERING_VERSION << 24);
2407
2408                /* read loaded FW from chip */
2409                uint32_t loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2410
2411                DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2412                   loaded_fw, my_fw);
2413
2414                /* abort nic load if version mismatch */
2415                if (my_fw != loaded_fw) {
2416                        if (print_err)
2417                                BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2418                                          loaded_fw, my_fw);
2419                        else
2420                                BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2421                                               loaded_fw, my_fw);
2422                        return -EBUSY;
2423                }
2424        }
2425        return 0;
2426}
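/*
 * Example of the version-dword packing above (7.13.1.0 is just an example
 * release, not necessarily what this tree ships): major 7, minor 13,
 * revision 1, engineering 0 pack as
 *
 *   my_fw = 7 + (13 << 8) + (1 << 16) + (0 << 24) = 0x00010d07
 *
 * and the load is aborted whenever the dword read back from XSEM_REG_PRAM
 * does not match it exactly.
 */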
2427
2428/* returns the "mcp load_code" according to global load_count array */
2429static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2430{
2431        int path = BP_PATH(bp);
2432
2433        DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2434           path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2435           bnx2x_load_count[path][2]);
2436        bnx2x_load_count[path][0]++;
2437        bnx2x_load_count[path][1 + port]++;
2438        DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2439           path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2440           bnx2x_load_count[path][2]);
2441        if (bnx2x_load_count[path][0] == 1)
2442                return FW_MSG_CODE_DRV_LOAD_COMMON;
2443        else if (bnx2x_load_count[path][1 + port] == 1)
2444                return FW_MSG_CODE_DRV_LOAD_PORT;
2445        else
2446                return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2447}
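/*
 * Example of the NO-MCP bookkeeping above (a purely illustrative sequence):
 * bnx2x_load_count[path] is { total, port0, port1 }.  Starting from
 * { 0, 0, 0 }, the first load on port 1 gives { 1, 0, 1 } and returns
 * LOAD_COMMON (first driver on the path initializes common blocks); a later
 * load on port 0 gives { 2, 1, 1 } and returns LOAD_PORT (first driver on
 * that port); any further load on port 0 gives { 3, 2, 1 } and returns
 * LOAD_FUNCTION.
 */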
2448
2449/* mark PMF if applicable */
2450static void bnx2x_nic_load_pmf(struct bnx2x *bp, uint32_t load_code)
2451{
2452        if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2453            (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2454            (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2455                bp->port.pmf = 1;
2456                /* We need the barrier to ensure the ordering between the
2457                 * writing to bp->port.pmf here and reading it from the
2458                 * bnx2x_periodic_task().
2459                 */
2460                mb();
2461        } else {
2462                bp->port.pmf = 0;
2463        }
2464
2465        DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2466}
2467
2468static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2469{
2470        if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2471             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2472            (bp->common.shmem2_base)) {
2473                if (SHMEM2_HAS(bp, dcc_support))
2474                        SHMEM2_WR(bp, dcc_support,
2475                                  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2476                                   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2477                if (SHMEM2_HAS(bp, afex_driver_support))
2478                        SHMEM2_WR(bp, afex_driver_support,
2479                                  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2480        }
2481
2482        /* Set AFEX default VLAN tag to an invalid value */
2483        bp->afex_def_vlan_tag = -1;
2484}
2485
2486/**
2487 * bnx2x_bz_fp - zero content of the fastpath structure.
2488 *
2489 * @bp:         driver handle
2490 * @index:      fastpath index to be zeroed
2491 *
2492 * Makes sure the contents of the bp->fp[index].napi is kept
2493 * intact.
2494 */
2495static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2496{
2497        struct bnx2x_fastpath *fp = &bp->fp[index];
2498        int cos;
2499        struct napi_struct orig_napi = fp->napi;
2500        struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2501
2502        /* bzero bnx2x_fastpath contents */
2503        if (fp->tpa_info)
2504                memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2505                       sizeof(struct bnx2x_agg_info));
2506        memset(fp, 0, sizeof(*fp));
2507
2508        /* AKAROS_PORT: let the code set up whatever fake napi stuff it needs */
2509        /* Restore the NAPI object as it has been already initialized */
2510        fp->napi = orig_napi;
2511        fp->tpa_info = orig_tpa_info;
2512        fp->bp = bp;
2513        fp->index = index;
2514        if (IS_ETH_FP(fp))
2515                fp->max_cos = bp->max_cos;
2516        else
2517                /* Special queues support only one CoS */
2518                fp->max_cos = 1;
2519
2520        /* Init txdata pointers */
2521        if (IS_FCOE_FP(fp))
2522                fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2523        if (IS_ETH_FP(fp))
2524                for_each_cos_in_tx_queue(fp, cos)
2525                        fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2526                                BNX2X_NUM_ETH_QUEUES(bp) + index];
2527
2528        /* set the tpa flag for each queue. The tpa flag determines the queue
2529         * minimal size so it must be set prior to queue memory allocation
2530         */
2531        fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2532                                  (bp->flags & GRO_ENABLE_FLAG &&
2533                                   bnx2x_mtu_allows_gro(bp->dev->mtu)));
2534        if (bp->flags & TPA_ENABLE_FLAG)
2535                fp->mode = TPA_MODE_LRO;
2536        else if (bp->flags & GRO_ENABLE_FLAG)
2537                fp->mode = TPA_MODE_GRO;
2538
2539        /* We don't want TPA on an FCoE L2 ring */
2540        if (IS_FCOE_FP(fp))
2541                fp->disable_tpa = 1;
2542}
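/*
 * TPA decision summary for the flag logic above (restating the code, not
 * adding new policy):
 *
 *   TPA_ENABLE_FLAG set                          -> aggregation on, mode LRO
 *   only GRO_ENABLE_FLAG set, MTU small enough   -> aggregation on, mode GRO
 *   neither flag, or GRO with an MTU rejected by
 *   bnx2x_mtu_allows_gro()                       -> disable_tpa = 1
 *   FCoE L2 ring                                 -> disable_tpa forced to 1
 */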
2543
2544int bnx2x_load_cnic(struct bnx2x *bp)
2545{
2546        int i, rc, port = BP_PORT(bp);
2547
2548        DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2549
2550        qlock_init(&bp->cnic_mutex);
2551
2552        if (IS_PF(bp)) {
2553                rc = bnx2x_alloc_mem_cnic(bp);
2554                if (rc) {
2555                        BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2556                        LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2557                }
2558        }
2559
2560        rc = bnx2x_alloc_fp_mem_cnic(bp);
2561        if (rc) {
2562                BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2563                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2564        }
2565
2566        /* Update the number of queues with the cnic queues */
2567        rc = bnx2x_set_real_num_queues(bp, 1);
2568        if (rc) {
2569                BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2570                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2571        }
2572
2573        /* Add all CNIC NAPI objects */
2574        bnx2x_add_all_napi_cnic(bp);
2575        DP(NETIF_MSG_IFUP, "cnic napi added\n");
2576        bnx2x_napi_enable_cnic(bp);
2577
2578        rc = bnx2x_init_hw_func_cnic(bp);
2579        if (rc)
2580                LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2581
2582        bnx2x_nic_init_cnic(bp);
2583
2584        if (IS_PF(bp)) {
2585                /* Enable Timer scan */
2586                REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2587
2588                /* setup cnic queues */
2589                for_each_cnic_queue(bp, i) {
2590                        rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2591                        if (rc) {
2592                                BNX2X_ERR("Queue setup failed\n");
2593                                LOAD_ERROR_EXIT(bp, load_error_cnic2);
2594                        }
2595                }
2596        }
2597
2598        /* Initialize Rx filter. */
2599        bnx2x_set_rx_mode_inner(bp);
2600
2601        /* re-read iscsi info */
2602        bnx2x_get_iscsi_info(bp);
2603        bnx2x_setup_cnic_irq_info(bp);
2604        bnx2x_setup_cnic_info(bp);
2605        bp->cnic_loaded = true;
2606        if (bp->state == BNX2X_STATE_OPEN)
2607                bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2608
2609        DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2610
2611        return 0;
2612
2613#ifndef BNX2X_STOP_ON_ERROR
2614load_error_cnic2:
2615        /* Disable Timer scan */
2616        REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2617
2618load_error_cnic1:
2619        bnx2x_napi_disable_cnic(bp);
2620        /* Update the number of queues without the cnic queues */
2621        if (bnx2x_set_real_num_queues(bp, 0))
2622                BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2623load_error_cnic0:
2624        BNX2X_ERR("CNIC-related load failed\n");
2625        bnx2x_free_fp_mem_cnic(bp);
2626        bnx2x_free_mem_cnic(bp);
2627        return rc;
2628#endif /* ! BNX2X_STOP_ON_ERROR */
2629}
2630
2631/* must be called with rtnl_lock */
2632int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2633{
2634        int port = BP_PORT(bp);
2635        int i, rc = 0;
2636        uint32_t load_code = 0;
2637
2638        DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2639        DP(NETIF_MSG_IFUP,
2640           "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2641
2642#ifdef BNX2X_STOP_ON_ERROR
2643        if (unlikely(bp->panic)) {
2644                BNX2X_ERR("Can't load NIC when there is panic\n");
2645                return -EPERM;
2646        }
2647#endif
2648
2649        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2650
2651        /* zero the structure w/o any lock, before SP handler is initialized */
2652        memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2653        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2654                &bp->last_reported_link.link_report_flags);
2655
2656        if (IS_PF(bp))
2657                /* must be called before memory allocation and HW init */
2658                bnx2x_ilt_set_info(bp);
2659
2660        /*
2661         * Zero fastpath structures preserving invariants like napi, which are
2662         * allocated only once, fp index, max_cos, bp pointer.
2663         * Also set fp->disable_tpa and txdata_ptr.
2664         */
2665        DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2666        for_each_queue(bp, i)
2667                bnx2x_bz_fp(bp, i);
2668        memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2669                                  bp->num_cnic_queues) *
2670                                  sizeof(struct bnx2x_fp_txdata));
2671
2672        bp->fcoe_init = false;
2673
2674        /* Set the receive queues buffer size */
2675        bnx2x_set_rx_buf_size(bp);
2676
2677        if (IS_PF(bp)) {
2678                rc = bnx2x_alloc_mem(bp);
2679                if (rc) {
2680                        BNX2X_ERR("Unable to allocate bp memory\n");
2681                        return rc;
2682                }
2683        }
2684
2685        /* needs to be done after alloc mem, since it's self-adjusting to the
2686         * amount of memory available for RSS queues
2687         */
2688        rc = bnx2x_alloc_fp_mem(bp);
2689        if (rc) {
2690                BNX2X_ERR("Unable to allocate memory for fps\n");
2691                LOAD_ERROR_EXIT(bp, load_error0);
2692        }
2693
2694        /* Allocate memory for FW statistics */
2695        if (bnx2x_alloc_fw_stats_mem(bp))
2696                LOAD_ERROR_EXIT(bp, load_error0);
2697
2698        /* request pf to initialize status blocks */
2699        if (IS_VF(bp)) {
2700                rc = bnx2x_vfpf_init(bp);
2701                if (rc)
2702                        LOAD_ERROR_EXIT(bp, load_error0);
2703        }
2704
2705        /* Since bnx2x_alloc_mem() may update
2706         * bp->num_queues, bnx2x_set_real_num_queues() should always
2707         * come after it. At this stage cnic queues are not counted.
2708         */
2709        rc = bnx2x_set_real_num_queues(bp, 0);
2710        if (rc) {
2711                BNX2X_ERR("Unable to set real_num_queues\n");
2712                LOAD_ERROR_EXIT(bp, load_error0);
2713        }
2714
2715        /* configure multi cos mappings in kernel.
2716         * this configuration may be overridden by a multi class queue
2717         * discipline or by a dcbx negotiation result.
2718         */
2719        bnx2x_setup_tc(bp->dev, bp->max_cos);
2720
2721        /* Add all NAPI objects */
2722        bnx2x_add_all_napi(bp);
2723        DP(NETIF_MSG_IFUP, "napi added\n");
2724        bnx2x_napi_enable(bp);
2725
2726        if (IS_PF(bp)) {
2727                /* set pf load just before approaching the MCP */
2728                bnx2x_set_pf_load(bp);
2729
2730                /* if mcp exists send load request and analyze response */
2731                if (!BP_NOMCP(bp)) {
2732                        /* attempt to load pf */
2733                        rc = bnx2x_nic_load_request(bp, &load_code);
2734                        if (rc)
2735                                LOAD_ERROR_EXIT(bp, load_error1);
2736
2737                        /* what did mcp say? */
2738                        rc = bnx2x_compare_fw_ver(bp, load_code, true);
2739                        if (rc) {
2740                                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2741                                LOAD_ERROR_EXIT(bp, load_error2);
2742                        }
2743                } else {
2744                        load_code = bnx2x_nic_load_no_mcp(bp, port);
2745                }
2746
2747                /* mark pmf if applicable */
2748                bnx2x_nic_load_pmf(bp, load_code);
2749
2750                /* Init Function state controlling object */
2751                bnx2x__init_func_obj(bp);
2752
2753                /* Initialize HW */
2754                rc = bnx2x_init_hw(bp, load_code);
2755                if (rc) {
2756                        BNX2X_ERR("HW init failed, aborting\n");
2757                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2758                        LOAD_ERROR_EXIT(bp, load_error2);
2759                }
2760        }
2761
2762        bnx2x_pre_irq_nic_init(bp);
2763
2764        /* Connect to IRQs */
2765        rc = bnx2x_setup_irqs(bp);
2766        if (rc) {
2767                BNX2X_ERR("setup irqs failed\n");
2768                if (IS_PF(bp))
2769                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2770                LOAD_ERROR_EXIT(bp, load_error2);
2771        }
2772
2773        /* Init per-function objects */
2774        if (IS_PF(bp)) {
2775                /* Setup NIC internals and enable interrupts */
2776                bnx2x_post_irq_nic_init(bp, load_code);
2777
2778                bnx2x_init_bp_objs(bp);
2779                bnx2x_iov_nic_init(bp);
2780
2781                /* Set AFEX default VLAN tag to an invalid value */
2782                bp->afex_def_vlan_tag = -1;
2783                bnx2x_nic_load_afex_dcc(bp, load_code);
2784                bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2785                rc = bnx2x_func_start(bp);
2786                if (rc) {
2787                        BNX2X_ERR("Function start failed!\n");
2788                        bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2789
2790                        LOAD_ERROR_EXIT(bp, load_error3);
2791                }
2792
2793                /* Send LOAD_DONE command to MCP */
2794                if (!BP_NOMCP(bp)) {
2795                        load_code = bnx2x_fw_command(bp,
2796                                                     DRV_MSG_CODE_LOAD_DONE, 0);
2797                        if (!load_code) {
2798                                BNX2X_ERR("MCP response failure, aborting\n");
2799                                rc = -EBUSY;
2800                                LOAD_ERROR_EXIT(bp, load_error3);
2801                        }
2802                }
2803
2804                /* initialize FW coalescing state machines in RAM */
2805                bnx2x_update_coalesce(bp);
2806        }
2807
2808        /* setup the leading queue */
2809        rc = bnx2x_setup_leading(bp);
2810        if (rc) {
2811                BNX2X_ERR("Setup leading failed!\n");
2812                LOAD_ERROR_EXIT(bp, load_error3);
2813        }
2814
2815        /* set up the rest of the queues */
2816        for_each_nondefault_eth_queue(bp, i) {
2817                if (IS_PF(bp))
2818                        rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2819                else /* VF */
2820                        rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2821                if (rc) {
2822                        BNX2X_ERR("Queue %d setup failed\n", i);
2823                        LOAD_ERROR_EXIT(bp, load_error3);
2824                }
2825        }
2826
2827        /* setup rss */
2828        rc = bnx2x_init_rss(bp);
2829        if (rc) {
2830                BNX2X_ERR("PF RSS init failed\n");
2831                LOAD_ERROR_EXIT(bp, load_error3);
2832        }
2833
2834        /* Now that Clients are configured we are ready to work */
2835        bp->state = BNX2X_STATE_OPEN;
2836
2837        /* Configure a ucast MAC */
2838        if (IS_PF(bp))
2839                rc = bnx2x_set_eth_mac(bp, true);
2840        else /* vf */
2841                rc = bnx2x_vfpf_config_mac(bp, bp->dev->ea, bp->fp->index,
2842                                           true);
2843        if (rc) {
2844                BNX2X_ERR("Setting Ethernet MAC failed\n");
2845                LOAD_ERROR_EXIT(bp, load_error3);
2846        }
2847
2848        if (IS_PF(bp) && bp->pending_max) {
2849                bnx2x_update_max_mf_config(bp, bp->pending_max);
2850                bp->pending_max = 0;
2851        }
2852
2853        if (bp->port.pmf) {
2854                rc = bnx2x_initial_phy_init(bp, load_mode);
2855                if (rc)
2856                        LOAD_ERROR_EXIT(bp, load_error3);
2857        }
2858        bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2859
2860        /* Start fast path */
2861
2862        /* Initialize Rx filter. */
2863        bnx2x_set_rx_mode_inner(bp);
2864
2865        /* Start Tx */
2866        switch (load_mode) {
2867        case LOAD_NORMAL:
2868                /* Tx queue should be only re-enabled */
2869                netif_tx_wake_all_queues(bp->dev);
2870                break;
2871
2872        case LOAD_OPEN:
2873                netif_tx_start_all_queues(bp->dev);
2874                cmb();
2875                break;
2876
2877        case LOAD_DIAG:
2878        case LOAD_LOOPBACK_EXT:
2879                bp->state = BNX2X_STATE_DIAG;
2880                break;
2881
2882        default:
2883                break;
2884        }
2885
2886        if (bp->port.pmf)
2887                bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2888        else
2889                bnx2x__link_status_update(bp);
2890
2891        /* start the timer */
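        /* AKAROS_PORT: arms an alarm on core 0's timer chain in place of
         * Linux's mod_timer(); set_awaiter_rel() takes the delay relative
         * to now. */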
2892        set_awaiter_rel(&bp->timer, bp->current_interval * 1000); // fudge
2893        set_alarm(&per_cpu_info[0].tchain, &bp->timer);
2894
2895        if (CNIC_ENABLED(bp))
2896                bnx2x_load_cnic(bp);
2897
2898        if (IS_PF(bp))
2899                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2900
2901        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2902                /* mark driver is loaded in shmem2 */
2903                uint32_t val;
2904                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2905                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2906                          val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2907                          DRV_FLAGS_CAPABILITIES_LOADED_L2);
2908        }
2909
2910        /* Wait for all pending SP commands to complete */
2911        if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2912                BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2913                bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2914                return -EBUSY;
2915        }
2916
2917        /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2918        if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2919                bnx2x_dcbx_init(bp, false);
2920
2921        DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2922
2923        return 0;
2924
2925#ifndef BNX2X_STOP_ON_ERROR
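/* Error unwind: each label below releases what was acquired before the
 * corresponding failure point, in reverse order of setup - queues and IRQs
 * (load_error3), the MCP load handshake (load_error2), NAPI and the pf_load
 * bit (load_error1), then memory (load_error0).
 */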
2926load_error3:
2927        if (IS_PF(bp)) {
2928                bnx2x_int_disable_sync(bp, 1);
2929
2930                /* Clean queueable objects */
2931                bnx2x_squeeze_objects(bp);
2932        }
2933
2934        /* Free SKBs, SGEs, TPA pool and driver internals */
2935        bnx2x_free_skbs(bp);
2936        for_each_rx_queue(bp, i)
2937                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2938
2939        /* Release IRQs */
2940        bnx2x_free_irq(bp);
2941load_error2:
2942        if (IS_PF(bp) && !BP_NOMCP(bp)) {
2943                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2944                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2945        }
2946
2947        bp->port.pmf = 0;
2948load_error1:
2949        bnx2x_napi_disable(bp);
2950        bnx2x_del_all_napi(bp);
2951
2952        /* clear pf_load status, as it was already set */
2953        if (IS_PF(bp))
2954                bnx2x_clear_pf_load(bp);
2955load_error0:
2956        bnx2x_free_fw_stats_mem(bp);
2957        bnx2x_free_fp_mem(bp);
2958        bnx2x_free_mem(bp);
2959
2960        return rc;
2961#endif /* ! BNX2X_STOP_ON_ERROR */
2962}
2963
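/* Wait for the consumer of every tx COS queue to catch up with its producer;
 * returns the first non-zero status from bnx2x_clean_tx_queue().
 */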
2964int bnx2x_drain_tx_queues(struct bnx2x *bp)
2965{
2966        uint8_t rc = 0, cos, i;
2967
2968        /* Wait until tx fastpath tasks complete */
2969        for_each_tx_queue(bp, i) {
2970                struct bnx2x_fastpath *fp = &bp->fp[i];
2971
2972                for_each_cos_in_tx_queue(fp, cos)
2973                        rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2974                if (rc)
2975                        return rc;
2976        }
2977        return 0;
2978}
2979
2980/* must be called with rtnl_lock */
2981int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2982{
2983panic("Not implemented");
2984#if 0 // AKAROS_PORT
2985        int i;
2986        bool global = false;
2987
2988        DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2989
2990        /* mark driver is unloaded in shmem2 */
2991        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2992                uint32_t val;
2993                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2994                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2995                          val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2996        }
2997
2998        if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2999            (bp->state == BNX2X_STATE_CLOSED ||
3000             bp->state == BNX2X_STATE_ERROR)) {
3001                /* We can get here if the driver has been unloaded
3002                 * during parity error recovery and is either waiting for a
3003                 * leader to complete or for other functions to unload and
3004                 * then ifdown has been issued. In this case we want to
3005                 * unload and let other functions complete a recovery
3006                 * process.
3007                 */
3008                bp->recovery_state = BNX2X_RECOVERY_DONE;
3009                bp->is_leader = 0;
3010                bnx2x_release_leader_lock(bp);
3011                mb();
3012
3013                DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3014                BNX2X_ERR("Can't unload in closed or error state\n");
3015                return -EINVAL;
3016        }
3017
3018        /* Nothing to do during unload if previous bnx2x_nic_load()
3019         * has not completed successfully - all resources are released.
3020         *
3021         * We can get here only after an unsuccessful ndo_* callback, during which
3022         * dev->IFF_UP flag is still on.
3023         */
3024        if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3025                return 0;
3026
3027        /* It's important to set the bp->state to the value different from
3028         * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3029         * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3030         */
3031        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3032        mb();
3033
3034        /* indicate to VFs that the PF is going down */
3035        bnx2x_iov_channel_down(bp);
3036
3037        if (CNIC_LOADED(bp))
3038                bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3039
3040        /* Stop Tx */
3041        bnx2x_tx_disable(bp);
3042        netdev_reset_tc(bp->dev);
3043
3044        bp->rx_mode = BNX2X_RX_MODE_NONE;
3045
3046        del_timer_sync(&bp->timer);
3047
3048        if (IS_PF(bp)) {
3049                /* Set ALWAYS_ALIVE bit in shmem */
3050                bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3051                bnx2x_drv_pulse(bp);
3052                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3053                bnx2x_save_statistics(bp);
3054        }
3055
3056        /* wait till consumers catch up with producers in all queues */
3057        bnx2x_drain_tx_queues(bp);
3058
3059        /* If VF, indicate to PF that this function is going down (PF will
3060         * delete sp elements and clear initializations).
3061         */
3062        if (IS_VF(bp))
3063                bnx2x_vfpf_close_vf(bp);
3064        else if (unload_mode != UNLOAD_RECOVERY)
3065                /* if this is a normal/close unload, we need to clean up the chip */
3066                bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3067        else {
3068                /* Send the UNLOAD_REQUEST to the MCP */
3069                bnx2x_send_unload_req(bp, unload_mode);
3070
3071                /* Prevent transactions to host from the functions on the
3072                 * engine that doesn't reset global blocks in case of global
3073                 * attention once global blocks are reset and gates are opened
3074                 * (the engine whose leader will perform the recovery
3075                 * last).
3076                 */
3077                if (!CHIP_IS_E1x(bp))
3078                        bnx2x_pf_disable(bp);
3079
3080                /* Disable HW interrupts, NAPI */
3081                bnx2x_netif_stop(bp, 1);
3082                /* Delete all NAPI objects */
3083                bnx2x_del_all_napi(bp);
3084                if (CNIC_LOADED(bp))
3085                        bnx2x_del_all_napi_cnic(bp);
3086                /* Release IRQs */
3087                bnx2x_free_irq(bp);
3088
3089                /* Report UNLOAD_DONE to MCP */
3090                bnx2x_send_unload_done(bp, false);
3091        }
3092
3093        /*
3094         * At this stage no more interrupts will arrive so we may safely clean
3095         * the queueable objects here in case they failed to get cleaned so far.
3096         */
3097        if (IS_PF(bp))
3098                bnx2x_squeeze_objects(bp);
3099
3100        /* There should be no more pending SP commands at this stage */
3101        bp->sp_state = 0;
3102
3103        bp->port.pmf = 0;
3104
3105        /* clear pending work in rtnl task */
3106        bp->sp_rtnl_state = 0;
3107        mb();
3108
3109        /* Free SKBs, SGEs, TPA pool and driver internals */
3110        bnx2x_free_skbs(bp);
3111        if (CNIC_LOADED(bp))
3112                bnx2x_free_skbs_cnic(bp);
3113        for_each_rx_queue(bp, i)
3114                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3115
3116        bnx2x_free_fp_mem(bp);
3117        if (CNIC_LOADED(bp))
3118                bnx2x_free_fp_mem_cnic(bp);
3119
3120        if (IS_PF(bp)) {
3121                if (CNIC_LOADED(bp))
3122                        bnx2x_free_mem_cnic(bp);
3123        }
3124        bnx2x_free_mem(bp);
3125
3126        bp->state = BNX2X_STATE_CLOSED;
3127        bp->cnic_loaded = false;
3128
3129        /* Clear driver version indication in shmem */
3130        if (IS_PF(bp))
3131                bnx2x_update_mng_version(bp);
3132
3133        /* Check if there are pending parity attentions. If there are - set
3134         * RECOVERY_IN_PROGRESS.
3135         */
3136        if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3137                bnx2x_set_reset_in_progress(bp);
3138
3139                /* Set RESET_IS_GLOBAL if needed */
3140                if (global)
3141                        bnx2x_set_reset_global(bp);
3142        }
3143
3144        /* The last driver must disable a "close the gate" if there is no
3145         * parity attention or "process kill" pending.
3146         */
3147        if (IS_PF(bp) &&
3148            !bnx2x_clear_pf_load(bp) &&
3149            bnx2x_reset_is_done(bp, BP_PATH(bp)))
3150                bnx2x_disable_close_the_gate(bp);
3151
3152        DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3153
3154        return 0;
3155#endif
3156}
3157
3158#if 0 // AKAROS_PORT
3159int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3160{
3161        uint16_t pmcsr;
3162
3163        /* If there is no power capability, silently succeed */
3164        if (!bp->pdev->pm_cap) {
3165                BNX2X_DEV_INFO("No power capability. Breaking.\n");
3166                return 0;
3167        }
3168
3169        pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3170
3171        switch (state) {
3172        case PCI_D0:
3173                pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3174                                      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3175                                       PCI_PM_CTRL_PME_STATUS));
3176
3177                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3178                        /* delay required during transition out of D3hot */
3179                        kthread_usleep(1000 * 20);
3180                break;
3181
3182        case PCI_D3hot:
3183                /* If there are other clients above, don't
3184                   shut down the power */
3185                if (atomic_read(&bp->pdev->enable_cnt) != 1)
3186                        return 0;
3187                /* Don't shut down the power for emulation and FPGA */
3188                if (CHIP_REV_IS_SLOW(bp))
3189                        return 0;
3190
3191                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3192                pmcsr |= 3;
3193
3194                if (bp->wol)
3195                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3196
3197                pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3198                                      pmcsr);
3199
3200                /* No more memory access after this point until
3201                 * device is brought back to D0.
3202                 */
3203                break;
3204
3205        default:
3206                dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3207                return -EINVAL;
3208        }
3209        return 0;
3210}
3211#endif
3212
3213/*
3214 * net_device service functions
3215 */
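/*
 * Akaros replacement for the Linux NAPI poll callback: runs as a kernel
 * message handler (a0 carries the fastpath pointer), services TX completions
 * and RX work in a loop, and exits only once no work is pending after
 * re-reading the status block (rmb below) and re-enabling the IGU interrupt
 * via bnx2x_ack_sb().
 */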
3216static void bnx2x_poll(uint32_t srcid, long a0, long a1, long a2)
3217{
3218        struct bnx2x_fastpath *fp = (struct bnx2x_fastpath*)a0;
3219        int work_done = 0;
3220        int budget = INT32_MAX; // AKAROS_PORT: budget comes from napi in Linux; just let it run
3221        uint8_t cos;
3222        struct bnx2x *bp = fp->bp;
3223
3224        while (1) {
3225#ifdef BNX2X_STOP_ON_ERROR
3226                if (unlikely(bp->panic)) {
3227                        /* void handler on Akaros - nothing to complete */
3228                        return;
3229                }
3230#endif
3231                if (!bnx2x_fp_lock_napi(fp))
3232                        return;
3233
3234                for_each_cos_in_tx_queue(fp, cos)
3235                        if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3236                                bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3237
3238                if (bnx2x_has_rx_work(fp)) {
3239                        work_done += bnx2x_rx_int(fp, budget - work_done);
3240
3241                        /* must not complete if we consumed full budget */
3242                        if (work_done >= budget) {
3243                                bnx2x_fp_unlock_napi(fp);
3244                                break;
3245                        }
3246                }
3247
3248                /* Fall out from the NAPI loop if needed */
3249                if (!bnx2x_fp_unlock_napi(fp) &&
3250                    !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3251
3252                        /* No need to update SB for FCoE L2 ring as long as
3253                         * it's connected to the default SB and the SB
3254                         * has been updated when NAPI was scheduled.
3255                         */
3256                        if (IS_FCOE_FP(fp)) {
3257                                napi_complete(napi);
3258                                break;
3259                        }
3260                        bnx2x_update_fpsb_idx(fp);
3261                        /* bnx2x_has_rx_work() reads the status block,
3262                         * thus we need to ensure that status block indices
3263                         * have been actually read (bnx2x_update_fpsb_idx)
3264                         * prior to this check (bnx2x_has_rx_work) so that
3265                         * we won't write the "newer" value of the status block
3266                         * to IGU (if there was a DMA right after
3267                         * bnx2x_has_rx_work and if there is no rmb, the memory
3268                         * reading (bnx2x_update_fpsb_idx) may be postponed
3269                         * to right before bnx2x_ack_sb). In this case there
3270                         * will never be another interrupt until there is
3271                         * another update of the status block, while there
3272                         * is still unhandled work.
3273                         */
3274                        rmb();
3275
3276                        if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3277                                napi_complete(napi);
3278                                /* Re-enable interrupts */
3279                                DP(NETIF_MSG_RX_STATUS,
3280                                   "Update index to %d\n", fp->fp_hc_idx);
3281                                bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3282                                             le16_to_cpu(fp->fp_hc_idx),
3283                                             IGU_INT_ENABLE, 1);
3284                                break;
3285                        }
3286                }
3287        }
3288}
3289
3290#ifdef CONFIG_NET_RX_BUSY_POLL
3291/* must be called with local_bh_disable()d */
3292int bnx2x_low_latency_recv(struct napi_struct *napi)
3293{
3294        struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3295                                                 napi);
3296        struct bnx2x *bp = fp->bp;
3297        int found = 0;
3298
3299        if ((bp->state == BNX2X_STATE_CLOSED) ||
3300            (bp->state == BNX2X_STATE_ERROR) ||
3301            (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3302                return LL_FLUSH_FAILED;
3303
3304        if (!bnx2x_fp_lock_poll(fp))
3305                return LL_FLUSH_BUSY;
3306
3307        if (bnx2x_has_rx_work(fp))
3308                found = bnx2x_rx_int(fp, 4);
3309
3310        bnx2x_fp_unlock_poll(fp);
3311
3312        return found;
3313}
3314#endif
3315
3316/* We split the first BD into a headers BD and a data BD
3317 * to ease the pain of our fellow microcode engineers;
3318 * we use one mapping for both BDs.
3319 */
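/*
 * Example with illustrative numbers: for hlen = 54 and a 1514-byte first BD,
 * the start BD is trimmed to 54 bytes and a new data BD is pointed at
 * mapping + 54 with nbytes = 1460.  Both BDs share the original DMA mapping;
 * BNX2X_TSO_SPLIT_BD tells the completion path not to unmap the data BD
 * separately.
 */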
3320static uint16_t bnx2x_tx_split(struct bnx2x *bp,
3321                          struct bnx2x_fp_txdata *txdata,
3322                          struct sw_tx_bd *tx_buf,
3323                          struct eth_tx_start_bd **tx_bd, uint16_t hlen,
3324                          uint16_t bd_prod)
3325{
3326        struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3327        struct eth_tx_bd *d_tx_bd;
3328        dma_addr_t mapping;
3329        int old_len = le16_to_cpu(h_tx_bd->nbytes);
3330
3331        /* first fix first BD */
3332        h_tx_bd->nbytes = cpu_to_le16(hlen);
3333
3334        DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3335           h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3336
3337        /* now get a new data BD
3338         * (after the pbd) and fill it */
3339        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3340        d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3341
3342        mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3343                           le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3344
3345        d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3346        d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3347        d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3348
3349        /* this marks the BD as one that has no individual mapping */
3350        tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3351
3352        DP(NETIF_MSG_TX_QUEUED,
3353           "TSO split data size is %d (%x:%x)\n",
3354           d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3355
3356        /* update tx_bd */
3357        *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3358
3359        return bd_prod;
3360}
3361
3362#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3363#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
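/*
 * Fix up a partially computed checksum when the checksum start point differs
 * from the transport header by 'fix' bytes: the extra (fix > 0) or missing
 * (fix < 0) span is folded back out of, or into, the sum.  Unused on Akaros
 * until checksum offload is wired up (hence the panic + #if 0 body).
 */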
3364static __le16 bnx2x_csum_fix(unsigned char *t_header, uint16_t csum,
3365                             int8_t fix)
3366{
3367panic("Not implemented");
3368#if 0 // AKAROS_PORT
3369        __sum16 tsum = (__force __sum16) csum;
3370
3371        if (fix > 0)
3372                tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3373                                  csum_partial(t_header - fix, fix, 0)));
3374
3375        else if (fix < 0)
3376                tsum = ~csum_fold(csum_add((__force __wsum) csum,
3377                                  csum_partial(t_header, -fix, 0)));
3378
3379        return bswab16(tsum);
3380#endif
3381}
3382
3383static uint32_t bnx2x_xmit_type(struct bnx2x *bp, struct block *block)
3384{
3385        uint32_t rc;
3386        __u8 prot = 0;
3387        __be16 protocol;
3388
3389        /* TODO: AKAROS_PORT ask for checksums */
3390        return XMIT_PLAIN;
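        /*
         * A possible future mapping (sketch only, nothing here is wired up):
         * translate block-layer checksum-offload requests into XMIT_CSUM_*
         * flags, assuming the Plan 9-style Bipck/Btcpck/Budpck bits are what
         * the Akaros stack would set on a block that wants offload.
         */
#if 0 /* AKAROS_PORT: hypothetical sketch */
        if (block->flag & (Btcpck | Budpck)) {
                rc = XMIT_CSUM_V4;      /* no v6 discrimination yet */
                if (block->flag & Btcpck)
                        rc |= XMIT_CSUM_TCP;
                return rc;
        }
        return XMIT_PLAIN;
#endif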
3391
3392#if 0 // AKAROS_PORT
3393        if (skb->ip_summed != CHECKSUM_PARTIAL)
3394                return XMIT_PLAIN;
3395
3396        protocol = vlan_get_protocol(skb);
3397        if (protocol == cpu_to_be16(ETH_P_IPV6)) {
3398                rc = XMIT_CSUM_V6;
3399                prot = ipv6_hdr(skb)->nexthdr;
3400        } else {
3401                rc = XMIT_CSUM_V4;
3402                prot = ip_hdr(skb)->protocol;
3403        }
3404
3405        if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3406                if (inner_ip_hdr(skb)->version == 6) {
3407                        rc |= XMIT_CSUM_ENC_V6;
3408                        if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3409                                rc |= XMIT_CSUM_TCP;
3410                } else {
3411                        rc |= XMIT_CSUM_ENC_V4;
3412                        if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3413                                rc |= XMIT_CSUM_TCP;
3414                }
3415        }
3416        if (prot == IPPROTO_TCP)
3417                rc |= XMIT_CSUM_TCP;
3418
3419        if (skb_is_gso(skb)) {
3420                if (skb_is_gso_v6(skb)) {
3421                        rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3422                        if (rc & XMIT_CSUM_ENC)
3423                                rc |= XMIT_GSO_ENC_V6;
3424                } else {
3425                        rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3426                        if (rc & XMIT_CSUM_ENC)
3427                                rc |= XMIT_GSO_ENC_V4;
3428                }
3429        }
3430
3431        return rc;
3432#endif
3433}
3434
3435#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3436/* check if packet requires linearization (packet is too fragmented);
3437   no need to check fragmentation if page size > 8K (there will be no
3438   violation of FW restrictions) */
3439static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3440                             uint32_t xmit_type)
3441{
3442panic("Not implemented");
3443#if 0 // AKAROS_PORT
3444        int to_copy = 0;
3445        int hlen = 0;
3446        int first_bd_sz = 0;
3447
3448        /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3449        if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3450
3451                if (xmit_type & XMIT_GSO) {
3452                        unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3453                        /* Check if LSO packet needs to be copied:
3454                           3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3455                        int wnd_size = MAX_FETCH_BD - 3;
3456                        /* Number of windows to check */
3457                        int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3458                        int wnd_idx = 0;
3459                        int frag_idx = 0;
3460                        uint32_t wnd_sum = 0;
3461
3462                        /* Headers length */
3463                        hlen = (int)(skb_transport_header(skb) - skb->data) +
3464                                tcp_hdrlen(skb);
3465
3466                        /* Amount of data (w/o headers) on linear part of SKB*/
3467                        first_bd_sz = skb_headlen(skb) - hlen;
3468
3469                        wnd_sum  = first_bd_sz;
3470
3471                        /* Calculate the first sum - it's special */
3472                        for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3473                                wnd_sum +=
3474                                        skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3475
3476                        /* If there was data on linear skb data - check it */
3477                        if (first_bd_sz > 0) {
3478                                if (unlikely(wnd_sum < lso_mss)) {
3479                                        to_copy = 1;
3480                                        goto exit_lbl;
3481                                }
3482
3483                                wnd_sum -= first_bd_sz;
3484                        }
3485
3486                        /* Others are easier: run through the frag list and
3487                           check all windows */
3488                        for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3489                                wnd_sum +=
3490                          skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3491
3492                                if (unlikely(wnd_sum < lso_mss)) {
3493                                        to_copy = 1;
3494                                        break;
3495                                }
3496                                wnd_sum -=
3497                                        skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3498                        }
3499                } else {
3500                        /* in the non-LSO case, a too-fragmented packet
3501                           should always be linearized */
3502                        to_copy = 1;
3503                }
3504        }
3505
3506exit_lbl:
3507        if (unlikely(to_copy))
3508                DP(NETIF_MSG_TX_QUEUED,
3509                   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3510                   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3511                   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3512
3513        return to_copy;
3514#endif
3515}
3516#endif
3517
3518/**
3519 * bnx2x_set_pbd_gso - update PBD in GSO case.
3520 *
3521 * @skb:        packet skb
3522 * @pbd:        parse BD
3523 * @xmit_type:  xmit flags
3524 */
3525static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3526                              struct eth_tx_parse_bd_e1x *pbd,
3527                              uint32_t xmit_type)
3528{
3529panic("Not implemented");
3530#if 0 // AKAROS_PORT
3531        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3532        pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3533        pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3534
3535        if (xmit_type & XMIT_GSO_V4) {
3536                pbd->ip_id = bswab16(ip_hdr(skb)->id);
3537                pbd->tcp_pseudo_csum =
3538                        bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3539                                                   ip_hdr(skb)->daddr,
3540                                                   0, IPPROTO_TCP, 0));
3541        } else {
3542                pbd->tcp_pseudo_csum =
3543                        bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3544                                                 &ipv6_hdr(skb)->daddr,
3545                                                 0, IPPROTO_TCP, 0));
3546        }
3547
3548        pbd->global_data |=
3549                cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3550#endif
3551}
3552
3553/**
3554 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3555 *
3556 * @bp:                 driver handle
3557 * @skb:                packet skb
3558 * @parsing_data:       data to be updated
3559 * @xmit_type:          xmit flags
3560 *
3561 * 57712/578xx related, when skb has encapsulation
3562 */
3563static uint8_t bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3564                                 uint32_t *parsing_data, uint32_t xmit_type)
3565{
3566panic("Not implemented");
3567#if 0 // AKAROS_PORT
3568        *parsing_data |=
3569                ((((uint8_t *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3570                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3571                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3572
3573        if (xmit_type & XMIT_CSUM_TCP) {
3574                *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3575                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3576                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3577
3578                return skb_inner_transport_header(skb) +
3579                        inner_tcp_hdrlen(skb) - skb->data;
3580        }
3581
3582        /* We support checksum offload for TCP and UDP only.
3583         * No need to pass the UDP header length - it's a constant.
3584         */
3585        return skb_inner_transport_header(skb) +
3586                sizeof(struct udphdr) - skb->data;
3587#endif
3588}
3589
3590/**
3591 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3592 *
3593 * @bp:                 driver handle
3594 * @skb:                packet skb
3595 * @parsing_data:       data to be updated
3596 * @xmit_type:          xmit flags
3597 *
3598 * 57712/578xx related
3599 */
3600static uint8_t bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3601                                uint32_t *parsing_data, uint32_t xmit_type)
3602{
3603panic("Not implemented");
3604#if 0 // AKAROS_PORT
3605        *parsing_data |=
3606                ((((uint8_t *)skb_transport_header(skb) - skb->data) >> 1) <<
3607                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3608                ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3609
3610        if (xmit_type & XMIT_CSUM_TCP) {
3611                *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3612                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3613                        ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3614
3615                return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3616        }
3617        /* We support checksum offload for TCP and UDP only.
3618         * No need to pass the UDP header length - it's a constant.
3619         */
3620        return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3621#endif
3622}
3623
3624/* set FW indication according to inner or outer protocols if tunneled */
3625static void bnx2x_set_sbd_csum(struct bnx2x *bp, void *ignored_skb,
3626                               struct eth_tx_start_bd *tx_start_bd,
3627                               uint32_t xmit_type)
3628{
3629        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3630
3631        if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3632                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3633
3634        if (!(xmit_type & XMIT_CSUM_TCP))
3635                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3636}
3637
3638/**
3639 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3640 *
3641 * @bp:         driver handle
3642 * @skb:        packet skb
3643 * @pbd:        parse BD to be updated
3644 * @xmit_type:  xmit flags
3645 */
3646static uint8_t bnx2x_set_pbd_csum(struct bnx2x *bp, struct block *block,
3647                             struct eth_tx_parse_bd_e1x *pbd,
3648                             uint32_t xmit_type)
3649{
3650panic("Not implemented");
3651#if 0 // AKAROS_PORT
3652        uint8_t hlen = (skb_network_header(skb) - skb->data) >> 1;
3653
3654        /* for now NS flag is not used in Linux */
3655        pbd->global_data =
3656                cpu_to_le16(hlen |
3657                            ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3658                             ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3659
3660        pbd->ip_hlen_w = (skb_transport_header(skb) -
3661                        skb_network_header(skb)) >> 1;
3662
3663        hlen += pbd->ip_hlen_w;
3664
3665        /* We support checksum offload for TCP and UDP only */
3666        if (xmit_type & XMIT_CSUM_TCP)
3667                hlen += tcp_hdrlen(skb) / 2;
3668        else
3669                hlen += sizeof(struct udphdr) / 2;
3670
3671        pbd->total_hlen_w = cpu_to_le16(hlen);
3672        hlen = hlen*2;
3673
3674        if (xmit_type & XMIT_CSUM_TCP) {
3675                pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3676
3677        } else {
3678                int8_t fix = SKB_CS_OFF(skb); /* signed! */
3679
3680                DP(NETIF_MSG_TX_QUEUED,
3681                   "hlen %d  fix %d  csum before fix %x\n",
3682                   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3683
3684                /* HW bug: fixup the CSUM */
3685                pbd->tcp_pseudo_csum =
3686                        bnx2x_csum_fix(skb_transport_header(skb),
3687                                       SKB_CS(skb), fix);
3688
3689                DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3690                   pbd->tcp_pseudo_csum);
3691        }
3692
3693        return hlen;
3694#endif
3695}
3696
3697static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3698                                      struct eth_tx_parse_bd_e2 *pbd_e2,
3699                                      struct eth_tx_parse_2nd_bd *pbd2,
3700                                      uint16_t *global_data,
3701                                      uint32_t xmit_type)
3702{
3703panic("Not implemented");
3704#if 0 // AKAROS_PORT
3705        uint16_t hlen_w = 0;
3706        uint8_t outerip_off, outerip_len = 0;
3707
3708        /* from outer IP to transport */
3709        hlen_w = (skb_inner_transport_header(skb) -
3710                  skb_network_header(skb)) >> 1;
3711
3712        /* transport len */
3713        hlen_w += inner_tcp_hdrlen(skb) >> 1;
3714
3715        pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3716
3717        /* outer IP header info */
3718        if (xmit_type & XMIT_CSUM_V4) {
3719                struct iphdr *iph = ip_hdr(skb);
3720                uint32_t csum = (__force uint32_t)(~iph->check) -
3721                           (__force uint32_t)iph->tot_len -
3722                           (__force uint32_t)iph->frag_off;
3723
3724                outerip_len = iph->ihl << 1;
3725
3726                pbd2->fw_ip_csum_wo_len_flags_frag =
3727                        bswab16(csum_fold((__force __wsum)csum));
3728        } else {
3729                pbd2->fw_ip_hdr_to_payload_w =
3730                        hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3731                pbd_e2->data.tunnel_data.flags |=
3732                        ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
3733        }
3734
3735        pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3736
3737        pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3738
3739        /* inner IP header info */
3740        if (xmit_type & XMIT_CSUM_ENC_V4) {
3741                pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3742
3743                pbd_e2->data.tunnel_data.pseudo_csum =
3744                        bswab16(~csum_tcpudp_magic(
3745                                        inner_ip_hdr(skb)->saddr,
3746                                        inner_ip_hdr(skb)->daddr,
3747                                        0, IPPROTO_TCP, 0));
3748        } else {
3749                pbd_e2->data.tunnel_data.pseudo_csum =
3750                        bswab16(~csum_ipv6_magic(
3751                                        &inner_ipv6_hdr(skb)->saddr,
3752                                        &inner_ipv6_hdr(skb)->daddr,
3753                                        0, IPPROTO_TCP, 0));
3754        }
3755
3756        outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3757
3758        *global_data |=
3759                outerip_off |
3760                (outerip_len <<
3761                        ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3762                ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3763                        ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3764
3765        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3766                SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3767                pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3768        }
3769#endif
3770}
3771
3772static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb,
3773                                         uint32_t *parsing_data,
3774                                         uint32_t xmit_type)
3775{
3776panic("Not implemented");
3777#if 0 // AKAROS_PORT
3778        struct ipv6hdr *ipv6;
3779
3780        if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3781                return;
3782
3783        if (xmit_type & XMIT_GSO_ENC_V6)
3784                ipv6 = inner_ipv6_hdr(skb);
3785        else /* XMIT_GSO_V6 */
3786                ipv6 = ipv6_hdr(skb);
3787
3788        if (ipv6->nexthdr == NEXTHDR_IPV6)
3789                *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3790#endif
3791}
3792
3793/* called with netif_tx_lock
3794 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3795 * netif_wake_queue()
3796 */
3797netdev_tx_t bnx2x_start_xmit(struct block *block,
3798                             struct bnx2x_fp_txdata *txdata)
3799{
3800        struct bnx2x *bp = txdata->parent_fp->bp;
3801
3802        struct sw_tx_bd *tx_buf;
3803        struct eth_tx_start_bd *tx_start_bd, *first_bd;
3804        struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3805        struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3806        struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3807        struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3808        uint32_t pbd_e2_parsing_data = 0;
3809        uint16_t pkt_prod, bd_prod;
3810        int nbd, txq_index;
3811        dma_addr_t mapping;
3812        uint32_t xmit_type = bnx2x_xmit_type(bp, block);
3813        int i;
3814        uint8_t hlen = 0;
3815        __le16 pkt_size = 0;
3816        struct etherpkt *eth;
3817        uint8_t mac_type = UNICAST_ADDRESS;
3818
3819#ifdef BNX2X_STOP_ON_ERROR
3820        if (unlikely(bp->panic))
3821                return NETDEV_TX_BUSY;
3822#endif
3823
3824        txq_index = txdata->txq_index;
3825        assert(txq_index == 0); // AKAROS_PORT until we get multi-queue working
3826        assert(txdata == &bp->bnx2x_txq[txq_index]);
3827
3828        assert(!(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)));
3829
3830
3831        /* enable this debug print to view the transmission queue being used
3832        DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3833           txq_index, fp_index, txdata_index); */
3834
3835        /* enable this debug print to view the transmission details
3836        DP(NETIF_MSG_TX_QUEUED,
3837           "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3838           txdata->cid, fp_index, txdata_index, txdata, fp); */
3839
3840        if (unlikely(bnx2x_tx_avail(bp, txdata) <
3841#if 0 // AKAROS_PORT TODO: block extra
3842                        skb_shinfo(skb)->nr_frags +
3843#else
3844                        1 +
3845#endif
3846                        BDS_PER_TX_PKT +
3847                        NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3848                /* Handle special storage cases separately */
3849                if (txdata->tx_ring_size == 0) {
3850                        struct bnx2x_eth_q_stats *q_stats =
3851                                bnx2x_fp_qstats(bp, txdata->parent_fp);
3852                        q_stats->driver_filtered_tx_pkt++;
3853                        freeb(block);
3854                        return NETDEV_TX_OK;
3855                }
3856                bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3857                netif_tx_stop_queue(txq);
3858                BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3859
3860                return NETDEV_TX_BUSY;
3861        }
3862
3863#if 0 // AKAROS_PORT
3864        DP(NETIF_MSG_TX_QUEUED,
3865           "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3866           txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3867           ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3868           skb->len);
3869#endif
3870
3871        eth = (struct etherpkt *)block->rp;
3872
3873        /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3874        if (unlikely(is_multicast_ether_addr(eth->d))) {
3875                if (eaddrcmp(eth->d, bp->edev->bcast))
3876                        mac_type = BROADCAST_ADDRESS;
3877                else
3878                        mac_type = MULTICAST_ADDRESS;
3879        }
3880
3881#if 0 // AKAROS_PORT TODO block extra
3882#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3883        /* First, check if we need to linearize the skb (due to FW
3884           restrictions). No need to check fragmentation if page size > 8K
3885           (there will be no violation to FW restrictions) */
3886        if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3887                /* Statistics of linearization */
3888                bp->lin_cnt++;
3889                if (skb_linearize(skb) != 0) {
3890                        DP(NETIF_MSG_TX_QUEUED,
3891                           "SKB linearization failed - silently dropping this SKB\n");
3892                        dev_kfree_skb_any(skb);
3893                        return NETDEV_TX_OK;
3894                }
3895        }
3896#endif
3897#endif
3898        /* Map skb linear data for DMA */
3899        mapping = dma_map_single(&bp->pdev->dev, block->rp,
3900                                 BLEN(block), DMA_TO_DEVICE);
3901        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3902                DP(NETIF_MSG_TX_QUEUED,
3903                   "SKB mapping failed - silently dropping this SKB\n");
3904                freeb(block);
3905                return NETDEV_TX_OK;
3906        }
3907        /*
3908        Please read carefully. First we use one BD which we mark as start,
3909        then we have a parsing info BD (used for TSO or xsum),
3910        and only then we have the rest of the TSO BDs.
3911        (don't forget to mark the last one as last,
3912        and to unmap only AFTER you write to the BD ...)
3913        And above all, all PBD sizes are in words - NOT DWORDS!
3914        */
3915
3916        /* get current pkt produced now - advance it just before sending packet
3917         * since mapping of pages may fail and cause packet to be dropped
3918         */
3919        pkt_prod = txdata->tx_pkt_prod;
3920        bd_prod = TX_BD(txdata->tx_bd_prod);
3921
3922        /* get a tx_buf and first BD
3923         * tx_start_bd may be changed during SPLIT,
3924         * but first_bd will always stay first
3925         */
3926        tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3927        tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3928        first_bd = tx_start_bd;
3929
3930        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3931
3932        /* header nbd: indirectly zero other flags! */
3933        tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3934
3935        /* remember the first BD of the packet */
3936        tx_buf->first_bd = txdata->tx_bd_prod;
3937        tx_buf->block = block;
3938        tx_buf->flags = 0;
3939
3940        DP(NETIF_MSG_TX_QUEUED,
3941           "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3942           pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3943
3944#if 0 // AKAROS_PORT skipping vlan stuff
3945        if (vlan_tx_tag_present(skb)) {
3946                tx_start_bd->vlan_or_ethertype =
3947                    cpu_to_le16(vlan_tx_tag_get(skb));
3948                tx_start_bd->bd_flags.as_bitfield |=
3949                    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3950        } else {
3951#else
3952        {
3953#endif
3954                /* when transmitting in a vf, start bd must hold the ethertype
3955                 * for fw to enforce it
3956                 */
3957                // AKAROS_PORT
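                /* eth->type is big-endian on the wire; fold it into host
                 * order here, then cpu_to_le16() below yields the LE value
                 * the FW expects. */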
3958                uint16_t type_le16 = (eth->type[0] << 8) | eth->type[1];
3959#ifndef BNX2X_STOP_ON_ERROR
3960                if (IS_VF(bp))
3961#endif
3962                        tx_start_bd->vlan_or_ethertype =
3963                                cpu_to_le16(type_le16);
3964#ifndef BNX2X_STOP_ON_ERROR
3965                else
3966                        /* used by FW for packet accounting */
3967                        tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3968#endif
3969        }
3970
3971        nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3972
3973        /* turn on parsing and get a BD */
3974        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3975
3976        if (xmit_type & XMIT_CSUM)
3977                bnx2x_set_sbd_csum(bp, block, tx_start_bd, xmit_type);
3978
3979        if (!CHIP_IS_E1x(bp)) {
3980                panic("Not implemented");
3981                #if 0 // AKAROS_PORT
3982                pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3983                memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3984
3985                if (xmit_type & XMIT_CSUM_ENC) {
3986                        uint16_t global_data = 0;
3987
3988                        /* Set PBD in enc checksum offload case */
3989                        hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3990                                                      &pbd_e2_parsing_data,
3991                                                      xmit_type);
3992
3993                        /* turn on 2nd parsing and get a BD */
3994                        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3995
3996                        pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3997
3998                        memset(pbd2, 0, sizeof(*pbd2));
3999
4000                        pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
4001                                (skb_inner_network_header(skb) -
4002                                 skb->data) >> 1;
4003
4004                        if (xmit_type & XMIT_GSO_ENC)
4005                                bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
4006                                                          &global_data,
4007                                                          xmit_type);
4008
4009                        pbd2->global_data = cpu_to_le16(global_data);
4010
4011                        /* add additional parse BD indication to start BD */
4012                        SET_FLAG(tx_start_bd->general_data,
4013                                 ETH_TX_START_BD_PARSE_NBDS, 1);
4014                        /* set encapsulation flag in start BD */
4015                        SET_FLAG(tx_start_bd->general_data,
4016                                 ETH_TX_START_BD_TUNNEL_EXIST, 1);
4017
4018                        tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
4019
4020                        nbd++;
4021                } else if (xmit_type & XMIT_CSUM) {
4022                        /* Set PBD in checksum offload case w/o encapsulation */
4023                        hlen = bnx2x_set_pbd_csum_e2(bp, skb,
4024                                                     &pbd_e2_parsing_data,
4025                                                     xmit_type);
4026                }
4027
4028                bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
4029                /* Add the macs to the parsing BD if this is a vf or if
4030                 * Tx Switching is enabled.
4031                 */
4032                if (IS_VF(bp)) {
4033                        /* override GRE parameters in BD */
4034                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4035                                              &pbd_e2->data.mac_addr.src_mid,
4036                                              &pbd_e2->data.mac_addr.src_lo,
4037                                              eth->h_source);
4038
4039                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
4040                                              &pbd_e2->data.mac_addr.dst_mid,
4041                                              &pbd_e2->data.mac_addr.dst_lo,
4042                                              eth->h_dest);
4043                } else {
4044                        if (bp->flags & TX_SWITCHING)
4045                                bnx2x_set_fw_mac_addr(
4046                                                &pbd_e2->data.mac_addr.dst_hi,
4047                                                &pbd_e2->data.mac_addr.dst_mid,
4048                                                &pbd_e2->data.mac_addr.dst_lo,
4049                                                eth->h_dest);
4050#ifdef BNX2X_STOP_ON_ERROR
4051                        /* Enforce security is always set in Stop on Error -
4052                         * source mac should be present in the parsing BD
4053                         */
4054                        bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4055                                              &pbd_e2->data.mac_addr.src_mid,
4056                                              &pbd_e2->data.mac_addr.src_lo,
4057                                              eth->h_source);
4058#endif
4059                }
4060
4061                SET_FLAG(pbd_e2_parsing_data,
4062                         ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4063                #endif
4064        } else {
4065                uint16_t global_data = 0;
4066                pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4067                memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4068                /* Set PBD in checksum offload case */
4069                if (xmit_type & XMIT_CSUM) {
4070                        panic("Not implemented");
4071                        #if 0 // AKAROS_PORT (xsum offload)
4072                        hlen = bnx2x_set_pbd_csum(bp, block, pbd_e1x, xmit_type);
4073                        #endif
4074                }
4075
4076                SET_FLAG(global_data,
4077                         ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4078                pbd_e1x->global_data |= cpu_to_le16(global_data);
4079        }
4080
4081        /* Setup the data pointer of the first BD of the packet */
4082        tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4083        tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4084        tx_start_bd->nbytes = cpu_to_le16(BLEN(block));
4085        pkt_size = tx_start_bd->nbytes;
4086
4087        DP(NETIF_MSG_TX_QUEUED,
4088           "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4089           tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4090           le16_to_cpu(tx_start_bd->nbytes),
4091           tx_start_bd->bd_flags.as_bitfield,
4092           le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4093
4094        if (xmit_type & XMIT_GSO) {
4095                panic("Not implemented");
4096                #if 0 // AKAROS_PORT
4097
4098                DP(NETIF_MSG_TX_QUEUED,
4099                   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4100                   skb->len, hlen, skb_headlen(skb),
4101                   skb_shinfo(skb)->gso_size);
4102
4103                tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4104
4105                if (unlikely(skb_headlen(skb) > hlen)) {
4106                        nbd++;
4107                        bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4108                                                 &tx_start_bd, hlen,
4109                                                 bd_prod);
4110                }
4111                if (!CHIP_IS_E1x(bp))
4112                        pbd_e2_parsing_data |=
4113                                (skb_shinfo(skb)->gso_size <<
4114                                 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4115                                 ETH_TX_PARSE_BD_E2_LSO_MSS;
4116                else
4117                        bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4118                #endif
4119        }
4120
4121        /* Set the PBD's parsing_data field if not zero
4122         * (for the chips newer than 57711).
4123         */
4124        if (pbd_e2_parsing_data)
4125                pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4126
4127        tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4128
4129#if 0 // AKAROS_PORT TODO block extra
4130        /* Handle fragmented skb */
4131        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4132                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4133
4134                mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4135                                           skb_frag_size(frag), DMA_TO_DEVICE);
4136                if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4137                        unsigned int pkts_compl = 0, bytes_compl = 0;
4138
4139                        DP(NETIF_MSG_TX_QUEUED,
4140                           "Unable to map page - dropping packet...\n");
4141
4142                        /* we need unmap all buffers already mapped
4143                         * for this SKB;
4144                         * first_bd->nbd need to be properly updated
4145                         * before call to bnx2x_free_tx_pkt
4146                         */
4147                        first_bd->nbd = cpu_to_le16(nbd);
4148                        bnx2x_free_tx_pkt(bp, txdata,
4149                                          TX_BD(txdata->tx_pkt_prod),
4150                                          &pkts_compl, &bytes_compl);
4151                        return NETDEV_TX_OK;
4152                }
4153
4154                bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4155                tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4156                if (total_pkt_bd == NULL)
4157                        total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4158
4159                tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4160                tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4161                tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4162                le16_add_cpu(&pkt_size, skb_frag_size(frag));
4163                nbd++;
4164
4165                DP(NETIF_MSG_TX_QUEUED,
4166                   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4167                   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4168                   le16_to_cpu(tx_data_bd->nbytes));
4169        }
4170#endif
4171
4172        DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4173
4174        /* update with actual num BDs */
4175        first_bd->nbd = cpu_to_le16(nbd);
4176
4177        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4178
4179        /* now send a tx doorbell, counting the next BD
4180         * if the packet contains or ends with it
4181         */
4182        if (TX_BD_POFF(bd_prod) < nbd)
4183                nbd++;
4184
4185        /* total_pkt_bytes should be set on the first data BD if
4186         * it's not an LSO packet and there is more than one
4187         * data BD. In this case pkt_size is limited by an MTU value.
4188         * However we prefer to set it for an LSO packet (while we don't
4189         * have to) in order to save some CPU cycles in the non-LSO
4190         * case, where we care much more about them.
4191         */
4192        if (total_pkt_bd != NULL)
4193                total_pkt_bd->total_pkt_bytes = pkt_size;
4194
4195        if (pbd_e1x)
4196                DP(NETIF_MSG_TX_QUEUED,
4197                   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4198                   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4199                   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4200                   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4201                    le16_to_cpu(pbd_e1x->total_hlen_w));
4202        if (pbd_e2)
4203                DP(NETIF_MSG_TX_QUEUED,
4204                   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4205                   pbd_e2,
4206                   pbd_e2->data.mac_addr.dst_hi,
4207                   pbd_e2->data.mac_addr.dst_mid,
4208                   pbd_e2->data.mac_addr.dst_lo,
4209                   pbd_e2->data.mac_addr.src_hi,
4210                   pbd_e2->data.mac_addr.src_mid,
4211                   pbd_e2->data.mac_addr.src_lo,
4212                   pbd_e2->parsing_data);
4213        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4214
4215        netdev_tx_sent_queue(txq, skb->len);
4216
4217        skb_tx_timestamp(skb);
4218
4219        txdata->tx_pkt_prod++;
4220        /*
4221         * Make sure that the BD data is updated before updating the producer
4222         * since FW might read the BD right after the producer is updated.
4223         * This is only applicable for weak-ordered memory model archs such
4224         * as IA-64. The following barrier is also mandatory since the FW
4225         * assumes that packets always have BDs.
4226         */
4227        wmb();
4228
4229        txdata->tx_db.data.prod += nbd;
4230        cmb();
4231
4232        DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4233
4234        bus_wmb();
4235
4236        txdata->tx_bd_prod += nbd;
4237
4238        txdata->tx_pkt++;
4239
4240        return NETDEV_TX_OK;
4241}
4242
4243/**
4244 * bnx2x_setup_tc - routine to configure net_device for multi tc
4245 *
4246 * @dev:        net device to configure
4247 * @num_tc:     number of traffic classes to enable
4248 *
4249 * callback connected to the ndo_setup_tc function pointer
4250 */
4251int bnx2x_setup_tc(struct ether *dev, uint8_t num_tc)
4252{
4253        /* XME skipping traffic classes */
4254        return 0;
4255#if 0 // AKAROS_PORT
4256        int cos, prio, count, offset;
4257        struct bnx2x *bp = netdev_priv(dev);
4258
4259        /* setup tc must be called under rtnl lock */
4260        ASSERT_RTNL();
4261
4262        /* no traffic classes requested. Aborting */
4263        if (!num_tc) {
4264                netdev_reset_tc(dev);
4265                return 0;
4266        }
4267
4268        /* requested to support too many traffic classes */
4269        if (num_tc > bp->max_cos) {
4270                BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4271                          num_tc, bp->max_cos);
4272                return -EINVAL;
4273        }
4274
4275        /* declare the number of supported traffic classes */
4276        if (netdev_set_num_tc(dev, num_tc)) {
4277                BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4278                return -EINVAL;
4279        }
4280
4281        /* configure priority to traffic class mapping */
4282        for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4283                netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4284                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4285                   "mapping priority %d to tc %d\n",
4286                   prio, bp->prio_to_cos[prio]);
4287        }
4288
4289        /* Use this configuration to differentiate tc0 from other COSes.
4290           This can be used for ETS or PFC, and saves the effort of setting
4291           up a multi-class queue disc or negotiating DCBX with a switch:
4292        netdev_set_prio_tc_map(dev, 0, 0);
4293        DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4294        for (prio = 1; prio < 16; prio++) {
4295                netdev_set_prio_tc_map(dev, prio, 1);
4296                DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4297        } */
4298
4299        /* configure traffic class to transmission queue mapping */
4300        for (cos = 0; cos < bp->max_cos; cos++) {
4301                count = BNX2X_NUM_ETH_QUEUES(bp);
4302                offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4303                netdev_set_tc_queue(dev, cos, count, offset);
4304                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4305                   "mapping tc %d to offset %d count %d\n",
4306                   cos, offset, count);
4307        }
4308
4309        return 0;
4310#endif
4311}
4312
4313/* called with rtnl_lock */
4314int bnx2x_change_mac_addr(struct ether *dev, void *p)
4315{
4316        panic("Not implemented");
4317#if 0 // AKAROS_PORT
4318        struct sockaddr *addr = p;
4319        struct bnx2x *bp = netdev_priv(dev);
4320        int rc = 0;
4321
4322        if (!is_valid_ether_addr(addr->sa_data)) {
4323                BNX2X_ERR("Requested MAC address is not valid\n");
4324                return -EINVAL;
4325        }
4326
4327        if (IS_MF_STORAGE_ONLY(bp)) {
4328                BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4329                return -EINVAL;
4330        }
4331
4332        if (netif_running(dev))  {
4333                rc = bnx2x_set_eth_mac(bp, false);
4334                if (rc)
4335                        return rc;
4336        }
4337
4338        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4339
4340        if (netif_running(dev))
4341                rc = bnx2x_set_eth_mac(bp, true);
4342
4343        return rc;
4344#endif
4345}
4346
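/* Free the per-queue memory of fastpath 'fp_index': its status block (for
 * non-FCoE queues), the Rx buffer/descriptor/completion/SGE rings, and the
 * Tx buffer/descriptor rings for every COS.
 */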
4347static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4348{
4349        union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4350        struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4351        uint8_t cos;
4352
4353        /* Common */
4354
4355        if (IS_FCOE_IDX(fp_index)) {
4356                memset(sb, 0, sizeof(union host_hc_status_block));
4357                fp->status_blk_mapping = 0;
4358        } else {
4359                /* status blocks */
4360                if (!CHIP_IS_E1x(bp))
4361                        BNX2X_PCI_FREE(sb->e2_sb,
4362                                       bnx2x_fp(bp, fp_index,
4363                                                status_blk_mapping),
4364                                       sizeof(struct host_hc_status_block_e2));
4365                else
4366                        BNX2X_PCI_FREE(sb->e1x_sb,
4367                                       bnx2x_fp(bp, fp_index,
4368                                                status_blk_mapping),
4369                                       sizeof(struct host_hc_status_block_e1x));
4370        }
4371
4372        /* Rx */
4373        if (!skip_rx_queue(bp, fp_index)) {
4374                bnx2x_free_rx_bds(fp);
4375
4376                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4377                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4378                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4379                               bnx2x_fp(bp, fp_index, rx_desc_mapping),
4380                               sizeof(struct eth_rx_bd) * NUM_RX_BD);
4381
4382                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4383                               bnx2x_fp(bp, fp_index, rx_comp_mapping),
4384                               sizeof(struct eth_fast_path_rx_cqe) *
4385                               NUM_RCQ_BD);
4386
4387                /* SGE ring */
4388                BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4389                BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4390                               bnx2x_fp(bp, fp_index, rx_sge_mapping),
4391                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4392        }
4393
4394        /* Tx */
4395        if (!skip_tx_queue(bp, fp_index)) {
4396                /* fastpath tx rings: tx_buf tx_desc */
4397                for_each_cos_in_tx_queue(fp, cos) {
4398                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4399
4400                        DP(NETIF_MSG_IFDOWN,
4401                           "freeing tx memory of fp %d cos %d cid %d\n",
4402                           fp_index, cos, txdata->cid);
4403
4404                        BNX2X_FREE(txdata->tx_buf_ring);
4405                        BNX2X_PCI_FREE(txdata->tx_desc_ring,
4406                                txdata->tx_desc_mapping,
4407                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4408                }
4409        }
4410        /* end of fastpath */
4411}
4412
4413static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4414{
4415        int i;
4416        for_each_cnic_queue(bp, i)
4417                bnx2x_free_fp_mem_at(bp, i);
4418}
4419
4420void bnx2x_free_fp_mem(struct bnx2x *bp)
4421{
4422        int i;
4423        for_each_eth_queue(bp, i)
4424                bnx2x_free_fp_mem_at(bp, i);
4425}
4426
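/* Point the fastpath's sb_index_values and sb_running_index shortcuts at the
 * E2 or E1x status block layout, depending on the chip family.
 */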
4427static void set_sb_shortcuts(struct bnx2x *bp, int index)
4428{
4429        union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4430        if (!CHIP_IS_E1x(bp)) {
4431                bnx2x_fp(bp, index, sb_index_values) =
4432                        (__le16 *)status_blk.e2_sb->sb.index_values;
4433                bnx2x_fp(bp, index, sb_running_index) =
4434                        (__le16 *)status_blk.e2_sb->sb.running_index;
4435        } else {
4436                bnx2x_fp(bp, index, sb_index_values) =
4437                        (__le16 *)status_blk.e1x_sb->sb.index_values;
4438                bnx2x_fp(bp, index, sb_running_index) =
4439                        (__le16 *)status_blk.e1x_sb->sb.running_index;
4440        }
4441}
4442
4443/* Returns the number of actually allocated BDs */
4444static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4445                              int rx_ring_size)
4446{
4447        struct bnx2x *bp = fp->bp;
4448        uint16_t ring_prod, cqe_ring_prod;
4449        int i, failure_cnt = 0;
4450
4451        fp->rx_comp_cons = 0;
4452        cqe_ring_prod = ring_prod = 0;
4453
4454        /* This routine is called only during fw init, so
4455         * fp->eth_q_stats.rx_skb_alloc_failed = 0
4456         */
4457        for (i = 0; i < rx_ring_size; i++) {
4458                if (bnx2x_alloc_rx_data(bp, fp, ring_prod, MEM_WAIT) < 0) {
4459                        failure_cnt++;
4460                        continue;
4461                }
4462                ring_prod = NEXT_RX_IDX(ring_prod);
4463                cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4464                warn_on(ring_prod <= (i - failure_cnt));
4465        }
4466
4467        if (failure_cnt)
4468                BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4469                          i - failure_cnt, fp->index);
4470
4471        fp->rx_bd_prod = ring_prod;
4472        /* Limit the CQE producer by the CQE ring size */
4473        fp->rx_comp_prod = MIN_T(uint16_t, NUM_RCQ_RINGS * RCQ_DESC_CNT,
4474                                 cqe_ring_prod);
4475        fp->rx_pkt = fp->rx_calls = 0;
4476
4477        bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4478
4479        return i - failure_cnt;
4480}
4481
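/* Chain the RCQ pages: the last CQE of every page is turned into a
 * next-page pointer to the following page, wrapping back to page 0 after
 * the last one.
 */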
4482static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4483{
4484        int i;
4485
4486        for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4487                struct eth_rx_cqe_next_page *nextpg;
4488
4489                nextpg = (struct eth_rx_cqe_next_page *)
4490                        &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4491                nextpg->addr_hi =
4492                        cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4493                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4494                nextpg->addr_lo =
4495                        cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4496                                   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4497        }
4498}
4499
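/* Allocate the per-queue memory of fastpath 'index': status block, Tx rings
 * for every COS, and the Rx rings.  On an allocation failure the queue is
 * kept only if the partially filled Rx ring still meets the FW minimum size;
 * otherwise its memory is released and -ENOMEM is returned.
 */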
4500static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4501{
4502        union host_hc_status_block *sb;
4503        struct bnx2x_fastpath *fp = &bp->fp[index];
4504        int ring_size = 0;
4505        uint8_t cos;
4506        int rx_ring_size = 0;
4507
4508        if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4509                rx_ring_size = MIN_RX_SIZE_NONTPA;
4510                bp->rx_ring_size = rx_ring_size;
4511        } else if (!bp->rx_ring_size) {
4512                rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4513
4514                if (CHIP_IS_E3(bp)) {
4515                        uint32_t cfg = SHMEM_RD(bp,
4516                                           dev_info.port_hw_config[BP_PORT(bp)].
4517                                           default_cfg);
4518
4519                        /* Decrease ring size for 1G functions */
4520                        if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4521                            PORT_HW_CFG_NET_SERDES_IF_SGMII)
4522                                rx_ring_size /= 10;
4523                }
4524
4525                /* allocate at least the number of buffers required by the FW */
4526                rx_ring_size = MAX_T(int,
4527                                     bp->disable_tpa ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA,
4528                                     rx_ring_size);
4529
4530                bp->rx_ring_size = rx_ring_size;
4531        } else /* if rx_ring_size specified - use it */
4532                rx_ring_size = bp->rx_ring_size;
4533
4534        DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4535
4536        /* Common */
4537        sb = &bnx2x_fp(bp, index, status_blk);
4538
4539        if (!IS_FCOE_IDX(index)) {
4540                /* status blocks */
4541                if (!CHIP_IS_E1x(bp)) {
4542                        sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4543                                                    sizeof(struct host_hc_status_block_e2));
4544                        if (!sb->e2_sb)
4545                                goto alloc_mem_err;
4546                } else {
4547                        sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4548                                                     sizeof(struct host_hc_status_block_e1x));
4549                        if (!sb->e1x_sb)
4550                                goto alloc_mem_err;
4551                }
4552        }
4553
4554        /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4555         * set shortcuts for it.
4556         */
4557        if (!IS_FCOE_IDX(index))
4558                set_sb_shortcuts(bp, index);
4559
4560        /* Tx */
4561        if (!skip_tx_queue(bp, index)) {
4562                /* fastpath tx rings: tx_buf tx_desc */
4563                for_each_cos_in_tx_queue(fp, cos) {
4564                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4565
4566                        DP(NETIF_MSG_IFUP,
4567                           "allocating tx memory of fp %d cos %d\n",
4568                           index, cos);
4569
4570                        txdata->tx_buf_ring = kzmalloc((NUM_TX_BD) * (sizeof(struct sw_tx_bd)),
4571                                                       MEM_WAIT);
4572                        if (!txdata->tx_buf_ring)
4573                                goto alloc_mem_err;
4574                        txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4575                                                               sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4576                        if (!txdata->tx_desc_ring)
4577                                goto alloc_mem_err;
4578                }
4579        }
4580
4581        /* Rx */
4582        if (!skip_rx_queue(bp, index)) {
4583                /* fastpath rx rings: rx_buf rx_desc rx_comp */
4584                bnx2x_fp(bp, index, rx_buf_ring) =
4585                        kzmalloc((NUM_RX_BD) * (sizeof(struct sw_rx_bd)),
4586                                 MEM_WAIT);
4587                if (!bnx2x_fp(bp, index, rx_buf_ring))
4588                        goto alloc_mem_err;
4589                bnx2x_fp(bp, index, rx_desc_ring) =
4590                        BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4591                                        sizeof(struct eth_rx_bd) * NUM_RX_BD);
4592                if (!bnx2x_fp(bp, index, rx_desc_ring))
4593                        goto alloc_mem_err;
4594
4595                /* Seed all CQEs by 1s */
4596                bnx2x_fp(bp, index, rx_comp_ring) =
4597                        BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4598                                         sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4599                if (!bnx2x_fp(bp, index, rx_comp_ring))
4600                        goto alloc_mem_err;
4601
4602                /* SGE ring */
4603                bnx2x_fp(bp, index, rx_page_ring) =
4604                        kzmalloc((NUM_RX_SGE) * (sizeof(struct sw_rx_page)),
4605                                 MEM_WAIT);
4606                if (!bnx2x_fp(bp, index, rx_page_ring))
4607                        goto alloc_mem_err;
4608                bnx2x_fp(bp, index, rx_sge_ring) =
4609                        BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4610                                        BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4611                if (!bnx2x_fp(bp, index, rx_sge_ring))
4612                        goto alloc_mem_err;
4613                /* RX BD ring */
4614                bnx2x_set_next_page_rx_bd(fp);
4615
4616                /* CQ ring */
4617                bnx2x_set_next_page_rx_cq(fp);
4618
4619                /* BDs */
4620                ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4621                if (ring_size < rx_ring_size)
4622                        goto alloc_mem_err;
4623        }
4624
4625        return 0;
4626
4627/* handles low memory cases */
4628alloc_mem_err:
4629        BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4630                                                index, ring_size);
4631        /* FW will drop all packets if the queue is not big enough.
4632         * In these cases we disable the queue.
4633         * The min size is different for OOO, TPA and non-TPA queues.
4634         */
4635        if (ring_size < (fp->disable_tpa ?
4636                                MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4637                        /* release memory allocated for this queue */
4638                        bnx2x_free_fp_mem_at(bp, index);
4639                        return -ENOMEM;
4640        }
4641        return 0;
4642}
4643
4644static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4645{
4646        if (!NO_FCOE(bp))
4647                /* FCoE */
4648                if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4649                        /* we will fail the load process instead of
4650                         * marking NO_FCOE_FLAG
4651                         */
4652                        return -ENOMEM;
4653
4654        return 0;
4655}
4656
4657static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4658{
4659        int i;
4660
4661        /* 1. Allocate FP for the leading queue - fatal if this fails
4662         * 2. Allocate RSS queues - fix the number of queues if this fails
4663         */
4664
4665        /* leading */
4666        if (bnx2x_alloc_fp_mem_at(bp, 0))
4667                return -ENOMEM;
4668
4669        /* RSS */
4670        for_each_nondefault_eth_queue(bp, i)
4671                if (bnx2x_alloc_fp_mem_at(bp, i))
4672                        break;
4673
4674        /* handle memory failures */
4675        if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4676                int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4677
4678                warn_on(delta < 0);
4679                bnx2x_shrink_eth_fp(bp, delta);
4680                if (CNIC_SUPPORT(bp))
4681                        /* move non-eth FPs next to the last eth FP.
4682                         * This must be done in that order:
4683                         * FCOE_IDX < FWD_IDX < OOO_IDX
4684                         */
4685
4686                        /* move the FCoE fp even if NO_FCOE_FLAG is on */
4687                        bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4688                bp->num_ethernet_queues -= delta;
4689                bp->num_queues = bp->num_ethernet_queues +
4690                                 bp->num_cnic_queues;
4691                BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4692                          bp->num_queues + delta, bp->num_queues);
4693        }
4694
4695        return 0;
4696}
4697
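/* Free the driver-wide arrays allocated by bnx2x_alloc_mem_bp(). */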
4698void bnx2x_free_mem_bp(struct bnx2x *bp)
4699{
4700        int i;
4701
4702        for (i = 0; i < bp->fp_array_size; i++)
4703                kfree(bp->fp[i].tpa_info);
4704        kfree(bp->fp);
4705        kfree(bp->sp_objs);
4706        kfree(bp->fp_stats);
4707        kfree(bp->bnx2x_txq);
4708        kfree(bp->msix_table);
4709        kfree(bp->ilt);
4710}
4711
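/* Allocate the driver-wide arrays: fastpath structs (with their TPA info),
 * slowpath objects, per-queue stats, the Tx queue array, the MSI-X table and
 * the ILT, sized from the chip's IGU/RSS capabilities.
 */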
4712int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4713{
4714        struct bnx2x_fastpath *fp;
4715        struct msix_entry *tbl;
4716        struct bnx2x_ilt *ilt;
4717        int msix_table_size = 0;
4718        int fp_array_size, txq_array_size;
4719        int i;
4720
4721        /*
4722         * The biggest MSI-X table we might need is the maximum number of
4723         * fast path IGU SBs plus the default SB (for the PF only).
4724         */
4725        msix_table_size = bp->igu_sb_cnt;
4726        if (IS_PF(bp))
4727                msix_table_size++;
4728        BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4729
4730        /* fp array: RSS plus CNIC related L2 queues */
4731        fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4732        bp->fp_array_size = fp_array_size;
4733        BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4734
4735        fp = kzmalloc((bp->fp_array_size) * (sizeof(*fp)), MEM_WAIT);
4736        if (!fp)
4737                goto alloc_err;
4738        for (i = 0; i < bp->fp_array_size; i++) {
4739                fp[i].tpa_info =
4740                        kzmalloc((ETH_MAX_AGGREGATION_QUEUES_E1H_E2) * (sizeof(struct bnx2x_agg_info)),
4741                                 MEM_WAIT);
4742                if (!(fp[i].tpa_info))
4743                        goto alloc_err;
4744        }
4745
4746        bp->fp = fp;
4747
4748        /* allocate sp objs */
4749        bp->sp_objs = kzmalloc((bp->fp_array_size) * (sizeof(struct bnx2x_sp_objs)),
4750                               MEM_WAIT);
4751        if (!bp->sp_objs)
4752                goto alloc_err;
4753
4754        /* allocate fp_stats */
4755        bp->fp_stats = kzmalloc((bp->fp_array_size) * (sizeof(struct bnx2x_fp_stats)),
4756                                MEM_WAIT);
4757        if (!bp->fp_stats)
4758                goto alloc_err;
4759
4760        /* Allocate memory for the transmission queues array */
4761        txq_array_size =
4762                BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4763        BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4764
4765        bp->bnx2x_txq = kzmalloc((txq_array_size) * (sizeof(struct bnx2x_fp_txdata)),
4766                                 MEM_WAIT);
4767        if (!bp->bnx2x_txq)
4768                goto alloc_err;
4769
4770        /* AKAROS_PORT: we probably won't use this table */
4771        /* msix table */
4772        tbl = kzmalloc((msix_table_size) * (sizeof(*tbl)), MEM_WAIT);
4773        if (!tbl)
4774                goto alloc_err;
4775        bp->msix_table = tbl;
4776
4777        /* ilt */
4778        ilt = kzmalloc(sizeof(*ilt), MEM_WAIT);
4779        if (!ilt)
4780                goto alloc_err;
4781        bp->ilt = ilt;
4782
4783        return 0;
4784alloc_err:
4785        bnx2x_free_mem_bp(bp);
4786        return -ENOMEM;
4787}
4788
4789int bnx2x_reload_if_running(struct ether *dev)
4790{
4791        panic("Not implemented");
4792#if 0 // AKAROS_PORT
4793        struct bnx2x *bp = netdev_priv(dev);
4794
4795        if (unlikely(!netif_running(dev)))
4796                return 0;
4797
4798        bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4799        return bnx2x_nic_load(bp, LOAD_NORMAL);
4800#endif
4801}
4802
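/* Return the index of the currently used PHY: INT_PHY on single-PHY boards,
 * otherwise the external PHY chosen from the link state (when the link is
 * up) or from the configured PHY selection priority.
 */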
4803int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4804{
4805        uint32_t sel_phy_idx = 0;
4806        if (bp->link_params.num_phys <= 1)
4807                return INT_PHY;
4808
4809        if (bp->link_vars.link_up) {
4810                sel_phy_idx = EXT_PHY1;
4811                /* In case link is SERDES, check if the EXT_PHY2 is the one */
4812                if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4813                    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4814                        sel_phy_idx = EXT_PHY2;
4815        } else {
4816
4817                switch (bnx2x_phy_selection(&bp->link_params)) {
4818                case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4819                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4820                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4821                       sel_phy_idx = EXT_PHY1;
4822                       break;
4823                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4824                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4825                       sel_phy_idx = EXT_PHY2;
4826                       break;
4827                }
4828        }
4829
4830        return sel_phy_idx;
4831}
4832
4833int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4834{
4835        uint32_t sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4836        /*
4837         * The selected active PHY is always the one after swapping (in case
4838         * PHY swapping is enabled), so when swapping is enabled we need to
4839         * reverse the configuration.
4840         */
4841
4842        if (bp->link_params.multi_phy_config &
4843            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4844                if (sel_phy_idx == EXT_PHY1)
4845                        sel_phy_idx = EXT_PHY2;
4846                else if (sel_phy_idx == EXT_PHY2)
4847                        sel_phy_idx = EXT_PHY1;
4848        }
4849        return LINK_CONFIG_IDX(sel_phy_idx);
4850}
4851
4852#ifdef NETDEV_FCOE_WWNN
4853int bnx2x_fcoe_get_wwn(struct ether *dev, uint64_t *wwn, int type)
4854{
4855        struct bnx2x *bp = netdev_priv(dev);
4856        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4857
4858        switch (type) {
4859        case NETDEV_FCOE_WWNN:
4860                *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4861                                cp->fcoe_wwn_node_name_lo);
4862                break;
4863        case NETDEV_FCOE_WWPN:
4864                *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4865                                cp->fcoe_wwn_port_name_lo);
4866                break;
4867        default:
4868                BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4869                return -EINVAL;
4870        }
4871
4872        return 0;
4873}
4874#endif
4875
4876/* called with rtnl_lock */
4877int bnx2x_change_mtu(struct ether *dev, int new_mtu)
4878{
4879        struct bnx2x *bp = netdev_priv(dev);
4880
4881        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4882                BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4883                return -EAGAIN;
4884        }
4885
4886        if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4887            ((new_mtu + ETHERHDRSIZE) < ETH_MIN_PACKET_SIZE)) {
4888                BNX2X_ERR("Can't support requested MTU size\n");
4889                return -EINVAL;
4890        }
4891
4892        /* This does not race with packet allocation
4893         * because the actual alloc size is
4894         * only updated as part of load
4895         */
4896        dev->mtu = new_mtu;
4897
4898        return bnx2x_reload_if_running(dev);
4899}
4900
4901netdev_features_t bnx2x_fix_features(struct ether *dev,
4902                                     netdev_features_t features)
4903{
4904        struct bnx2x *bp = netdev_priv(dev);
4905
4906        /* TPA requires Rx CSUM offloading */
4907        if (!(features & NETIF_F_RXCSUM)) {
4908                features &= ~NETIF_F_LRO;
4909                features &= ~NETIF_F_GRO;
4910        }
4911
4912        /* Note: do not disable SW GRO in kernel when HW GRO is off */
4913        if (bp->disable_tpa)
4914                features &= ~NETIF_F_LRO;
4915
4916        return features;
4917}
4918
4919int bnx2x_set_features(struct ether *dev, netdev_features_t features)
4920{
4921        struct bnx2x *bp = netdev_priv(dev);
4922        uint32_t flags = bp->flags;
4923        uint32_t changes;
4924        bool bnx2x_reload = false;
4925
4926        if (features & NETIF_F_LRO)
4927                flags |= TPA_ENABLE_FLAG;
4928        else
4929                flags &= ~TPA_ENABLE_FLAG;
4930
4931        if (features & NETIF_F_GRO)
4932                flags |= GRO_ENABLE_FLAG;
4933        else
4934                flags &= ~GRO_ENABLE_FLAG;
4935
4936        if (features & NETIF_F_LOOPBACK) {
4937                if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4938                        bp->link_params.loopback_mode = LOOPBACK_BMAC;
4939                        bnx2x_reload = true;
4940                }
4941        } else {
4942                if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4943                        bp->link_params.loopback_mode = LOOPBACK_NONE;
4944                        bnx2x_reload = true;
4945                }
4946        }
4947
4948        changes = flags ^ bp->flags;
4949
4950        /* if GRO is changed while LRO is enabled, don't force a reload */
4951        if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4952                changes &= ~GRO_ENABLE_FLAG;
4953
4954        /* if GRO is changed while HW TPA is off, don't force a reload */
4955        if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
4956                changes &= ~GRO_ENABLE_FLAG;
4957
4958        if (changes)
4959                bnx2x_reload = true;
4960
4961        bp->flags = flags;
4962
4963        if (bnx2x_reload) {
4964                if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4965                        return bnx2x_reload_if_running(dev);
4966                /* else: bnx2x_nic_load() will be called at end of recovery */
4967        }
4968
4969        return 0;
4970}
4971
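/* Tx timeout handler: schedule TX_TIMEOUT recovery on the sp_rtnl task (and
 * trigger a driver panic dump first when BNX2X_STOP_ON_ERROR is set).
 */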
4972void bnx2x_tx_timeout(struct ether *dev)
4973{
4974        struct bnx2x *bp = netdev_priv(dev);
4975
4976#ifdef BNX2X_STOP_ON_ERROR
4977        if (!bp->panic)
4978                bnx2x_panic();
4979#endif
4980
4981        /* This allows the netif to be shut down gracefully before resetting */
4982        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4983}
4984
4985int bnx2x_suspend(struct pci_device *pdev, pm_message_t state)
4986{
4987        panic("Not implemented");
4988#if 0 // AKAROS_PORT
4989        struct ether *dev = pci_get_drvdata(pdev);
4990        struct bnx2x *bp;
4991
4992        if (!dev) {
4993                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4994                return -ENODEV;
4995        }
4996        bp = netdev_priv(dev);
4997
4998        rtnl_lock();
4999
5000        pci_save_state(pdev);
5001
5002        if (!netif_running(dev)) {
5003                rtnl_unlock();
5004                return 0;
5005        }
5006
5007        netif_device_detach(dev);
5008
5009        bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5010
5011        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5012
5013        rtnl_unlock();
5014
5015        return 0;
5016#endif
5017}
5018
5019int bnx2x_resume(struct pci_device *pdev)
5020{
5021        panic("Not implemented");
5022#if 0 // AKAROS_PORT
5023        struct ether *dev = pci_get_drvdata(pdev);
5024        struct bnx2x *bp;
5025        int rc;
5026
5027        if (!dev) {
5028                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5029                return -ENODEV;
5030        }
5031        bp = netdev_priv(dev);
5032
5033        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5034                BNX2X_ERR("Handling parity error recovery. Try again later\n");
5035                return -EAGAIN;
5036        }
5037
5038        rtnl_lock();
5039
5040        pci_restore_state(pdev);
5041
5042        if (!netif_running(dev)) {
5043                rtnl_unlock();
5044                return 0;
5045        }
5046
5047        bnx2x_set_power_state(bp, PCI_D0);
5048        netif_device_attach(dev);
5049
5050        rc = bnx2x_nic_load(bp, LOAD_OPEN);
5051
5052        rtnl_unlock();
5053
5054        return rc;
5055#endif
5056}
5057
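/* Write the CDU validation values for connection 'cid' into the ustorm and
 * xstorm sections of an Eth connection context.
 */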
5058void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5059                              uint32_t cid)
5060{
5061        if (!cxt) {
5062                BNX2X_ERR("bad context pointer %p\n", cxt);
5063                return;
5064        }
5065
5066        /* ustorm cxt validation */
5067        cxt->ustorm_ag_context.cdu_usage =
5068                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5069                        CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5070        /* xcontext validation */
5071        cxt->xstorm_ag_context.cdu_reserved =
5072                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5073                        CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5074}
5075
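/* Program the host-coalescing timeout (in ticks) of one status block index
 * through the CSTORM internal memory.
 */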
5076static void storm_memset_hc_timeout(struct bnx2x *bp, uint8_t port,
5077                                    uint8_t fw_sb_id, uint8_t sb_index,
5078                                    uint8_t ticks)
5079{
5080        uint32_t addr = BAR_CSTRORM_INTMEM +
5081                   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5082        REG_WR8(bp, addr, ticks);
5083        DP(NETIF_MSG_IFUP,
5084           "port %x fw_sb_id %d sb_index %d ticks %d\n",
5085           port, fw_sb_id, sb_index, ticks);
5086}
5087
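/* Set or clear the HC_ENABLED flag of one status block index through the
 * CSTORM internal memory.
 */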
5088static void storm_memset_hc_disable(struct bnx2x *bp, uint8_t port,
5089                                    uint16_t fw_sb_id, uint8_t sb_index,
5090                                    uint8_t disable)
5091{
5092        uint32_t enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5093        uint32_t addr = BAR_CSTRORM_INTMEM +
5094                   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5095        uint8_t flags = REG_RD8(bp, addr);
5096        /* clear and set */
5097        flags &= ~HC_INDEX_DATA_HC_ENABLED;
5098        flags |= enable_flag;
5099        REG_WR8(bp, addr, flags);
5100        DP(NETIF_MSG_IFUP,
5101           "port %x fw_sb_id %d sb_index %d disable %d\n",
5102           port, fw_sb_id, sb_index, disable);
5103}
5104
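/* Update the coalescing of one status block index: convert 'usec' into
 * BNX2X_BTR ticks and program the timeout, then enable or disable host
 * coalescing for that index (a zero 'usec' always disables it).
 */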
5105void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, uint8_t fw_sb_id,
5106                                    uint8_t sb_index, uint8_t disable,
5107                                    uint16_t usec)
5108{
5109        int port = BP_PORT(bp);
5110        uint8_t ticks = usec / BNX2X_BTR;
5111
5112        storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5113
5114        disable = disable ? 1 : (usec ? 0 : 1);
5115        storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5116}
5117
5118void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5119                            uint32_t verbose)
5120{
5121        cmb();
5122        set_bit(flag, &bp->sp_rtnl_state);
5123        cmb();
5124        DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5125           flag);
5126        schedule_delayed_work(&bp->sp_rtnl_task, 0);
5127}
5128EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
5129