BNX2X: limit queues to 2
[akaros.git] / kern/drivers/net/bnx2x/bnx2x_cmn.c
1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include "akaros_compat.h"
19
20 #include "bnx2x_cmn.h"
21 #include "bnx2x_init.h"
22 #include "bnx2x_sp.h"
23
24 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
25 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
26 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
27 static void bnx2x_poll(uint32_t srcid, long a0, long a1, long a2);
28
29 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
30 {
31         int i;
32
33         /* Add NAPI objects */
34         for_each_rx_queue_cnic(bp, i) {
35                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
36                                bnx2x_poll, NAPI_POLL_WEIGHT);
37                 napi_hash_add(&bnx2x_fp(bp, i, napi));
38         }
39 }
40
41 static void bnx2x_add_all_napi(struct bnx2x *bp)
42 {
43         int i;
44
45         /* Add NAPI objects */
46         for_each_eth_queue(bp, i) {
47                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48                                bnx2x_poll, NAPI_POLL_WEIGHT);
49                 napi_hash_add(&bnx2x_fp(bp, i, napi));
50         }
51 }
52
53 static int bnx2x_calc_num_queues(struct bnx2x *bp)
54 {
55         /* default is min(8, num_cpus) in Linux.  we'll set it elsewhere */
56         int nq = bnx2x_num_queues ? : 8;
57
58         /* Reduce memory usage in kdump environment by using only one queue */
59         if (is_kdump_kernel())
60                 nq = 1;
61
62         nq = CLAMP(nq, 1, BNX2X_MAX_QUEUES(bp));
63         /* AKAROS_PORT XME.  For some reason, we can't handle 8 queues; the Linux
64          * driver can.  We can handle 4 queues *total*, one per function. */
65         nq = MIN(nq, 2);
66         return nq;
67 }
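/* Illustrative sketch (not part of the driver): a standalone model of the
 * queue-count arithmetic above, assuming a hypothetical max_queues argument in
 * place of BNX2X_MAX_QUEUES(bp).  The request defaults to 8 when the module
 * parameter is 0, is clamped to the device limit, and is then capped at 2 for
 * the Akaros port.
 */
#if 0 /* example only */
static int example_calc_num_queues(int requested, int max_queues)
{
	int nq = requested ? : 8;	/* 0 means "use the default of 8" */

	nq = CLAMP(nq, 1, max_queues);	/* respect the device limit */
	return MIN(nq, 2);		/* AKAROS_PORT cap */
}
/* e.g. example_calc_num_queues(0, 16) == 2, example_calc_num_queues(1, 16) == 1 */
#endif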
68
69 /**
70  * bnx2x_move_fp - move content of the fastpath structure.
71  *
72  * @bp:         driver handle
73  * @from:       source FP index
74  * @to:         destination FP index
75  *
76  * Makes sure the contents of bp->fp[to].napi are kept
77  * intact. This is done by first copying the napi struct from
78  * the target to the source, and then memcpying the entire
79  * source onto the target. Updates txdata pointers and related
80  * content.
81  */
82 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
83 {
84         struct bnx2x_fastpath *from_fp = &bp->fp[from];
85         struct bnx2x_fastpath *to_fp = &bp->fp[to];
86         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
87         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
88         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
89         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
90         int old_max_eth_txqs, new_max_eth_txqs;
91         int old_txdata_index = 0, new_txdata_index = 0;
92         struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
93
94         /* Copy the NAPI object as it has already been initialized */
95         from_fp->napi = to_fp->napi;
96
97         /* Move bnx2x_fastpath contents */
98         memcpy(to_fp, from_fp, sizeof(*to_fp));
99         to_fp->index = to;
100
101         /* Retain the tpa_info of the original `to' version as we don't want
102          * 2 FPs to contain the same tpa_info pointer.
103          */
104         to_fp->tpa_info = old_tpa_info;
105
106         /* move sp_objs contents as well, as their indices match fp ones */
107         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
108
109         /* move fp_stats contents as well, as their indices match fp ones */
110         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
111
112         /* Update txdata pointers in fp and move txdata content accordingly:
113          * Each fp consumes 'max_cos' txdata structures, so the index should be
114          * decremented by max_cos x delta.
115          */
116
117         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
118         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
119                                 (bp)->max_cos;
120         if (from == FCOE_IDX(bp)) {
121                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
122                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
123         }
124
125         memcpy(&bp->bnx2x_txq[new_txdata_index],
126                &bp->bnx2x_txq[old_txdata_index],
127                sizeof(struct bnx2x_fp_txdata));
128         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
129 }
130
131 /**
132  * bnx2x_fill_fw_str - Fill buffer with FW version string.
133  *
134  * @bp:        driver handle
135  * @buf:       character buffer to fill with the fw name
136  * @buf_len:   length of the above buffer
137  *
138  */
139 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
140 {
141         if (IS_PF(bp)) {
142                 uint8_t phy_fw_ver[PHY_FW_VER_LEN];
143
144                 phy_fw_ver[0] = '\0';
145                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
146                                              phy_fw_ver, PHY_FW_VER_LEN);
147                 strlcpy(buf, bp->fw_ver, buf_len);
148                 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
149                          "bc %d.%d.%d%s%s",
150                          (bp->common.bc_ver & 0xff0000) >> 16,
151                          (bp->common.bc_ver & 0xff00) >> 8,
152                          (bp->common.bc_ver & 0xff),
153                          ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
154         } else {
155                 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
156         }
157 }
158
159 /**
160  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
161  *
162  * @bp: driver handle
163  * @delta:      number of eth queues which were not allocated
164  */
165 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
166 {
167         int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
168
169         /* The queue pointer cannot be re-set on a per-fp basis, as moving the
170          * pointer backward along the array could cause memory to be overwritten
171          */
172         for (cos = 1; cos < bp->max_cos; cos++) {
173                 for (i = 0; i < old_eth_num - delta; i++) {
174                         struct bnx2x_fastpath *fp = &bp->fp[i];
175                         int new_idx = cos * (old_eth_num - delta) + i;
176
177                         memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
178                                sizeof(struct bnx2x_fp_txdata));
179                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
180                 }
181         }
182 }
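/* Illustrative sketch (not part of the driver): with the layout implied by the
 * loop above, the txdata slot for (queue i, class of service cos) sits at
 * cos * num_eth_queues + i.  When the eth queue count shrinks by delta, every
 * slot with cos >= 1 has to be re-packed to the smaller stride; e.g. with
 * old_eth_num = 4, delta = 2, cos = 1, queue 0 moves from slot 4 to slot 2.
 */
#if 0 /* example only */
static int example_txdata_slot(int cos, int queue, int num_eth_queues)
{
	/* txdata structs are grouped by cos, one per eth queue */
	return cos * num_eth_queues + queue;
}
#endif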
183
184 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
185
186 /* free skb in the packet ring at pos idx
187  * return idx of last bd freed
188  */
189 static uint16_t bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
190                              uint16_t idx, unsigned int *pkts_compl,
191                              unsigned int *bytes_compl)
192 {
193 panic("Not implemented");
194 #if 0 // AKAROS_PORT
195         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
196         struct eth_tx_start_bd *tx_start_bd;
197         struct eth_tx_bd *tx_data_bd;
198         struct sk_buff *skb = tx_buf->skb;
199         uint16_t bd_idx = TX_BD(tx_buf->first_bd), new_cons;
200         int nbd;
201         uint16_t split_bd_len = 0;
202
203         /* prefetch skb end pointer to speed up dev_kfree_skb() */
204         prefetch(&skb->end);
205
206         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
207            txdata->txq_index, idx, tx_buf, skb);
208
209         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
210
211         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
212 #ifdef BNX2X_STOP_ON_ERROR
213         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
214                 BNX2X_ERR("BAD nbd!\n");
215                 bnx2x_panic();
216         }
217 #endif
218         new_cons = nbd + tx_buf->first_bd;
219
220         /* Get the next bd */
221         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
222
223         /* Skip a parse bd... */
224         --nbd;
225         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226
227         if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
228                 /* Skip second parse bd... */
229                 --nbd;
230                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
231         }
232
233         /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
234         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
235                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
236                 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
237                 --nbd;
238                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
239         }
240
241         /* unmap first bd */
242         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
243                          BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
244                          DMA_TO_DEVICE);
245
246         /* now free frags */
247         while (nbd > 0) {
248
249                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
250                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
251                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
252                 if (--nbd)
253                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
254         }
255
256         /* release skb */
257         warn_on(!skb);
258         if (likely(skb)) {
259                 (*pkts_compl)++;
260                 (*bytes_compl) += skb->len;
261         }
262
263         dev_kfree_skb_any(skb);
264         tx_buf->first_bd = 0;
265         tx_buf->skb = NULL;
266
267         return new_cons;
268 #endif
269 }
270
271 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
272 {
273 panic("Not implemented");
274 #if 0 // AKAROS_PORT
275         struct netdev_queue *txq;
276         uint16_t hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
277         unsigned int pkts_compl = 0, bytes_compl = 0;
278
279 #ifdef BNX2X_STOP_ON_ERROR
280         if (unlikely(bp->panic))
281                 return -1;
282 #endif
283
284         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
285         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
286         sw_cons = txdata->tx_pkt_cons;
287
288         while (sw_cons != hw_cons) {
289                 uint16_t pkt_cons;
290
291                 pkt_cons = TX_BD(sw_cons);
292
293                 DP(NETIF_MSG_TX_DONE,
294                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
295                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
296
297                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
298                                             &pkts_compl, &bytes_compl);
299
300                 sw_cons++;
301         }
302
303         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
304
305         txdata->tx_pkt_cons = sw_cons;
306         txdata->tx_bd_cons = bd_cons;
307
308         /* Need to make the tx_bd_cons update visible to start_xmit()
309          * before checking for netif_tx_queue_stopped().  Without the
310          * memory barrier, there is a small possibility that
311          * start_xmit() will miss it and cause the queue to be stopped
312          * forever.
313          * On the other hand we need an rmb() here to ensure the proper
314          * ordering of bit testing in the following
315          * netif_tx_queue_stopped(txq) call.
316          */
317         mb();
318
319         if (unlikely(netif_tx_queue_stopped(txq))) {
320                 /* Taking tx_lock() is needed to prevent re-enabling the queue
321                  * while it's empty. This could happen if rx_action() gets
322                  * suspended in bnx2x_tx_int() after the condition before
323                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
324                  *
325                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
326                  * sends some packets consuming the whole queue again->
327                  * stops the queue
328                  */
329
330                 __netif_tx_lock(txq, core_id());
331
332                 if ((netif_tx_queue_stopped(txq)) &&
333                     (bp->state == BNX2X_STATE_OPEN) &&
334                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
335                         netif_tx_wake_queue(txq);
336
337                 __netif_tx_unlock(txq);
338         }
339         return 0;
340 #endif
341 }
342
343 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
344                                              uint16_t idx)
345 {
346         uint16_t last_max = fp->last_max_sge;
347
348         if (SUB_S16(idx, last_max) > 0)
349                 fp->last_max_sge = idx;
350 }
351
352 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
353                                          uint16_t sge_len,
354                                          struct eth_end_agg_rx_cqe *cqe)
355 {
356         struct bnx2x *bp = fp->bp;
357         uint16_t last_max, last_elem, first_elem;
358         uint16_t delta = 0;
359         uint16_t i;
360
361         if (!sge_len)
362                 return;
363
364         /* First mark all used pages */
365         for (i = 0; i < sge_len; i++)
366                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
367                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
368
369         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
370            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
371
372         /* Here we assume that the last SGE index is the biggest */
373         prefetch((void *)(fp->sge_mask));
374         bnx2x_update_last_max_sge(fp,
375                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
376
377         last_max = RX_SGE(fp->last_max_sge);
378         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
379         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
380
381         /* If ring is not full */
382         if (last_elem + 1 != first_elem)
383                 last_elem++;
384
385         /* Now update the prod */
386         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
387                 if (likely(fp->sge_mask[i]))
388                         break;
389
390                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
391                 delta += BIT_VEC64_ELEM_SZ;
392         }
393
394         if (delta > 0) {
395                 fp->rx_sge_prod += delta;
396                 /* clear page-end entries */
397                 bnx2x_clear_sge_mask_next_elems(fp);
398         }
399
400         DP(NETIF_MSG_RX_STATUS,
401            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
402            fp->last_max_sge, fp->rx_sge_prod);
403 }
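/* Illustrative sketch (not part of the driver): fp->sge_mask is a bit vector
 * with one bit per SGE ring entry, stored in 64-bit elements.  Consumed
 * entries have their bit cleared above; once an element is fully cleared it is
 * refilled with ones and the producer advances past those 64 entries.  A
 * minimal model of that producer-advance step, assuming a hypothetical mask
 * array:
 */
#if 0 /* example only */
static uint16_t example_advance_sge_prod(uint64_t *mask, uint16_t first_elem,
					 uint16_t last_elem, uint16_t nelems)
{
	uint16_t delta = 0, i;

	for (i = first_elem; i != last_elem; i = (i + 1) % nelems) {
		if (mask[i])		/* element still has live entries */
			break;
		mask[i] = ~0ULL;	/* refill and move past 64 entries */
		delta += 64;
	}
	return delta;
}
#endif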
404
405 /* Get Toeplitz hash value in the skb using the value from the
406  * CQE (calculated by HW).
407  */
408 static uint32_t bnx2x_get_rxhash(const struct bnx2x *bp,
409                             const struct eth_fast_path_rx_cqe *cqe,
410                             enum pkt_hash_types *rxhash_type)
411 {
412 panic("Not implemented");
413 #if 0 // AKAROS_PORT
414         /* Get Toeplitz hash from CQE */
415         if ((bp->dev->feat & NETIF_F_RXHASH) &&
416             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
417                 enum eth_rss_hash_type htype;
418
419                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
420                 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
421                                 (htype == TCP_IPV6_HASH_TYPE)) ?
422                                PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
423
424                 return le32_to_cpu(cqe->rss_hash_result);
425         }
426         *rxhash_type = PKT_HASH_TYPE_NONE;
427         return 0;
428 #endif
429 }
430
431 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, uint16_t queue,
432                             uint16_t cons, uint16_t prod,
433                             struct eth_fast_path_rx_cqe *cqe)
434 {
435 panic("Not implemented");
436 #if 0 // AKAROS_PORT
437         struct bnx2x *bp = fp->bp;
438         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
439         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
440         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
441         dma_addr_t mapping;
442         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
443         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
444
445         /* print error if current state != stop */
446         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
447                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
448
449         /* Try to map an empty data buffer from the aggregation info  */
450         mapping = dma_map_single(&bp->pdev->dev,
451                                  first_buf->data + NET_SKB_PAD,
452                                  fp->rx_buf_size, DMA_FROM_DEVICE);
453         /*
454          *  ...if it fails - move the skb from the consumer to the producer
455          *  and set the current aggregation state as ERROR to drop it
456          *  when TPA_STOP arrives.
457          */
458
459         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
460                 /* Move the BD from the consumer to the producer */
461                 bnx2x_reuse_rx_data(fp, cons, prod);
462                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
463                 return;
464         }
465
466         /* move empty data from pool to prod */
467         prod_rx_buf->data = first_buf->data;
468         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
469         /* point prod_bd to new data */
470         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
471         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
472
473         /* move partial skb from cons to pool (don't unmap yet) */
474         *first_buf = *cons_rx_buf;
475
476         /* mark bin state as START */
477         tpa_info->parsing_flags =
478                 le16_to_cpu(cqe->pars_flags.flags);
479         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
480         tpa_info->tpa_state = BNX2X_TPA_START;
481         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
482         tpa_info->placement_offset = cqe->placement_offset;
483         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
484         if (fp->mode == TPA_MODE_GRO) {
485                 uint16_t gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
486                 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
487                 tpa_info->gro_size = gro_size;
488         }
489
490 #ifdef BNX2X_STOP_ON_ERROR
491         fp->tpa_queue_used |= (1 << queue);
492         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
493            fp->tpa_queue_used);
494 #endif
495 #endif
496 }
497
498 /* Timestamp option length allowed for TPA aggregation:
499  *
500  *              nop nop kind length echo val
501  */
502 #define TPA_TSTAMP_OPT_LEN      12
503 /**
504  * bnx2x_set_gro_params - compute GRO values
505  *
506  * @skb:                packet skb
507  * @parsing_flags:      parsing flags from the START CQE
508  * @len_on_bd:          total length of the first packet for the
509  *                      aggregation.
510  * @pkt_len:            length of all segments
511  *
512  * Approximates the MSS for this aggregation using
513  * its first packet.
514  * Computes the number of aggregated segments and the gso_type.
515  */
516 static void bnx2x_set_gro_params(struct sk_buff *skb, uint16_t parsing_flags,
517                                  uint16_t len_on_bd, unsigned int pkt_len,
518                                  uint16_t num_of_coalesced_segs)
519 {
520 panic("Not implemented");
521 #if 0 // AKAROS_PORT
522         /* TPA aggregation won't have either IP options or TCP options
523          * other than timestamp or IPv6 extension headers.
524          */
525         uint16_t hdrs_len = ETHERHDRSIZE + sizeof(struct tcphdr);
526
527         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
528             PRS_FLAG_OVERETH_IPV6) {
529                 hdrs_len += sizeof(struct ipv6hdr);
530                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
531         } else {
532                 hdrs_len += sizeof(struct iphdr);
533                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
534         }
535
536         /* Check if there was a TCP timestamp; if there was, it will
537          * always be 12 bytes long: nop nop kind length echo val.
538          *
539          * Otherwise the FW would close the aggregation.
540          */
541         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
542                 hdrs_len += TPA_TSTAMP_OPT_LEN;
543
544         skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
545
546         /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
547          * to skb_shinfo(skb)->gso_segs
548          */
549         NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
550 #endif
551 }
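/* Illustrative sketch (not part of the driver): the gso_size computed above is
 * just "bytes on the first BD minus the headers the FW left in place".  For an
 * IPv4 aggregation with a TCP timestamp option and a hypothetical len_on_bd of
 * 1514 bytes:
 */
#if 0 /* example only */
	uint16_t hdrs_len = ETHERHDRSIZE + sizeof(struct tcphdr) +
			    sizeof(struct iphdr) + TPA_TSTAMP_OPT_LEN;
	uint16_t gso_size = 1514 - hdrs_len;	/* 1514 - (14+20+20+12) = 1448 */
#endif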
552
553 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
554                               uint16_t index, gfp_t gfp_mask)
555 {
556         struct page *page = get_cont_pages(PAGES_PER_SGE_SHIFT, gfp_mask);
557         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
558         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
559         dma_addr_t mapping;
560
561         if (unlikely(page == NULL)) {
562                 BNX2X_ERR("Can't alloc sge\n");
563                 return -ENOMEM;
564         }
565
566         mapping = dma_map_page(&bp->pdev->dev, page, 0,
567                                SGE_PAGES, DMA_FROM_DEVICE);
568         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
569                 free_cont_pages(page, PAGES_PER_SGE_SHIFT);
570                 BNX2X_ERR("Can't map sge\n");
571                 return -ENOMEM;
572         }
573
574         sw_buf->page = page;
575         dma_unmap_addr_set(sw_buf, mapping, mapping);
576
577         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
578         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
579
580         return 0;
581 }
582
583 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
584                                struct bnx2x_agg_info *tpa_info,
585                                uint16_t pages,
586                                struct sk_buff *skb,
587                                struct eth_end_agg_rx_cqe *cqe,
588                                uint16_t cqe_idx)
589 {
590 panic("Not implemented");
591 #if 0 // AKAROS_PORT
592         struct sw_rx_page *rx_pg, old_rx_pg;
593         uint32_t i, frag_len, frag_size;
594         int err, j, frag_id = 0;
595         uint16_t len_on_bd = tpa_info->len_on_bd;
596         uint16_t full_page = 0, gro_size = 0;
597
598         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
599
600         if (fp->mode == TPA_MODE_GRO) {
601                 gro_size = tpa_info->gro_size;
602                 full_page = tpa_info->full_page;
603         }
604
605         /* This is needed in order to enable forwarding support */
606         if (frag_size)
607                 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
608                                      le16_to_cpu(cqe->pkt_len),
609                                      le16_to_cpu(cqe->num_of_coalesced_segs));
610
611 #ifdef BNX2X_STOP_ON_ERROR
612         if (pages > MIN_T(uint32_t, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
613                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
614                           pages, cqe_idx);
615                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
616                 bnx2x_panic();
617                 return -EINVAL;
618         }
619 #endif
620
621         /* Run through the SGL and compose the fragmented skb */
622         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
623                 uint16_t sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
624
625                 /* FW gives the indices of the SGE as if the ring is an array
626                    (meaning that "next" element will consume 2 indices) */
627                 if (fp->mode == TPA_MODE_GRO)
628                         frag_len = MIN_T(uint32_t, frag_size,
629                                          (uint32_t)full_page);
630                 else /* LRO */
631                         frag_len = MIN_T(uint32_t, frag_size,
632                                          (uint32_t)SGE_PAGES);
633
634                 rx_pg = &fp->rx_page_ring[sge_idx];
635                 old_rx_pg = *rx_pg;
636
637                 /* If we fail to allocate a substitute page, we simply stop
638                    where we are and drop the whole packet */
639                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, 0);
640                 if (unlikely(err)) {
641                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
642                         return err;
643                 }
644
645                 /* Unmap the page as we're going to pass it to the stack */
646                 dma_unmap_page(&bp->pdev->dev,
647                                dma_unmap_addr(&old_rx_pg, mapping),
648                                SGE_PAGES, DMA_FROM_DEVICE);
649                 /* Add one frag and update the appropriate fields in the skb */
650                 if (fp->mode == TPA_MODE_LRO)
651                         skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
652                 else { /* GRO */
653                         int rem;
654                         int offset = 0;
655                         for (rem = frag_len; rem > 0; rem -= gro_size) {
656                                 int len = rem > gro_size ? gro_size : rem;
657                                 skb_fill_page_desc(skb, frag_id++,
658                                                    old_rx_pg.page, offset, len);
659                                 if (offset)
660                                         page_incref(old_rx_pg.page);
661                                 offset += len;
662                         }
663                 }
664
665                 skb->data_len += frag_len;
666                 skb->truesize += SGE_PAGES;
667                 skb->len += frag_len;
668
669                 frag_size -= frag_len;
670         }
671
672         return 0;
673 #endif
674 }
675
676 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
677 {
678         if (fp->rx_frag_size)
679                 page_decref(kva2page(data));
680         else
681                 kfree(data);
682 }
683
684 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
685 {
686         if (fp->rx_frag_size) {
687                 /* KMALLOC_WAIT (GFP_KERNEL) allocations are used only during init */
688                 if (unlikely(gfp_mask & KMALLOC_WAIT))
689                         return (void *)kpage_alloc_addr();
690
691 #if 0 // AKAROS_PORT
692                 return netdev_alloc_frag(fp->rx_frag_size);
693 #else
694                 return (void *)kpage_alloc_addr();
695 #endif
696         }
697
698         return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
699 }
700
701 #ifdef CONFIG_INET
702 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
703 {
704         const struct iphdr *iph = ip_hdr(skb);
705         struct tcphdr *th;
706
707         skb_set_transport_header(skb, sizeof(struct iphdr));
708         th = tcp_hdr(skb);
709
710         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
711                                   iph->saddr, iph->daddr, 0);
712 }
713
714 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
715 {
716         struct ipv6hdr *iph = ipv6_hdr(skb);
717         struct tcphdr *th;
718
719         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
720         th = tcp_hdr(skb);
721
722         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
723                                   &iph->saddr, &iph->daddr, 0);
724 }
725
726 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
727                             void (*gro_func)(struct bnx2x*, struct sk_buff*))
728 {
729         skb_set_network_header(skb, 0);
730         gro_func(bp, skb);
731         tcp_gro_complete(skb);
732 }
733 #endif
734
735 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
736                                struct sk_buff *skb)
737 {
738 panic("Not implemented");
739 #if 0 // AKAROS_PORT
740 #ifdef CONFIG_INET
741         if (skb_shinfo(skb)->gso_size) {
742                 switch (be16_to_cpu(skb->protocol)) {
743                 case ETH_P_IP:
744                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
745                         break;
746                 case ETH_P_IPV6:
747                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
748                         break;
749                 default:
750                         BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
751                                   be16_to_cpu(skb->protocol));
752                 }
753         }
754 #endif
755         skb_record_rx_queue(skb, fp->rx_queue);
756         napi_gro_receive(&fp->napi, skb);
757 #endif
758 }
759
760 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
761                            struct bnx2x_agg_info *tpa_info,
762                            uint16_t pages,
763                            struct eth_end_agg_rx_cqe *cqe,
764                            uint16_t cqe_idx)
765 {
766 panic("Not implemented");
767 #if 0 // AKAROS_PORT
768         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
769         uint8_t pad = tpa_info->placement_offset;
770         uint16_t len = tpa_info->len_on_bd;
771         struct sk_buff *skb = NULL;
772         uint8_t *new_data, *data = rx_buf->data;
773         uint8_t old_tpa_state = tpa_info->tpa_state;
774
775         tpa_info->tpa_state = BNX2X_TPA_STOP;
776
777         /* If there was an error during the handling of the TPA_START -
778          * drop this aggregation.
779          */
780         if (old_tpa_state == BNX2X_TPA_ERROR)
781                 goto drop;
782
783         /* Try to allocate the new data */
784         new_data = bnx2x_frag_alloc(fp, 0);
785         /* Unmap skb in the pool anyway, as we are going to change
786            pool entry status to BNX2X_TPA_STOP even if new skb allocation
787            fails. */
788         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
789                          fp->rx_buf_size, DMA_FROM_DEVICE);
790         if (likely(new_data))
791                 skb = build_skb(data, fp->rx_frag_size);
792
793         if (likely(skb)) {
794 #ifdef BNX2X_STOP_ON_ERROR
795                 if (pad + len > fp->rx_buf_size) {
796                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
797                                   pad, len, fp->rx_buf_size);
798                         bnx2x_panic();
799                         return;
800                 }
801 #endif
802
803                 skb_reserve(skb, pad + NET_SKB_PAD);
804                 skb_put(skb, len);
805                 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
806
807                 skb->protocol = eth_type_trans(skb, bp->dev);
808                 skb->ip_summed = CHECKSUM_UNNECESSARY;
809
810                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
811                                          skb, cqe, cqe_idx)) {
812                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
813                                 __vlan_hwaccel_put_tag(skb,
814                                                        cpu_to_be16(ETH_P_8021Q),
815                                                        tpa_info->vlan_tag);
816                         bnx2x_gro_receive(bp, fp, skb);
817                 } else {
818                         DP(NETIF_MSG_RX_STATUS,
819                            "Failed to allocate new pages - dropping packet!\n");
820                         dev_kfree_skb_any(skb);
821                 }
822
823                 /* put new data in bin */
824                 rx_buf->data = new_data;
825
826                 return;
827         }
828         if (new_data)
829                 bnx2x_frag_free(fp, new_data);
830 drop:
831         /* drop the packet and keep the buffer in the bin */
832         DP(NETIF_MSG_RX_STATUS,
833            "Failed to allocate or map a new skb - dropping packet!\n");
834         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
835 #endif
836 }
837
838 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
839                                uint16_t index, gfp_t gfp_mask)
840 {
841         uint8_t *data;
842         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
843         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
844         dma_addr_t mapping;
845
846         data = bnx2x_frag_alloc(fp, gfp_mask);
847         if (unlikely(data == NULL))
848                 return -ENOMEM;
849
850         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
851                                  fp->rx_buf_size,
852                                  DMA_FROM_DEVICE);
853         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
854                 bnx2x_frag_free(fp, data);
855                 BNX2X_ERR("Can't map rx data\n");
856                 return -ENOMEM;
857         }
858
859         rx_buf->data = data;
860         dma_unmap_addr_set(rx_buf, mapping, mapping);
861
862         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
863         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
864
865         return 0;
866 }
867
868 static
869 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
870                                  struct bnx2x_fastpath *fp,
871                                  struct bnx2x_eth_q_stats *qstats)
872 {
873 panic("Not implemented");
874 #if 0 // AKAROS_PORT
875         /* Do nothing if no L4 csum validation was done.
876          * We do not check whether IP csum was validated. For IPv4 we assume
877          * that if the card got as far as validating the L4 csum, it also
878          * validated the IP csum. IPv6 has no IP csum.
879          */
880         if (cqe->fast_path_cqe.status_flags &
881             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
882                 return;
883
884         /* If L4 validation was done, check if an error was found. */
885
886         if (cqe->fast_path_cqe.type_error_flags &
887             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
888              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
889                 qstats->hw_csum_err++;
890         else
891                 skb->ip_summed = CHECKSUM_UNNECESSARY;
892 #endif
893 }
894
895 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
896 {
897         struct bnx2x *bp = fp->bp;
898         uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
899         uint16_t sw_comp_cons, sw_comp_prod;
900         int rx_pkt = 0;
901         union eth_rx_cqe *cqe;
902         struct eth_fast_path_rx_cqe *cqe_fp;
903
904 #ifdef BNX2X_STOP_ON_ERROR
905         if (unlikely(bp->panic))
906                 return 0;
907 #endif
908         if (budget <= 0)
909                 return rx_pkt;
910
911         bd_cons = fp->rx_bd_cons;
912         bd_prod = fp->rx_bd_prod;
913         bd_prod_fw = bd_prod;
914         sw_comp_cons = fp->rx_comp_cons;
915         sw_comp_prod = fp->rx_comp_prod;
916
917         comp_ring_cons = RCQ_BD(sw_comp_cons);
918         cqe = &fp->rx_comp_ring[comp_ring_cons];
919         cqe_fp = &cqe->fast_path_cqe;
920
921         DP(NETIF_MSG_RX_STATUS,
922            "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
923
924         while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
925                 struct sw_rx_bd *rx_buf = NULL;
926                 uint8_t cqe_fp_flags;
927                 enum eth_rx_cqe_type cqe_fp_type;
928                 uint16_t len, pad, queue;
929                 uint8_t *data;
930                 uint32_t rxhash;
931
932 #ifdef BNX2X_STOP_ON_ERROR
933                 if (unlikely(bp->panic))
934                         return 0;
935 #endif
936
937                 bd_prod = RX_BD(bd_prod);
938                 bd_cons = RX_BD(bd_cons);
939
940                 /* A rmb() is required to ensure that the CQE is not read
941                  * before it is written by the adapter DMA.  PCI ordering
942                  * rules will make sure the other fields are written before
943                  * the marker at the end of struct eth_fast_path_rx_cqe
944                  * but without rmb() a weakly ordered processor can process
945                  * stale data.  Without the barrier TPA state-machine might
946                  * enter inconsistent state and kernel stack might be
947                  * provided with incorrect packet description - these lead
948                  * to various kernel crashes.
949                  */
950                 rmb();
951
952                 cqe_fp_flags = cqe_fp->type_error_flags;
953                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
954
955                 DP(NETIF_MSG_RX_STATUS,
956                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
957                    CQE_TYPE(cqe_fp_flags),
958                    cqe_fp_flags, cqe_fp->status_flags,
959                    le32_to_cpu(cqe_fp->rss_hash_result),
960                    le16_to_cpu(cqe_fp->vlan_tag),
961                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
962
963                 /* is this a slowpath msg? */
964                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
965                         bnx2x_sp_event(fp, cqe);
966                         goto next_cqe;
967                 }
968
969 panic("Not implemented");
970 #if 0 // AKAROS_PORT
971                 rx_buf = &fp->rx_buf_ring[bd_cons];
972                 data = rx_buf->data;
973
974                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
975                         struct bnx2x_agg_info *tpa_info;
976                         uint16_t frag_size, pages;
977 #ifdef BNX2X_STOP_ON_ERROR
978                         /* sanity check */
979                         if (fp->disable_tpa &&
980                             (CQE_TYPE_START(cqe_fp_type) ||
981                              CQE_TYPE_STOP(cqe_fp_type)))
982                                 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
983                                           CQE_TYPE(cqe_fp_type));
984 #endif
985
986                         if (CQE_TYPE_START(cqe_fp_type)) {
987                                 uint16_t queue = cqe_fp->queue_index;
988                                 DP(NETIF_MSG_RX_STATUS,
989                                    "calling tpa_start on queue %d\n",
990                                    queue);
991
992                                 bnx2x_tpa_start(fp, queue,
993                                                 bd_cons, bd_prod,
994                                                 cqe_fp);
995
996                                 goto next_rx;
997                         }
998                         queue = cqe->end_agg_cqe.queue_index;
999                         tpa_info = &fp->tpa_info[queue];
1000                         DP(NETIF_MSG_RX_STATUS,
1001                            "calling tpa_stop on queue %d\n",
1002                            queue);
1003
1004                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
1005                                     tpa_info->len_on_bd;
1006
1007                         if (fp->mode == TPA_MODE_GRO)
1008                                 pages = (frag_size + tpa_info->full_page - 1) /
1009                                          tpa_info->full_page;
1010                         else
1011                                 pages = SGE_PAGE_ALIGN(frag_size) >>
1012                                         SGE_PAGE_SHIFT;
1013
1014                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1015                                        &cqe->end_agg_cqe, comp_ring_cons);
1016 #ifdef BNX2X_STOP_ON_ERROR
1017                         if (bp->panic)
1018                                 return 0;
1019 #endif
1020
1021                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1022                         goto next_cqe;
1023                 }
1024                 /* non TPA */
1025                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1026                 pad = cqe_fp->placement_offset;
1027                 dma_sync_single_for_cpu(&bp->pdev->dev,
1028                                         dma_unmap_addr(rx_buf, mapping),
1029                                         pad + RX_COPY_THRESH,
1030                                         DMA_FROM_DEVICE);
1031                 pad += NET_SKB_PAD;
1032                 prefetch(data + pad); /* speed up eth_type_trans() */
1033                 /* is this an error packet? */
1034                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1035                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1036                            "ERROR  flags %x  rx packet %u\n",
1037                            cqe_fp_flags, sw_comp_cons);
1038                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1039                         goto reuse_rx;
1040                 }
1041
1042                 /* Since we don't have a jumbo ring,
1043                  * copy small packets if mtu > 1500
1044                  */
1045                 if ((bp->dev->maxmtu > ETH_MAX_PACKET_SIZE) &&
1046                     (len <= RX_COPY_THRESH)) {
1047                         skb = napi_alloc_skb(&fp->napi, len);
1048                         if (skb == NULL) {
1049                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1050                                    "ERROR  packet dropped because of alloc failure\n");
1051                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1052                                 goto reuse_rx;
1053                         }
1054                         memcpy(skb->data, data + pad, len);
1055                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1056                 } else {
1057                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1058                                                        0) == 0)) {
1059                                 dma_unmap_single(&bp->pdev->dev,
1060                                                  dma_unmap_addr(rx_buf, mapping),
1061                                                  fp->rx_buf_size,
1062                                                  DMA_FROM_DEVICE);
1063                                 skb = build_skb(data, fp->rx_frag_size);
1064                                 if (unlikely(!skb)) {
1065                                         bnx2x_frag_free(fp, data);
1066                                         bnx2x_fp_qstats(bp, fp)->
1067                                                         rx_skb_alloc_failed++;
1068                                         goto next_rx;
1069                                 }
1070                                 skb_reserve(skb, pad);
1071                         } else {
1072                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1073                                    "ERROR  packet dropped because of alloc failure\n");
1074                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1075 reuse_rx:
1076                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1077                                 goto next_rx;
1078                         }
1079                 }
1080
1081                 skb_put(skb, len);
1082                 skb->protocol = eth_type_trans(skb, bp->dev);
1083
1084                 /* Set Toeplitz hash for a non-LRO skb */
1085                 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1086                 skb_set_hash(skb, rxhash, rxhash_type);
1087
1088                 skb_checksum_none_assert(skb);
1089
1090                 if (bp->dev->feat & NETIF_F_RXCSUM)
1091                         bnx2x_csum_validate(skb, cqe, fp,
1092                                             bnx2x_fp_qstats(bp, fp));
1093
1094                 skb_record_rx_queue(skb, fp->rx_queue);
1095
1096                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1097                     PARSING_FLAGS_VLAN)
1098                         __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1099                                                le16_to_cpu(cqe_fp->vlan_tag));
1100
1101                 skb_mark_napi_id(skb, &fp->napi);
1102
1103                 if (bnx2x_fp_ll_polling(fp))
1104                         netif_receive_skb(skb);
1105                 else
1106                         napi_gro_receive(&fp->napi, skb);
1107 next_rx:
1108                 rx_buf->data = NULL;
1109
1110                 bd_cons = NEXT_RX_IDX(bd_cons);
1111                 bd_prod = NEXT_RX_IDX(bd_prod);
1112                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1113                 rx_pkt++;
1114 #endif
1115 next_cqe:
1116                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1117                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1118
1119                 /* mark CQE as free */
1120                 BNX2X_SEED_CQE(cqe_fp);
1121
1122                 if (rx_pkt == budget)
1123                         break;
1124
1125                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1126                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1127                 cqe_fp = &cqe->fast_path_cqe;
1128         } /* while */
1129
1130         fp->rx_bd_cons = bd_cons;
1131         fp->rx_bd_prod = bd_prod_fw;
1132         fp->rx_comp_cons = sw_comp_cons;
1133         fp->rx_comp_prod = sw_comp_prod;
1134
1135         /* Update producers */
1136         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1137                              fp->rx_sge_prod);
1138
1139         fp->rx_pkt += rx_pkt;
1140         fp->rx_calls++;
1141
1142         return rx_pkt;
1143 }
1144
1145 static void bnx2x_msix_fp_int(struct hw_trapframe *hw_tf, void *fp_cookie)
1146 {
1147         struct bnx2x_fastpath *fp = fp_cookie;
1148         struct bnx2x *bp = fp->bp;
1149         uint8_t cos;
1150
1151         DP(NETIF_MSG_INTR,
1152            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1153            fp->index, fp->fw_sb_id, fp->igu_sb_id);
1154
1155         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1156
1157 #ifdef BNX2X_STOP_ON_ERROR
1158         if (unlikely(bp->panic))
1159                 return;
1160 #endif
1161
1162         /* Handle Rx and Tx according to MSI-X vector */
1163         for_each_cos_in_tx_queue(fp, cos)
1164                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1165
1166         prefetch(&fp->sb_running_index[SM_RX_ID]);
1167         // AKAROS_PORT
1168         send_kernel_message(core_id(), bnx2x_poll, (long)fp, 0, 0, KMSG_ROUTINE);
1169         napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1170
1171         return;
1172 }
1173
1174 /* HW Lock for shared dual port PHYs */
1175 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1176 {
1177         qlock(&bp->port.phy_mutex);
1178
1179         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1180 }
1181
1182 void bnx2x_release_phy_lock(struct bnx2x *bp)
1183 {
1184         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1185
1186         qunlock(&bp->port.phy_mutex);
1187 }
1188
1189 /* calculates MF speed according to current linespeed and MF configuration */
1190 uint16_t bnx2x_get_mf_speed(struct bnx2x *bp)
1191 {
1192         uint16_t line_speed = bp->link_vars.line_speed;
1193         if (IS_MF(bp)) {
1194                 uint16_t maxCfg = bnx2x_extract_max_cfg(bp,
1195                                                    bp->mf_config[BP_VN(bp)]);
1196
1197                 /* Calculate the current MAX line speed limit for the MF
1198                  * devices
1199                  */
1200                 if (IS_MF_SI(bp))
1201                         line_speed = (line_speed * maxCfg) / 100;
1202                 else { /* SD mode */
1203                         uint16_t vn_max_rate = maxCfg * 100;
1204
1205                         if (vn_max_rate < line_speed)
1206                                 line_speed = vn_max_rate;
1207                 }
1208         }
1209
1210         return line_speed;
1211 }
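/* Illustrative sketch (not part of the driver): reading the math above, in SI
 * mode maxCfg scales the physical line speed as a percentage, while in SD mode
 * maxCfg * 100 acts as an absolute cap in Mbps.  For a hypothetical 10000 Mbps
 * link with maxCfg = 50:
 */
#if 0 /* example only */
	uint16_t line_speed = 10000, maxCfg = 50;
	uint16_t si_speed = (line_speed * maxCfg) / 100;	/* 5000 Mbps */
	uint16_t sd_speed = MIN(line_speed, maxCfg * 100);	/* 5000 Mbps */
#endif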
1212
1213 /**
1214  * bnx2x_fill_report_data - fill link report data to report
1215  *
1216  * @bp:         driver handle
1217  * @data:       link state to update
1218  *
1219  * It uses non-atomic bit operations because it is called under the mutex.
1220  */
1221 static void bnx2x_fill_report_data(struct bnx2x *bp,
1222                                    struct bnx2x_link_report_data *data)
1223 {
1224 panic("Not implemented");
1225 #if 0 // AKAROS_PORT
1226         memset(data, 0, sizeof(*data));
1227
1228         if (IS_PF(bp)) {
1229                 /* Fill the report data: effective line speed */
1230                 data->line_speed = bnx2x_get_mf_speed(bp);
1231
1232                 /* Link is down */
1233                 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1234                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1235                                   &data->link_report_flags);
1236
1237                 if (!BNX2X_NUM_ETH_QUEUES(bp))
1238                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1239                                   &data->link_report_flags);
1240
1241                 /* Full DUPLEX */
1242                 if (bp->link_vars.duplex == DUPLEX_FULL)
1243                         __set_bit(BNX2X_LINK_REPORT_FD,
1244                                   &data->link_report_flags);
1245
1246                 /* Rx Flow Control is ON */
1247                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1248                         __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1249                                   &data->link_report_flags);
1250
1251                 /* Tx Flow Control is ON */
1252                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1253                         __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1254                                   &data->link_report_flags);
1255         } else { /* VF */
1256                 *data = bp->vf_link_vars;
1257         }
1258 #endif
1259 }
1260
1261 /**
1262  * bnx2x_link_report - report link status to OS.
1263  *
1264  * @bp:         driver handle
1265  *
1266  * Calls __bnx2x_link_report() under the same locking scheme
1267  * as the link/PHY state managing code to ensure consistent link
1268  * reporting.
1269  */
1270
1271 void bnx2x_link_report(struct bnx2x *bp)
1272 {
1273         bnx2x_acquire_phy_lock(bp);
1274         __bnx2x_link_report(bp);
1275         bnx2x_release_phy_lock(bp);
1276 }
1277
1278 /**
1279  * __bnx2x_link_report - report link status to OS.
1280  *
1281  * @bp:         driver handle
1282  *
1283  * Non-atomic implementation.
1284  * Should be called under the phy_lock.
1285  */
1286 void __bnx2x_link_report(struct bnx2x *bp)
1287 {
1288 panic("Not implemented");
1289 #if 0 // AKAROS_PORT
1290         struct bnx2x_link_report_data cur_data;
1291
1292         /* reread mf_cfg */
1293         if (IS_PF(bp) && !CHIP_IS_E1(bp))
1294                 bnx2x_read_mf_cfg(bp);
1295
1296         /* Read the current link report info */
1297         bnx2x_fill_report_data(bp, &cur_data);
1298
1299         /* Don't report link down or exactly the same link status twice */
1300         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1301             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1302                       &bp->last_reported_link.link_report_flags) &&
1303              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1304                       &cur_data.link_report_flags)))
1305                 return;
1306
1307         bp->link_cnt++;
1308
1309         /* We are going to report new link parameters now -
1310          * remember the current data for next time.
1311          */
1312         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1313
1314         /* propagate status to VFs */
1315         if (IS_PF(bp))
1316                 bnx2x_iov_link_update(bp);
1317
1318         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1319                      &cur_data.link_report_flags)) {
1320                 netif_carrier_off(bp->dev);
1321                 netdev_err(bp->dev, "NIC Link is Down\n");
1322                 return;
1323         } else {
1324                 const char *duplex;
1325                 const char *flow;
1326
1327                 netif_carrier_on(bp->dev);
1328
1329                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1330                                        &cur_data.link_report_flags))
1331                         duplex = "full";
1332                 else
1333                         duplex = "half";
1334
1335                 /* Handle the FC at the end so that only these flags could
1336                  * possibly be set. This way we can easily check whether any FC
1337                  * is enabled.
1338                  */
1339                 if (cur_data.link_report_flags) {
1340                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1341                                      &cur_data.link_report_flags)) {
1342                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1343                                      &cur_data.link_report_flags))
1344                                         flow = "ON - receive & transmit";
1345                                 else
1346                                         flow = "ON - receive";
1347                         } else {
1348                                 flow = "ON - transmit";
1349                         }
1350                 } else {
1351                         flow = "none";
1352                 }
1353                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1354                             cur_data.line_speed, duplex, flow);
1355         }
1356 #endif
1357 }
1358
1359 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1360 {
1361         int i;
1362
1363         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1364                 struct eth_rx_sge *sge;
1365
1366                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1367                 sge->addr_hi =
1368                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1369                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1370
1371                 sge->addr_lo =
1372                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1373                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1374         }
1375 }
1376
1377 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1378                                 struct bnx2x_fastpath *fp, int last)
1379 {
1380         int i;
1381
1382         for (i = 0; i < last; i++) {
1383                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1384                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1385                 uint8_t *data = first_buf->data;
1386
1387                 if (data == NULL) {
1388                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1389                         continue;
1390                 }
1391                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1392                         dma_unmap_single(&bp->pdev->dev,
1393                                          dma_unmap_addr(first_buf, mapping),
1394                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1395                 bnx2x_frag_free(fp, data);
1396                 first_buf->data = NULL;
1397         }
1398 }
1399
1400 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1401 {
1402         int j;
1403
1404         for_each_rx_queue_cnic(bp, j) {
1405                 struct bnx2x_fastpath *fp = &bp->fp[j];
1406
1407                 fp->rx_bd_cons = 0;
1408
1409                 /* Activate BD ring */
1410                 /* Warning!
1411                  * This will generate an interrupt (to the TSTORM);
1412                  * it must only be done after the chip is initialized.
1413                  */
1414                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1415                                      fp->rx_sge_prod);
1416         }
1417 }
1418
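/* For every ETH queue: fill the TPA aggregation pool and the SGE ring (TPA is
 * disabled on a queue if its allocations fail), then activate the BD rings.
 * On E1 chips the USTORM memory workaround address is also programmed for
 * queue 0.
 */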
1419 void bnx2x_init_rx_rings(struct bnx2x *bp)
1420 {
1421         int func = BP_FUNC(bp);
1422         uint16_t ring_prod;
1423         int i, j;
1424
1425         /* Allocate TPA resources */
1426         for_each_eth_queue(bp, j) {
1427                 struct bnx2x_fastpath *fp = &bp->fp[j];
1428
1429                 DP(NETIF_MSG_IFUP,
1430                    "mtu %d  rx_buf_size %d\n", bp->dev->maxmtu, fp->rx_buf_size);
1431
1432                 if (!fp->disable_tpa) {
1433                         /* Fill the per-aggregation pool */
1434                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1435                                 struct bnx2x_agg_info *tpa_info =
1436                                         &fp->tpa_info[i];
1437                                 struct sw_rx_bd *first_buf =
1438                                         &tpa_info->first_buf;
1439
1440                                 first_buf->data =
1441                                         bnx2x_frag_alloc(fp, KMALLOC_WAIT);
1442                                 if (!first_buf->data) {
1443                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1444                                                   j);
1445                                         bnx2x_free_tpa_pool(bp, fp, i);
1446                                         fp->disable_tpa = 1;
1447                                         break;
1448                                 }
1449                                 dma_unmap_addr_set(first_buf, mapping, 0);
1450                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1451                         }
1452
1453                         /* "next page" elements initialization */
1454                         bnx2x_set_next_page_sgl(fp);
1455
1456                         /* set SGEs bit mask */
1457                         bnx2x_init_sge_ring_bit_mask(fp);
1458
1459                         /* Allocate SGEs and initialize the ring elements */
1460                         for (i = 0, ring_prod = 0;
1461                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1462
1463                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1464                                                        KMALLOC_WAIT) < 0) {
1465                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1466                                                   i);
1467                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1468                                                   j);
1469                                         /* Cleanup already allocated elements */
1470                                         bnx2x_free_rx_sge_range(bp, fp,
1471                                                                 ring_prod);
1472                                         bnx2x_free_tpa_pool(bp, fp,
1473                                                             MAX_AGG_QS(bp));
1474                                         fp->disable_tpa = 1;
1475                                         ring_prod = 0;
1476                                         break;
1477                                 }
1478                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1479                         }
1480
1481                         fp->rx_sge_prod = ring_prod;
1482                 }
1483         }
1484
1485         for_each_eth_queue(bp, j) {
1486                 struct bnx2x_fastpath *fp = &bp->fp[j];
1487
1488                 fp->rx_bd_cons = 0;
1489
1490                 /* Activate BD ring */
1491                 /* Warning!
1492                  * This will generate an interrupt (to the TSTORM);
1493                  * it must only be done after the chip is initialized.
1494                  */
1495                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1496                                      fp->rx_sge_prod);
1497
1498                 if (j != 0)
1499                         continue;
1500
1501                 if (CHIP_IS_E1(bp)) {
1502                         REG_WR(bp, BAR_USTRORM_INTMEM +
1503                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1504                                U64_LO(fp->rx_comp_mapping));
1505                         REG_WR(bp, BAR_USTRORM_INTMEM +
1506                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1507                                U64_HI(fp->rx_comp_mapping));
1508                 }
1509         }
1510 }
1511
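/* Free every pending Tx packet on each CoS ring of the fastpath and reset the
 * corresponding netdev Tx queue (the body is compiled out in the Akaros port,
 * so this currently panics).
 */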
1512 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1513 {
1514 panic("Not implemented");
1515 #if 0 // AKAROS_PORT
1516         uint8_t cos;
1517         struct bnx2x *bp = fp->bp;
1518
1519         for_each_cos_in_tx_queue(fp, cos) {
1520                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1521                 unsigned pkts_compl = 0, bytes_compl = 0;
1522
1523                 uint16_t sw_prod = txdata->tx_pkt_prod;
1524                 uint16_t sw_cons = txdata->tx_pkt_cons;
1525
1526                 while (sw_cons != sw_prod) {
1527                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1528                                           &pkts_compl, &bytes_compl);
1529                         sw_cons++;
1530                 }
1531
1532                 netdev_tx_reset_queue(
1533                         netdev_get_tx_queue(bp->dev,
1534                                             txdata->txq_index));
1535         }
1536 #endif
1537 }
1538
1539 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1540 {
1541         int i;
1542
1543         for_each_tx_queue_cnic(bp, i) {
1544                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1545         }
1546 }
1547
1548 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1549 {
1550         int i;
1551
1552         for_each_eth_queue(bp, i) {
1553                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1554         }
1555 }
1556
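/* Unmap and free every populated buffer in the fastpath's Rx BD ring; a NULL
 * rx_buf_ring means the ring was never allocated and there is nothing to do.
 */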
1557 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1558 {
1559         struct bnx2x *bp = fp->bp;
1560         int i;
1561
1562         /* ring wasn't allocated */
1563         if (fp->rx_buf_ring == NULL)
1564                 return;
1565
1566         for (i = 0; i < NUM_RX_BD; i++) {
1567                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1568                 uint8_t *data = rx_buf->data;
1569
1570                 if (data == NULL)
1571                         continue;
1572                 dma_unmap_single(&bp->pdev->dev,
1573                                  dma_unmap_addr(rx_buf, mapping),
1574                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1575
1576                 rx_buf->data = NULL;
1577                 bnx2x_frag_free(fp, data);
1578         }
1579 }
1580
1581 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1582 {
1583         int j;
1584
1585         for_each_rx_queue_cnic(bp, j) {
1586                 bnx2x_free_rx_bds(&bp->fp[j]);
1587         }
1588 }
1589
1590 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1591 {
1592         int j;
1593
1594         for_each_eth_queue(bp, j) {
1595                 struct bnx2x_fastpath *fp = &bp->fp[j];
1596
1597                 bnx2x_free_rx_bds(fp);
1598
1599                 if (!fp->disable_tpa)
1600                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1601         }
1602 }
1603
1604 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1605 {
1606         bnx2x_free_tx_skbs_cnic(bp);
1607         bnx2x_free_rx_skbs_cnic(bp);
1608 }
1609
1610 void bnx2x_free_skbs(struct bnx2x *bp)
1611 {
1612         bnx2x_free_tx_skbs(bp);
1613         bnx2x_free_rx_skbs(bp);
1614 }
1615
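/* Update the MAX bandwidth field of this function's MF configuration and
 * notify the MCP, but only if the requested value differs from the current
 * one.
 */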
1616 void bnx2x_update_max_mf_config(struct bnx2x *bp, uint32_t value)
1617 {
1618         /* load old values */
1619         uint32_t mf_cfg = bp->mf_config[BP_VN(bp)];
1620
1621         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1622                 /* leave all but MAX value */
1623                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1624
1625                 /* set new MAX value */
1626                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1627                                 & FUNC_MF_CFG_MAX_BW_MASK;
1628
1629                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1630         }
1631 }
1632
1633 /**
1634  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1635  *
1636  * @bp:         driver handle
1637  * @nvecs:      number of vectors to be released
1638  */
1639 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1640 {
1641 panic("Not implemented");
1642 #if 0 // AKAROS_PORT
1643         int i, offset = 0;
1644
1645         if (nvecs == offset)
1646                 return;
1647
1648         /* VFs don't have a default SB */
1649         if (IS_PF(bp)) {
1650                 free_irq(bp->msix_table[offset].vector, bp->dev);
1651                 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1652                    bp->msix_table[offset].vector);
1653                 offset++;
1654         }
1655
1656         if (CNIC_SUPPORT(bp)) {
1657                 if (nvecs == offset)
1658                         return;
1659                 offset++;
1660         }
1661
1662         for_each_eth_queue(bp, i) {
1663                 if (nvecs == offset)
1664                         return;
1665                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1666                    i, bp->msix_table[offset].vector);
1667
1668                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1669         }
1670 #endif
1671 }
1672
1673 void bnx2x_free_irq(struct bnx2x *bp)
1674 {
1675 panic("Not implemented");
1676 #if 0 // AKAROS_PORT
1677         if (bp->flags & USING_MSIX_FLAG &&
1678             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1679                 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1680
1681                 /* vfs don't have a default status block */
1682                 if (IS_PF(bp))
1683                         nvecs++;
1684
1685                 bnx2x_free_msix_irqs(bp, nvecs);
1686         } else {
1687                 free_irq(bp->dev->irq, bp->dev);
1688         }
1689 #endif
1690 }
1691
1692 int bnx2x_enable_msix(struct bnx2x *bp)
1693 {
1694         int msix_vec = 0, i, rc;
1695 panic("Not implemented");
1696 #if 0 // AKAROS_PORT
1697         /* VFs don't have a default status block */
1698         if (IS_PF(bp)) {
1699                 bp->msix_table[msix_vec].entry = msix_vec;
1700                 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1701                                bp->msix_table[0].entry);
1702                 msix_vec++;
1703         }
1704
1705         /* Cnic requires an msix vector for itself */
1706         if (CNIC_SUPPORT(bp)) {
1707                 bp->msix_table[msix_vec].entry = msix_vec;
1708                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1709                                msix_vec, bp->msix_table[msix_vec].entry);
1710                 msix_vec++;
1711         }
1712
1713         /* We need separate vectors for ETH queues only (not FCoE) */
1714         for_each_eth_queue(bp, i) {
1715                 bp->msix_table[msix_vec].entry = msix_vec;
1716                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1717                                msix_vec, msix_vec, i);
1718                 msix_vec++;
1719         }
1720
1721         DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1722            msix_vec);
1723
1724         rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1725                                    BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1726         /*
1727          * reconfigure number of tx/rx queues according to available
1728          * MSI-X vectors
1729          */
1730         if (rc == -ENOSPC) {
1731                 /* Get by with single vector */
1732                 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1733                 if (rc < 0) {
1734                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1735                                        rc);
1736                         goto no_msix;
1737                 }
1738
1739                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1740                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1741
1742                 BNX2X_DEV_INFO("set number of queues to 1\n");
1743                 bp->num_ethernet_queues = 1;
1744                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1745         } else if (rc < 0) {
1746                 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1747                 goto no_msix;
1748         } else if (rc < msix_vec) {
1749                 /* how many fewer vectors will we have? */
1750                 int diff = msix_vec - rc;
1751
1752                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1753
1754                 /*
1755                  * decrease number of queues by number of unallocated entries
1756                  */
1757                 bp->num_ethernet_queues -= diff;
1758                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1759
1760                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1761                                bp->num_queues);
1762         }
1763
1764         bp->flags |= USING_MSIX_FLAG;
1765
1766         return 0;
1767
1768 no_msix:
1769         /* fall back to INTx if not enough memory */
1770         if (rc == -ENOMEM)
1771                 bp->flags |= DISABLE_MSI_FLAG;
1772
1773         return rc;
1774 #endif
1775 }
1776
1777 static void bullshit_handler(struct hw_trapframe *hw_tf, void *cnic_turd)
1778 {
1779         printk("bnx2x CNIC IRQ fired.  Probably a bug!\n");
1780 }
1781
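/* Register the MSI-X vectors: the slowpath vector (PF only), a stub handler
 * for the CNIC slot (AKAROS_PORT: the port does not service CNIC interrupts),
 * and one fastpath vector per ETH queue.  If a fastpath registration fails,
 * the vectors registered so far are released.
 */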
1782 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1783 {
1784         int i, rc, offset = 0;
1785
1786         /* no default status block for vf */
1787         if (IS_PF(bp)) {
1788                 rc = register_irq(bp->msix_table[offset++].vector,
1789                                   bnx2x_msix_sp_int, bp->dev,
1790                                   pci_to_tbdf(bp->pdev));
1791                 if (rc) {
1792                         BNX2X_ERR("request sp irq failed\n");
1793                         return -EBUSY;
1794                 }
1795         }
1796
1797         if (CNIC_SUPPORT(bp)) {
1798                 offset++;
1799                 // AKAROS_PORT
1800                 rc = register_irq(0, bullshit_handler, 0, pci_to_tbdf(bp->pdev));
1801                 if (rc) {
1802                         BNX2X_ERR("Failed to register a CNIC MSI-X vector!\n");
1803                         return -EBUSY;
1804                 }
1805         }
1806
1807         for_each_eth_queue(bp, i) {
1808                 struct bnx2x_fastpath *fp = &bp->fp[i];
1809                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1810                          bp->dev->name, i);
1811
1812                 rc = register_irq(bp->msix_table[offset].vector,
1813                                   bnx2x_msix_fp_int, fp, pci_to_tbdf(bp->pdev));
1814                 if (rc) {
1815                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1816                               bp->msix_table[offset].vector, rc);
1817                         bnx2x_free_msix_irqs(bp, offset);
1818                         return -EBUSY;
1819                 }
1820
1821                 offset++;
1822         }
1823
1824         i = BNX2X_NUM_ETH_QUEUES(bp);
1825         if (IS_PF(bp)) {
1826                 offset = 1 + CNIC_SUPPORT(bp);
1827                 netdev_info(bp->dev,
1828                             "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1829                             bp->msix_table[0].vector,
1830                             0, bp->msix_table[offset].vector,
1831                             i - 1, bp->msix_table[offset + i - 1].vector);
1832         } else {
1833                 offset = CNIC_SUPPORT(bp);
1834                 netdev_info(bp->dev,
1835                             "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1836                             0, bp->msix_table[offset].vector,
1837                             i - 1, bp->msix_table[offset + i - 1].vector);
1838         }
1839         return 0;
1840 }
1841
1842 int bnx2x_enable_msi(struct bnx2x *bp)
1843 {
1844 panic("Not implemented");
1845 #if 0 // AKAROS_PORT
1846         int rc;
1847
1848         rc = pci_enable_msi(bp->pdev);
1849         if (rc) {
1850                 BNX2X_DEV_INFO("MSI is not attainable\n");
1851                 return -1;
1852         }
1853         bp->flags |= USING_MSI_FLAG;
1854
1855         return 0;
1856 #endif
1857 }
1858
1859 static int bnx2x_req_irq(struct bnx2x *bp)
1860 {
1861         unsigned long flags;
1862 panic("Not implemented");
1863 #if 0 // AKAROS_PORT
1864         unsigned int irq;
1865
1866         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1867                 flags = 0;
1868         else
1869                 flags = IRQF_SHARED;
1870
1871         if (bp->flags & USING_MSIX_FLAG)
1872                 irq = bp->msix_table[0].vector;
1873         else
1874                 irq = bp->pdev->irq;
1875
1876         return register_irq(irq, bnx2x_interrupt, bp->dev,
1877                             pci_to_tbdf(bp->pdev));
1878 #endif
1879 }
1880
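/* AKAROS_PORT: interrupts are always hooked up through bnx2x_req_msix_irqs()
 * and register_irq(); the Linux MSI/INTx fallback below is compiled out.
 */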
1881 static int bnx2x_setup_irqs(struct bnx2x *bp)
1882 {
1883         return bnx2x_req_msix_irqs(bp);
1884 #if 0 // AKAROS_PORT we just register_irq
1885         if (bp->flags & USING_MSIX_FLAG &&
1886             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1887                 rc = bnx2x_req_msix_irqs(bp);
1888                 if (rc)
1889                         return rc;
1890         } else {
1891                 rc = bnx2x_req_irq(bp);
1892                 if (rc) {
1893                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1894                         return rc;
1895                 }
1896                 if (bp->flags & USING_MSI_FLAG) {
1897                         bp->dev->irq = bp->pdev->irq;
1898                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1899                                     bp->dev->irq);
1900                 }
1901                 if (bp->flags & USING_MSIX_FLAG) {
1902                         bp->dev->irq = bp->msix_table[0].vector;
1903                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1904                                     bp->dev->irq);
1905                 }
1906         }
1907
1908         return 0;
1909 #endif
1910 }
1911
1912 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1913 {
1914         int i;
1915
1916         for_each_rx_queue_cnic(bp, i) {
1917                 bnx2x_fp_init_lock(&bp->fp[i]);
1918                 napi_enable(&bnx2x_fp(bp, i, napi));
1919         }
1920 }
1921
1922 static void bnx2x_napi_enable(struct bnx2x *bp)
1923 {
1924         int i;
1925
1926         for_each_eth_queue(bp, i) {
1927                 bnx2x_fp_init_lock(&bp->fp[i]);
1928                 napi_enable(&bnx2x_fp(bp, i, napi));
1929         }
1930 }
1931
1932 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1933 {
1934         int i;
1935
1936         for_each_rx_queue_cnic(bp, i) {
1937                 napi_disable(&bnx2x_fp(bp, i, napi));
1938                 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1939                         kthread_usleep(1000);
1940         }
1941 }
1942
1943 static void bnx2x_napi_disable(struct bnx2x *bp)
1944 {
1945         int i;
1946
1947         for_each_eth_queue(bp, i) {
1948                 napi_disable(&bnx2x_fp(bp, i, napi));
1949                 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1950                         kthread_usleep(1000);
1951         }
1952 }
1953
1954 void bnx2x_netif_start(struct bnx2x *bp)
1955 {
1956 panic("Not implemented");
1957 #if 0 // AKAROS_PORT
1958         if (netif_running(bp->dev)) {
1959                 bnx2x_napi_enable(bp);
1960                 if (CNIC_LOADED(bp))
1961                         bnx2x_napi_enable_cnic(bp);
1962                 bnx2x_int_enable(bp);
1963                 if (bp->state == BNX2X_STATE_OPEN)
1964                         netif_tx_wake_all_queues(bp->dev);
1965         }
1966 #endif
1967 }
1968
1969 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1970 {
1971         bnx2x_int_disable_sync(bp, disable_hw);
1972         bnx2x_napi_disable(bp);
1973         if (CNIC_LOADED(bp))
1974                 bnx2x_napi_disable_cnic(bp);
1975 }
1976
1977 uint16_t bnx2x_select_queue(struct ether *dev, struct sk_buff *skb,
1978                        void *accel_priv, select_queue_fallback_t fallback)
1979 {
1980 panic("Not implemented");
1981 #if 0 // AKAROS_PORT
1982         struct bnx2x *bp = netdev_priv(dev);
1983
1984         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1985                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1986                 uint16_t ether_type = be16_to_cpu(hdr->h_proto);
1987
1988                 /* Skip VLAN tag if present */
1989                 if (ether_type == ETH_P_8021Q) {
1990                         struct vlan_ethhdr *vhdr =
1991                                 (struct vlan_ethhdr *)skb->data;
1992
1993                         ether_type = be16_to_cpu(vhdr->h_vlan_encapsulated_proto);
1994                 }
1995
1996                 /* If ethertype is FCoE or FIP - use FCoE ring */
1997                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1998                         return bnx2x_fcoe_tx(bp, txq_index);
1999         }
2000
2001         /* select a non-FCoE queue */
2002         return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
2003 #endif
2004 }
2005
2006 void bnx2x_set_num_queues(struct bnx2x *bp)
2007 {
2008         /* RSS queues */
2009         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
2010
2011         /* override in STORAGE SD modes */
2012         if (IS_MF_STORAGE_ONLY(bp))
2013                 bp->num_ethernet_queues = 1;
2014
2015         /* Add special queues */
2016         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
2017         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
2018
2019         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
2020 }
2021
2022 /**
2023  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
2024  *
2025  * @bp:         Driver handle
2026  *
2027  * We currently support at most 16 Tx queues for each CoS, thus we will
2028  * allocate a multiple of 16 for ETH L2 rings, according to the value of
2029  * bp->max_cos.
2030  *
2031  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
2032  * index after all ETH L2 indices.
2033  *
2034  * If the actual number of Tx queues (for each CoS) is less than 16, then there
2035  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
2036  * 16..31, ...) with indices that are not coupled with any real Tx queue.
2037  *
2038  * The proper configuration of skb->queue_mapping is handled by
2039  * bnx2x_select_queue() and __skb_tx_hash().
2040  *
2041  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
2042  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
2043  */
2044 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
2045 {
2046         int rc, tx, rx;
2047
2048         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
2049         rx = BNX2X_NUM_ETH_QUEUES(bp);
2050
2051         /* account for the FCoE queue */
2052         if (include_cnic && !NO_FCOE(bp)) {
2053                 rx++;
2054                 tx++;
2055         }
2056
2057 #if 0 // AKAROS_PORT XME: set queues in ether
2058         rc = netif_set_real_num_tx_queues(bp->dev, tx);
2059         if (rc) {
2060                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2061                 return rc;
2062         }
2063         rc = netif_set_real_num_rx_queues(bp->dev, rx);
2064         if (rc) {
2065                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2066                 return rc;
2067         }
2068 #else
2069         rc = 0;
2070 #endif
2071
2072         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2073                           tx, rx);
2074
2075         return rc;
2076 }
2077
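/**
 * bnx2x_set_rx_buf_size - compute per-queue Rx buffer sizes.
 *
 * @bp:         driver handle
 *
 * For each queue, rx_buf_size is the queue's MTU (mini-jumbo for the FCoE
 * ring, the device MTU otherwise) plus the FW alignment head/tail, the IP
 * header alignment padding and the Ethernet overhead.  If that buffer
 * (plus NET_SKB_PAD) still fits in a page, rx_frag_size is set so buffers
 * can be carved from page fragments; otherwise it is left at 0.
 */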
2078 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2079 {
2080         int i;
2081
2082         for_each_queue(bp, i) {
2083                 struct bnx2x_fastpath *fp = &bp->fp[i];
2084                 uint32_t mtu;
2085
2086                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2087                 if (IS_FCOE_IDX(i))
2088                         /*
2089                          * Although there are no IP frames expected to arrive to
2090                          * this ring we still want to add an
2091                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2092                          * overrun attack.
2093                          */
2094                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2095                 else
2096                         mtu = bp->dev->maxmtu;
2097                 /* AKAROS_PORT XME struct block alignment and size issues? */
2098                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2099                                   IP_HEADER_ALIGNMENT_PADDING +
2100                                   ETH_OVREHEAD +
2101                                   mtu +
2102                                   BNX2X_FW_RX_ALIGN_END;
2103                 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
2104                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2105                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2106                 else
2107                         fp->rx_frag_size = 0;
2108         }
2109 }
2110
2111 static int bnx2x_init_rss(struct bnx2x *bp)
2112 {
2113 panic("Not implemented");
2114 #if 0 // AKAROS_PORT
2115         int i;
2116         uint8_t num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2117
2118         /* Prepare the initial contents for the indirection table if RSS is
2119          * enabled
2120          */
2121         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2122                 bp->rss_conf_obj.ind_table[i] =
2123                         bp->fp->cl_id +
2124                         ethtool_rxfh_indir_default(i, num_eth_queues);
2125
2126         /*
2127          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2128          * per-port, so if explicit configuration is needed, do it only
2129          * for a PMF.
2130          *
2131          * For 57712 and newer on the other hand it's a per-function
2132          * configuration.
2133          */
2134         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2135 #endif
2136 }
2137
2138 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2139               bool config_hash, bool enable)
2140 {
2141 panic("Not implemented");
2142 #if 0 // AKAROS_PORT
2143         struct bnx2x_config_rss_params params = {NULL};
2144
2145         /* Although RSS is meaningless when there is a single HW queue we
2146          * still need it enabled in order to have HW Rx hash generated.
2147          *
2148          * if (!is_eth_multi(bp))
2149          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2150          */
2151
2152         params.rss_obj = rss_obj;
2153
2154         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2155
2156         if (enable) {
2157                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2158
2159                 /* RSS configuration */
2160                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2161                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2162                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2163                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2164                 if (rss_obj->udp_rss_v4)
2165                         __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2166                 if (rss_obj->udp_rss_v6)
2167                         __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2168
2169                 if (!CHIP_IS_E1x(bp))
2170                         /* valid only for TUNN_MODE_GRE tunnel mode */
2171                         __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
2172         } else {
2173                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2174         }
2175
2176         /* Hash bits */
2177         params.rss_result_mask = MULTI_MASK;
2178
2179         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2180
2181         if (config_hash) {
2182                 /* RSS keys */
2183                 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2184                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2185         }
2186
2187         if (IS_PF(bp))
2188                 return bnx2x_config_rss(bp, &params);
2189         else
2190                 return bnx2x_vfpf_config_rss(bp, &params);
2191 #endif
2192 }
2193
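/**
 * bnx2x_init_hw - run the HW_INIT function-state transition.
 *
 * @bp:         driver handle
 * @load_code:  load phase reported by the MCP
 *
 * Issues a BNX2X_F_CMD_HW_INIT ramrod for the given load phase and waits
 * for it to complete.
 */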
2194 static int bnx2x_init_hw(struct bnx2x *bp, uint32_t load_code)
2195 {
2196         struct bnx2x_func_state_params func_params = {NULL};
2197
2198         /* Prepare parameters for function state transitions */
2199         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2200
2201         func_params.f_obj = &bp->func_obj;
2202         func_params.cmd = BNX2X_F_CMD_HW_INIT;
2203
2204         func_params.params.hw_init.load_phase = load_code;
2205
2206         return bnx2x_func_state_change(bp, &func_params);
2207 }
2208
2209 /*
2210  * Cleans the objects that have internal lists without sending
2211  * ramrods. Should be run when interrupts are disabled.
2212  */
2213 void bnx2x_squeeze_objects(struct bnx2x *bp)
2214 {
2215         int rc;
2216         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2217         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2218         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2219
2220         /***************** Cleanup MACs' object first *************************/
2221
2222         /* Wait for completion of the requested commands */
2223         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2224         /* Perform a dry cleanup */
2225         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2226
2227         /* Clean ETH primary MAC */
2228         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2229         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2230                                  &ramrod_flags);
2231         if (rc != 0)
2232                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2233
2234         /* Cleanup UC list */
2235         vlan_mac_flags = 0;
2236         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2237         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2238                                  &ramrod_flags);
2239         if (rc != 0)
2240                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2241
2242         /***************** Now clean mcast object *****************************/
2243         rparam.mcast_obj = &bp->mcast_obj;
2244         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2245
2246         /* Add a DEL command... - Since we're doing a driver cleanup only,
2247          * we take a lock surrounding both the initial send and the CONTs,
2248          * as we don't want a true completion to disrupt us in the middle.
2249          */
2250 // KPF HERE (prob not init) XME (devether qlock. really the first time?)
2251         qlock(&bp->dev->qlock);
2252         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2253         if (rc < 0)
2254                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2255                           rc);
2256
2257         /* ...and wait until all pending commands are cleared */
2258         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2259         while (rc != 0) {
2260                 if (rc < 0) {
2261                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2262                                   rc);
2263                         qunlock(&bp->dev->qlock);
2264                         return;
2265                 }
2266
2267                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2268         }
2269         qunlock(&bp->dev->qlock);
2270 }
2271
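/* LOAD_ERROR_EXIT() marks the adapter state as errored and jumps to the given
 * unwind label; LOAD_ERROR_EXIT_CNIC() clears cnic_loaded instead.  When
 * BNX2X_STOP_ON_ERROR is defined, both variants set bp->panic and return
 * -EBUSY so the failure is caught immediately.
 */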
2272 #ifndef BNX2X_STOP_ON_ERROR
2273 #define LOAD_ERROR_EXIT(bp, label) \
2274         do { \
2275                 (bp)->state = BNX2X_STATE_ERROR; \
2276                 goto label; \
2277         } while (0)
2278
2279 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2280         do { \
2281                 bp->cnic_loaded = false; \
2282                 goto label; \
2283         } while (0)
2284 #else /*BNX2X_STOP_ON_ERROR*/
2285 #define LOAD_ERROR_EXIT(bp, label) \
2286         do { \
2287                 (bp)->state = BNX2X_STATE_ERROR; \
2288                 (bp)->panic = 1; \
2289                 return -EBUSY; \
2290         } while (0)
2291 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2292         do { \
2293                 bp->cnic_loaded = false; \
2294                 (bp)->panic = 1; \
2295                 return -EBUSY; \
2296         } while (0)
2297 #endif /*BNX2X_STOP_ON_ERROR*/
2298
2299 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2300 {
2301         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2302                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2303         return;
2304 }
2305
2306 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2307 {
2308         int num_groups, vf_headroom = 0;
2309         int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2310
2311         /* number of queues for statistics is number of eth queues + FCoE */
2312         uint8_t num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2313
2314         /* Total number of FW statistics requests =
2315          * 1 for port stats + 1 for PF stats + potentially 2 for FCoE (fcoe proper
2316          * and fcoe l2 queue) stats + num of queues (which includes another 1
2317          * for fcoe l2 queue if applicable)
2318          */
2319         bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2320
2321         /* vf stats appear in the request list, but their data is allocated by
2322          * the VFs themselves. We don't include them in the bp->fw_stats_num as
2323          * it is used to determine where to place the vf stats queries in the
2324          * request struct
2325          */
2326         if (IS_SRIOV(bp))
2327                 vf_headroom = bnx2x_vf_headroom(bp);
2328
2329         /* Request is built from stats_query_header and an array of
2330          * stats_query_cmd_group each of which contains
2331          * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2332          * configured in the stats_query_header.
2333          */
2334         num_groups =
2335                 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2336                  (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2337                  1 : 0));
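        /* i.e. num_groups = DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
         *                                STATS_QUERY_CMD_COUNT)
         */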
2338
2339         DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2340            bp->fw_stats_num, vf_headroom, num_groups);
2341         bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2342                 num_groups * sizeof(struct stats_query_cmd_group);
2343
2344         /* Data for statistics requests + stats_counter
2345          * stats_counter holds per-STORM counters that are incremented
2346          * when STORM has finished with the current request.
2347          * Memory for FCoE offloaded statistics is counted anyway,
2348          * even if they will not be sent.
2349          * VF stats are not accounted for here as the data of VF stats is stored
2350          * in memory allocated by the VF, not here.
2351          */
2352         bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2353                 sizeof(struct per_pf_stats) +
2354                 sizeof(struct fcoe_statistics_params) +
2355                 sizeof(struct per_queue_stats) * num_queue_stats +
2356                 sizeof(struct stats_counter);
2357
2358         bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2359                                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2360         if (!bp->fw_stats)
2361                 goto alloc_mem_err;
2362
2363         /* Set shortcuts */
2364         bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2365         bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2366         bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2367                 ((uint8_t *)bp->fw_stats + bp->fw_stats_req_sz);
2368         bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2369                 bp->fw_stats_req_sz;
2370
2371         DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2372            U64_HI(bp->fw_stats_req_mapping),
2373            U64_LO(bp->fw_stats_req_mapping));
2374         DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2375            U64_HI(bp->fw_stats_data_mapping),
2376            U64_LO(bp->fw_stats_data_mapping));
2377         return 0;
2378
2379 alloc_mem_err:
2380         bnx2x_free_fw_stats_mem(bp);
2381         BNX2X_ERR("Can't allocate FW stats memory\n");
2382         return -ENOMEM;
2383 }
2384
2385 /* send load request to mcp and analyze response */
2386 static int bnx2x_nic_load_request(struct bnx2x *bp, uint32_t *load_code)
2387 {
2388         uint32_t param;
2389
2390         /* init fw_seq */
2391         bp->fw_seq =
2392                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2393                  DRV_MSG_SEQ_NUMBER_MASK);
2394         BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2395
2396         /* Get current FW pulse sequence */
2397         bp->fw_drv_pulse_wr_seq =
2398                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2399                  DRV_PULSE_SEQ_MASK);
2400         BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2401
2402         param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2403
2404         if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2405                 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2406
2407         /* load request */
2408         (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2409
2410         /* if mcp fails to respond we must abort */
2411         if (!(*load_code)) {
2412                 BNX2X_ERR("MCP response failure, aborting\n");
2413                 return -EBUSY;
2414         }
2415
2416         /* If mcp refused (e.g. other port is in diagnostic mode) we
2417          * must abort
2418          */
2419         if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2420                 BNX2X_ERR("MCP refused load request, aborting\n");
2421                 return -EBUSY;
2422         }
2423         return 0;
2424 }
2425
2426 /* check whether another PF has already loaded FW to chip. In
2427  * virtualized environments a pf from another VM may have already
2428  * initialized the device including loading FW
2429  */
2430 int bnx2x_compare_fw_ver(struct bnx2x *bp, uint32_t load_code,
2431                          bool print_err)
2432 {
2433         /* is another pf loaded on this engine? */
2434         if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2435             load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2436                 /* build my FW version dword */
2437                 uint32_t my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2438                         (BCM_5710_FW_MINOR_VERSION << 8) +
2439                         (BCM_5710_FW_REVISION_VERSION << 16) +
2440                         (BCM_5710_FW_ENGINEERING_VERSION << 24);
2441
2442                 /* read loaded FW from chip */
2443                 uint32_t loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2444
2445                 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2446                    loaded_fw, my_fw);
2447
2448                 /* abort nic load if version mismatch */
2449                 if (my_fw != loaded_fw) {
2450                         if (print_err)
2451                                 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2452                                           loaded_fw, my_fw);
2453                         else
2454                                 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2455                                                loaded_fw, my_fw);
2456                         return -EBUSY;
2457                 }
2458         }
2459         return 0;
2460 }
2461
2462 /* returns the "mcp load_code" according to global load_count array */
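/* load_count[path][0] counts every loaded function on the path and
 * load_count[path][1 + port] counts those on the given port: the first
 * function on the path performs COMMON init, the first on the port performs
 * PORT init, and everyone else performs FUNCTION-only init.
 */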
2463 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2464 {
2465         int path = BP_PATH(bp);
2466
2467         DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2468            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2469            bnx2x_load_count[path][2]);
2470         bnx2x_load_count[path][0]++;
2471         bnx2x_load_count[path][1 + port]++;
2472         DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2473            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2474            bnx2x_load_count[path][2]);
2475         if (bnx2x_load_count[path][0] == 1)
2476                 return FW_MSG_CODE_DRV_LOAD_COMMON;
2477         else if (bnx2x_load_count[path][1 + port] == 1)
2478                 return FW_MSG_CODE_DRV_LOAD_PORT;
2479         else
2480                 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2481 }
2482
2483 /* mark PMF if applicable */
2484 static void bnx2x_nic_load_pmf(struct bnx2x *bp, uint32_t load_code)
2485 {
2486         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2487             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2488             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2489                 bp->port.pmf = 1;
2490                 /* We need the barrier to ensure the ordering between the
2491                  * writing to bp->port.pmf here and reading it from the
2492                  * bnx2x_periodic_task().
2493                  */
2494                 mb();
2495         } else {
2496                 bp->port.pmf = 0;
2497         }
2498
2499         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2500 }
2501
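/* Advertise DCC and AFEX support to the MCP via shmem2 when this function
 * performed the COMMON(_CHIP) part of the load, and reset the AFEX default
 * VLAN tag to an invalid value.
 */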
2502 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2503 {
2504         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2505              (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2506             (bp->common.shmem2_base)) {
2507                 if (SHMEM2_HAS(bp, dcc_support))
2508                         SHMEM2_WR(bp, dcc_support,
2509                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2510                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2511                 if (SHMEM2_HAS(bp, afex_driver_support))
2512                         SHMEM2_WR(bp, afex_driver_support,
2513                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2514         }
2515
2516         /* Set AFEX default VLAN tag to an invalid value */
2517         bp->afex_def_vlan_tag = -1;
2518 }
2519
2520 /**
2521  * bnx2x_bz_fp - zero content of the fastpath structure.
2522  *
2523  * @bp:         driver handle
2524  * @index:      fastpath index to be zeroed
2525  *
2526  * Makes sure the contents of the bp->fp[index].napi is kept
2527  * intact.
2528  */
2529 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2530 {
2531         struct bnx2x_fastpath *fp = &bp->fp[index];
2532         int cos;
2533         struct napi_struct orig_napi = fp->napi;
2534         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2535
2536         /* bzero bnx2x_fastpath contents */
2537         if (fp->tpa_info)
2538                 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2539                        sizeof(struct bnx2x_agg_info));
2540         memset(fp, 0, sizeof(*fp));
2541
2542         /* AKAROS_PORT: let the code set up whatever fake napi stuff it needs */
2543         /* Restore the NAPI object as it has been already initialized */
2544         fp->napi = orig_napi;
2545         fp->tpa_info = orig_tpa_info;
2546         fp->bp = bp;
2547         fp->index = index;
2548         if (IS_ETH_FP(fp))
2549                 fp->max_cos = bp->max_cos;
2550         else
2551                 /* Special queues support only one CoS */
2552                 fp->max_cos = 1;
2553
2554         /* Init txdata pointers */
2555         if (IS_FCOE_FP(fp))
2556                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2557         if (IS_ETH_FP(fp))
2558                 for_each_cos_in_tx_queue(fp, cos)
2559                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2560                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
2561
2562         /* set the tpa flag for each queue. The tpa flag determines the queue
2563          * minimal size so it must be set prior to queue memory allocation
2564          */
2565         fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2566                                   (bp->flags & GRO_ENABLE_FLAG &&
2567                                    bnx2x_mtu_allows_gro(bp->dev->maxmtu)));
2568         if (bp->flags & TPA_ENABLE_FLAG)
2569                 fp->mode = TPA_MODE_LRO;
2570         else if (bp->flags & GRO_ENABLE_FLAG)
2571                 fp->mode = TPA_MODE_GRO;
2572
2573         /* We don't want TPA on an FCoE L2 ring */
2574         if (IS_FCOE_FP(fp))
2575                 fp->disable_tpa = 1;
2576 }
2577
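/* Bring up the CNIC (FCoE/iSCSI) side of the device: allocate its memory and
 * fastpath resources, add and enable the CNIC NAPI contexts, initialize the
 * HW/FW function for it, set up the CNIC queues and Rx filters, and finally
 * notify the CNIC driver that it may start.
 */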
2578 int bnx2x_load_cnic(struct bnx2x *bp)
2579 {
2580         int i, rc, port = BP_PORT(bp);
2581
2582         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2583
2584         qlock_init(&bp->cnic_mutex);
2585
2586         if (IS_PF(bp)) {
2587                 rc = bnx2x_alloc_mem_cnic(bp);
2588                 if (rc) {
2589                         BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2590                         LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2591                 }
2592         }
2593
2594         rc = bnx2x_alloc_fp_mem_cnic(bp);
2595         if (rc) {
2596                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2597                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2598         }
2599
2600         /* Update the number of queues with the cnic queues */
2601         rc = bnx2x_set_real_num_queues(bp, 1);
2602         if (rc) {
2603                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2604                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2605         }
2606
2607         /* Add all CNIC NAPI objects */
2608         bnx2x_add_all_napi_cnic(bp);
2609         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2610         bnx2x_napi_enable_cnic(bp);
2611
2612         rc = bnx2x_init_hw_func_cnic(bp);
2613         if (rc)
2614                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2615
2616         bnx2x_nic_init_cnic(bp);
2617
2618         if (IS_PF(bp)) {
2619                 /* Enable Timer scan */
2620                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2621
2622                 /* setup cnic queues */
2623                 for_each_cnic_queue(bp, i) {
2624                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2625                         if (rc) {
2626                                 BNX2X_ERR("Queue setup failed\n");
2627                                 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2628                         }
2629                 }
2630         }
2631
2632         /* Initialize Rx filter. */
2633         bnx2x_set_rx_mode_inner(bp);
2634
2635         /* re-read iscsi info */
2636         bnx2x_get_iscsi_info(bp);
2637         bnx2x_setup_cnic_irq_info(bp);
2638         bnx2x_setup_cnic_info(bp);
2639         bp->cnic_loaded = true;
2640         if (bp->state == BNX2X_STATE_OPEN)
2641                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2642
2643         DP(NETIF_MSG_IFUP, "CNIC-related load finished successfully\n");
2644
2645         return 0;
2646
2647 #ifndef BNX2X_STOP_ON_ERROR
2648 load_error_cnic2:
2649         /* Disable Timer scan */
2650         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2651
2652 load_error_cnic1:
2653         bnx2x_napi_disable_cnic(bp);
2654         /* Update the number of queues without the cnic queues */
2655         if (bnx2x_set_real_num_queues(bp, 0))
2656                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2657 load_error_cnic0:
2658         BNX2X_ERR("CNIC-related load failed\n");
2659         bnx2x_free_fp_mem_cnic(bp);
2660         bnx2x_free_mem_cnic(bp);
2661         return rc;
2662 #endif /* ! BNX2X_STOP_ON_ERROR */
2663 }
2664
2665 /* must be called with rtnl_lock */
2666 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2667 {
2668         int port = BP_PORT(bp);
2669         int i, rc = 0;
2670         uint32_t load_code = 0;
2671
2672         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2673         DP(NETIF_MSG_IFUP,
2674            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2675
2676 #ifdef BNX2X_STOP_ON_ERROR
2677         if (unlikely(bp->panic)) {
2678                 BNX2X_ERR("Can't load NIC when there is panic\n");
2679                 return -EPERM;
2680         }
2681 #endif
2682
2683         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2684
2685         /* zero the structure w/o any lock, before SP handler is initialized */
2686         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2687         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2688                 &bp->last_reported_link.link_report_flags);
2689
2690         if (IS_PF(bp))
2691                 /* must be called before memory allocation and HW init */
2692                 bnx2x_ilt_set_info(bp);
2693
2694         /*
2695          * Zero fastpath structures preserving invariants like napi, which are
2696          * allocated only once, fp index, max_cos, bp pointer.
2697          * Also set fp->disable_tpa and txdata_ptr.
2698          */
2699         DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2700         for_each_queue(bp, i)
2701                 bnx2x_bz_fp(bp, i);
2702         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2703                                   bp->num_cnic_queues) *
2704                                   sizeof(struct bnx2x_fp_txdata));
2705
2706         bp->fcoe_init = false;
2707
2708         /* Set the receive queues buffer size */
2709         bnx2x_set_rx_buf_size(bp);
2710
2711         if (IS_PF(bp)) {
2712                 rc = bnx2x_alloc_mem(bp);
2713                 if (rc) {
2714                         BNX2X_ERR("Unable to allocate bp memory\n");
2715                         return rc;
2716                 }
2717         }
2718
2719         /* needs to be done after alloc mem, since it self-adjusts to the amount
2720          * of memory available for RSS queues
2721          */
2722         rc = bnx2x_alloc_fp_mem(bp);
2723         if (rc) {
2724                 BNX2X_ERR("Unable to allocate memory for fps\n");
2725                 LOAD_ERROR_EXIT(bp, load_error0);
2726         }
2727
2728         /* Allocate memory for FW statistics */
2729         if (bnx2x_alloc_fw_stats_mem(bp))
2730                 LOAD_ERROR_EXIT(bp, load_error0);
2731
2732         /* request pf to initialize status blocks */
2733         if (IS_VF(bp)) {
2734                 rc = bnx2x_vfpf_init(bp);
2735                 if (rc)
2736                         LOAD_ERROR_EXIT(bp, load_error0);
2737         }
2738
2739         /* Since bnx2x_alloc_mem() may update
2740          * bp->num_queues, bnx2x_set_real_num_queues() should always
2741          * come after it. At this stage cnic queues are not counted.
2742          */
2743         rc = bnx2x_set_real_num_queues(bp, 0);
2744         if (rc) {
2745                 BNX2X_ERR("Unable to set real_num_queues\n");
2746                 LOAD_ERROR_EXIT(bp, load_error0);
2747         }
2748
2749         /* configure multi cos mappings in kernel.
2750          * this configuration may be overridden by a multi class queue
2751          * discipline or by a dcbx negotiation result.
2752          */
2753         bnx2x_setup_tc(bp->dev, bp->max_cos);
2754
2755         /* Add all NAPI objects */
2756         bnx2x_add_all_napi(bp);
2757         DP(NETIF_MSG_IFUP, "napi added\n");
2758         bnx2x_napi_enable(bp);
2759
2760         if (IS_PF(bp)) {
2761                 /* set pf load just before approaching the MCP */
2762                 bnx2x_set_pf_load(bp);
2763
2764                 /* if mcp exists send load request and analyze response */
2765                 if (!BP_NOMCP(bp)) {
2766                         /* attempt to load pf */
2767                         rc = bnx2x_nic_load_request(bp, &load_code);
2768                         if (rc)
2769                                 LOAD_ERROR_EXIT(bp, load_error1);
2770
2771                         /* what did mcp say? */
2772                         rc = bnx2x_compare_fw_ver(bp, load_code, true);
2773                         if (rc) {
2774                                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2775                                 LOAD_ERROR_EXIT(bp, load_error2);
2776                         }
2777                 } else {
2778                         load_code = bnx2x_nic_load_no_mcp(bp, port);
2779                 }
2780
2781                 /* mark pmf if applicable */
2782                 bnx2x_nic_load_pmf(bp, load_code);
2783
2784                 /* Init Function state controlling object */
2785                 bnx2x__init_func_obj(bp);
2786
2787                 /* Initialize HW */
2788                 rc = bnx2x_init_hw(bp, load_code);
2789                 if (rc) {
2790                         BNX2X_ERR("HW init failed, aborting\n");
2791                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2792                         LOAD_ERROR_EXIT(bp, load_error2);
2793                 }
2794         }
2795
2796         bnx2x_pre_irq_nic_init(bp);
2797
2798 // XME HERE
2799         /* Connect to IRQs */
2800         rc = bnx2x_setup_irqs(bp);
2801         if (rc) {
2802                 BNX2X_ERR("setup irqs failed\n");
2803                 if (IS_PF(bp))
2804                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2805                 LOAD_ERROR_EXIT(bp, load_error2);
2806         }
2807
2808         /* Init per-function objects */
2809         if (IS_PF(bp)) {
2810                 /* Setup NIC internals and enable interrupts */
2811                 bnx2x_post_irq_nic_init(bp, load_code);
2812
2813                 bnx2x_init_bp_objs(bp);
2814                 bnx2x_iov_nic_init(bp);
2815
2816                 /* Set AFEX default VLAN tag to an invalid value */
2817                 bp->afex_def_vlan_tag = -1;
2818                 bnx2x_nic_load_afex_dcc(bp, load_code);
2819                 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2820                 rc = bnx2x_func_start(bp);
2821                 if (rc) {
2822                         BNX2X_ERR("Function start failed!\n");
2823                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2824
2825                         LOAD_ERROR_EXIT(bp, load_error3);
2826                 }
2827
2828                 /* Send LOAD_DONE command to MCP */
2829                 if (!BP_NOMCP(bp)) {
2830                         load_code = bnx2x_fw_command(bp,
2831                                                      DRV_MSG_CODE_LOAD_DONE, 0);
2832                         if (!load_code) {
2833                                 BNX2X_ERR("MCP response failure, aborting\n");
2834                                 rc = -EBUSY;
2835                                 LOAD_ERROR_EXIT(bp, load_error3);
2836                         }
2837                 }
2838
2839                 /* initialize FW coalescing state machines in RAM */
2840                 bnx2x_update_coalesce(bp);
2841         }
2842
2843         /* setup the leading queue */
2844         rc = bnx2x_setup_leading(bp);
2845         if (rc) {
2846                 BNX2X_ERR("Setup leading failed!\n");
2847                 LOAD_ERROR_EXIT(bp, load_error3);
2848         }
2849
2850         /* set up the rest of the queues */
2851         for_each_nondefault_eth_queue(bp, i) {
2852                 if (IS_PF(bp))
2853                         rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2854                 else /* VF */
2855                         rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2856                 if (rc) {
2857                         BNX2X_ERR("Queue %d setup failed\n", i);
2858                         LOAD_ERROR_EXIT(bp, load_error3);
2859                 }
2860         }
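             /* Porting note: fp[0] (the leading queue) was configured by
              * bnx2x_setup_leading() above; this loop handles the remaining
              * ethernet queues, either directly as a PF or over the VF->PF
              * channel when running as a VF. */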
2861
2862         /* setup rss */
2863         rc = bnx2x_init_rss(bp);
2864         if (rc) {
2865                 BNX2X_ERR("PF RSS init failed\n");
2866                 LOAD_ERROR_EXIT(bp, load_error3);
2867         }
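             /* Porting note: bnx2x_init_rss() is expected to program the RSS
              * indirection table and hash configuration so that receive traffic
              * is spread across the rx queues configured above. */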
2868
2869         /* Now that Clients are configured we are ready to work */
2870         bp->state = BNX2X_STATE_OPEN;
2871
2872         /* Configure a ucast MAC */
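             /* AKAROS_PORT note: programming the unicast MAC is not wired up
              * yet; the Linux code is kept under #if 0 as a reference, and the
              * panic() below makes the rest of the load path unreachable until
              * it is ported. */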
2873 panic("Not implemented");
2874 #if 0 // AKAROS_PORT
2875         if (IS_PF(bp))
2876                 rc = bnx2x_set_eth_mac(bp, true);
2877         else /* vf */
2878                 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2879                                            true);
2880 #endif
2881         if (rc) {
2882                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2883                 LOAD_ERROR_EXIT(bp, load_error3);
2884         }
2885
2886         if (IS_PF(bp) && bp->pending_max) {
2887                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2888                 bp->pending_max = 0;
2889         }
2890
2891         if (bp->port.pmf) {
2892                 rc = bnx2x_initial_phy_init(bp, load_mode);
2893                 if (rc)
2894                         LOAD_ERROR_EXIT(bp, load_error3);
2895         }
2896         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2897
2898         /* Start fast path */
2899
2900         /* Initialize Rx filter. */
2901         bnx2x_set_rx_mode_inner(bp);
2902
2903         /* Start Tx */
2904         switch (load_mode) {
2905         case LOAD_NORMAL:
2906                 /* Tx queues should only be re-enabled */
2907                 netif_tx_wake_all_queues(bp->dev);
2908                 break;
2909
2910         case LOAD_OPEN:
2911                 netif_tx_start_all_queues(bp->dev);
2912                 cmb();
2913                 break;
2914
2915         case LOAD_DIAG:
2916         case LOAD_LOOPBACK_EXT:
2917                 bp->state = BNX2X_STATE_DIAG;
2918                 break;
2919
2920         default:
2921                 break;
2922         }
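             /* Porting note: LOAD_DIAG/LOAD_LOOPBACK_EXT leave the tx queues
              * stopped and park the device in BNX2X_STATE_DIAG, presumably for
              * self-test/loopback use; normal/open loads (re)start all queues. */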
2923
2924         if (bp->port.pmf)
2925                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2926         else
2927                 bnx2x__link_status_update(bp);
2928
2929         /* start the timer */
2930         set_awaiter_rel(&bp->timer, bp->current_interval * 1000); // fudge
2931         set_alarm(&per_cpu_info[0].tchain, &bp->timer);
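             /* Porting note: the upstream driver arms this periodic timer with
              * mod_timer(); here it becomes an alarm on core 0's timer chain,
              * with current_interval scaled by 1000 (the "fudge" noted above),
              * presumably to convert it to the awaiter's time base. */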
2932
2933         if (CNIC_ENABLED(bp))
2934                 bnx2x_load_cnic(bp);
2935
2936         if (IS_PF(bp))
2937                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2938
2939         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2940                 /* mark driver as loaded in shmem2 */
2941                 uint32_t val;
2942                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2943                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2944                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2945                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2946         }
2947
2948         /* Wait for all pending SP commands to complete */
2949         if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2950                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2951                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2952                 return -EBUSY;
2953         }
2954
2955         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2956         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2957                 bnx2x_dcbx_init(bp, false);
2958
2959         DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2960
2961         return 0;
2962
2963 #ifndef BNX2X_STOP_ON_ERROR
2964 load_error3:
2965         if (IS_PF(bp)) {
2966                 bnx2x_int_disable_sync(bp, 1);
2967
2968                 /* Clean queueable objects */
2969                 bnx2x_squeeze_objects(bp);
2970         }
2971
2972         /* Free SKBs, SGEs, TPA pool and driver internals */
2973         bnx2x_free_skbs(bp);
2974         for_each_rx_queue(bp, i)
2975                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2976
2977         /* Release IRQs */
2978         bnx2x_free_irq(bp);
2979 load_error2:
2980         if (IS_PF(bp) && !BP_NOMCP(bp)) {
2981                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2982                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2983         }
2984
2985         bp->port.pmf = 0;
2986 load_error1:
2987         bnx2x_napi_disable(bp);
2988         bnx2x_del_all_napi(bp);
2989
2990         /* clear pf_load status, as it was already set */
2991         if (IS_PF(bp))
2992                 bnx2x_clear_pf_load(bp);
2993 load_error0:
2994         bnx2x_free_fw_stats_mem(bp);
2995         bnx2x_free_fp_mem(bp);
2996         bnx2x_free_mem(bp);
2997
2998         return rc;
2999 #endif /* ! BNX2X_STOP_ON_ERROR */
3000 }
3001
3002 int bnx2x_drain_tx_queues(struct bnx2x *bp)
3003 {
3004         uint8_t rc = 0, cos, i;
3005
3006         /* Wait until tx fastpath tasks complete */
3007         for_each_tx_queue(bp, i) {
3008                 struct bnx2x_fastpath *fp = &bp->fp[i];
3009
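                     /* Porting note: each fastpath may carry several CoS tx
                      * rings; rc is only examined after the inner loop, so a
                      * failure on an earlier ring can be masked by a later
                      * success (as in the upstream driver). */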
3010                 for_each_cos_in_tx_queue(fp, cos)
3011                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
3012                 if (rc)
3013                         return rc;
3014         }
3015         return 0;
3016 }
3017
3018 /* must be called with rtnl_lock */
3019 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3020 {
3021 panic("Not implemented");
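             /* AKAROS_PORT note: the unload path has not been ported yet; the
              * panic() above keeps it from being used, and the upstream Linux
              * teardown sequence is preserved under #if 0 below as a reference. */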
3022 #if 0 // AKAROS_PORT
3023         int i;
3024         bool global = false;
3025
3026         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
3027
3028         /* mark driver as unloaded in shmem2 */
3029         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
3030                 uint32_t val;
3031                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
3032                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
3033                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
3034         }
3035
3036         if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
3037             (bp->state == BNX2X_STATE_CLOSED ||
3038              bp->state == BNX2X_STATE_ERROR)) {
3039                 /* We can get here if the driver has been unloaded
3040                  * during parity error recovery and is either waiting for a
3041                  * leader to complete or for other functions to unload and
3042                  * then ifdown has been issued. In this case we want to
3043                  * unload and let other functions complete a recovery
3044                  * process.
3045                  */
3046                 bp->recovery_state = BNX2X_RECOVERY_DONE;
3047                 bp->is_leader = 0;
3048                 bnx2x_release_leader_lock(bp);
3049                 mb();
3050
3051                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3052                 BNX2X_ERR("Can't unload in closed or error state\n");
3053                 return -EINVAL;
3054         }
3055
3056         /* Nothing to do during unload if the previous bnx2x_nic_load()
3057          * has not completed successfully - all resources are released.
3058          *
3059          * We can get here only after an unsuccessful ndo_* callback, during which
3060          * dev->IFF_UP flag is still on.
3061          */
3062         if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3063                 return 0;
3064
3065         /* It's important to set the bp->state to the value different from
3066          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3067          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3068          */
3069         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3070         mb();
3071
3072         /* indicate to VFs that the PF is going down */
3073         bnx2x_iov_channel_down(bp);
3074
3075         if (CNIC_LOADED(bp))
3076                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3077
3078         /* Stop Tx */
3079         bnx2x_tx_disable(bp);
3080         netdev_reset_tc(bp->dev);
3081
3082         bp->rx_mode = BNX2X_RX_MODE_NONE;
3083
3084         del_timer_sync(&bp->timer);
3085
3086         if (IS_PF(bp)) {
3087                 /* Set ALWAYS_ALIVE bit in shmem */
3088                 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3089                 bnx2x_drv_pulse(bp);
3090                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3091                 bnx2x_save_statistics(bp);
3092         }
3093
3094         /* wait till consumers catch up with producers in all queues */
3095         bnx2x_drain_tx_queues(bp);
3096
3097         /* if VF, indicate to PF that this function is going down (PF will
3098          * delete SP elements and clear initializations).
3099          */
3100         if (IS_VF(bp))
3101                 bnx2x_vfpf_close_vf(bp);
3102         else if (unload_mode != UNLOAD_RECOVERY)
3103                 /* if this is a normal/close unload, we need to clean up the chip */
3104                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3105         else {
3106                 /* Send the UNLOAD_REQUEST to the MCP */
3107                 bnx2x_send_unload_req(bp, unload_mode);
3108
3109                 /* Prevent transactions to host from the functions on the
3110                  * engine that doesn't reset global blocks in case of global
3111                  * attention once global blocks are reset and gates are opened
3112                  * (the engine whose leader will perform the recovery
3113                  * last).
3114                  */
3115                 if (!CHIP_IS_E1x(bp))
3116                         bnx2x_pf_disable(bp);
3117
3118                 /* Disable HW interrupts, NAPI */
3119                 bnx2x_netif_stop(bp, 1);
3120                 /* Delete all NAPI objects */
3121                 bnx2x_del_all_napi(bp);
3122                 if (CNIC_LOADED(bp))
3123                         bnx2x_del_all_napi_cnic(bp);
3124                 /* Release IRQs */
3125                 bnx2x_free_irq(bp);
3126
3127                 /* Report UNLOAD_DONE to MCP */
3128                 bnx2x_send_unload_done(bp, false);
3129         }
3130
3131         /*
3132          * At this stage no more interrupts will arrive so we may safely clean
3133          * the queueable objects here in case they failed to get cleaned so far.
3134          */
3135         if (IS_PF(bp))
3136                 bnx2x_squeeze_objects(bp);
3137
3138         /* There should be no more pending SP commands at this stage */
3139         bp->sp_state = 0;
3140
3141         bp->port.pmf = 0;
3142
3143         /* clear pending work in rtnl task */
3144         bp->sp_rtnl_state = 0;
3145         mb();
3146
3147         /* Free SKBs, SGEs, TPA pool and driver internals */
3148         bnx2x_free_skbs(bp);
3149         if (CNIC_LOADED(bp))
3150                 bnx2x_free_skbs_cnic(bp);
3151         for_each_rx_queue(bp, i)
3152                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3153
3154         bnx2x_free_fp_mem(bp);
3155         if (CNIC_LOADED(bp))
3156                 bnx2x_free_fp_mem_cnic(bp);
3157
3158         if (IS_PF(bp)) {
3159                 if (CNIC_LOADED(bp))
3160                         bnx2x_free_mem_cnic(bp);
3161         }
3162         bnx2x_free_mem(bp);
3163
3164         bp->state = BNX2X_STATE_CLOSED;
3165         bp->cnic_loaded = false;
3166
3167         /* Clear driver version indication in shmem */
3168         if (IS_PF(bp))
3169                 bnx2x_update_mng_version(bp);
3170
3171         /* Check if there are pending parity attentions. If there are - set
3172          * RECOVERY_IN_PROGRESS.
3173          */
3174         if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3175                 bnx2x_set_reset_in_progress(bp);
3176
3177                 /* Set RESET_IS_GLOBAL if needed */
3178                 if (global)
3179                         bnx2x_set_reset_global(bp);
3180         }
3181
3182         /* The last driver must disable a "close the gate" if there is no
3183          * parity attention or "process kill" pending.
3184          */
3185         if (IS_PF(bp) &&
3186             !bnx2x_clear_pf_load(bp) &&
3187             bnx2x_reset_is_done(bp, BP_PATH(bp)))
3188                 bnx2x_disable_close_the_gate(bp);
3189
3190         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3191
3192         return 0;
3193 #endif
3194 }