akaros/kern/drivers/net/bnx2x/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux_compat.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] =
        "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
                   "BCM57710/57711/57711E/"
                   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
                   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues = 16;      // AKAROS_PORT try for the max
module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
                 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

/* Set this for debugging during boot */
static int debug; // = 0xffffffff & ~BNX2X_MSG_DMAE;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

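/* Saved MAC-block register addresses and values (XMAC/EMAC/UMAC/BMAC),
 * recorded while quiescing a previously loaded device so the original
 * register contents can be restored later.
 */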
struct bnx2x_mac_vals {
        uint32_t xmac_addr;
        uint32_t xmac_val;
        uint32_t emac_addr;
        uint32_t emac_val;
        uint32_t umac_addr;
        uint32_t umac_val;
        uint32_t bmac_addr;
        uint32_t bmac_val[2];
};

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711,
        BCM57711E,
        BCM57712,
        BCM57712_MF,
        BCM57712_VF,
        BCM57800,
        BCM57800_MF,
        BCM57800_VF,
        BCM57810,
        BCM57810_MF,
        BCM57810_VF,
        BCM57840_4_10,
        BCM57840_2_20,
        BCM57840_MF,
        BCM57840_VF,
        BCM57811,
        BCM57811_MF,
        BCM57840_O,
        BCM57840_MFO,
        BCM57811_VF
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] = {
        [BCM57710]      = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
        [BCM57711]      = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
        [BCM57711E]     = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
        [BCM57712]      = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
        [BCM57712_MF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
        [BCM57712_VF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
        [BCM57800]      = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
        [BCM57800_MF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
        [BCM57800_VF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
        [BCM57810]      = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
        [BCM57810_MF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
        [BCM57810_VF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
        [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
        [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
        [BCM57840_MF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
        [BCM57840_VF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
        [BCM57811]      = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
        [BCM57811_MF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
        [BCM57840_O]    = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
        [BCM57840_MFO]  = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
        [BCM57811_VF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710         CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711         CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E        CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712         CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF      CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF      CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800         CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF      CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF      CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810         CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF      CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O       CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF      CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10    CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20    CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO     CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF      CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF      CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811         CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF      CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF      CHIP_NUM_57811_VF
#endif

static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
        { 0 }
};

const struct pci_device_id *srch_bnx2x_pci_tbl(struct pci_device *needle)
{
        const struct pci_device_id *tbl = bnx2x_pci_tbl;
        return srch_linux_pci_tbl(tbl, needle);
}

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LINUX_LIST_HEAD(bnx2x_prev_list);

/* Forward declaration */
static struct cnic_eth_dev *bnx2x_cnic_probe(struct ether *dev);
static uint32_t bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

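/* Write a 64-bit DMA address into STORM internal memory as two
 * consecutive 32-bit registers, low dword first.
 */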
static void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       uint32_t addr, dma_addr_t mapping)
{
        REG_WR(bp,  addr, U64_LO(mapping));
        REG_WR(bp,  addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
                                  dma_addr_t mapping, uint16_t abs_fid)
{
        uint32_t addr = XSEM_REG_FAST_MEMORY +
                        XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, uint16_t abs_fid,
                                  uint16_t pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, uint16_t abs_fid,
                                 uint8_t enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
                                 struct event_ring_data *eq_data,
                                uint16_t pfid)
{
        size_t size = sizeof(struct event_ring_data);

        uint32_t addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

        __storm_memset_struct(bp, addr, size, (uint32_t *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, uint16_t eq_prod,
                                 uint16_t pfid)
{
        uint32_t addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
        REG_WR16(bp, addr, eq_prod);
}

/* Indirect register access through the PCI config GRC window.
 * Used only at init; locking is done by the MCP.
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, uint32_t addr, uint32_t val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static uint32_t bnx2x_reg_rd_ind(struct bnx2x *bp, uint32_t addr)
{
        uint32_t val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

#define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE        "dst_addr [none]"

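/* Decode a DMAE command's source and destination types from its opcode
 * and pretty-print it, followed by the raw command words.
 */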
static void bnx2x_dp_dmae(struct bnx2x *bp,
                          struct dmae_command *dmae, int msglvl)
{
        uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;
        int i;

        switch (dmae->opcode & DMAE_COMMAND_DST) {
        case DMAE_CMD_DST_PCI:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        case DMAE_CMD_DST_GRC:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        default:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
                           "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
                           "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        }

        for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
                DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
                   i, *(((uint32_t *)dmae) + i));
}

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        uint32_t cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((uint32_t *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type)
{
        return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                           DMAE_CMD_C_ENABLE);
}

uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode)
{
        return opcode & ~DMAE_CMD_SRC_RESET;
}

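/* Build a DMAE opcode: source/destination types, this function's port
 * and VN, error policy, host endianness and, optionally, a completion
 * of the requested type.
 */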
uint32_t bnx2x_dmae_opcode(struct bnx2x *bp, uint8_t src_type, uint8_t dst_type,
                             bool with_comp, uint8_t comp_type)
{
        uint32_t opcode = 0;

        opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
                   (dst_type << DMAE_COMMAND_DST_SHIFT));

        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
        opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
                   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
        opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
        opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
        if (with_comp)
                opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
        return opcode;
}

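/* Prepare a DMAE command that signals completion by writing
 * DMAE_COMP_VAL to the slowpath wb_comp word.
 */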
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
                                      struct dmae_command *dmae,
                                      uint8_t src_type, uint8_t dst_type)
{
        memset(dmae, 0, sizeof(struct dmae_command));

        /* set the opcode */
        dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
                                         true, DMAE_COMP_PCI);

        /* fill in the completion parameters */
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
                               uint32_t *comp)
{
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;

        bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

        /* Lock the DMAE channel.  Disable BHs to prevent a deadlock,
         * since this code is called both from syscall context and from
         * the ndo_set_rx_mode() flow, which may run in BH context.
         */

        spin_lock(&bp->dmae_lock);

        /* reset completion */
        *comp = 0;

        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        /* wait for completion */
        udelay(5);
        while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

                if (!cnt ||
                    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
                     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
                }
                cnt--;
                udelay(50);
        }
        if (*comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }

unlock:

        spin_unlock(&bp->dmae_lock);

        return rc;
}

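/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr.
 * While DMAE is not yet ready, falls back to writing the slowpath
 * wb_data buffer with indirect (E1) or string register writes.
 */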
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
                      uint32_t dst_addr,
                      uint32_t len32)
{
        int rc;
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                uint32_t *data = bnx2x_sp(bp, wb_data[0]);

                if (CHIP_IS_E1(bp))
                        bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                else
                        bnx2x_init_str_wr(bp, dst_addr, data, len32);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

        /* fill in addresses and len */
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;

        /* issue the command and wait for completion */
        rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}

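/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer.  While DMAE is not yet ready, reads the registers directly
 * (indirectly on E1) instead.
 */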
void bnx2x_read_dmae(struct bnx2x *bp, uint32_t src_addr, uint32_t len32)
{
        int rc;
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                uint32_t *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                if (CHIP_IS_E1(bp))
                        for (i = 0; i < len32; i++)
                                data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                else
                        for (i = 0; i < len32; i++)
                                data[i] = REG_RD(bp, src_addr + i*4);

                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

        /* fill in addresses and len */
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;

        /* issue the command and wait for completion */
        rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
                bnx2x_panic();
#endif
        }
}

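/* A single DMAE command can move at most DMAE_LEN32_WR_MAX dwords, so
 * split longer writes into maximum-sized chunks.
 */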
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                                      uint32_t addr, uint32_t len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
        XSTORM,
        TSTORM,
        CSTORM,
        USTORM,
        MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
                                              enum storms storm,
                                              int entry)
{
        switch (storm) {
        case XSTORM:
                return XSTORM_ASSERT_LIST_OFFSET(entry);
        case TSTORM:
                return TSTORM_ASSERT_LIST_OFFSET(entry);
        case CSTORM:
                return CSTORM_ASSERT_LIST_OFFSET(entry);
        case USTORM:
                return USTORM_ASSERT_LIST_OFFSET(entry);
        case MAX_STORMS:
        default:
                BNX2X_ERR("unknown storm\n");
        }
        return -EINVAL;
}

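/* Scan each STORM's assert list and log every valid entry.  Returns the
 * number of asserts found.
 */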
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, j, rc = 0;
        enum storms storm;
        uint32_t regs[REGS_IN_ENTRY];
        uint32_t bar_storm_intmem[STORMS_NUM] = {
                BAR_XSTRORM_INTMEM,
                BAR_TSTRORM_INTMEM,
                BAR_CSTRORM_INTMEM,
                BAR_USTRORM_INTMEM
        };
        uint32_t storm_assert_list_index[STORMS_NUM] = {
                XSTORM_ASSERT_LIST_INDEX_OFFSET,
                TSTORM_ASSERT_LIST_INDEX_OFFSET,
                CSTORM_ASSERT_LIST_INDEX_OFFSET,
                USTORM_ASSERT_LIST_INDEX_OFFSET
        };
        char *storms_string[STORMS_NUM] = {
                "XSTORM",
                "TSTORM",
                "CSTORM",
                "USTORM"
        };

        for (storm = XSTORM; storm < MAX_STORMS; storm++) {
                last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
                                   storm_assert_list_index[storm]);
                if (last_idx)
                        BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
                                  storms_string[storm], last_idx);

                /* print the asserts */
                for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
                        /* read a single assert entry */
                        for (j = 0; j < REGS_IN_ENTRY; j++)
                                regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
                                          bnx2x_get_assert_list_entry(bp,
                                                                      storm,
                                                                      i) +
                                          sizeof(uint32_t) * j);

                        /* log entry if it contains a valid assert */
                        if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                                BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                          storms_string[storm], i, regs[3],
                                          regs[2], regs[1], regs[0]);
                                rc++;
                        } else {
                                break;
                        }
                }
        }

        BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
                  CHIP_IS_E1(bp) ? "everest1" :
                  CHIP_IS_E1H(bp) ? "everest1h" :
                  CHIP_IS_E2(bp) ? "everest2" : "everest3",
                  BCM_5710_FW_MAJOR_VERSION,
                  BCM_5710_FW_MINOR_VERSION,
                  BCM_5710_FW_REVISION_VERSION);

        return rc;
}

#define MCPR_TRACE_BUFFER_SIZE  (0x800)
#define SCRATCH_BUFFER_SIZE(bp) \
        (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

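/* Dump the MCP's cyclic trace buffer, located just below the trace shmem
 * base: validate the TRCB signature, locate the mark, then print the
 * buffer contents after and before it.
 */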
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
        uint32_t addr, val;
        uint32_t mark, offset;
        __be32 data[9];
        int word;
        uint32_t trace_shmem_base;
        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }
        netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
                (bp->common.bc_ver & 0xff0000) >> 16,
                (bp->common.bc_ver & 0xff00) >> 8,
                (bp->common.bc_ver & 0xff));

        val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
        if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
                BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

        if (BP_PATH(bp) == 0)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

        /* sanity */
        if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
            trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
                                SCRATCH_BUFFER_SIZE(bp)) {
                BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
                          trace_shmem_base);
                return;
        }

        addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

        /* validate TRCB signature */
        mark = REG_RD(bp, addr);
        if (mark != MFW_TRACE_SIGNATURE) {
                BNX2X_ERR("Trace buffer signature is missing.");
                return;
        }

        /* read cyclic buffer pointer */
        addr += 4;
        mark = REG_RD(bp, addr);
        mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
        if (mark >= trace_shmem_base || mark < addr + 4) {
                BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
                return;
        }
        printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

        printk("%s", lvl);

        /* dump buffer after the mark */
        for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = cpu_to_be32(REG_RD(bp,
                                                 offset + 4 * word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }

        /* dump buffer before the mark */
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = cpu_to_be32(REG_RD(bp,
                                                 offset + 4 * word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        uint32_t addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        uint32_t val = REG_RD(bp, addr);

        /* in E1 we must use only PCI configuration space to disable
         * MSI/MSIX capability
         * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
         */
        if (CHIP_IS_E1(bp)) {
                /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use
                 * the mask register to prevent the HC from sending
                 * interrupts after we exit this function
                 */
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_IFDOWN,
           "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        bus_wmb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
        uint32_t val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
                 IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_ATTN_BIT_EN);

        DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

        /* flush all outstanding writes */
        bus_wmb();

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
        if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
                BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
        else
                bnx2x_igu_int_disable(bp);
}

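/* Dump driver and firmware state for post-mortem debugging: default and
 * per-queue status blocks, ring indices and, under BNX2X_STOP_ON_ERROR,
 * the event queue and Rx/Tx ring contents.
 */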
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
        int i;
        uint16_t j;
        struct hc_sp_status_block_data sp_sb_data;
        int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
        uint16_t start = 0, end = 0;
        uint8_t cos;
#endif
        if (IS_PF(bp) && disable_int)
                bnx2x_int_disable(bp);

        bp->stats_state = STATS_STATE_DISABLED;
        bp->eth_stats.unrecoverable_error++;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        if (IS_PF(bp)) {
                struct host_sp_status_block *def_sb = bp->def_status_blk;
                int data_size, cstorm_offset;

                BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
                          bp->def_idx, bp->def_att_idx, bp->attn_state,
                          bp->spq_prod_idx, bp->stats_counter);
                BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
                          def_sb->atten_status_block.attn_bits,
                          def_sb->atten_status_block.attn_bits_ack,
                          def_sb->atten_status_block.status_block_id,
                          def_sb->atten_status_block.attn_bits_index);
                BNX2X_ERR("     def (");
                for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
                        pr_cont("0x%x%s",
                                def_sb->sp_sb.index_values[i],
                                (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

                data_size = sizeof(struct hc_sp_status_block_data) /
                            sizeof(uint32_t);
                cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
                for (i = 0; i < data_size; i++)
                        *((uint32_t *)&sp_sb_data + i) =
                                REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
                                           i * sizeof(uint32_t));

                pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
                        sp_sb_data.igu_sb_id,
                        sp_sb_data.igu_seg_id,
                        sp_sb_data.p_func.pf_id,
                        sp_sb_data.p_func.vnic_id,
                        sp_sb_data.p_func.vf_id,
                        sp_sb_data.p_func.vf_valid,
                        sp_sb_data.state);
        }

        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
                struct hc_status_block_data_e2 sb_data_e2;
                struct hc_status_block_data_e1x sb_data_e1x;
                struct hc_status_block_sm  *hc_sm_p =
                        CHIP_IS_E1x(bp) ?
                        sb_data_e1x.common.state_machine :
                        sb_data_e2.common.state_machine;
                struct hc_index_data *hc_index_p =
                        CHIP_IS_E1x(bp) ?
                        sb_data_e1x.index_data :
                        sb_data_e2.index_data;
                uint8_t data_size, cos;
                uint32_t *sb_data_p;
                struct bnx2x_fp_txdata txdata;

                if (!bp->fp)
                        break;

                if (!fp->rx_cons_sb)
                        continue;

                /* Rx */
                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_hc_idx));

                /* Tx */
                for_each_cos_in_tx_queue(fp, cos) {
                        if (!fp->txdata_ptr[cos])
                                break;

                        txdata = *fp->txdata_ptr[cos];

                        if (!txdata.tx_cons_sb)
                                continue;

                        BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
                                  i, txdata.tx_pkt_prod,
                                  txdata.tx_pkt_cons, txdata.tx_bd_prod,
                                  txdata.tx_bd_cons,
                                  le16_to_cpu(*txdata.tx_cons_sb));
                }

                loop = CHIP_IS_E1x(bp) ?
                        HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

                /* host sb data */

                if (IS_FCOE_FP(fp))
                        continue;

                BNX2X_ERR("     run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
                               fp->sb_running_index[j],
                               (j == HC_SB_MAX_SM - 1) ? ")" : " ");

                BNX2X_ERR("     indexes (");
                for (j = 0; j < loop; j++)
                        pr_cont("0x%x%s",
                               fp->sb_index_values[j],
                               (j == loop - 1) ? ")" : " ");

                /* VF cannot access FW reflection for status block */
                if (IS_VF(bp))
                        continue;

                /* fw sb data */
                data_size = CHIP_IS_E1x(bp) ?
                        sizeof(struct hc_status_block_data_e1x) :
                        sizeof(struct hc_status_block_data_e2);
                data_size /= sizeof(uint32_t);
                sb_data_p = CHIP_IS_E1x(bp) ?
                        (uint32_t *)&sb_data_e1x :
                        (uint32_t *)&sb_data_e2;
                /* copy sb data in here */
                for (j = 0; j < data_size; j++)
                        *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                                CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
                                j * sizeof(uint32_t));

                if (!CHIP_IS_E1x(bp)) {
                        pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e2.common.p_func.pf_id,
                                sb_data_e2.common.p_func.vf_id,
                                sb_data_e2.common.p_func.vf_valid,
                                sb_data_e2.common.p_func.vnic_id,
                                sb_data_e2.common.same_igu_sb_1b,
                                sb_data_e2.common.state);
                } else {
                        pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e1x.common.p_func.pf_id,
                                sb_data_e1x.common.p_func.vf_id,
                                sb_data_e1x.common.p_func.vf_valid,
                                sb_data_e1x.common.p_func.vnic_id,
                                sb_data_e1x.common.same_igu_sb_1b,
                                sb_data_e1x.common.state);
                }

                /* SB_SMs data */
                for (j = 0; j < HC_SB_MAX_SM; j++) {
                        pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
                                j, hc_sm_p[j].__flags,
                                hc_sm_p[j].igu_sb_id,
                                hc_sm_p[j].igu_seg_id,
                                hc_sm_p[j].time_to_expire,
                                hc_sm_p[j].timer_value);
                }

                /* Indices data */
                for (j = 0; j < loop; j++) {
                        pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
                               hc_index_p[j].flags,
                               hc_index_p[j].timeout);
                }
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (IS_PF(bp)) {
                /* event queue */
                BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
                for (i = 0; i < NUM_EQ_DESC; i++) {
                        uint32_t *data = (uint32_t *)&bp->eq_ring[i].message.data;

                        BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
                                  i, bp->eq_ring[i].message.opcode,
                                  bp->eq_ring[i].message.error);
                        BNX2X_ERR("data: %x %x %x\n",
                                  data[0], data[1], data[2]);
                }
        }

        /* Rings */
        /* Rx */
        for_each_valid_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                if (!bp->fp)
                        break;

                if (!fp->rx_cons_sb)
                        continue;

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        uint32_t *rx_bd = (uint32_t *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        uint32_t *rx_sge = (uint32_t *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        uint32_t *cqe = (uint32_t *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_valid_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                if (!bp->fp)
                        break;

                for_each_cos_in_tx_queue(fp, cos) {
                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

                        if (!fp->txdata_ptr[cos])
                                break;

                        if (!txdata->tx_cons_sb)
                                continue;

                        start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
                        end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                struct sw_tx_bd *sw_bd =
                                        &txdata->tx_buf_ring[j];

                                BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
                                          i, cos, j, sw_bd->skb,
                                          sw_bd->first_bd);
                        }

                        start = TX_BD(txdata->tx_bd_cons - 10);
                        end = TX_BD(txdata->tx_bd_cons + 254);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                uint32_t *tx_bd = (uint32_t *)&txdata->tx_desc_ring[j];

                                BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
                                          i, cos, j, tx_bd[0], tx_bd[1],
                                          tx_bd[2], tx_bd[3]);
                        }
                }
        }
#endif
        if (IS_PF(bp)) {
                bnx2x_fw_dump(bp);
                bnx2x_mc_assert(bp);
        }
        BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC           10000   /* 10 milliseconds */
#define FLR_WAIT_INTERVAL       50      /* usec */
#define FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
        int pN;
        uint32_t init_crd;
        uint32_t crd;
        uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
        int pN;
        uint32_t lines_occup;
        uint32_t lines_freed;
};

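/* Poll the PBF credit registers for port pN until all in-flight credits
 * have been freed back (tx buffer drained) or the poll budget expires.
 */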
static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
                                     struct pbf_pN_buf_regs *regs,
                                     uint32_t poll_count)
{
        uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
        uint32_t cur_cnt = poll_count;

        crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
        crd = crd_start = REG_RD(bp, regs->crd);
        init_crd = REG_RD(bp, regs->init_crd);

        DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
        DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

        while ((crd != init_crd) && ((uint32_t)SUB_S32(crd_freed, crd_freed_start) <
               (init_crd - crd_start))) {
                if (cur_cnt--) {
                        udelay(FLR_WAIT_INTERVAL);
                        crd = REG_RD(bp, regs->crd);
                        crd_freed = REG_RD(bp, regs->crd_freed);
                } else {
                        DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
                           regs->pN);
                        DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
                           regs->pN, crd);
                        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
                           regs->pN, crd_freed);
                        break;
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
                                     struct pbf_pN_cmd_regs *regs,
                                     uint32_t poll_count)
{
        uint32_t occup, to_free, freed, freed_start;
        uint32_t cur_cnt = poll_count;

        occup = to_free = REG_RD(bp, regs->lines_occup);
        freed = freed_start = REG_RD(bp, regs->lines_freed);

        DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

        while (occup && ((uint32_t)SUB_S32(freed, freed_start) < to_free)) {
                if (cur_cnt--) {
                        udelay(FLR_WAIT_INTERVAL);
                        occup = REG_RD(bp, regs->lines_occup);
                        freed = REG_RD(bp, regs->lines_freed);
                } else {
                        DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
                           regs->pN);
                        DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
                           regs->pN, occup);
                        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
                           regs->pN, freed);
                        break;
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static uint32_t bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, uint32_t reg,
                                    uint32_t expected, uint32_t poll_count)
{
        uint32_t cur_cnt = poll_count;
        uint32_t val;

        while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
                udelay(FLR_WAIT_INTERVAL);

        return val;
}

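/* Poll a HW usage counter until it reaches zero; logs and returns 1 on
 * timeout.
 */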
1250int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, uint32_t reg,
1251                                    char *msg, uint32_t poll_cnt)
1252{
1253        uint32_t val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1254        if (val != 0) {
1255                BNX2X_ERR("%s usage count=%d\n", msg, val);
1256                return 1;
1257        }
1258        return 0;
1259}
1260
1261/* Common routines with VF FLR cleanup */
1262uint32_t bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1263{
1264        /* adjust polling timeout */
1265        if (CHIP_REV_IS_EMUL(bp))
1266                return FLR_POLL_CNT * 2000;
1267
1268        if (CHIP_REV_IS_FPGA(bp))
1269                return FLR_POLL_CNT * 120;
1270
1271        return FLR_POLL_CNT;
1272}
1273
1274void bnx2x_tx_hw_flushed(struct bnx2x *bp, uint32_t poll_count)
1275{
1276        struct pbf_pN_cmd_regs cmd_regs[] = {
1277                {0, (CHIP_IS_E3B0(bp)) ?
1278                        PBF_REG_TQ_OCCUPANCY_Q0 :
1279                        PBF_REG_P0_TQ_OCCUPANCY,
1280                    (CHIP_IS_E3B0(bp)) ?
1281                        PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1282                        PBF_REG_P0_TQ_LINES_FREED_CNT},
1283                {1, (CHIP_IS_E3B0(bp)) ?
1284                        PBF_REG_TQ_OCCUPANCY_Q1 :
1285                        PBF_REG_P1_TQ_OCCUPANCY,
1286                    (CHIP_IS_E3B0(bp)) ?
1287                        PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1288                        PBF_REG_P1_TQ_LINES_FREED_CNT},
1289                {4, (CHIP_IS_E3B0(bp)) ?
1290                        PBF_REG_TQ_OCCUPANCY_LB_Q :
1291                        PBF_REG_P4_TQ_OCCUPANCY,
1292                    (CHIP_IS_E3B0(bp)) ?
1293                        PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1294                        PBF_REG_P4_TQ_LINES_FREED_CNT}
1295        };
1296
1297        struct pbf_pN_buf_regs buf_regs[] = {
1298                {0, (CHIP_IS_E3B0(bp)) ?
1299                        PBF_REG_INIT_CRD_Q0 :
1300                        PBF_REG_P0_INIT_CRD ,
1301                    (CHIP_IS_E3B0(bp)) ?
1302                        PBF_REG_CREDIT_Q0 :
1303                        PBF_REG_P0_CREDIT,
1304                    (CHIP_IS_E3B0(bp)) ?
1305                        PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1306                        PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1307                {1, (CHIP_IS_E3B0(bp)) ?
1308                        PBF_REG_INIT_CRD_Q1 :
1309                        PBF_REG_P1_INIT_CRD,
1310                    (CHIP_IS_E3B0(bp)) ?
1311                        PBF_REG_CREDIT_Q1 :
1312                        PBF_REG_P1_CREDIT,
1313                    (CHIP_IS_E3B0(bp)) ?
1314                        PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1315                        PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1316                {4, (CHIP_IS_E3B0(bp)) ?
1317                        PBF_REG_INIT_CRD_LB_Q :
1318                        PBF_REG_P4_INIT_CRD,
1319                    (CHIP_IS_E3B0(bp)) ?
1320                        PBF_REG_CREDIT_LB_Q :
1321                        PBF_REG_P4_CREDIT,
1322                    (CHIP_IS_E3B0(bp)) ?
1323                        PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1324                        PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1325        };
1326
1327        int i;
1328
1329        /* Verify the command queues are flushed P0, P1, P4 */
1330        for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1331                bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1332
1333        /* Verify the transmission buffers are flushed P0, P1, P4 */
1334        for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1335                bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1336}
1337
1338#define OP_GEN_PARAM(param) \
1339        (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1340
1341#define OP_GEN_TYPE(type) \
1342        (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1343
1344#define OP_GEN_AGG_VECT(index) \
1345        (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
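
/* Illustrative sketch (not compiled; param/type/vect_idx are
 * placeholders): composing a full operation-generator command from the
 * helpers above.  Each macro masks its argument to its own bit-field,
 * so an out-of-range value is truncated rather than spilling into a
 * neighbouring field.
 */
#if 0
        uint32_t op_gen = OP_GEN_PARAM(param) |
                          OP_GEN_TYPE(type) |
                          OP_GEN_AGG_VECT(vect_idx) |
                          (1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT);
#endif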
1346
1347int bnx2x_send_final_clnup(struct bnx2x *bp, uint8_t clnup_func,
1348                           uint32_t poll_cnt)
1349{
1350        uint32_t op_gen_command = 0;
1351        uint32_t comp_addr = BAR_CSTRORM_INTMEM +
1352                        CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1353        int ret = 0;
1354
1355        if (REG_RD(bp, comp_addr)) {
1356                BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1357                return 1;
1358        }
1359
1360        op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1361        op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1362        op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1363        op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1364
1365        DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1366        REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1367
1368        if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1369                BNX2X_ERR("FW final cleanup did not succeed\n");
1370                DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1371                   (REG_RD(bp, comp_addr)));
1372                bnx2x_panic();
1373                return 1;
1374        }
1375        /* Zero completion for next FLR */
1376        REG_WR(bp, comp_addr, 0);
1377
1378        return ret;
1379}
1380
1381uint8_t bnx2x_is_pcie_pending(struct pci_device *dev)
1382{
1383        uint16_t status;
1384
1385        pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1386        return status & PCI_EXP_DEVSTA_TRPND;
1387}
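
/* Usage sketch (mirrors the PF FLR flow further below, assuming a
 * struct bnx2x *bp in scope): give outstanding non-posted requests
 * time to drain, then warn if the Device Status register still
 * reports transactions pending.
 */
#if 0
        kthread_usleep(1000 * 100);     /* 100 ms */
        if (bnx2x_is_pcie_pending(bp->pdev))
                BNX2X_ERR("PCIE Transactions still pending\n");
#endif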
1388
1389/* PF FLR specific routines
1390 */
1391static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, uint32_t poll_cnt)
1392{
1393        /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1394        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1395                        CFC_REG_NUM_LCIDS_INSIDE_PF,
1396                        "CFC PF usage counter timed out",
1397                        poll_cnt))
1398                return 1;
1399
1400        /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1401        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1402                        DORQ_REG_PF_USAGE_CNT,
1403                        "DQ PF usage counter timed out",
1404                        poll_cnt))
1405                return 1;
1406
1407        /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1408        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1409                        QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1410                        "QM PF usage counter timed out",
1411                        poll_cnt))
1412                return 1;
1413
1414        /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1415        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1416                        TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1417                        "Timers VNIC usage counter timed out",
1418                        poll_cnt))
1419                return 1;
1420        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1421                        TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1422                        "Timers NUM_SCANS usage counter timed out",
1423                        poll_cnt))
1424                return 1;
1425
1426        /* Wait DMAE PF usage counter to zero */
1427        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1428                        dmae_reg_go_c[INIT_DMAE_C(bp)],
1429                        "DMAE command register timed out",
1430                        poll_cnt))
1431                return 1;
1432
1433        return 0;
1434}
1435
1436static void bnx2x_hw_enable_status(struct bnx2x *bp)
1437{
1438        uint32_t val;
1439
1440        val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1441        DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1442
1443        val = REG_RD(bp, PBF_REG_DISABLE_PF);
1444        DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1445
1446        val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1447        DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1448
1449        val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1450        DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1451
1452        val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1453        DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1454
1455        val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1456        DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1457
1458        val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1459        DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1460
1461        val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1462        DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1463           val);
1464}
1465
1466static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1467{
1468        uint32_t poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1469
1470        DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1471
1472        /* Re-enable PF target read access */
1473        REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1474
1475        /* Poll HW usage counters */
1476        DP(BNX2X_MSG_SP, "Polling usage counters\n");
1477        if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1478                return -EBUSY;
1479
1480        /* Zero the igu 'trailing edge' and 'leading edge' */
1481
1482        /* Send the FW cleanup command */
1483        if (bnx2x_send_final_clnup(bp, (uint8_t)BP_FUNC(bp), poll_cnt))
1484                return -EBUSY;
1485
1486        /* ATC cleanup */
1487
1488        /* Verify TX hw is flushed */
1489        bnx2x_tx_hw_flushed(bp, poll_cnt);
1490
1491        /* Wait 100ms (not adjusted according to platform) */
1492        kthread_usleep(1000 * 100);
1493
1494        /* Verify no pending pci transactions */
1495        if (bnx2x_is_pcie_pending(bp->pdev))
1496                BNX2X_ERR("PCIE Transactions still pending\n");
1497
1498        /* Debug */
1499        bnx2x_hw_enable_status(bp);
1500
1501        /*
1502         * Master enable - Due to WB DMAE writes performed before this
1503         * register is re-initialized as part of the regular function init
1504         */
1505        REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1506
1507        return 0;
1508}
1509
1510static void bnx2x_hc_int_enable(struct bnx2x *bp)
1511{
1512        int port = BP_PORT(bp);
1513        uint32_t addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1514        uint32_t val = REG_RD(bp, addr);
1515        bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1516        bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1517        bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1518
1519        if (msix) {
1520                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1521                         HC_CONFIG_0_REG_INT_LINE_EN_0);
1522                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1523                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1524                if (single_msix)
1525                        val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1526        } else if (msi) {
1527                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1528                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1529                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1530                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1531        } else {
1532                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1533                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1534                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
1535                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1536
1537                if (!CHIP_IS_E1(bp)) {
1538                        DP(NETIF_MSG_IFUP,
1539                           "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1540
1541                        REG_WR(bp, addr, val);
1542
1543                        val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1544                }
1545        }
1546
1547        if (CHIP_IS_E1(bp))
1548                REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1549
1550        DP(NETIF_MSG_IFUP,
1551           "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1552           (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1553
1554        REG_WR(bp, addr, val);
1555        /*
1556         * Ensure that HC_CONFIG is written before leading/trailing edge config
1557         */
1558        bus_wmb();
1559        cmb();
1560
1561        if (!CHIP_IS_E1(bp)) {
1562                /* init leading/trailing edge */
1563                if (IS_MF(bp)) {
1564                        val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1565                        if (bp->port.pmf)
1566                                /* enable nig and gpio3 attention */
1567                                val |= 0x1100;
1568                } else
1569                        val = 0xffff;
1570
1571                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1572                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1573        }
1574
1575        /* Make sure that interrupts are indeed enabled from here on */
1576        bus_wmb();
1577}
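
/* Worked example for the leading/trailing edge value computed above:
 * for VN 2 of a multi-function PMF, val = 0xee0f | (1 << (2 + 4)) |
 * 0x1100 = 0xff4f; in single-function mode all attention lines are
 * simply unmasked with 0xffff.
 */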
1578
1579static void bnx2x_igu_int_enable(struct bnx2x *bp)
1580{
1581        panic("Not implemented");
1582#if 0 // AKAROS_PORT
1583        uint32_t val;
1584        bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1585        bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1586        bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1587
1588        val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1589
1590        if (msix) {
1591                val &= ~(IGU_PF_CONF_INT_LINE_EN |
1592                         IGU_PF_CONF_SINGLE_ISR_EN);
1593                val |= (IGU_PF_CONF_MSI_MSIX_EN |
1594                        IGU_PF_CONF_ATTN_BIT_EN);
1595
1596                if (single_msix)
1597                        val |= IGU_PF_CONF_SINGLE_ISR_EN;
1598        } else if (msi) {
1599                val &= ~IGU_PF_CONF_INT_LINE_EN;
1600                val |= (IGU_PF_CONF_MSI_MSIX_EN |
1601                        IGU_PF_CONF_ATTN_BIT_EN |
1602                        IGU_PF_CONF_SINGLE_ISR_EN);
1603        } else {
1604                val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1605                val |= (IGU_PF_CONF_INT_LINE_EN |
1606                        IGU_PF_CONF_ATTN_BIT_EN |
1607                        IGU_PF_CONF_SINGLE_ISR_EN);
1608        }
1609
1610        /* Clean previous status - need to configure igu prior to ack */
1611        if ((!msix) || single_msix) {
1612                REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1613                bnx2x_ack_int(bp);
1614        }
1615
1616        val |= IGU_PF_CONF_FUNC_EN;
1617
1618        DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
1619           val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1620
1621        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1622
1623        if (val & IGU_PF_CONF_INT_LINE_EN)
1624                pci_intx(bp->pdev, true);
1625
1626        cmb();
1627
1628        /* init leading/trailing edge */
1629        if (IS_MF(bp)) {
1630                val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1631                if (bp->port.pmf)
1632                        /* enable nig and gpio3 attention */
1633                        val |= 0x1100;
1634        } else
1635                val = 0xffff;
1636
1637        REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1638        REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1639
1640        /* Make sure that interrupts are indeed enabled from here on */
1641        bus_wmb();
1642#endif
1643}
1644
1645void bnx2x_int_enable(struct bnx2x *bp)
1646{
1647        if (bp->common.int_block == INT_BLOCK_HC)
1648                bnx2x_hc_int_enable(bp);
1649        else
1650                bnx2x_igu_int_enable(bp);
1651}
1652
1653void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1654{
1655        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1656        int i, offset;
1657
1658        if (disable_hw)
1659                /* prevent the HW from sending interrupts */
1660                bnx2x_int_disable(bp);
1661
1662        /* make sure all ISRs are done */
1663        if (msix) {
1664                synchronize_irq(bp->msix_table[0].vector);
1665                offset = 1;
1666                if (CNIC_SUPPORT(bp))
1667                        offset++;
1668                for_each_eth_queue(bp, i)
1669                        synchronize_irq(bp->msix_table[offset++].vector);
1670        } else
1671                synchronize_irq(bp->pdev->irqline);
1672
1673        /* make sure sp_task is not running */
1674        cancel_delayed_work(&bp->sp_task);
1675        cancel_delayed_work(&bp->period_task);
1676        flush_workqueue(bnx2x_wq);
1677}
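
/* Example of the MSI-X vector layout implied by the loop above: with
 * CNIC support and four ethernet queues, msix_table[0] is the slowpath
 * vector, msix_table[1] belongs to CNIC, and msix_table[2..5] are the
 * per-queue fastpath vectors.
 */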
1678
1679/* fast path */
1680
1681/*
1682 * General service functions
1683 */
1684
1685/* Return true if succeeded to acquire the lock */
1686static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, uint32_t resource)
1687{
1688        uint32_t lock_status;
1689        uint32_t resource_bit = (1 << resource);
1690        int func = BP_FUNC(bp);
1691        uint32_t hw_lock_control_reg;
1692
1693        DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1694           "Trying to take a lock on resource %d\n", resource);
1695
1696        /* Validating that the resource is within range */
1697        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1698                DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1699                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1700                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
1701                return false;
1702        }
1703
1704        if (func <= 5)
1705                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1706        else
1707                hw_lock_control_reg =
1708                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1709
1710        /* Try to acquire the lock */
1711        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1712        lock_status = REG_RD(bp, hw_lock_control_reg);
1713        if (lock_status & resource_bit)
1714                return true;
1715
1716        DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1717           "Failed to get a lock on resource %d\n", resource);
1718        return false;
1719}
1720
1721/**
1722 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1723 *
1724 * @bp: driver handle
1725 *
1726 * Returns the recovery leader resource id according to the engine this
1727 * function belongs to. Currently only 2 engines are supported.
1728 */
1729static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1730{
1731        if (BP_PATH(bp))
1732                return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1733        else
1734                return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1735}
1736
1737/**
1738 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1739 *
1740 * @bp: driver handle
1741 *
1742 * Tries to acquire a leader lock for current engine.
1743 */
1744static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1745{
1746        return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1747}
1748
1749static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, uint8_t err);
1750
1751/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1752static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1753{
1754        /* Set the interrupt occurred bit for the sp-task to recognize it
1755         * must ack the interrupt and transition according to the IGU
1756         * state machine.
1757         */
1758        atomic_set(&bp->interrupt_occurred, 1);
1759
1760        /* The sp_task must execute only after this bit
1761         * is set, otherwise we will get out of sync and miss all
1762         * further interrupts. Hence, the barrier.
1763         */
1764        wmb();
1765
1766        /* schedule sp_task to workqueue */
1767        return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1768}
1769
1770void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1771{
1772        struct bnx2x *bp = fp->bp;
1773        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1774        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1775        enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1776        struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1777
1778        DP(BNX2X_MSG_SP,
1779           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1780           fp->index, cid, command, bp->state,
1781           rr_cqe->ramrod_cqe.ramrod_type);
1782
1783        /* If cid is within VF range, replace the slowpath object with the
1784         * one corresponding to this VF
1785         */
1786        if (cid >= BNX2X_FIRST_VF_CID  &&
1787            cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1788                bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1789
1790        switch (command) {
1791        case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1792                DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1793                drv_cmd = BNX2X_Q_CMD_UPDATE;
1794                break;
1795
1796        case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1797                DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1798                drv_cmd = BNX2X_Q_CMD_SETUP;
1799                break;
1800
1801        case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1802                DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1803                drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1804                break;
1805
1806        case (RAMROD_CMD_ID_ETH_HALT):
1807                DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1808                drv_cmd = BNX2X_Q_CMD_HALT;
1809                break;
1810
1811        case (RAMROD_CMD_ID_ETH_TERMINATE):
1812                DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1813                drv_cmd = BNX2X_Q_CMD_TERMINATE;
1814                break;
1815
1816        case (RAMROD_CMD_ID_ETH_EMPTY):
1817                DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1818                drv_cmd = BNX2X_Q_CMD_EMPTY;
1819                break;
1820
1821        case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1822                DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1823                drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1824                break;
1825
1826        default:
1827                BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1828                          command, fp->index);
1829                return;
1830        }
1831
1832        if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1833            q_obj->complete_cmd(bp, q_obj, drv_cmd))
1834                /* q_obj->complete_cmd() failure means that this was
1835                 * an unexpected completion.
1836                 *
1837                 * In this case we don't want to increase the bp->spq_left
1838                 * because apparently we haven't sent this command in the
1839                 * first place.
1840                 */
1841#ifdef BNX2X_STOP_ON_ERROR
1842                bnx2x_panic();
1843#else
1844                return;
1845#endif
1846
1847        cmb();
1848        atomic_inc(&bp->cq_spq_left);
1849        /* push the change in bp->spq_left and towards the memory */
1850        cmb();
1851
1852        DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1853
1854        if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1855            (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1856                /* if Q update ramrod is completed for last Q in AFEX vif set
1857                 * flow, then ACK MCP at the end
1858                 *
1859                 * mark pending ACK to MCP bit.
1860                 * prevent case that both bits are cleared.
1861                 * At the end of load/unload driver checks that
1862                 * sp_state is cleared, and this order prevents
1863                 * races
1864                 */
1865                cmb();
1866                set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1867                wmb();
1868                clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1869                cmb();
1870
1871                /* schedule the sp task as mcp ack is required */
1872                bnx2x_schedule_sp_task(bp);
1873        }
1874
1875        return;
1876}
1877
1878void bnx2x_interrupt(struct hw_trapframe *hw_tf, void *dev_instance)
1879{
1880        panic("Not implemented");
1881#if 0 // AKAROS_PORT
1882        struct bnx2x *bp = netdev_priv(dev_instance);
1883        uint16_t status = bnx2x_ack_int(bp);
1884        uint16_t mask;
1885        int i;
1886        uint8_t cos;
1887
1888        /* Return here if interrupt is shared and it's not for us */
1889        if (unlikely(status == 0)) {
1890                DP(NETIF_MSG_INTR, "not our interrupt!\n");
1891                return;
1892        }
1893        DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1894
1895#ifdef BNX2X_STOP_ON_ERROR
1896        if (unlikely(bp->panic))
1897                return;
1898#endif
1899
1900        for_each_eth_queue(bp, i) {
1901                struct bnx2x_fastpath *fp = &bp->fp[i];
1902
1903                mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1904                if (status & mask) {
1905                        /* Handle Rx or Tx according to SB id */
1906                        for_each_cos_in_tx_queue(fp, cos)
1907                                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1908                        prefetch(&fp->sb_running_index[SM_RX_ID]);
1909                        napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1910                        status &= ~mask;
1911                }
1912        }
1913
1914        if (CNIC_SUPPORT(bp)) {
1915                mask = 0x2;
1916                if (status & (mask | 0x1)) {
1917                        struct cnic_ops *c_ops = NULL;
1918
1919                        rcu_read_lock();
1920                        c_ops = rcu_dereference(bp->cnic_ops);
1921                        if (c_ops && (bp->cnic_eth_dev.drv_state &
1922                                      CNIC_DRV_STATE_HANDLES_IRQ))
1923                                c_ops->cnic_handler(bp->cnic_data, NULL);
1924                        rcu_read_unlock();
1925
1926                        status &= ~mask;
1927                }
1928        }
1929
1930        if (unlikely(status & 0x1)) {
1931
1932                /* schedule sp task to perform default status block work, ack
1933                 * attentions and enable interrupts.
1934                 */
1935                bnx2x_schedule_sp_task(bp);
1936
1937                status &= ~0x1;
1938                if (!status)
1939                        return;
1940        }
1941
1942        if (unlikely(status))
1943                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1944                   status);
1945
1946        return;
1947#endif
1948}
1949
1950/* Link */
1951
1952/*
1953 * General service functions
1954 */
1955
1956int bnx2x_acquire_hw_lock(struct bnx2x *bp, uint32_t resource)
1957{
1958        uint32_t lock_status;
1959        uint32_t resource_bit = (1 << resource);
1960        int func = BP_FUNC(bp);
1961        uint32_t hw_lock_control_reg;
1962        int cnt;
1963
1964        /* Validating that the resource is within range */
1965        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1966                BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1967                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
1968                return -EINVAL;
1969        }
1970
1971        if (func <= 5) {
1972                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1973        } else {
1974                hw_lock_control_reg =
1975                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1976        }
1977
1978        /* Validating that the resource is not already taken */
1979        lock_status = REG_RD(bp, hw_lock_control_reg);
1980        if (lock_status & resource_bit) {
1981                BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
1982                   lock_status, resource_bit);
1983                return -EEXIST;
1984        }
1985
1986        /* Try for 5 seconds, polling every 5 ms */
1987        for (cnt = 0; cnt < 1000; cnt++) {
1988                /* Try to acquire the lock */
1989                REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1990                lock_status = REG_RD(bp, hw_lock_control_reg);
1991                if (lock_status & resource_bit)
1992                        return 0;
1993
1994                kthread_usleep(5000);
1995        }
1996        BNX2X_ERR("Timeout\n");
1997        return -EAGAIN;
1998}
1999
2000int bnx2x_release_leader_lock(struct bnx2x *bp)
2001{
2002        return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2003}
2004
2005int bnx2x_release_hw_lock(struct bnx2x *bp, uint32_t resource)
2006{
2007        uint32_t lock_status;
2008        uint32_t resource_bit = (1 << resource);
2009        int func = BP_FUNC(bp);
2010        uint32_t hw_lock_control_reg;
2011
2012        /* Validating that the resource is within range */
2013        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2014                BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2015                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
2016                return -EINVAL;
2017        }
2018
2019        if (func <= 5) {
2020                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2021        } else {
2022                hw_lock_control_reg =
2023                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2024        }
2025
2026        /* Validating that the resource is currently taken */
2027        lock_status = REG_RD(bp, hw_lock_control_reg);
2028        if (!(lock_status & resource_bit)) {
2029                BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2030                          lock_status, resource_bit);
2031                return -EFAULT;
2032        }
2033
2034        REG_WR(bp, hw_lock_control_reg, resource_bit);
2035        return 0;
2036}
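
/* Usage sketch for the HW lock API above (assuming a struct bnx2x *bp
 * in scope): acquire the resource bit, touch the shared block,
 * release.  This is exactly the pattern the GPIO/SPIO helpers below
 * follow.
 */
#if 0
        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* ... exclusive access to the MISC GPIO registers ... */
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
#endif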
2037
2038int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, uint8_t port)
2039{
2040        /* The GPIO should be swapped if swap register is set and active */
2041        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2042                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2043        int gpio_shift = gpio_num +
2044                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2045        uint32_t gpio_mask = (1 << gpio_shift);
2046        uint32_t gpio_reg;
2047        int value;
2048
2049        if (gpio_num > MISC_REGISTERS_GPIO_3) {
2050                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2051                return -EINVAL;
2052        }
2053
2054        /* read GPIO value */
2055        gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2056
2057        /* get the requested pin value */
2058        if ((gpio_reg & gpio_mask) == gpio_mask)
2059                value = 1;
2060        else
2061                value = 0;
2062
2063        return value;
2064}
2065
2066int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, uint32_t mode,
2067                   uint8_t port)
2068{
2069        /* The GPIO should be swapped if swap register is set and active */
2070        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2071                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2072        int gpio_shift = gpio_num +
2073                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2074        uint32_t gpio_mask = (1 << gpio_shift);
2075        uint32_t gpio_reg;
2076
2077        if (gpio_num > MISC_REGISTERS_GPIO_3) {
2078                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2079                return -EINVAL;
2080        }
2081
2082        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2083        /* read GPIO and mask except the float bits */
2084        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2085
2086        switch (mode) {
2087        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2088                DP(NETIF_MSG_LINK,
2089                   "Set GPIO %d (shift %d) -> output low\n",
2090                   gpio_num, gpio_shift);
2091                /* clear FLOAT and set CLR */
2092                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2093                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2094                break;
2095
2096        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2097                DP(NETIF_MSG_LINK,
2098                   "Set GPIO %d (shift %d) -> output high\n",
2099                   gpio_num, gpio_shift);
2100                /* clear FLOAT and set SET */
2101                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2102                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2103                break;
2104
2105        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2106                DP(NETIF_MSG_LINK,
2107                   "Set GPIO %d (shift %d) -> input\n",
2108                   gpio_num, gpio_shift);
2109                /* set FLOAT */
2110                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2111                break;
2112
2113        default:
2114                break;
2115        }
2116
2117        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2118        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2119
2120        return 0;
2121}
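
/* Example call (illustrative): drive GPIO 1 of the caller's port low,
 * i.e. clear its FLOAT bit and set its CLR bit under the GPIO HW lock.
 */
#if 0
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
                       MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
#endif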
2122
2123int bnx2x_set_mult_gpio(struct bnx2x *bp, uint8_t pins, uint32_t mode)
2124{
2125        uint32_t gpio_reg = 0;
2126        int rc = 0;
2127
2128        /* Any port swapping should be handled by caller. */
2129
2130        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2131        /* read GPIO and mask except the float bits */
2132        gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2133        gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2134        gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2135        gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2136
2137        switch (mode) {
2138        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2139                DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2140                /* set CLR */
2141                gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2142                break;
2143
2144        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2145                DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2146                /* set SET */
2147                gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2148                break;
2149
2150        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2151                DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2152                /* set FLOAT */
2153                gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2154                break;
2155
2156        default:
2157                BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2158                rc = -EINVAL;
2159                break;
2160        }
2161
2162        if (rc == 0)
2163                REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2164
2165        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2166
2167        return rc;
2168}
2169
2170int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, uint32_t mode,
2171                       uint8_t port)
2172{
2173        /* The GPIO should be swapped if swap register is set and active */
2174        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2175                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2176        int gpio_shift = gpio_num +
2177                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2178        uint32_t gpio_mask = (1 << gpio_shift);
2179        uint32_t gpio_reg;
2180
2181        if (gpio_num > MISC_REGISTERS_GPIO_3) {
2182                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2183                return -EINVAL;
2184        }
2185
2186        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2187        /* read GPIO int */
2188        gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2189
2190        switch (mode) {
2191        case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2192                DP(NETIF_MSG_LINK,
2193                   "Clear GPIO INT %d (shift %d) -> output low\n",
2194                   gpio_num, gpio_shift);
2195                /* clear SET and set CLR */
2196                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2197                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2198                break;
2199
2200        case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2201                DP(NETIF_MSG_LINK,
2202                   "Set GPIO INT %d (shift %d) -> output high\n",
2203                   gpio_num, gpio_shift);
2204                /* clear CLR and set SET */
2205                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2206                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2207                break;
2208
2209        default:
2210                break;
2211        }
2212
2213        REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2214        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2215
2216        return 0;
2217}
2218
2219static int bnx2x_set_spio(struct bnx2x *bp, int spio, uint32_t mode)
2220{
2221        uint32_t spio_reg;
2222
2223        /* Only 2 SPIOs are configurable */
2224        if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2225                BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2226                return -EINVAL;
2227        }
2228
2229        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2230        /* read SPIO and mask except the float bits */
2231        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2232
2233        switch (mode) {
2234        case MISC_SPIO_OUTPUT_LOW:
2235                DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2236                /* clear FLOAT and set CLR */
2237                spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2238                spio_reg |=  (spio << MISC_SPIO_CLR_POS);
2239                break;
2240
2241        case MISC_SPIO_OUTPUT_HIGH:
2242                DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2243                /* clear FLOAT and set SET */
2244                spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2245                spio_reg |=  (spio << MISC_SPIO_SET_POS);
2246                break;
2247
2248        case MISC_SPIO_INPUT_HI_Z:
2249                DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2250                /* set FLOAT */
2251                spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2252                break;
2253
2254        default:
2255                break;
2256        }
2257
2258        REG_WR(bp, MISC_REG_SPIO, spio_reg);
2259        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2260
2261        return 0;
2262}
2263
2264void bnx2x_calc_fc_adv(struct bnx2x *bp)
2265{
2266        uint8_t cfg_idx = bnx2x_get_link_cfg_idx(bp);
2267        switch (bp->link_vars.ieee_fc &
2268                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2269        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2270                bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2271                                                   ADVERTISED_Pause);
2272                break;
2273
2274        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2275                bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2276                                                  ADVERTISED_Pause);
2277                break;
2278
2279        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2280                bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2281                break;
2282
2283        default:
2284                bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2285                                                   ADVERTISED_Pause);
2286                break;
2287        }
2288}
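
/* Summary of the mapping above (IEEE 802.3 pause resolution): BOTH
 * advertises Pause and Asym_Pause, ASYMMETRIC advertises Asym_Pause
 * only, and NONE (or anything unrecognized) advertises neither.
 */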
2289
2290static void bnx2x_set_requested_fc(struct bnx2x *bp)
2291{
2292        /* Initialize link parameters structure variables.
2293         * It is recommended to turn off RX FC for jumbo frames
2294         * for better performance.
2295         */
2296        if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2297                bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2298        else
2299                bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2300}
2301
2302static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2303{
2304        uint32_t pause_enabled = 0;
2305
2306        if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2307                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2308                        pause_enabled = 1;
2309
2310                REG_WR(bp, BAR_USTRORM_INTMEM +
2311                           USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2312                       pause_enabled);
2313        }
2314
2315        DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2316           pause_enabled ? "enabled" : "disabled");
2317}
2318
2319int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2320{
2321        int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2322        uint16_t req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2323
2324        if (!BP_NOMCP(bp)) {
2325                bnx2x_set_requested_fc(bp);
2326                bnx2x_acquire_phy_lock(bp);
2327
2328                if (load_mode == LOAD_DIAG) {
2329                        struct link_params *lp = &bp->link_params;
2330                        lp->loopback_mode = LOOPBACK_XGXS;
2331                        /* do PHY loopback at 10G speed, if possible */
2332                        if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
2333                                if (lp->speed_cap_mask[cfx_idx] &
2334                                    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2335                                        lp->req_line_speed[cfx_idx] =
2336                                        SPEED_10000;
2337                                else
2338                                        lp->req_line_speed[cfx_idx] =
2339                                        SPEED_1000;
2340                        }
2341                }
2342
2343                if (load_mode == LOAD_LOOPBACK_EXT) {
2344                        struct link_params *lp = &bp->link_params;
2345                        lp->loopback_mode = LOOPBACK_EXT;
2346                }
2347
2348                rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2349
2350                bnx2x_release_phy_lock(bp);
2351
2352                bnx2x_init_dropless_fc(bp);
2353
2354                bnx2x_calc_fc_adv(bp);
2355
2356                if (bp->link_vars.link_up) {
2357                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2358                        bnx2x_link_report(bp);
2359                }
2360                queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2361                bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2362                return rc;
2363        }
2364        BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2365        return -EINVAL;
2366}
2367
2368void bnx2x_link_set(struct bnx2x *bp)
2369{
2370        if (!BP_NOMCP(bp)) {
2371                bnx2x_acquire_phy_lock(bp);
2372                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2373                bnx2x_release_phy_lock(bp);
2374
2375                bnx2x_init_dropless_fc(bp);
2376
2377                bnx2x_calc_fc_adv(bp);
2378        } else
2379                BNX2X_ERR("Bootcode is missing - can not set link\n");
2380}
2381
2382static void bnx2x__link_reset(struct bnx2x *bp)
2383{
2384        if (!BP_NOMCP(bp)) {
2385                bnx2x_acquire_phy_lock(bp);
2386                bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2387                bnx2x_release_phy_lock(bp);
2388        } else
2389                BNX2X_ERR("Bootcode is missing - can not reset link\n");
2390}
2391
2392void bnx2x_force_link_reset(struct bnx2x *bp)
2393{
2394        bnx2x_acquire_phy_lock(bp);
2395        bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2396        bnx2x_release_phy_lock(bp);
2397}
2398
2399uint8_t bnx2x_link_test(struct bnx2x *bp, uint8_t is_serdes)
2400{
2401        uint8_t rc = 0;
2402
2403        if (!BP_NOMCP(bp)) {
2404                bnx2x_acquire_phy_lock(bp);
2405                rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2406                                     is_serdes);
2407                bnx2x_release_phy_lock(bp);
2408        } else
2409                BNX2X_ERR("Bootcode is missing - can not test link\n");
2410
2411        return rc;
2412}
2413
2414/* Calculates the per-VN min rates, needed for further normalizing of
2415   the min_rates.
2416   Rules:
2417     a hidden VN gets a min rate of 0;
2418     a VN whose configured min rate is 0 gets DEF_MIN_RATE;
2419     if ETS is enabled, or all the configured min rates are 0, the
2420     fairness algorithm is deactivated (there is nothing to
2421     arbitrate in the latter case).
2422 */
2423static void bnx2x_calc_vn_min(struct bnx2x *bp,
2424                                      struct cmng_init_input *input)
2425{
2426        int all_zero = 1;
2427        int vn;
2428
2429        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2430                uint32_t vn_cfg = bp->mf_config[vn];
2431                uint32_t vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2432                                   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2433
2434                /* Skip hidden vns */
2435                if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2436                        vn_min_rate = 0;
2437                /* If min rate is zero - set it to 1 */
2438                else if (!vn_min_rate)
2439                        vn_min_rate = DEF_MIN_RATE;
2440                else
2441                        all_zero = 0;
2442
2443                input->vnic_min_rate[vn] = vn_min_rate;
2444        }
2445
2446        /* if ETS or all min rates are zeros - disable fairness */
2447        if (BNX2X_IS_ETS_ENABLED(bp)) {
2448                input->flags.cmng_enables &=
2449                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2450                DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2451        } else if (all_zero) {
2452                input->flags.cmng_enables &=
2453                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2454                DP(NETIF_MSG_IFUP,
2455                   "All MIN values are zeroes fairness will be disabled\n");
2456        } else
2457                input->flags.cmng_enables |=
2458                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2459}
2460
2461static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2462                                    struct cmng_init_input *input)
2463{
2464        uint16_t vn_max_rate;
2465        uint32_t vn_cfg = bp->mf_config[vn];
2466
2467        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2468                vn_max_rate = 0;
2469        else {
2470                uint32_t maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2471
2472                if (IS_MF_SI(bp)) {
2473                        /* maxCfg is in percent of link speed */
2474                        vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2475                } else /* SD modes */
2476                        /* maxCfg is absolute in 100Mb units */
2477                        vn_max_rate = maxCfg * 100;
2478        }
2479
2480        DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2481
2482        input->vnic_max_rate[vn] = vn_max_rate;
2483}
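
/* Worked example for the two unit schemes above: with maxCfg == 30,
 * an SI-mode VN at a 10000 Mbps line speed gets 10000 * 30 / 100 =
 * 3000 Mbps, while an SD-mode VN gets the absolute 30 * 100 Mb =
 * 3000 Mbps regardless of the current line speed.
 */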
2484
2485static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2486{
2487        if (CHIP_REV_IS_SLOW(bp))
2488                return CMNG_FNS_NONE;
2489        if (IS_MF(bp))
2490                return CMNG_FNS_MINMAX;
2491
2492        return CMNG_FNS_NONE;
2493}
2494
2495void bnx2x_read_mf_cfg(struct bnx2x *bp)
2496{
2497        int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2498
2499        if (BP_NOMCP(bp))
2500                return; /* what should the default value be in this case? */
2501
2502        /* For 2 port configuration the absolute function number formula
2503         * is:
2504         *      abs_func = 2 * vn + BP_PORT + BP_PATH
2505         *
2506         *      and there are 4 functions per port
2507         *
2508         * For 4 port configuration it is
2509         *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2510         *
2511         *      and there are 2 functions per port
2512         */
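        /* Worked example: in a 2-port configuration (n == 1), vn 1 on
         * port 0 of path 1 is abs_func = 2*1 + 0 + 1 = 3; in a 4-port
         * configuration (n == 2) it is abs_func = 4*1 + 2*0 + 1 = 5.
         */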
2513        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2514                int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2515
2516                if (func >= E1H_FUNC_MAX)
2517                        break;
2518
2519                bp->mf_config[vn] =
2520                        MF_CFG_RD(bp, func_mf_config[func].config);
2521        }
2522        if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2523                DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2524                bp->flags |= MF_FUNC_DIS;
2525        } else {
2526                DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2527                bp->flags &= ~MF_FUNC_DIS;
2528        }
2529}
2530
2531static void bnx2x_cmng_fns_init(struct bnx2x *bp, uint8_t read_cfg,
2532                                uint8_t cmng_type)
2533{
2534        struct cmng_init_input input;
2535        memset(&input, 0, sizeof(struct cmng_init_input));
2536
2537        input.port_rate = bp->link_vars.line_speed;
2538
2539        if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2540                int vn;
2541
2542                /* read mf conf from shmem */
2543                if (read_cfg)
2544                        bnx2x_read_mf_cfg(bp);
2545
2546                /* vn_weight_sum and enable fairness if not 0 */
2547                bnx2x_calc_vn_min(bp, &input);
2548
2549                /* calculate and set min-max rate for each vn */
2550                if (bp->port.pmf)
2551                        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2552                                bnx2x_calc_vn_max(bp, vn, &input);
2553
2554                /* always enable rate shaping and fairness */
2555                input.flags.cmng_enables |=
2556                                        CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2557
2558                bnx2x_init_cmng(&input, &bp->cmng);
2559                return;
2560        }
2561
2562        /* rate shaping and fairness are disabled */
2563        DP(NETIF_MSG_IFUP,
2564           "rate shaping and fairness are disabled\n");
2565}
2566
2567static void storm_memset_cmng(struct bnx2x *bp,
2568                              struct cmng_init *cmng,
2569                              uint8_t port)
2570{
2571        int vn;
2572        size_t size = sizeof(struct cmng_struct_per_port);
2573
2574        uint32_t addr = BAR_XSTRORM_INTMEM +
2575                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2576
2577        __storm_memset_struct(bp, addr, size, (uint32_t *)&cmng->port);
2578
2579        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2580                int func = func_by_vn(bp, vn);
2581
2582                addr = BAR_XSTRORM_INTMEM +
2583                       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2584                size = sizeof(struct rate_shaping_vars_per_vn);
2585                __storm_memset_struct(bp, addr, size,
2586                                      (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
2587
2588                addr = BAR_XSTRORM_INTMEM +
2589                       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2590                size = sizeof(struct fairness_vars_per_vn);
2591                __storm_memset_struct(bp, addr, size,
2592                                      (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
2593        }
2594}
2595
2596/* init cmng mode in HW according to local configuration */
2597void bnx2x_set_local_cmng(struct bnx2x *bp)
2598{
2599        int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2600
2601        if (cmng_fns != CMNG_FNS_NONE) {
2602                bnx2x_cmng_fns_init(bp, false, cmng_fns);
2603                storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2604        } else {
2605                /* rate shaping and fairness are disabled */
2606                DP(NETIF_MSG_IFUP,
2607                   "single function mode without fairness\n");
2608        }
2609}
2610
2611/* This function is called upon link interrupt */
2612static void bnx2x_link_attn(struct bnx2x *bp)
2613{
2614        /* Make sure that we are synced with the current statistics */
2615        bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2616
2617        bnx2x_link_update(&bp->link_params, &bp->link_vars);
2618
2619        bnx2x_init_dropless_fc(bp);
2620
2621        if (bp->link_vars.link_up) {
2622
2623                if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2624                        struct host_port_stats *pstats;
2625
2626                        pstats = bnx2x_sp(bp, port_stats);
2627                        /* reset old mac stats */
2628                        memset(&(pstats->mac_stx[0]), 0,
2629                               sizeof(struct mac_stx));
2630                }
2631                if (bp->state == BNX2X_STATE_OPEN)
2632                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2633        }
2634
2635        if (bp->link_vars.link_up && bp->link_vars.line_speed)
2636                bnx2x_set_local_cmng(bp);
2637
2638        __bnx2x_link_report(bp);
2639
2640        if (IS_MF(bp))
2641                bnx2x_link_sync_notify(bp);
2642}
2643
2644void bnx2x__link_status_update(struct bnx2x *bp)
2645{
2646        panic("Not implemented");
2647#if 0 // AKAROS_PORT
2648        if (bp->state != BNX2X_STATE_OPEN)
2649                return;
2650
2651        /* read updated dcb configuration */
2652        if (IS_PF(bp)) {
2653                bnx2x_dcbx_pmf_update(bp);
2654                bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2655                if (bp->link_vars.link_up)
2656                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2657                else
2658                        bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2659                /* indicate link status */
2660                bnx2x_link_report(bp);
2661
2662        } else { /* VF */
2663                bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2664                                          SUPPORTED_10baseT_Full |
2665                                          SUPPORTED_100baseT_Half |
2666                                          SUPPORTED_100baseT_Full |
2667                                          SUPPORTED_1000baseT_Full |
2668                                          SUPPORTED_2500baseX_Full |
2669                                          SUPPORTED_10000baseT_Full |
2670                                          SUPPORTED_TP |
2671                                          SUPPORTED_FIBRE |
2672                                          SUPPORTED_Autoneg |
2673                                          SUPPORTED_Pause |
2674                                          SUPPORTED_Asym_Pause);
2675                bp->port.advertising[0] = bp->port.supported[0];
2676
2677                bp->link_params.bp = bp;
2678                bp->link_params.port = BP_PORT(bp);
2679                bp->link_params.req_duplex[0] = DUPLEX_FULL;
2680                bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2681                bp->link_params.req_line_speed[0] = SPEED_10000;
2682                bp->link_params.speed_cap_mask[0] = 0x7f0000;
2683                bp->link_params.switch_cfg = SWITCH_CFG_10G;
2684                bp->link_vars.mac_type = MAC_TYPE_BMAC;
2685                bp->link_vars.line_speed = SPEED_10000;
2686                bp->link_vars.link_status =
2687                        (LINK_STATUS_LINK_UP |
2688                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2689                bp->link_vars.link_up = 1;
2690                bp->link_vars.duplex = DUPLEX_FULL;
2691                bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2692                __bnx2x_link_report(bp);
2693
2694                bnx2x_sample_bulletin(bp);
2695
2696                /* If the bulletin board did not have an update for link
2697                 * status, __bnx2x_link_report will report the current
2698                 * status, but it will NOT duplicate a report that was
2699                 * already issued while sampling the bulletin board.
2700                 */
2701                bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2702        }
2703#endif
2704}
2705
2706static int bnx2x_afex_func_update(struct bnx2x *bp, uint16_t vifid,
2707                                  uint16_t vlan_val, uint8_t allowed_prio)
2708{
2709        struct bnx2x_func_state_params func_params = {NULL};
2710        struct bnx2x_func_afex_update_params *f_update_params =
2711                &func_params.params.afex_update;
2712
2713        func_params.f_obj = &bp->func_obj;
2714        func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2715
2716        /* no need to wait for RAMROD completion, so don't
2717         * set RAMROD_COMP_WAIT flag
2718         */
2719
2720        f_update_params->vif_id = vifid;
2721        f_update_params->afex_default_vlan = vlan_val;
2722        f_update_params->allowed_priorities = allowed_prio;
2723
2724        /* if ramrod can not be sent, response to MCP immediately */
2725        if (bnx2x_func_state_change(bp, &func_params) < 0)
2726                bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2727
2728        return 0;
2729}
2730
2731static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, uint8_t cmd_type,
2732                                          uint16_t vif_index,
2733                                          uint8_t func_bit_map)
2734{
2735        struct bnx2x_func_state_params func_params = {NULL};
2736        struct bnx2x_func_afex_viflists_params *update_params =
2737                &func_params.params.afex_viflists;
2738        int rc;
2739        uint32_t drv_msg_code;
2740
2741        /* validate only LIST_SET and LIST_GET are received from switch */
2742        if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2743                BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2744                          cmd_type);
2745
2746        func_params.f_obj = &bp->func_obj;
2747        func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2748
2749        /* set parameters according to cmd_type */
2750        update_params->afex_vif_list_command = cmd_type;
2751        update_params->vif_list_index = vif_index;
2752        update_params->func_bit_map =
2753                (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2754        update_params->func_to_clear = 0;
2755        drv_msg_code =
2756                (cmd_type == VIF_LIST_RULE_GET) ?
2757                DRV_MSG_CODE_AFEX_LISTGET_ACK :
2758                DRV_MSG_CODE_AFEX_LISTSET_ACK;
2759
2760        /* if ramrod can not be sent, respond to MCP immediately for
2761         * SET and GET requests (other are not triggered from MCP)
2762         */
2763        rc = bnx2x_func_state_change(bp, &func_params);
2764        if (rc < 0)
2765                bnx2x_fw_command(bp, drv_msg_code, 0);
2766
2767        return 0;
2768}
2769
2770static void bnx2x_handle_afex_cmd(struct bnx2x *bp, uint32_t cmd)
2771{
2772panic("Not implemented");
2773#if 0 // AKAROS_PORT
2774        struct afex_stats afex_stats;
2775        uint32_t func = BP_ABS_FUNC(bp);
2776        uint32_t mf_config;
2777        uint16_t vlan_val;
2778        uint32_t vlan_prio;
2779        uint16_t vif_id;
2780        uint8_t allowed_prio;
2781        uint8_t vlan_mode;
2782        uint32_t addr_to_write, vifid, addrs, stats_type, i;
2783
2784        if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2785                vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2786                DP(BNX2X_MSG_MCP,
2787                   "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2788                bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2789        }
2790
2791        if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2792                vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2793                addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2794                DP(BNX2X_MSG_MCP,
2795                   "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2796                   vifid, addrs);
2797                bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2798                                               addrs);
2799        }
2800
2801        if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2802                addr_to_write = SHMEM2_RD(bp,
2803                        afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2804                stats_type = SHMEM2_RD(bp,
2805                        afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2806
2807                DP(BNX2X_MSG_MCP,
2808                   "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2809                   addr_to_write);
2810
2811                bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2812
2813                /* write response to scratchpad, for MCP */
2814                for (i = 0; i < (sizeof(struct afex_stats)/sizeof(uint32_t)); i++)
2815                        REG_WR(bp, addr_to_write + i*sizeof(uint32_t),
2816                               *(((uint32_t *)(&afex_stats))+i));
2817
2818                /* send ack message to MCP */
2819                bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2820        }
2821
2822        if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2823                mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2824                bp->mf_config[BP_VN(bp)] = mf_config;
2825                DP(BNX2X_MSG_MCP,
2826                   "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2827                   mf_config);
2828
2829                /* if VIF_SET is "enabled" */
2830                if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2831                        /* set rate limit directly to internal RAM */
2832                        struct cmng_init_input cmng_input;
2833                        struct rate_shaping_vars_per_vn m_rs_vn;
2834                        size_t size = sizeof(struct rate_shaping_vars_per_vn);
2835                        uint32_t addr = BAR_XSTRORM_INTMEM +
2836                            XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2837
2838                        bp->mf_config[BP_VN(bp)] = mf_config;
2839
2840                        bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2841                        m_rs_vn.vn_counter.rate =
2842                                cmng_input.vnic_max_rate[BP_VN(bp)];
2843                        m_rs_vn.vn_counter.quota =
2844                                (m_rs_vn.vn_counter.rate *
2845                                 RS_PERIODIC_TIMEOUT_USEC) / 8;
2846
2847                        __storm_memset_struct(bp, addr, size,
2848                                              (uint32_t *)&m_rs_vn);
2849
2850                        /* read relevant values from mf_cfg struct in shmem */
2851                        vif_id =
2852                                (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2853                                 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2854                                FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2855                        vlan_val =
2856                                (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2857                                 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2858                                FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2859                        vlan_prio = (mf_config &
2860                                     FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2861                                    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2862                        vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2863                        vlan_mode =
2864                                (MF_CFG_RD(bp,
2865                                           func_mf_config[func].afex_config) &
2866                                 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2867                                FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2868                        allowed_prio =
2869                                (MF_CFG_RD(bp,
2870                                           func_mf_config[func].afex_config) &
2871                                 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2872                                FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2873
2874                        /* send ramrod to FW, return in case of failure */
2875                        if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2876                                                   allowed_prio))
2877                                return;
2878
2879                        bp->afex_def_vlan_tag = vlan_val;
2880                        bp->afex_vlan_mode = vlan_mode;
2881                } else {
2882                        /* notify link down because the function is disabled */
2883                        bnx2x_link_report(bp);
2884
2885                        /* send INVALID VIF ramrod to FW */
2886                        bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2887
2888                        /* Reset the default afex VLAN */
2889                        bp->afex_def_vlan_tag = -1;
2890                }
2891        }
2892#endif
2893}
2894
2895static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2896{
2897        struct bnx2x_func_switch_update_params *switch_update_params;
2898        struct bnx2x_func_state_params func_params;
2899
2900        memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2901        switch_update_params = &func_params.params.switch_update;
2902        func_params.f_obj = &bp->func_obj;
2903        func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2904
2905        if (IS_MF_UFP(bp)) {
2906                int func = BP_ABS_FUNC(bp);
2907                uint32_t val;
2908
2909                /* Re-learn the S-tag from shmem */
2910                val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2911                                FUNC_MF_CFG_E1HOV_TAG_MASK;
2912                if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2913                        bp->mf_ov = val;
2914                } else {
2915                        BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2916                        goto fail;
2917                }
2918
2919                /* Configure new S-tag in LLH */
2920                REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2921                       bp->mf_ov);
2922
2923                /* Send Ramrod to update FW of change */
2924                __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2925                          &switch_update_params->changes);
2926                switch_update_params->vlan = bp->mf_ov;
2927
2928                if (bnx2x_func_state_change(bp, &func_params) < 0) {
2929                        BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
2930                                  bp->mf_ov);
2931                        goto fail;
2932                }
2933
2934                DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
2935
2936                bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2937
2938                return;
2939        }
2940
2941        /* not supported by SW yet */
2942fail:
2943        bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2944}
2945
2946static void bnx2x_pmf_update(struct bnx2x *bp)
2947{
2948        int port = BP_PORT(bp);
2949        uint32_t val;
2950
2951        bp->port.pmf = 1;
2952        DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2953
2954        /*
2955         * We need the mb() to ensure the ordering between the writing to
2956         * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2957         */
2958        mb();
2959
2960        /* queue a periodic task */
2961        queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2962
2963        bnx2x_dcbx_pmf_update(bp);
2964
2965        /* enable nig attention */
2966        val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2967        if (bp->common.int_block == INT_BLOCK_HC) {
2968                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2969                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2970        } else if (!CHIP_IS_E1x(bp)) {
2971                REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2972                REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2973        }
2974
2975        bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2976}
2977
2978/* end of Link */
2979
2980/* slow path */
2981
2982/*
2983 * General service functions
2984 */
2985
2986/* send the MCP a request, block until there is a reply */
2987uint32_t bnx2x_fw_command(struct bnx2x *bp, uint32_t command, uint32_t param)
2988{
2989        int mb_idx = BP_FW_MB_IDX(bp);
2990        uint32_t seq;
2991        uint32_t rc = 0;
2992        uint32_t cnt = 1;
2993        uint8_t delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2994
2995        qlock(&bp->fw_mb_mutex);
2996        seq = ++bp->fw_seq;
2997        SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2998        SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2999
3000        DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3001                        (command | seq), param);
3002
3003        do {
3004                /* let the FW do its magic ... */
3005                kthread_usleep(1000 * delay);
3006
3007                rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3008
3009                /* Give the FW up to 5 seconds (500 * 10ms; slow chips poll at 100ms) */
3010        } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3011
3012        DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3013           cnt*delay, rc, seq);
3014
3015        /* is this a reply to our command? */
3016        if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3017                rc &= FW_MSG_CODE_MASK;
3018        else {
3019                /* FW BUG! */
3020                BNX2X_ERR("FW failed to respond!\n");
3021                bnx2x_fw_dump(bp);
3022                rc = 0;
3023        }
3024        qunlock(&bp->fw_mb_mutex);
3025
3026        return rc;
3027}
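
    /* Illustrative (hypothetical) caller, following the handshake above:
     *
     *	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
     *	if (!rc)
     *		BNX2X_ERR("MCP did not respond in time\n");
     *
     * A return of 0 means the MCP never echoed our sequence number; any
     * other value is the FW_MSG_CODE_* response with the sequence number
     * masked off.
     */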
3028
3029static void storm_memset_func_cfg(struct bnx2x *bp,
3030                                 struct tstorm_eth_function_common_config *tcfg,
3031                                 uint16_t abs_fid)
3032{
3033        size_t size = sizeof(struct tstorm_eth_function_common_config);
3034
3035        uint32_t addr = BAR_TSTRORM_INTMEM +
3036                        TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3037
3038        __storm_memset_struct(bp, addr, size, (uint32_t *)tcfg);
3039}
3040
3041void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3042{
3043        if (CHIP_IS_E1x(bp)) {
3044                struct tstorm_eth_function_common_config tcfg = {0};
3045
3046                storm_memset_func_cfg(bp, &tcfg, p->func_id);
3047        }
3048
3049        /* Enable the function in the FW */
3050        storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3051        storm_memset_func_en(bp, p->func_id, 1);
3052
3053        /* spq */
3054        if (p->func_flgs & FUNC_FLG_SPQ) {
3055                storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3056                REG_WR(bp, XSEM_REG_FAST_MEMORY +
3057                       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3058        }
3059}
3060
3061/**
3062 * bnx2x_get_common_flags - Return common flags
3063 *
3064 * @bp          device handle
3065 * @fp          queue handle
3066 * @zero_stats  TRUE if statistics zeroing is needed
3067 *
3068 * Return the flags that are common to Tx-only as well as normal connections.
3069 */
3070static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3071                                            struct bnx2x_fastpath *fp,
3072                                            bool zero_stats)
3073{
3074        unsigned long flags = 0;
3075
3076        /* PF driver will always initialize the Queue to an ACTIVE state */
3077        __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3078
3079        /* Tx-only connections collect statistics (on the same index as the
3080         * parent connection). The statistics are zeroed when the parent
3081         * connection is initialized.
3082         */
3083
3084        __set_bit(BNX2X_Q_FLG_STATS, &flags);
3085        if (zero_stats)
3086                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3087
3088        if (bp->flags & TX_SWITCHING)
3089                __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3090
3091        __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3092        __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3093
3094#ifdef BNX2X_STOP_ON_ERROR
3095        __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3096#endif
3097
3098        return flags;
3099}
3100
3101static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3102                                       struct bnx2x_fastpath *fp,
3103                                       bool leading)
3104{
3105        unsigned long flags = 0;
3106
3107        /* calculate other queue flags */
3108        if (IS_MF_SD(bp))
3109                __set_bit(BNX2X_Q_FLG_OV, &flags);
3110
3111        if (IS_FCOE_FP(fp)) {
3112                __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3113                /* For FCoE - force usage of default priority (for afex) */
3114                __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3115        }
3116
3117        if (!fp->disable_tpa) {
3118                __set_bit(BNX2X_Q_FLG_TPA, &flags);
3119                __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3120                if (fp->mode == TPA_MODE_GRO)
3121                        __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3122        }
3123
3124        if (leading) {
3125                __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3126                __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3127        }
3128
3129        /* Always set HW VLAN stripping */
3130        __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3131
3132        /* configure silent vlan removal */
3133        if (IS_MF_AFEX(bp))
3134                __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3135
3136        return flags | bnx2x_get_common_flags(bp, fp, true);
3137}
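
    /* For example, reading the two helpers above: a leading queue with TPA
     * enabled gets (at least) ACTIVE, STATS, ZERO_STATS, TPA, TPA_IPV6,
     * LEADING_RSS, MCAST and VLAN set in its flags.
     */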
3138
3139static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3140        struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3141        uint8_t cos)
3142{
3143        gen_init->stat_id = bnx2x_stats_id(fp);
3144        gen_init->spcl_id = fp->cl_id;
3145
3146        /* Always use mini-jumbo MTU for FCoE L2 ring */
3147        if (IS_FCOE_FP(fp))
3148                gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3149        else
3150                gen_init->mtu = bp->dev->mtu;
3151
3152        gen_init->cos = cos;
3153
3154        gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3155}
3156
3157static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3158        struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3159        struct bnx2x_rxq_setup_params *rxq_init)
3160{
3161        uint8_t max_sge = 0;
3162        uint16_t sge_sz = 0;
3163        uint16_t tpa_agg_size = 0;
3164
3165        if (!fp->disable_tpa) {
3166                pause->sge_th_lo = SGE_TH_LO(bp);
3167                pause->sge_th_hi = SGE_TH_HI(bp);
3168
3169                /* validate SGE ring has enough entries to cross high threshold */
3170                warn_on(bp->dropless_fc &&
3171                                pause->sge_th_hi + FW_PREFETCH_CNT >
3172                                MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3173
3174                /* TODO XME this is based on MAX_SKB_FRAGS */
3175                tpa_agg_size = TPA_AGG_SIZE;
3176                max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3177                        SGE_PAGE_SHIFT;
3178                max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3179                          (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3180                sge_sz = (uint16_t)MIN_T(uint32_t, SGE_PAGES, 0xffff);
3181        }
3182
3183        /* pause - not for e1 */
3184        if (!CHIP_IS_E1(bp)) {
3185                pause->bd_th_lo = BD_TH_LO(bp);
3186                pause->bd_th_hi = BD_TH_HI(bp);
3187
3188                pause->rcq_th_lo = RCQ_TH_LO(bp);
3189                pause->rcq_th_hi = RCQ_TH_HI(bp);
3190                /*
3191                 * validate that rings have enough entries to cross
3192                 * high thresholds
3193                 */
3194                warn_on(bp->dropless_fc &&
3195                                pause->bd_th_hi + FW_PREFETCH_CNT >
3196                                bp->rx_ring_size);
3197                warn_on(bp->dropless_fc &&
3198                                pause->rcq_th_hi + FW_PREFETCH_CNT >
3199                                NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3200
3201                pause->pri_map = 1;
3202        }
3203
3204        /* rxq setup */
3205        rxq_init->dscr_map = fp->rx_desc_mapping;
3206        rxq_init->sge_map = fp->rx_sge_mapping;
3207        rxq_init->rcq_map = fp->rx_comp_mapping;
3208        rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3209
3210        /* This should be the maximum number of data bytes that may be
3211         * placed on the BD (not including padding).
3212         */
3213        rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3214                           BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3215
3216        rxq_init->cl_qzone_id = fp->cl_qzone_id;
3217        rxq_init->tpa_agg_sz = tpa_agg_size;
3218        rxq_init->sge_buf_sz = sge_sz;
3219        rxq_init->max_sges_pkt = max_sge;
3220        rxq_init->rss_engine_id = BP_FUNC(bp);
3221        rxq_init->mcast_engine_id = BP_FUNC(bp);
3222
3223        /* Maximum number of simultaneous TPA aggregations for this Queue.
3224         *
3225         * For PF Clients it should be the maximum available number.
3226         * VF driver(s) may want to define it to a smaller value.
3227         */
3228        rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3229
3230        rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3231        rxq_init->fw_sb_id = fp->fw_sb_id;
3232
3233        if (IS_FCOE_FP(fp))
3234                rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3235        else
3236                rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3237        /* configure silent vlan removal
3238         * if multi function mode is afex, then mask default vlan
3239         */
3240        if (IS_MF_AFEX(bp)) {
3241                rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3242                rxq_init->silent_removal_mask = VLAN_VID_MASK;
3243        }
3244}
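
    /* Worked example for the buf_sz math above (illustrative numbers only):
     * with a 2048-byte fp->rx_buf_size, 64-byte FW start/end alignments and
     * 2 bytes of IP header alignment padding, the BD may carry
     * 2048 - 64 - 64 - 2 = 1918 bytes of data.
     */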
3245
3246static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3247        struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3248        uint8_t cos)
3249{
3250        txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3251        txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3252        txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3253        txq_init->fw_sb_id = fp->fw_sb_id;
3254
3255        /*
3256         * set the tss leading client id for TX classification ==
3257         * leading RSS client id
3258         */
3259        txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3260
3261        if (IS_FCOE_FP(fp)) {
3262                txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3263                txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3264        }
3265}
3266
3267static void bnx2x_pf_init(struct bnx2x *bp)
3268{
3269        struct bnx2x_func_init_params func_init = {0};
3270        struct event_ring_data eq_data = { {0} };
3271        uint16_t flags;
3272
3273        if (!CHIP_IS_E1x(bp)) {
3274                /* reset IGU PF statistics: MSIX + ATTN */
3275                /* PF */
3276                REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3277                           BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3278                           (CHIP_MODE_IS_4_PORT(bp) ?
3279                                BP_FUNC(bp) : BP_VN(bp))*4, 0);
3280                /* ATTN */
3281                REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3282                           BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3283                           BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3284                           (CHIP_MODE_IS_4_PORT(bp) ?
3285                                BP_FUNC(bp) : BP_VN(bp))*4, 0);
3286        }
3287
3288        /* function setup flags */
3289        flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
3290
3291        /* This flag is relevant for E1x only.
3292         * E2 doesn't have a TPA configuration in a function level.
3293         */
3294        flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
3295
3296        func_init.func_flgs = flags;
3297        func_init.pf_id = BP_FUNC(bp);
3298        func_init.func_id = BP_FUNC(bp);
3299        func_init.spq_map = bp->spq_mapping;
3300        func_init.spq_prod = bp->spq_prod_idx;
3301
3302        bnx2x_func_init(bp, &func_init);
3303
3304        memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3305
3306        /*
3307         * Congestion management values depend on the link rate.
3308         * There is no active link yet, so the initial link rate is set to
3309         * 10 Gbps. When the link comes up, the congestion management values
3310         * are re-calculated according to the actual link rate.
3311         */
3312        bp->link_vars.line_speed = SPEED_10000;
3313        bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3314
3315        /* Only the PMF sets the HW */
3316        if (bp->port.pmf)
3317                storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3318
3319        /* init Event Queue - PCI bus guarantees correct endianness */
3320        eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3321        eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3322        eq_data.producer = bp->eq_prod;
3323        eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3324        eq_data.sb_id = DEF_SB_ID;
3325        storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3326}
3327
3328static void bnx2x_e1h_disable(struct bnx2x *bp)
3329{
3330        int port = BP_PORT(bp);
3331
3332        bnx2x_tx_disable(bp);
3333
3334        REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3335}
3336
3337static void bnx2x_e1h_enable(struct bnx2x *bp)
3338{
3339panic("Not implemented");
3340#if 0 // AKAROS_PORT
3341        int port = BP_PORT(bp);
3342
3343        if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3344                REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3345
3346        /* Tx queue should be only re-enabled */
3347        netif_tx_wake_all_queues(bp->dev);
3348
3349        /*
3350         * Should not call netif_carrier_on since it will be called if the link
3351         * is up when checking for link state
3352         */
3353#endif
3354}
3355
3356#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3357
3358static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3359{
3360panic("Not implemented");
3361#if 0 // AKAROS_PORT
3362        struct eth_stats_info *ether_stat =
3363                &bp->slowpath->drv_info_to_mcp.ether_stat;
3364        struct bnx2x_vlan_mac_obj *mac_obj =
3365                &bp->sp_objs->mac_obj;
3366        int i;
3367
3368        strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3369                ETH_STAT_INFO_VERSION_LEN);
3370
3371        /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3372         * mac_local field of the ether_stat struct. The base address is offset
3373         * by 2 bytes to account for the field being 8 bytes while a mac address
3374         * is only 6 bytes. Likewise, the stride for the get_n_elements function
3375         * is 2 bytes, compensating for the difference between the 6 bytes of a
3376         * mac and the 8 bytes allocated in the ether_stat struct, so the macs
3377         * will land in their proper positions.
3378         */
3379        for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3380                memset(ether_stat->mac_local + i, 0,
3381                       sizeof(ether_stat->mac_local[0]));
3382        mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3383                                DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3384                                ether_stat->mac_local + MAC_PAD, MAC_PAD,
3385                                Eaddrlen);
3386        ether_stat->mtu_size = bp->dev->mtu;
3387        if (bp->dev->feat & NETIF_F_RXCSUM)
3388                ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3389        if (bp->dev->feat & NETIF_F_TSO)
3390                ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3391        ether_stat->feature_flags |= bp->common.boot_mode;
3392
3393#if 0 // AKAROS_PORT
3394        ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3395#endif
3396
3397        ether_stat->txq_size = bp->tx_ring_size;
3398        ether_stat->rxq_size = bp->rx_ring_size;
3399
3400#ifdef CONFIG_BNX2X_SRIOV
3401        ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3402#endif
3403#endif
3404}
3405
3406static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3407{
3408        struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3409        struct fcoe_stats_info *fcoe_stat =
3410                &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3411
3412        if (!CNIC_LOADED(bp))
3413                return;
3414
3415        memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, Eaddrlen);
3416
3417        fcoe_stat->qos_priority =
3418                app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3419
3420        /* insert FCoE stats from ramrod response */
3421        if (!NO_FCOE(bp)) {
3422                struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3423                        &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3424                        tstorm_queue_statistics;
3425
3426                struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3427                        &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3428                        xstorm_queue_statistics;
3429
3430                struct fcoe_statistics_params *fw_fcoe_stat =
3431                        &bp->fw_stats_data->fcoe;
3432
3433                ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3434                          fcoe_stat->rx_bytes_lo,
3435                          fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3436
3437                ADD_64_LE(fcoe_stat->rx_bytes_hi,
3438                          fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3439                          fcoe_stat->rx_bytes_lo,
3440                          fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3441
3442                ADD_64_LE(fcoe_stat->rx_bytes_hi,
3443                          fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3444                          fcoe_stat->rx_bytes_lo,
3445                          fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3446
3447                ADD_64_LE(fcoe_stat->rx_bytes_hi,
3448                          fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3449                          fcoe_stat->rx_bytes_lo,
3450                          fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3451
3452                ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3453                          fcoe_stat->rx_frames_lo,
3454                          fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3455
3456                ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3457                          fcoe_stat->rx_frames_lo,
3458                          fcoe_q_tstorm_stats->rcv_ucast_pkts);
3459
3460                ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3461                          fcoe_stat->rx_frames_lo,
3462                          fcoe_q_tstorm_stats->rcv_bcast_pkts);
3463
3464                ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3465                          fcoe_stat->rx_frames_lo,
3466                          fcoe_q_tstorm_stats->rcv_mcast_pkts);
3467
3468                ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3469                          fcoe_stat->tx_bytes_lo,
3470                          fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3471
3472                ADD_64_LE(fcoe_stat->tx_bytes_hi,
3473                          fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3474                          fcoe_stat->tx_bytes_lo,
3475                          fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3476
3477                ADD_64_LE(fcoe_stat->tx_bytes_hi,
3478                          fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3479                          fcoe_stat->tx_bytes_lo,
3480                          fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3481
3482                ADD_64_LE(fcoe_stat->tx_bytes_hi,
3483                          fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3484                          fcoe_stat->tx_bytes_lo,
3485                          fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3486
3487                ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3488                          fcoe_stat->tx_frames_lo,
3489                          fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3490
3491                ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3492                          fcoe_stat->tx_frames_lo,
3493                          fcoe_q_xstorm_stats->ucast_pkts_sent);
3494
3495                ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3496                          fcoe_stat->tx_frames_lo,
3497                          fcoe_q_xstorm_stats->bcast_pkts_sent);
3498
3499                ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3500                          fcoe_stat->tx_frames_lo,
3501                          fcoe_q_xstorm_stats->mcast_pkts_sent);
3502        }
3503
3504        /* ask L5 driver to add data to the struct */
3505        bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3506}
3507
3508static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3509{
3510        struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3511        struct iscsi_stats_info *iscsi_stat =
3512                &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3513
3514        if (!CNIC_LOADED(bp))
3515                return;
3516
3517        memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3518               Eaddrlen);
3519
3520        iscsi_stat->qos_priority =
3521                app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3522
3523        /* ask L5 driver to add data to the struct */
3524        bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3525}
3526
3527/* called due to MCP event (on pmf):
3528 *      reread new bandwidth configuration
3529 *      configure FW
3530 *      notify other functions about the change
3531 */
3532static void bnx2x_config_mf_bw(struct bnx2x *bp)
3533{
3534        if (bp->link_vars.link_up) {
3535                bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3536                bnx2x_link_sync_notify(bp);
3537        }
3538        storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3539}
3540
3541static void bnx2x_set_mf_bw(struct bnx2x *bp)
3542{
3543        bnx2x_config_mf_bw(bp);
3544        bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3545}
3546
3547static void bnx2x_handle_eee_event(struct bnx2x *bp)
3548{
3549        DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3550        bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3551}
3552
3553#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH        (20)
3554#define BNX2X_UPDATE_DRV_INFO_IND_COUNT         (25)
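    /* i.e. poll up to 25 times at 20ms intervals: a ~500ms budget for
     * management to release the indication (see the wait loop below).
     */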
3555
3556static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3557{
3558        enum drv_info_opcode op_code;
3559        uint32_t drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3560        bool release = false;
3561        int wait;
3562
3563        /* if drv_info version supported by MFW doesn't match - send NACK */
3564        if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3565                bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3566                return;
3567        }
3568
3569        op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3570                  DRV_INFO_CONTROL_OP_CODE_SHIFT;
3571
3572        /* Must prevent other flows from accessing drv_info_to_mcp */
3573        qlock(&bp->drv_info_mutex);
3574
3575        memset(&bp->slowpath->drv_info_to_mcp, 0,
3576               sizeof(union drv_info_to_mcp));
3577
3578        switch (op_code) {
3579        case ETH_STATS_OPCODE:
3580                bnx2x_drv_info_ether_stat(bp);
3581                break;
3582        case FCOE_STATS_OPCODE:
3583                bnx2x_drv_info_fcoe_stat(bp);
3584                break;
3585        case ISCSI_STATS_OPCODE:
3586                bnx2x_drv_info_iscsi_stat(bp);
3587                break;
3588        default:
3589                /* if op code isn't supported - send NACK */
3590                bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3591                goto out;
3592        }
3593
3594        /* if we got drv_info attn from MFW then these fields are defined in
3595         * shmem2 for sure
3596         */
3597        SHMEM2_WR(bp, drv_info_host_addr_lo,
3598                U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3599        SHMEM2_WR(bp, drv_info_host_addr_hi,
3600                U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3601
3602        bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3603
3604        /* Since management may want both this and get_driver_version, we
3605         * need to wait until management notifies us that it has finished
3606         * utilizing the buffer.
3607         */
3608        if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3609                DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3610        } else if (!bp->drv_info_mng_owner) {
3611                uint32_t bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3612
3613                for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3614                        uint32_t indication = SHMEM2_RD(bp, mfw_drv_indication);
3615
3616                        /* Management is done; need to clear indication */
3617                        if (indication & bit) {
3618                                SHMEM2_WR(bp, mfw_drv_indication,
3619                                          indication & ~bit);
3620                                release = true;
3621                                break;
3622                        }
3623
3624                        kthread_usleep(1000 * BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3625                }
3626        }
3627        if (!release) {
3628                DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3629                bp->drv_info_mng_owner = true;
3630        }
3631
3632out:
3633        qunlock(&bp->drv_info_mutex);
3634}
3635
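    /* Pack a dotted version string into a u32, one byte per field (major in
     * the top byte). In bnx2x format the leading "1." is fixed and the next
     * character is a single version digit: e.g. a (hypothetical) string
     * "1.710.51-0" parses to {7, 10, 51, 0} and packs to 0x070a3300.
     * Fields that fail to parse are zeroed.
     */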
3636static uint32_t bnx2x_update_mng_version_utility(char *version,
3637                                            bool bnx2x_format)
3638{
3639        uint8_t vals[4];
3640        int i = 0;
3641
3642        if (bnx2x_format) {
3643                i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3644                           &vals[0], &vals[1], &vals[2], &vals[3]);
3645                if (i > 0)
3646                        vals[0] -= '0';
3647        } else {
3648                i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3649                           &vals[0], &vals[1], &vals[2], &vals[3]);
3650        }
3651
3652        while (i < 4)
3653                vals[i++] = 0;
3654
3655        return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3656}
3657
3658void bnx2x_update_mng_version(struct bnx2x *bp)
3659{
3660        uint32_t iscsiver = DRV_VER_NOT_LOADED;
3661        uint32_t fcoever = DRV_VER_NOT_LOADED;
3662        uint32_t ethver = DRV_VER_NOT_LOADED;
3663        int idx = BP_FW_MB_IDX(bp);
3664        char *version; // AKAROS_PORT (type conversion issues)
3665
3666        if (!SHMEM2_HAS(bp, func_os_drv_ver))
3667                return;
3668
3669        qlock(&bp->drv_info_mutex);
3670        /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
3671        if (bp->drv_info_mng_owner)
3672                goto out;
3673
3674        if (bp->state != BNX2X_STATE_OPEN)
3675                goto out;
3676
3677        /* Parse ethernet driver version */
3678        ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3679        if (!CNIC_LOADED(bp))
3680                goto out;
3681
3682        /* Try getting storage driver version via cnic */
3683        memset(&bp->slowpath->drv_info_to_mcp, 0,
3684               sizeof(union drv_info_to_mcp));
3685        bnx2x_drv_info_iscsi_stat(bp);
3686        version = (char*)bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3687        iscsiver = bnx2x_update_mng_version_utility(version, false);
3688
3689        memset(&bp->slowpath->drv_info_to_mcp, 0,
3690               sizeof(union drv_info_to_mcp));
3691        bnx2x_drv_info_fcoe_stat(bp);
3692        version = (char*)bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3693        fcoever = bnx2x_update_mng_version_utility(version, false);
3694
3695out:
3696        SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3697        SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3698        SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3699
3700        qunlock(&bp->drv_info_mutex);
3701
3702        DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3703           ethver, iscsiver, fcoever);
3704}
3705
3706static void bnx2x_oem_event(struct bnx2x *bp, uint32_t event)
3707{
3708        uint32_t cmd_ok, cmd_fail;
3709
3710        /* sanity */
3711        if (event & DRV_STATUS_DCC_EVENT_MASK &&
3712            event & DRV_STATUS_OEM_EVENT_MASK) {
3713                BNX2X_ERR("Received simultaneous events %08x\n", event);
3714                return;
3715        }
3716
3717        if (event & DRV_STATUS_DCC_EVENT_MASK) {
3718                cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3719                cmd_ok = DRV_MSG_CODE_DCC_OK;
3720        } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
3721                cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3722                cmd_ok = DRV_MSG_CODE_OEM_OK;
3723        }
3724
3725        DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3726
3727        if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3728                     DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
3729                /* This is the only place besides the function initialization
3730                 * where bp->flags can change, so it is done without any
3731                 * locks.
3732                 */
3733                if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3734                        DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3735                        bp->flags |= MF_FUNC_DIS;
3736
3737                        bnx2x_e1h_disable(bp);
3738                } else {
3739                        DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3740                        bp->flags &= ~MF_FUNC_DIS;
3741
3742                        bnx2x_e1h_enable(bp);
3743                }
3744                event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3745                           DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3746        }
3747
3748        if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3749                     DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3750                bnx2x_config_mf_bw(bp);
3751                event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3752                           DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3753        }
3754
3755        /* Report results to MCP */
3756        if (event)
3757                bnx2x_fw_command(bp, cmd_fail, 0);
3758        else
3759                bnx2x_fw_command(bp, cmd_ok, 0);
3760}
3761
3762/* must be called under the spq lock */
3763static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3764{
3765        struct eth_spe *next_spe = bp->spq_prod_bd;
3766
3767        if (bp->spq_prod_bd == bp->spq_last_bd) {
3768                bp->spq_prod_bd = bp->spq;
3769                bp->spq_prod_idx = 0;
3770                DP(BNX2X_MSG_SP, "end of spq\n");
3771        } else {
3772                bp->spq_prod_bd++;
3773                bp->spq_prod_idx++;
3774        }
3775        return next_spe;
3776}
3777
3778/* must be called under the spq lock */
3779static void bnx2x_sp_prod_update(struct bnx2x *bp)
3780{
3781        int func = BP_FUNC(bp);
3782
3783        /*
3784         * Make sure that BD data is updated before writing the producer:
3785         * BD data is written to the memory, the producer is read from the
3786         * memory, thus we need a full memory barrier to ensure the ordering.
3787         */
3788        mb();
3789
3790        REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3791                 bp->spq_prod_idx);
3792        bus_wmb();
3793}
3794
3795/**
3796 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3797 *
3798 * @cmd:        command to check
3799 * @cmd_type:   command type
3800 */
3801static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3802{
3803        if ((cmd_type == NONE_CONNECTION_TYPE) ||
3804            (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3805            (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3806            (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3807            (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3808            (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3809            (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3810                return true;
3811        else
3812                return false;
3813}
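
    /* "Contextless" (common) ramrods complete on the event queue (EQ)
     * rather than on a connection's completion queue, which is why
     * bnx2x_sp_post() below accounts them against eq_spq_left instead of
     * cq_spq_left.
     */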
3814
3815/**
3816 * bnx2x_sp_post - place a single command on an SP ring
3817 *
3818 * @bp:         driver handle
3819 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
3820 * @cid:        SW CID the command is related to
3821 * @data_hi:    command private data address (high 32 bits)
3822 * @data_lo:    command private data address (low 32 bits)
3823 * @cmd_type:   command type (e.g. NONE, ETH)
3824 *
3825 * SP data is handled as if it's always an address pair, thus data fields are
3826 * not swapped to little endian in upper functions. Instead this function swaps
3827 * data as if it's two u32 fields.
3828 */
3829int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3830                  uint32_t data_hi, uint32_t data_lo, int cmd_type)
3831{
3832        struct eth_spe *spe;
3833        uint16_t type;
3834        bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3835
3836#ifdef BNX2X_STOP_ON_ERROR
3837        if (unlikely(bp->panic)) {
3838                BNX2X_ERR("Can't post SP when there is panic\n");
3839                return -EIO;
3840        }
3841#endif
3842
3843        spin_lock(&bp->spq_lock);
3844
3845        if (common) {
3846                if (!atomic_read(&bp->eq_spq_left)) {
3847                        BNX2X_ERR("BUG! EQ ring full!\n");
3848                        spin_unlock(&bp->spq_lock);
3849                        bnx2x_panic();
3850                        return -EBUSY;
3851                }
3852        } else if (!atomic_read(&bp->cq_spq_left)) {
3853                BNX2X_ERR("BUG! SPQ ring full!\n");
3854                spin_unlock(&bp->spq_lock);
3855                bnx2x_panic();
3856                return -EBUSY;
3857        }
3858
3859        spe = bnx2x_sp_get_next(bp);
3860
3861        /* CID needs the port number to be encoded in it */
3862        spe->hdr.conn_and_cmd_data =
3863                        cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3864                                    HW_CID(bp, cid));
3865
3866        /* In some cases, type may already contain the func-id,
3867         * mainly in SRIOV-related use cases, so we add it here only
3868         * if it's not already set.
3869         */
3870        if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3871                type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3872                        SPE_HDR_CONN_TYPE;
3873                type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3874                         SPE_HDR_FUNCTION_ID);
3875        } else {
3876                type = cmd_type;
3877        }
3878
3879        spe->hdr.type = cpu_to_le16(type);
3880
3881        spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3882        spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3883
3884        /*
3885         * It's ok if the actual decrement is issued towards the memory
3886         * somewhere between the spin_lock and spin_unlock. Thus no
3887         * further explicit memory barrier is needed.
3888         */
3889        if (common)
3890                atomic_dec(&bp->eq_spq_left);
3891        else
3892                atomic_dec(&bp->cq_spq_left);
3893
3894        DP(BNX2X_MSG_SP,
3895           "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3896           bp->spq_prod_idx, (uint32_t)U64_HI(bp->spq_mapping),
3897           (uint32_t)(U64_LO(bp->spq_mapping) +
3898           (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3899           HW_CID(bp, cid), data_hi, data_lo, type,
3900           atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3901
3902        bnx2x_sp_prod_update(bp);
3903        spin_unlock(&bp->spq_lock);
3904        return 0;
3905}
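
    /* A minimal usage sketch (hypothetical values): posting a client setup
     * ramrod whose parameters live in a DMA-mapped buffer might look like
     *
     *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, fp->cid,
     *			   U64_HI(data_mapping), U64_LO(data_mapping),
     *			   ETH_CONNECTION_TYPE);
     *
     * The completion is consumed later from the EQ/CQ; bnx2x_sp_post()
     * itself never blocks on it.
     */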
3906
3907/* acquire split MCP access lock register */
3908static int bnx2x_acquire_alr(struct bnx2x *bp)
3909{
3910        uint32_t j, val;
3911        int rc = 0;
3912
3913        might_sleep();
3914        for (j = 0; j < 1000; j++) {
3915                REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3916                val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3917                if (val & MCPR_ACCESS_LOCK_LOCK)
3918                        break;
3919
3920                kthread_usleep(5000);
3921        }
3922        if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3923                BNX2X_ERR("Cannot acquire MCP access lock register\n");
3924                rc = -EBUSY;
3925        }
3926
3927        return rc;
3928}
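
    /* The loop above polls for up to 1000 * 5ms = ~5 seconds before giving
     * up with -EBUSY.
     */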
3929
3930/* release split MCP access lock register */
3931static void bnx2x_release_alr(struct bnx2x *bp)
3932{
3933        REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3934}
3935
3936#define BNX2X_DEF_SB_ATT_IDX    0x0001
3937#define BNX2X_DEF_SB_IDX        0x0002
3938
3939static uint16_t bnx2x_update_dsb_idx(struct bnx2x *bp)
3940{
3941        struct host_sp_status_block *def_sb = bp->def_status_blk;
3942        uint16_t rc = 0;
3943
3944        cmb(); /* status block is written to by the chip */
3945        if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3946                bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3947                rc |= BNX2X_DEF_SB_ATT_IDX;
3948        }
3949
3950        if (bp->def_idx != def_sb->sp_sb.running_index) {
3951                bp->def_idx = def_sb->sp_sb.running_index;
3952                rc |= BNX2X_DEF_SB_IDX;
3953        }
3954
3955        /* Do not reorder: indices reading should complete before handling */
3956        cmb();
3957        return rc;
3958}
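
    /* The returned mask (BNX2X_DEF_SB_ATT_IDX and/or BNX2X_DEF_SB_IDX)
     * tells the slow-path handler which parts of the default status block
     * advanced: the attention bits, the SP indices, or both.
     */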
3959
3960/*
3961 * slow path service functions
3962 */
3963
3964static void bnx2x_attn_int_asserted(struct bnx2x *bp, uint32_t asserted)
3965{
3966        int port = BP_PORT(bp);
3967        uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3968                              MISC_REG_AEU_MASK_ATTN_FUNC_0;
3969        uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3970                                       NIG_REG_MASK_INTERRUPT_PORT0;
3971        uint32_t aeu_mask;
3972        uint32_t nig_mask = 0;
3973        uint32_t reg_addr;
3974
3975        if (bp->attn_state & asserted)
3976                BNX2X_ERR("IGU ERROR\n");
3977
3978        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3979        aeu_mask = REG_RD(bp, aeu_addr);
3980
3981        DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
3982           aeu_mask, asserted);
3983        aeu_mask &= ~(asserted & 0x3ff);
3984        DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3985
3986        REG_WR(bp, aeu_addr, aeu_mask);
3987        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3988
3989        DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3990        bp->attn_state |= asserted;
3991        DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3992
3993        if (asserted & ATTN_HARD_WIRED_MASK) {
3994                if (asserted & ATTN_NIG_FOR_FUNC) {
3995
3996                        bnx2x_acquire_phy_lock(bp);
3997
3998                        /* save nig interrupt mask */
3999                        nig_mask = REG_RD(bp, nig_int_mask_addr);
4000
4001                        /* If nig_mask is not set, no need to call the update
4002                         * function.
4003                         */
4004                        if (nig_mask) {
4005                                REG_WR(bp, nig_int_mask_addr, 0);
4006
4007                                bnx2x_link_attn(bp);
4008                        }
4009
4010                        /* handle unicore attn? */
4011                }
4012                if (asserted & ATTN_SW_TIMER_4_FUNC)
4013                        DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4014
4015                if (asserted & GPIO_2_FUNC)
4016                        DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4017
4018                if (asserted & GPIO_3_FUNC)
4019                        DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4020
4021                if (asserted & GPIO_4_FUNC)
4022                        DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4023
4024                if (port == 0) {
4025                        if (asserted & ATTN_GENERAL_ATTN_1) {
4026                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4027                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4028                        }
4029                        if (asserted & ATTN_GENERAL_ATTN_2) {
4030                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4031                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4032                        }
4033                        if (asserted & ATTN_GENERAL_ATTN_3) {
4034                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4035                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4036                        }
4037                } else {
4038                        if (asserted & ATTN_GENERAL_ATTN_4) {
4039                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4040                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4041                        }
4042                        if (asserted & ATTN_GENERAL_ATTN_5) {
4043                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4044                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4045                        }
4046                        if (asserted & ATTN_GENERAL_ATTN_6) {
4047                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4048                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4049                        }
4050                }
4051
4052        } /* if hardwired */
4053
4054        if (bp->common.int_block == INT_BLOCK_HC)
4055                reg_addr = (HC_REG_COMMAND_REG + port*32 +
4056                            COMMAND_REG_ATTN_BITS_SET);
4057        else
4058                reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4059
4060        DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4061           (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4062        REG_WR(bp, reg_addr, asserted);
4063
4064        /* now set back the mask */
4065        if (asserted & ATTN_NIG_FOR_FUNC) {
4066                /* Verify that IGU ack through BAR was written before restoring
4067                 * NIG mask. This loop should exit after 2-3 iterations max.
4068                 */
4069                if (bp->common.int_block != INT_BLOCK_HC) {
4070                        uint32_t cnt = 0, igu_acked;
4071                        do {
4072                                igu_acked = REG_RD(bp,
4073                                                   IGU_REG_ATTENTION_ACK_BITS);
4074                        } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4075                                 (++cnt < MAX_IGU_ATTN_ACK_TO));
4076                        if (!igu_acked)
4077                                DP(NETIF_MSG_HW,
4078                                   "Failed to verify IGU ack on time\n");
4079                        cmb();
4080                }
4081                REG_WR(bp, nig_int_mask_addr, nig_mask);
4082                bnx2x_release_phy_lock(bp);
4083        }
4084}
4085
4086static void bnx2x_fan_failure(struct bnx2x *bp)
4087{
4088        int port = BP_PORT(bp);
4089        uint32_t ext_phy_config;
4090        /* mark the failure */
4091        ext_phy_config =
4092                SHMEM_RD(bp,
4093                         dev_info.port_hw_config[port].external_phy_config);
4094
4095        ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4096        ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4097        SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4098                 ext_phy_config);
4099
4100        /* log the failure */
4101        netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4102                            "Please contact OEM Support for assistance\n");
4103
4104        /* Schedule device reset (unload).
4105         * Some boards consume enough power while the driver is up to
4106         * overheat if the fan fails.
4107         */
4108        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4109}
4110
4111static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, uint32_t attn)
4112{
4113        int port = BP_PORT(bp);
4114        int reg_offset;
4115        uint32_t val;
4116
4117        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4118                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4119
4120        if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4121
4122                val = REG_RD(bp, reg_offset);
4123                val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4124                REG_WR(bp, reg_offset, val);
4125
4126                BNX2X_ERR("SPIO5 hw attention\n");
4127
4128                /* Fan failure attention */
4129                bnx2x_hw_reset_phy(&bp->link_params);
4130                bnx2x_fan_failure(bp);
4131        }
4132
4133        if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4134                bnx2x_acquire_phy_lock(bp);
4135                bnx2x_handle_module_detect_int(&bp->link_params);
4136                bnx2x_release_phy_lock(bp);
4137        }
4138
4139        if (attn & HW_INTERRUT_ASSERT_SET_0) {
4140
4141                val = REG_RD(bp, reg_offset);
4142                val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
4143                REG_WR(bp, reg_offset, val);
4144
4145                BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4146                          (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0));
4147                bnx2x_panic();
4148        }
4149}
4150
4151static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, uint32_t attn)
4152{
4153        uint32_t val;
4154
4155        if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4156
4157                val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4158                BNX2X_ERR("DB hw attention 0x%x\n", val);
4159                /* DORQ discard attention */
4160                if (val & 0x2)
4161                        BNX2X_ERR("FATAL error from DORQ\n");
4162        }
4163
4164        if (attn & HW_INTERRUT_ASSERT_SET_1) {
4165
4166                int port = BP_PORT(bp);
4167                int reg_offset;
4168
4169                reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4170                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4171
4172                val = REG_RD(bp, reg_offset);
4173                val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
4174                REG_WR(bp, reg_offset, val);
4175
4176                BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4177                          (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
4178                bnx2x_panic();
4179        }
4180}
4181
4182static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, uint32_t attn)
4183{
4184        uint32_t val;
4185
4186        if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4187
4188                val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4189                BNX2X_ERR("CFC hw attention 0x%x\n", val);
4190                /* CFC error attention */
4191                if (val & 0x2)
4192                        BNX2X_ERR("FATAL error from CFC\n");
4193        }
4194
4195        if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4196                val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4197                BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4198                /* RQ_USDMDP_FIFO_OVERFLOW */
4199                if (val & 0x18000)
4200                        BNX2X_ERR("FATAL error from PXP\n");
4201
4202                if (!CHIP_IS_E1x(bp)) {
4203                        val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4204                        BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4205                }
4206        }
4207
4208        if (attn & HW_INTERRUT_ASSERT_SET_2) {
4209
4210                int port = BP_PORT(bp);
4211                int reg_offset;
4212
4213                reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4214                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4215
4216                val = REG_RD(bp, reg_offset);
4217                val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
4218                REG_WR(bp, reg_offset, val);
4219
4220                BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4221                          (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
4222                bnx2x_panic();
4223        }
4224}
4225
4226static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, uint32_t attn)
4227{
4228        uint32_t val;
4229
4230        if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4231
4232                if (attn & BNX2X_PMF_LINK_ASSERT) {
4233                        int func = BP_FUNC(bp);
4234
4235                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4236                        bnx2x_read_mf_cfg(bp);
4237                        bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4238                                        func_mf_config[BP_ABS_FUNC(bp)].config);
4239                        val = SHMEM_RD(bp,
4240                                       func_mb[BP_FW_MB_IDX(bp)].drv_status);
4241
4242                        if (val & (DRV_STATUS_DCC_EVENT_MASK |
4243                                   DRV_STATUS_OEM_EVENT_MASK))
4244                                bnx2x_oem_event(bp,
4245                                        (val & (DRV_STATUS_DCC_EVENT_MASK |
4246                                                DRV_STATUS_OEM_EVENT_MASK)));
4247
4248                        if (val & DRV_STATUS_SET_MF_BW)
4249                                bnx2x_set_mf_bw(bp);
4250
4251                        if (val & DRV_STATUS_DRV_INFO_REQ)
4252                                bnx2x_handle_drv_info_req(bp);
4253
4254                        if (val & DRV_STATUS_VF_DISABLED)
4255                                bnx2x_schedule_iov_task(bp,
4256                                                        BNX2X_IOV_HANDLE_FLR);
4257
4258                        if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4259                                bnx2x_pmf_update(bp);
4260
4261                        if (bp->port.pmf &&
4262                            (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4263                            bp->dcbx_enabled > 0)
4264                                /* start dcbx state machine */
4265                                bnx2x_dcbx_set_params(bp,
4266                                        BNX2X_DCBX_STATE_NEG_RECEIVED);
4267                        if (val & DRV_STATUS_AFEX_EVENT_MASK)
4268                                bnx2x_handle_afex_cmd(bp,
4269                                        val & DRV_STATUS_AFEX_EVENT_MASK);
4270                        if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4271                                bnx2x_handle_eee_event(bp);
4272
4273                        if (val & DRV_STATUS_OEM_UPDATE_SVID)
4274                                bnx2x_handle_update_svid_cmd(bp);
4275
4276                        if (bp->link_vars.periodic_flags &
4277                            PERIODIC_FLAGS_LINK_EVENT) {
4278                                /* sync with link */
4279                                bnx2x_acquire_phy_lock(bp);
4280                                bp->link_vars.periodic_flags &=
4281                                        ~PERIODIC_FLAGS_LINK_EVENT;
4282                                bnx2x_release_phy_lock(bp);
4283                                if (IS_MF(bp))
4284                                        bnx2x_link_sync_notify(bp);
4285                                bnx2x_link_report(bp);
4286                        }
4287                        /* Always call it here: bnx2x_link_report() will
4288                         * prevent duplicate link indications.
4289                         */
4290                        bnx2x__link_status_update(bp);
4291                } else if (attn & BNX2X_MC_ASSERT_BITS) {
4292
4293                        BNX2X_ERR("MC assert!\n");
4294                        bnx2x_mc_assert(bp);
4295                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4296                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4297                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4298                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4299                        bnx2x_panic();
4300
4301                } else if (attn & BNX2X_MCP_ASSERT) {
4302
4303                        BNX2X_ERR("MCP assert!\n");
4304                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4305                        bnx2x_fw_dump(bp);
4306
4307                } else
4308                        BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4309        }
4310
4311        if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4312                BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4313                if (attn & BNX2X_GRC_TIMEOUT) {
4314                        val = CHIP_IS_E1(bp) ? 0 :
4315                                        REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4316                        BNX2X_ERR("GRC time-out 0x%08x\n", val);
4317                }
4318                if (attn & BNX2X_GRC_RSV) {
4319                        val = CHIP_IS_E1(bp) ? 0 :
4320                                        REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4321                        BNX2X_ERR("GRC reserved 0x%08x\n", val);
4322                }
4323                REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4324        }
4325}
4326
4327/*
4328 * Bits map:
4329 * 0-7   - Engine0 load counter.
4330 * 8-15  - Engine1 load counter.
4331 * 16    - Engine0 RESET_IN_PROGRESS bit.
4332 * 17    - Engine1 RESET_IN_PROGRESS bit.
4333 * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4334 *         on the engine
4335 * 19    - Engine1 ONE_IS_LOADED.
4336 * 20    - Chip reset flow bit. When set, a non-leader must wait for both
4337 *         engine leaders to complete (check both RESET_IN_PROGRESS bits,
4338 *         not just the one belonging to its engine).
4339 *
4340 */
4341#define BNX2X_RECOVERY_GLOB_REG         MISC_REG_GENERIC_POR_1
4342
4343#define BNX2X_PATH0_LOAD_CNT_MASK       0x000000ff
4344#define BNX2X_PATH0_LOAD_CNT_SHIFT      0
4345#define BNX2X_PATH1_LOAD_CNT_MASK       0x0000ff00
4346#define BNX2X_PATH1_LOAD_CNT_SHIFT      8
4347#define BNX2X_PATH0_RST_IN_PROG_BIT     0x00010000
4348#define BNX2X_PATH1_RST_IN_PROG_BIT     0x00020000
4349#define BNX2X_GLOBAL_RESET_BIT          0x00040000
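
/* Illustrative helper -- a minimal sketch added for exposition, not part of
 * the original driver. It merely applies the mask/shift defines above; the
 * set/clear/status routines below open-code the same arithmetic. Note that
 * the "load counter" is really a bitmask with one bit per PF.
 */
static inline uint32_t bnx2x_recovery_load_bits(uint32_t reg_val, int path)
{
	uint32_t mask = path ? BNX2X_PATH1_LOAD_CNT_MASK :
			       BNX2X_PATH0_LOAD_CNT_MASK;
	uint32_t shift = path ? BNX2X_PATH1_LOAD_CNT_SHIFT :
				BNX2X_PATH0_LOAD_CNT_SHIFT;

	/* Non-zero means at least one function is loaded on that engine */
	return (reg_val & mask) >> shift;
}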
4350
4351/*
4352 * Set the GLOBAL_RESET bit.
4353 *
4354 * Should be run under rtnl lock
4355 */
4356void bnx2x_set_reset_global(struct bnx2x *bp)
4357{
4358        uint32_t val;
4359        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4360        val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4361        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4362        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4363}
4364
4365/*
4366 * Clear the GLOBAL_RESET bit.
4367 *
4368 * Should be run under rtnl lock
4369 */
4370static void bnx2x_clear_reset_global(struct bnx2x *bp)
4371{
4372        uint32_t val;
4373        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4374        val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4375        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4376        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4377}
4378
4379/*
4380 * Checks the GLOBAL_RESET bit.
4381 *
4382 * Should be run under rtnl lock
4383 */
4384static bool bnx2x_reset_is_global(struct bnx2x *bp)
4385{
4386        uint32_t val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4387
4388        DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4389        return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4390}
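
/* Usage sketch (illustrative; hedged, not lifted from this file): a leader
 * that detects a global attention brackets the common recovery with the
 * routines above, while everyone else polls and backs off:
 *
 *	bnx2x_set_reset_global(bp);
 *	// ... recover blocks shared by both engines ...
 *	bnx2x_clear_reset_global(bp);
 *
 *	// elsewhere: if (bnx2x_reset_is_global(bp)), wait and retry
 */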
4391
4392/*
4393 * Clear RESET_IN_PROGRESS bit for the current engine.
4394 *
4395 * Should be run under rtnl lock
4396 */
4397static void bnx2x_set_reset_done(struct bnx2x *bp)
4398{
4399        uint32_t val;
4400        uint32_t bit = BP_PATH(bp) ?
4401                BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4402        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4403        val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4404
4405        /* Clear the bit */
4406        val &= ~bit;
4407        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4408
4409        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4410}
4411
4412/*
4413 * Set RESET_IN_PROGRESS for the current engine.
4414 *
4415 * Should be run under rtnl lock
4416 */
4417void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4418{
4419        uint32_t val;
4420        uint32_t bit = BP_PATH(bp) ?
4421                BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4422        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4423        val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4424
4425        /* Set the bit */
4426        val |= bit;
4427        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4428        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4429}
4430
4431/*
4432 * Checks the RESET_IN_PROGRESS bit for the given engine.
4433 * Should be run under rtnl lock
4434 */
4435bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4436{
4437        uint32_t val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4438        uint32_t bit = engine ?
4439                BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4440
4441        /* return false if bit is set */
4442        return (val & bit) ? false : true;
4443}
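
/* Illustrative wait loop (a sketch under the bit-map comment above, not
 * verbatim driver code): with the chip reset flow bit (bit 20) set, a
 * non-leader must see *both* engines finish, e.g.:
 *
 *	while (!bnx2x_reset_is_done(bp, 0) || !bnx2x_reset_is_done(bp, 1))
 *		msleep(20);	// hypothetical poll interval
 */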
4444
4445/*
4446 * Set the load mark for the current PF.
4447 *
4448 * Should be run under rtnl lock
4449 */
4450void bnx2x_set_pf_load(struct bnx2x *bp)
4451{
4452        uint32_t val1, val;
4453        uint32_t mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4454                             BNX2X_PATH0_LOAD_CNT_MASK;
4455        uint32_t shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4456                             BNX2X_PATH0_LOAD_CNT_SHIFT;
4457
4458        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4459        val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4460
4461        DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4462
4463        /* get the current counter value */
4464        val1 = (val & mask) >> shift;
4465
4466        /* set bit of that PF */
4467        val1 |= (1 << bp->pf_num);
4468
4469        /* clear the old value */
4470        val &= ~mask;
4471
4472        /* set the new one */
4473        val |= ((val1 << shift) & mask);
4474
4475        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4476        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4477}
4478
4479/**
4480 * bnx2x_clear_pf_load - clear pf load mark
4481 *
4482 * @bp:         driver handle
4483 *
4484 * Should be run under rtnl lock.
4485 * Decrements the load counter for the current engine. Returns
4486 * whether other functions are still loaded.
4487 */
4488bool bnx2x_clear_pf_load(struct bnx2x *bp)
4489{
4490        uint32_t val1, val;
4491        uint32_t mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4492                             BNX2X_PATH0_LOAD_CNT_MASK;
4493        uint32_t shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4494                             BNX2X_PATH0_LOAD_CNT_SHIFT;
4495
4496        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4497        val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4498        DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4499
4500        /* get the current counter value */
4501        val1 = (val & mask) >> shift;
4502
4503        /* clear bit of that PF */
4504        val1 &= ~(1 << bp->pf_num);
4505
4506        /* clear the old value */
4507        val &= ~mask;
4508
4509        /* set the new one */
4510        val |= ((val1 << shift) & mask);
4511
4512        REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4513        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4514        return val1 != 0;
4515}
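
/* Illustrative pairing (a sketch, not the exact load/unload paths):
 *
 *	bnx2x_set_pf_load(bp);			// during nic load
 *	// ...
 *	if (!bnx2x_clear_pf_load(bp))		// during nic unload
 *		do_final_reset = true;		// hypothetical flag: this was
 *						// the last PF on the engine
 */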
4516
4517/*
4518 * Read the load status for the current engine.
4519 *
4520 * Should be run under rtnl lock
4521 */
4522static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4523{
4524        uint32_t mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4525                             BNX2X_PATH0_LOAD_CNT_MASK);
4526        uint32_t shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4527                             BNX2X_PATH0_LOAD_CNT_SHIFT);
4528        uint32_t val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4529
4530        DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4531
4532        val = (val & mask) >> shift;
4533
4534        DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4535           engine, val);
4536
4537        return val != 0;
4538}
4539
4540static void _print_parity(struct bnx2x *bp, uint32_t reg)
4541{
4542        pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4543}
4544
4545static void _print_next_block(int idx, const char *blk)
4546{
4547        pr_cont("%s%s", idx ? ", " : "", blk);
4548}
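
/* Together the two helpers above build a single console line; with
 * hypothetical register values it looks like:
 *
 *	Parity errors detected in blocks: BRB [0x00000001] , PARSER [0x00000002]
 *
 * _print_next_block() emits the ", " separator for every block after the
 * first, and _print_parity() appends the raw parity status register value.
 */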
4549
4550static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, uint32_t sig,
4551                                            int *par_num, bool print)
4552{
4553        uint32_t cur_bit;
4554        bool res;
4555        int i;
4556
4557        res = false;
4558
4559        for (i = 0; sig; i++) {
4560                cur_bit = (0x1UL << i);
4561                if (sig & cur_bit) {
4562                        res |= true; /* Each bit is a real error! */
4563
4564                        if (print) {
4565                                switch (cur_bit) {
4566                                case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4567                                        _print_next_block((*par_num)++, "BRB");
4568                                        _print_parity(bp,
4569                                                      BRB1_REG_BRB1_PRTY_STS);
4570                                        break;
4571                                case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4572                                        _print_next_block((*par_num)++,
4573                                                          "PARSER");
4574                                        _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4575                                        break;
4576                                case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4577                                        _print_next_block((*par_num)++, "TSDM");
4578                                        _print_parity(bp,
4579                                                      TSDM_REG_TSDM_PRTY_STS);
4580                                        break;
4581                                case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4582                                        _print_next_block((*par_num)++,
4583                                                          "SEARCHER");
4584                                        _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4585                                        break;
4586                                case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4587                                        _print_next_block((*par_num)++, "TCM");
4588                                        _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4589                                        break;
4590                                case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4591                                        _print_next_block((*par_num)++,
4592                                                          "TSEMI");
4593                                        _print_parity(bp,
4594                                                      TSEM_REG_TSEM_PRTY_STS_0);
4595                                        _print_parity(bp,
4596                                                      TSEM_REG_TSEM_PRTY_STS_1);
4597                                        break;
4598                                case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4599                                        _print_next_block((*par_num)++, "XPB");
4600                                        _print_parity(bp, GRCBASE_XPB +
4601                                                          PB_REG_PB_PRTY_STS);
4602                                        break;
4603                                }
4604                        }
4605
4606                        /* Clear the bit */
4607                        sig &= ~cur_bit;
4608                }
4609        }
4610
4611        return res;
4612}
4613
4614static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, uint32_t sig,
4615                                            int *par_num, bool *global,
4616                                            bool print)
4617{
4618        uint32_t cur_bit;
4619        bool res;
4620        int i;
4621
4622        res = false;
4623
4624        for (i = 0; sig; i++) {
4625                cur_bit = (0x1UL << i);
4626                if (sig & cur_bit) {
4627                        res |= true; /* Each bit is a real error! */
4628                        switch (cur_bit) {
4629                        case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4630                                if (print) {
4631                                        _print_next_block((*par_num)++, "PBF");
4632                                        _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4633                                }
4634                                break;
4635                        case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4636                                if (print) {
4637                                        _print_next_block((*par_num)++, "QM");
4638                                        _print_parity(bp, QM_REG_QM_PRTY_STS);
4639                                }
4640                                break;
4641                        case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4642                                if (print) {
4643                                        _print_next_block((*par_num)++, "TM");
4644                                        _print_parity(bp, TM_REG_TM_PRTY_STS);
4645                                }
4646                                break;
4647                        case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4648                                if (print) {
4649                                        _print_next_block((*par_num)++, "XSDM");
4650                                        _print_parity(bp,
4651                                                      XSDM_REG_XSDM_PRTY_STS);
4652                                }
4653                                break;
4654                        case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4655                                if (print) {
4656                                        _print_next_block((*par_num)++, "XCM");
4657                                        _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4658                                }
4659                                break;
4660                        case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4661                                if (print) {
4662                                        _print_next_block((*par_num)++,
4663                                                          "XSEMI");
4664                                        _print_parity(bp,
4665                                                      XSEM_REG_XSEM_PRTY_STS_0);
4666                                        _print_parity(bp,
4667                                                      XSEM_REG_XSEM_PRTY_STS_1);
4668                                }
4669                                break;
4670                        case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4671                                if (print) {
4672                                        _print_next_block((*par_num)++,
4673                                                          "DOORBELLQ");
4674                                        _print_parity(bp,
4675                                                      DORQ_REG_DORQ_PRTY_STS);
4676                                }
4677                                break;
4678                        case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4679                                if (print) {
4680                                        _print_next_block((*par_num)++, "NIG");
4681                                        if (CHIP_IS_E1x(bp)) {
4682                                                _print_parity(bp,
4683                                                        NIG_REG_NIG_PRTY_STS);
4684                                        } else {
4685                                                _print_parity(bp,
4686                                                        NIG_REG_NIG_PRTY_STS_0);
4687                                                _print_parity(bp,
4688                                                        NIG_REG_NIG_PRTY_STS_1);
4689                                        }
4690                                }
4691                                break;
4692                        case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4693                                if (print)
4694                                        _print_next_block((*par_num)++,
4695                                                          "VAUX PCI CORE");
4696                                *global = true;
4697                                break;
4698                        case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4699                                if (print) {
4700                                        _print_next_block((*par_num)++,
4701                                                          "DEBUG");
4702                                        _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4703                                }
4704                                break;
4705                        case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4706                                if (print) {
4707                                        _print_next_block((*par_num)++, "USDM");
4708                                        _print_parity(bp,
4709                                                      USDM_REG_USDM_PRTY_STS);
4710                                }
4711                                break;
4712                        case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4713                                if (print) {
4714                                        _print_next_block((*par_num)++, "UCM");
4715                                        _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4716                                }
4717                                break;
4718                        case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4719                                if (print) {
4720                                        _print_next_block((*par_num)++,
4721                                                          "USEMI");
4722                                        _print_parity(bp,
4723                                                      USEM_REG_USEM_PRTY_STS_0);
4724                                        _print_parity(bp,
4725                                                      USEM_REG_USEM_PRTY_STS_1);
4726                                }
4727                                break;
4728                        case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4729                                if (print) {
4730                                        _print_next_block((*par_num)++, "UPB");
4731                                        _print_parity(bp, GRCBASE_UPB +
4732                                                          PB_REG_PB_PRTY_STS);
4733                                }
4734                                break;
4735                        case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4736                                if (print) {
4737                                        _print_next_block((*par_num)++, "CSDM");
4738                                        _print_parity(bp,
4739                                                      CSDM_REG_CSDM_PRTY_STS);
4740                                }
4741                                break;
4742                        case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4743                                if (print) {
4744                                        _print_next_block((*par_num)++, "CCM");
4745                                        _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4746                                }
4747                                break;
4748                        }
4749
4750                        /* Clear the bit */
4751                        sig &= ~cur_bit;
4752                }
4753        }
4754
4755        return res;
4756}
4757
4758static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, uint32_t sig,
4759                                            int *par_num, bool print)
4760{
4761        uint32_t cur_bit;
4762        bool res;
4763        int i;
4764
4765        res = false;
4766
4767        for (i = 0; sig; i++) {
4768                cur_bit = (0x1UL << i);
4769                if (sig & cur_bit) {
4770                        res = true; /* Each bit is a real error! */
4771                        if (print) {
4772                                switch (cur_bit) {
4773                                case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4774                                        _print_next_block((*par_num)++,
4775                                                          "CSEMI");
4776                                        _print_parity(bp,
4777                                                      CSEM_REG_CSEM_PRTY_STS_0);
4778                                        _print_parity(bp,
4779                                                      CSEM_REG_CSEM_PRTY_STS_1);
4780                                        break;
4781                                case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4782                                        _print_next_block((*par_num)++, "PXP");
4783                                        _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4784                                        _print_parity(bp,
4785                                                      PXP2_REG_PXP2_PRTY_STS_0);
4786                                        _print_parity(bp,
4787                                                      PXP2_REG_PXP2_PRTY_STS_1);
4788                                        break;
4789                                case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4790                                        _print_next_block((*par_num)++,
4791                                                          "PXPPCICLOCKCLIENT");
4792                                        break;
4793                                case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4794                                        _print_next_block((*par_num)++, "CFC");
4795                                        _print_parity(bp,
4796                                                      CFC_REG_CFC_PRTY_STS);
4797                                        break;
4798                                case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4799                                        _print_next_block((*par_num)++, "CDU");
4800                                        _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4801                                        break;
4802                                case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4803                                        _print_next_block((*par_num)++, "DMAE");
4804                                        _print_parity(bp,
4805                                                      DMAE_REG_DMAE_PRTY_STS);
4806                                        break;
4807                                case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4808                                        _print_next_block((*par_num)++, "IGU");
4809                                        if (CHIP_IS_E1x(bp))
4810                                                _print_parity(bp,
4811                                                        HC_REG_HC_PRTY_STS);
4812                                        else
4813                                                _print_parity(bp,
4814                                                        IGU_REG_IGU_PRTY_STS);
4815                                        break;
4816                                case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4817                                        _print_next_block((*par_num)++, "MISC");
4818                                        _print_parity(bp,
4819                                                      MISC_REG_MISC_PRTY_STS);
4820                                        break;
4821                                }
4822                        }
4823
4824                        /* Clear the bit */
4825                        sig &= ~cur_bit;
4826                }
4827        }
4828
4829        return res;
4830}
4831
4832static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, uint32_t sig,
4833                                            int *par_num, bool *global,
4834                                            bool print)
4835{
4836        bool res = false;
4837        uint32_t cur_bit;
4838        int i;
4839
4840        for (i = 0; sig; i++) {
4841                cur_bit = (0x1UL << i);
4842                if (sig & cur_bit) {
4843                        switch (cur_bit) {
4844                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4845                                if (print)
4846                                        _print_next_block((*par_num)++,
4847                                                          "MCP ROM");
4848                                *global = true;
4849                                res = true;
4850                                break;
4851                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4852                                if (print)
4853                                        _print_next_block((*par_num)++,
4854                                                          "MCP UMP RX");
4855                                *global = true;
4856                                res = true;
4857                                break;
4858                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4859                                if (print)
4860                                        _print_next_block((*par_num)++,
4861                                                          "MCP UMP TX");
4862                                *global = true;
4863                                res = true;
4864                                break;
4865                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4866                                if (print)
4867                                        _print_next_block((*par_num)++,
4868                                                          "MCP SCPAD");
4869                                /* clear latched SCPAD PARITY from MCP */
4870                                REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4871                                       1UL << 10);
4872                                break;
4873                        }
4874
4875                        /* Clear the bit */
4876                        sig &= ~cur_bit;
4877                }
4878        }
4879
4880        return res;
4881}
4882
4883static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, uint32_t sig,
4884                                            int *par_num, bool print)
4885{
4886        uint32_t cur_bit;
4887        bool res;
4888        int i;
4889
4890        res = false;
4891
4892        for (i = 0; sig; i++) {
4893                cur_bit = (0x1UL << i);
4894                if (sig & cur_bit) {
4895                        res = true; /* Each bit is a real error! */
4896                        if (print) {
4897                                switch (cur_bit) {
4898                                case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4899                                        _print_next_block((*par_num)++,
4900                                                          "PGLUE_B");
4901                                        _print_parity(bp,
4902                                                      PGLUE_B_REG_PGLUE_B_PRTY_STS);
4903                                        break;
4904                                case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4905                                        _print_next_block((*par_num)++, "ATC");
4906                                        _print_parity(bp,
4907                                                      ATC_REG_ATC_PRTY_STS);
4908                                        break;
4909                                }
4910                        }
4911                        /* Clear the bit */
4912                        sig &= ~cur_bit;
4913                }
4914        }
4915
4916        return res;
4917}
4918
4919static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4920                              uint32_t *sig)
4921{
4922        bool res = false;
4923
4924        if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4925            (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4926            (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4927            (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4928            (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4929                int par_num = 0;
4930                DP(NETIF_MSG_HW, "Parity error detected: HW block parity attention:\n"
4931                                 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4932                          sig[0] & HW_PRTY_ASSERT_SET_0,
4933                          sig[1] & HW_PRTY_ASSERT_SET_1,
4934                          sig[2] & HW_PRTY_ASSERT_SET_2,
4935                          sig[3] & HW_PRTY_ASSERT_SET_3,
4936                          sig[4] & HW_PRTY_ASSERT_SET_4);
4937                if (print)
4938                        netdev_err(bp->dev,
4939                                   "Parity errors detected in blocks: ");
4940                res |= bnx2x_check_blocks_with_parity0(bp,
4941                        sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4942                res |= bnx2x_check_blocks_with_parity1(bp,
4943                        sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4944                res |= bnx2x_check_blocks_with_parity2(bp,
4945                        sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4946                res |= bnx2x_check_blocks_with_parity3(bp,
4947                        sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4948                res |= bnx2x_check_blocks_with_parity4(bp,
4949                        sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4950
4951                if (print)
4952                        pr_cont("\n");
4953        }
4954
4955        return res;
4956}
4957
4958/**
4959 * bnx2x_chk_parity_attn - checks for parity attentions.
4960 *
4961 * @bp:         driver handle
4962 * @global:     true if there was a global attention
4963 * @print:      show parity attention in syslog
4964 */
4965bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4966{
4967        struct attn_route attn = { {0} };
4968        int port = BP_PORT(bp);
4969
4970        attn.sig[0] = REG_RD(bp,
4971                             MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4972                             port*4);
4973        attn.sig[1] = REG_RD(bp,
4974                             MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4975                             port*4);
4976        attn.sig[2] = REG_RD(bp,
4977                             MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4978                             port*4);
4979        attn.sig[3] = REG_RD(bp,
4980                             MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4981                             port*4);
4982        /* Since MCP attentions can't be disabled inside the block, we need to
4983         * read AEU registers to see whether they're currently disabled
4984         */
4985        attn.sig[3] &= ((REG_RD(bp,
4986                                !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4987                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
4988                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
4989                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
4990
4991        if (!CHIP_IS_E1x(bp))
4992                attn.sig[4] = REG_RD(bp,
4993                                     MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4994                                     port*4);
4995
4996        return bnx2x_parity_attn(bp, global, print, attn.sig);
4997}
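
/* Caller-side sketch (illustrative; the actual attention/recovery paths that
 * consume this live elsewhere in the driver):
 *
 *	bool global = false;
 *
 *	if (bnx2x_chk_parity_attn(bp, &global, true)) {
 *		// parity asserted: start the recovery flow, escalating to a
 *		// global (both-engine) reset when 'global' was set
 *	}
 */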
4998
4999static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, uint32_t attn)
5000{
5001        uint32_t val;
5002        if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5003
5004                val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5005                BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5006                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5007                        BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5008                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5009                        BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5010                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5011                        BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5012                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5013                        BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5014                if (val &
5015                    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5016                        BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5017                if (val &
5018                    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5019                        BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5020                if (val &