akaros/kern/drivers/net/bnx2x/bnx2x_sriov.c
   1/* bnx2x_sriov.c: Broadcom Everest network driver.
   2 *
   3 * Copyright 2009-2013 Broadcom Corporation
   4 *
   5 * Unless you and Broadcom execute a separate written software license
   6 * agreement governing use of this software, this software is licensed to you
   7 * under the terms of the GNU General Public License version 2, available
   8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
   9 *
  10 * Notwithstanding the above, under no circumstances may you combine this
  11 * software in any way with any other Broadcom software provided under a
  12 * license other than the GPL, without Broadcom's express prior written
  13 * consent.
  14 *
  15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  16 * Written by: Shmulik Ravid
  17 *             Ariel Elior <ariel.elior@qlogic.com>
  18 *
  19 */
  20#include <linux_compat.h>
  21
  22#include "bnx2x.h"
  23#include "bnx2x_init.h"
  24#include "bnx2x_cmn.h"
  25#include "bnx2x_sp.h"
  26
  27static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
  28                            struct bnx2x_virtf **vf,
  29                            struct pf_vf_bulletin_content **bulletin,
  30                            bool test_queue);
  31
  32/* General service functions */
  33static void storm_memset_vf_to_pf(struct bnx2x *bp, uint16_t abs_fid,
  34                                         uint16_t pf_id)
  35{
  36        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
  37                pf_id);
  38        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
  39                pf_id);
  40        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
  41                pf_id);
  42        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
  43                pf_id);
  44}
  45
  46static void storm_memset_func_en(struct bnx2x *bp, uint16_t abs_fid,
  47                                        uint8_t enable)
  48{
  49        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
  50                enable);
  51        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
  52                enable);
  53        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
  54                enable);
  55        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
  56                enable);
  57}
  58
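/* Map an absolute VF id to this PF's VF array index; callers treat an index
 * that is >= BNX2X_NR_VIRTFN() as "not found".
 */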
  59int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, uint16_t abs_vfid)
  60{
  61        int idx;
  62
  63        for_each_vf(bp, idx)
  64                if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
  65                        break;
  66        return idx;
  67}
  68
  69static
  70struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, uint16_t abs_vfid)
  71{
  72        uint16_t idx =  (uint16_t)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
  73        return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
  74}
  75
  76static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
  77                                uint8_t igu_sb_id, uint8_t segment,
  78                                uint16_t index, uint8_t op,
  79                                uint8_t update)
  80{
  81        /* acking a VF sb through the PF - use the GRC */
  82        uint32_t ctl;
  83        uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
  84        uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
  85        uint32_t func_encode = vf->abs_vfid;
  86        uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
  87        struct igu_regular cmd_data = {0};
  88
  89        cmd_data.sb_id_and_flags =
  90                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
  91                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
  92                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
  93                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));
  94
  95        ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
  96              func_encode << IGU_CTRL_REG_FID_SHIFT             |
  97              IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
  98
  99        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 100           cmd_data.sb_id_and_flags, igu_addr_data);
 101        REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
 102        bus_wmb();
 103        cmb();
 104
 105        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 106           ctl, igu_addr_ctl);
 107        REG_WR(bp, igu_addr_ctl, ctl);
 108        bus_wmb();
 109        cmb();
 110}
 111
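/* Check that the VF's leading queue slowpath objects have been initialized,
 * logging at the requested severity when they have not.
 */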
 112static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
 113                                       struct bnx2x_virtf *vf,
 114                                       bool print_err)
 115{
 116        if (!bnx2x_leading_vfq(vf, sp_initialized)) {
 117                if (print_err)
 118                        BNX2X_ERR("Slowpath objects not yet initialized!\n");
 119                else
 120                        DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
 121                return false;
 122        }
 123        return true;
 124}
 125
 126/* VFOP operations states */
 127void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 128                              struct bnx2x_queue_init_params *init_params,
 129                              struct bnx2x_queue_setup_params *setup_params,
 130                              uint16_t q_idx, uint16_t sb_idx)
 131{
 132        DP(BNX2X_MSG_IOV,
 133           "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
 134           vf->abs_vfid,
 135           q_idx,
 136           sb_idx,
 137           init_params->tx.sb_cq_index,
 138           init_params->tx.hc_rate,
 139           setup_params->flags,
 140           setup_params->txq_params.traffic_type);
 141}
 142
 143void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 144                            struct bnx2x_queue_init_params *init_params,
 145                            struct bnx2x_queue_setup_params *setup_params,
 146                            uint16_t q_idx, uint16_t sb_idx)
 147{
 148        struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
 149
 150        DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
 151           "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
 152           vf->abs_vfid,
 153           q_idx,
 154           sb_idx,
 155           init_params->rx.sb_cq_index,
 156           init_params->rx.hc_rate,
 157           setup_params->gen_params.mtu,
 158           rxq_params->buf_sz,
 159           rxq_params->sge_buf_sz,
 160           rxq_params->max_sges_pkt,
 161           rxq_params->tpa_agg_sz,
 162           setup_params->flags,
 163           rxq_params->drop_flags,
 164           rxq_params->cache_line_log);
 165}
 166
 167void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
 168                           struct bnx2x_virtf *vf,
 169                           struct bnx2x_vf_queue *q,
 170                           struct bnx2x_vf_queue_construct_params *p,
 171                           unsigned long q_type)
 172{
 173        struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
 174        struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
 175
 176        /* INIT */
 177
 178        /* Enable host coalescing in the transition to INIT state */
 179        if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
 180                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
 181
 182        if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
 183                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
 184
 185        /* FW SB ID */
 186        init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 187        init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 188
 189        /* context */
 190        init_p->cxts[0] = q->cxt;
 191
 192        /* SETUP */
 193
 194        /* Setup-op general parameters */
 195        setup_p->gen_params.spcl_id = vf->sp_cl_id;
 196        setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
 197        setup_p->gen_params.fp_hsi = vf->fp_hsi;
 198
 199        /* Setup-op pause params:
 200         * Nothing to do, the pause thresholds are set by default to 0 which
 201         * effectively turns off the feature for this queue. We don't want
  202         * one queue (VF) to interfere with another queue (another VF)
 203         */
 204        if (vf->cfg_flags & VF_CFG_FW_FC)
 205                BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
 206                          vf->abs_vfid);
 207        /* Setup-op flags:
 208         * collect statistics, zero statistics, local-switching, security,
 209         * OV for Flex10, RSS and MCAST for leading
 210         */
 211        if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
 212                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
 213
 214        /* for VFs, enable tx switching, bd coherency, and mac address
 215         * anti-spoofing
 216         */
 217        __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
 218        __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
 219        __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
 220
 221        /* Setup-op rx parameters */
 222        if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
 223                struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
 224
 225                rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
 226                rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 227                rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
 228
 229                if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
 230                        rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
 231        }
 232
 233        /* Setup-op tx parameters */
 234        if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
 235                setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
 236                setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 237        }
 238}
 239
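/* Bring up a single VF queue: run the INIT and SETUP ramrods prepared in
 * @qctor and ack the queue's status block to enable its interrupt. Returns
 * early (success) if the queue is already active.
 */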
 240static int bnx2x_vf_queue_create(struct bnx2x *bp,
 241                                 struct bnx2x_virtf *vf, int qid,
 242                                 struct bnx2x_vf_queue_construct_params *qctor)
 243{
 244        struct bnx2x_queue_state_params *q_params;
 245        int rc = 0;
 246
 247        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 248
 249        /* Prepare ramrod information */
 250        q_params = &qctor->qstate;
 251        q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 252        set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
 253
 254        if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
 255            BNX2X_Q_LOGICAL_STATE_ACTIVE) {
 256                DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
 257                goto out;
 258        }
 259
 260        /* Run Queue 'construction' ramrods */
 261        q_params->cmd = BNX2X_Q_CMD_INIT;
 262        rc = bnx2x_queue_state_change(bp, q_params);
 263        if (rc)
 264                goto out;
 265
 266        memcpy(&q_params->params.setup, &qctor->prep_qsetup,
 267               sizeof(struct bnx2x_queue_setup_params));
 268        q_params->cmd = BNX2X_Q_CMD_SETUP;
 269        rc = bnx2x_queue_state_change(bp, q_params);
 270        if (rc)
 271                goto out;
 272
 273        /* enable interrupts */
 274        bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
 275                            USTORM_ID, 0, IGU_INT_ENABLE, 0);
 276out:
 277        return rc;
 278}
 279
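/* Tear down a single VF queue by running the HALT, TERMINATE and CFC_DEL
 * ramrods (skipped if the queue is already stopped), then clear its CDU
 * context fields.
 */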
 280static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
 281                                  int qid)
 282{
 283        enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
 284                                       BNX2X_Q_CMD_TERMINATE,
 285                                       BNX2X_Q_CMD_CFC_DEL};
 286        struct bnx2x_queue_state_params q_params;
 287        int rc, i;
 288
 289        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 290
 291        /* Prepare ramrod information */
 292        memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
 293        q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 294        set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 295
 296        if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
 297            BNX2X_Q_LOGICAL_STATE_STOPPED) {
 298                DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
 299                goto out;
 300        }
 301
 302        /* Run Queue 'destruction' ramrods */
 303        for (i = 0; i < ARRAY_SIZE(cmds); i++) {
 304                q_params.cmd = cmds[i];
 305                rc = bnx2x_queue_state_change(bp, &q_params);
 306                if (rc) {
 307                        BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
 308                        return rc;
 309                }
 310        }
 311out:
 312        /* Clean Context */
 313        if (bnx2x_vfq(vf, qid, cxt)) {
 314                bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
 315                bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
 316        }
 317
 318        return 0;
 319}
 320
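/* Record one IGU CAM entry (status block) as belonging to the given VF:
 * remember the first VF entry of this PF, the VF's base SB id, and bump the
 * per-VF and PF-wide SB counters.
 */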
 321static void
 322bnx2x_vf_set_igu_info(struct bnx2x *bp, uint8_t igu_sb_id, uint8_t abs_vfid)
 323{
 324        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 325        if (vf) {
 326                /* the first igu entry belonging to VFs of this PF */
 327                if (!BP_VFDB(bp)->first_vf_igu_entry)
 328                        BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
 329
 330                /* the first igu entry belonging to this VF */
 331                if (!vf_sb_count(vf))
 332                        vf->igu_base_id = igu_sb_id;
 333
 334                ++vf_sb_count(vf);
 335                ++vf->sb_count;
 336        }
 337        BP_VFDB(bp)->vf_sbs_pool++;
 338}
 339
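/* Count the classification rules currently held by @obj and store the result
 * in @counter; used to keep the per-queue vlan credit counter in sync.
 */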
 340static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
 341                                        struct bnx2x_vlan_mac_obj *obj,
 342                                        atomic_t *counter)
 343{
 344        struct list_head *pos;
 345        int read_lock;
 346        int cnt = 0;
 347
 348        read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
 349        if (read_lock)
 350                DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
 351
 352        list_for_each(pos, &obj->head)
 353                cnt++;
 354
 355        if (!read_lock)
 356                bnx2x_vlan_mac_h_read_unlock(bp, obj);
 357
 358        atomic_set(counter, cnt);
 359}
 360
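/* Delete all MAC or VLAN classification rules of a VF queue via a single
 * delete_all ramrod; when clearing VLANs, the queue's vlan counter is zeroed
 * as well.
 */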
 361static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
 362                                   int qid, bool drv_only, bool mac)
 363{
 364        struct bnx2x_vlan_mac_ramrod_params ramrod;
 365        int rc;
 366
 367        DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
 368           mac ? "MACs" : "VLANs");
 369
 370        /* Prepare ramrod params */
 371        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
 372        if (mac) {
 373                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 374                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 375        } else {
 376                set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
 377                        &ramrod.user_req.vlan_mac_flags);
 378                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 379        }
 380        ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
 381
 382        set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 383        if (drv_only)
 384                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
 385        else
 386                set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 387
 388        /* Start deleting */
 389        rc = ramrod.vlan_mac_obj->delete_all(bp,
 390                                             ramrod.vlan_mac_obj,
 391                                             &ramrod.user_req.vlan_mac_flags,
 392                                             &ramrod.ramrod_flags);
 393        if (rc) {
 394                BNX2X_ERR("Failed to delete all %s\n",
 395                          mac ? "MACs" : "VLANs");
 396                return rc;
 397        }
 398
 399        /* Clear the vlan counters */
 400        if (!mac)
 401                atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
 402
 403        return 0;
 404}
 405
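/* Add or remove a single MAC/VLAN classification rule for a VF queue. VLAN
 * additions are first checked against the VF's vlan filter credits, and the
 * queue's vlan counter is refreshed after any VLAN change.
 */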
 406static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 407                                    struct bnx2x_virtf *vf, int qid,
 408                                    struct bnx2x_vf_mac_vlan_filter *filter,
 409                                    bool drv_only)
 410{
 411        struct bnx2x_vlan_mac_ramrod_params ramrod;
 412        int rc;
 413
 414        DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
 415           vf->abs_vfid, filter->add ? "Adding" : "Deleting",
 416           filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
 417
 418        /* Prepare ramrod params */
 419        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
 420        if (filter->type == BNX2X_VF_FILTER_VLAN) {
 421                set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
 422                        &ramrod.user_req.vlan_mac_flags);
 423                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 424                ramrod.user_req.u.vlan.vlan = filter->vid;
 425        } else {
 426                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 427                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 428                memcpy(&ramrod.user_req.u.mac.mac, filter->mac, Eaddrlen);
 429        }
 430        ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
 431                                            BNX2X_VLAN_MAC_DEL;
 432
 433        /* Verify there are available vlan credits */
 434        if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
 435            (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
 436             vf_vlan_rules_cnt(vf))) {
 437                BNX2X_ERR("No credits for vlan [%d >= %d]\n",
 438                          atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
 439                          vf_vlan_rules_cnt(vf));
 440                return -ENOMEM;
 441        }
 442
 443        set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 444        if (drv_only)
 445                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
 446        else
 447                set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 448
 449        /* Add/Remove the filter */
 450        rc = bnx2x_config_vlan_mac(bp, &ramrod);
 451        if (rc && rc != -EEXIST) {
 452                BNX2X_ERR("Failed to %s %s\n",
 453                          filter->add ? "add" : "delete",
 454                          filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
 455                                                                "VLAN");
 456                return rc;
 457        }
 458
 459        /* Update the vlan counters */
 460        if (filter->type == BNX2X_VF_FILTER_VLAN)
 461                bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
 462                                     &bnx2x_vfq(vf, qid, vlan_count));
 463
 464        return 0;
 465}
 466
 467int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
 468                                  struct bnx2x_vf_mac_vlan_filters *filters,
 469                                  int qid, bool drv_only)
 470{
 471        int rc = 0, i;
 472
 473        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 474
 475        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
 476                return -EINVAL;
 477
 478        /* Prepare ramrod params */
 479        for (i = 0; i < filters->count; i++) {
 480                rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
 481                                              &filters->filters[i], drv_only);
 482                if (rc)
 483                        break;
 484        }
 485
 486        /* Rollback if needed */
 487        if (i != filters->count) {
 488                BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
 489                          i, filters->count + 1);
 490                while (--i >= 0) {
 491                        filters->filters[i].add = !filters->filters[i].add;
 492                        bnx2x_vf_mac_vlan_config(bp, vf, qid,
 493                                                 &filters->filters[i],
 494                                                 drv_only);
 495                }
 496        }
 497
 498        /* It's our responsibility to free the filters */
 499        kfree(filters);
 500
 501        return rc;
 502}
 503
 504int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
 505                         struct bnx2x_vf_queue_construct_params *qctor)
 506{
 507        int rc;
 508
 509        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 510
 511        rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
 512        if (rc)
 513                goto op_err;
 514
 515        /* Configure vlan0 for leading queue */
 516        if (!qid) {
 517                struct bnx2x_vf_mac_vlan_filter filter;
 518
 519                memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
 520                filter.type = BNX2X_VF_FILTER_VLAN;
 521                filter.add = true;
 522                filter.vid = 0;
 523                rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
 524                if (rc)
 525                        goto op_err;
 526        }
 527
 528        /* Schedule the configuration of any pending vlan filters */
 529        vf->cfg_flags |= VF_CFG_VLAN;
 530        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 531                               BNX2X_MSG_IOV);
 532        return 0;
 533op_err:
 534        BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
 535        return rc;
 536}
 537
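/* FLR-time cleanup of one VF queue: drop its MAC/VLAN rules (leading queue
 * only, driver-side only) and terminate the queue if it is not already in
 * the RESET state.
 */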
 538static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
 539                               int qid)
 540{
 541        int rc;
 542
 543        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 544
 545        /* If needed, clean the filtering data base */
 546        if ((qid == LEADING_IDX) &&
 547            bnx2x_validate_vf_sp_objs(bp, vf, false)) {
 548                rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
 549                if (rc)
 550                        goto op_err;
 551                rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
 552                if (rc)
 553                        goto op_err;
 554        }
 555
 556        /* Terminate queue */
 557        if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
 558                struct bnx2x_queue_state_params qstate;
 559
 560                memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
 561                qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 562                qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
 563                qstate.cmd = BNX2X_Q_CMD_TERMINATE;
 564                set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
 565                rc = bnx2x_queue_state_change(bp, &qstate);
 566                if (rc)
 567                        goto op_err;
 568        }
 569
 570        return 0;
 571op_err:
 572        BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
 573        return rc;
 574}
 575
 576int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
 577                   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
 578{
 579        struct bnx2x_mcast_list_elem *mc = NULL;
 580        struct bnx2x_mcast_ramrod_params mcast;
 581        int rc, i;
 582
 583        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 584
 585        /* Prepare Multicast command */
 586        memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
 587        mcast.mcast_obj = &vf->mcast_obj;
 588        if (drv_only)
 589                set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
 590        else
 591                set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
 592        if (mc_num) {
 593                mc = kzmalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
 594                              MEM_WAIT);
 595                if (!mc) {
  596                        BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
 597                        return -ENOMEM;
 598                }
 599        }
 600
 601        /* clear existing mcasts */
 602        mcast.mcast_list_len = vf->mcast_list_len;
 603        vf->mcast_list_len = mc_num;
 604        rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
 605        if (rc) {
 606                BNX2X_ERR("Failed to remove multicasts\n");
 607                kfree(mc);
 608                return rc;
 609        }
 610
 611        /* update mcast list on the ramrod params */
 612        if (mc_num) {
 613                INIT_LIST_HEAD(&mcast.mcast_list);
 614                for (i = 0; i < mc_num; i++) {
 615                        mc[i].mac = mcasts[i];
 616                        list_add_tail(&mc[i].link,
 617                                      &mcast.mcast_list);
 618                }
 619
 620                /* add new mcasts */
 621                mcast.mcast_list_len = mc_num;
 622                rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
 623                if (rc)
  624                        BNX2X_ERR("Failed to add multicasts\n");
 625                kfree(mc);
 626        }
 627
 628        return rc;
 629}
 630
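/* Fill an rx_mode ramrod for a VF queue: client/cid, identical RX and TX
 * accept flags, and the VF's slowpath data area.
 */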
 631static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, uint8_t qid,
 632                                  struct bnx2x_rx_mode_ramrod_params *ramrod,
 633                                  struct bnx2x_virtf *vf,
 634                                  unsigned long accept_flags)
 635{
 636        struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
 637
 638        memset(ramrod, 0, sizeof(*ramrod));
 639        ramrod->cid = vfq->cid;
 640        ramrod->cl_id = vfq_cl_id(vf, vfq);
 641        ramrod->rx_mode_obj = &bp->rx_mode_obj;
 642        ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
 643        ramrod->rx_accept_flags = accept_flags;
 644        ramrod->tx_accept_flags = accept_flags;
 645        ramrod->pstate = &vf->filter_state;
 646        ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
 647
 648        set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
 649        set_bit(RAMROD_RX, &ramrod->ramrod_flags);
 650        set_bit(RAMROD_TX, &ramrod->ramrod_flags);
 651
 652        ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
 653        ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
 654}
 655
 656int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
 657                    int qid, unsigned long accept_flags)
 658{
 659        struct bnx2x_rx_mode_ramrod_params ramrod;
 660
 661        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 662
 663        bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
 664        set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 665        vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
 666        return bnx2x_config_rx_mode(bp, &ramrod);
 667}
 668
 669int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
 670{
 671        int rc;
 672
 673        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 674
 675        /* Remove all classification configuration for leading queue */
 676        if (qid == LEADING_IDX) {
 677                rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
 678                if (rc)
 679                        goto op_err;
 680
 681                /* Remove filtering if feasible */
 682                if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
 683                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
 684                                                     false, false);
 685                        if (rc)
 686                                goto op_err;
 687                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
 688                                                     false, true);
 689                        if (rc)
 690                                goto op_err;
 691                        rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
 692                        if (rc)
 693                                goto op_err;
 694                }
 695        }
 696
 697        /* Destroy queue */
 698        rc = bnx2x_vf_queue_destroy(bp, vf, qid);
 699        if (rc)
 700                goto op_err;
 701        return rc;
 702op_err:
 703        BNX2X_ERR("vf[%d:%d] error: rc %d\n",
 704                  vf->abs_vfid, qid, rc);
 705        return rc;
 706}
 707
  708/* VF enable primitives
  709 * When pretend is required, the caller is responsible for calling
  710 * pretend prior to calling these routines.
  711 */
 712
 713/* internal vf enable - until vf is enabled internally all transactions
 714 * are blocked. This routine should always be called last with pretend.
 715 */
 716static void bnx2x_vf_enable_internal(struct bnx2x *bp, uint8_t enable)
 717{
 718        REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
 719}
 720
 721/* clears vf error in all semi blocks */
 722static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, uint8_t abs_vfid)
 723{
 724        REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
 725        REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
 726        REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
 727        REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
 728}
 729
 730static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, uint8_t abs_vfid)
 731{
 732        uint32_t was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
 733        uint32_t was_err_reg = 0;
 734
 735        switch (was_err_group) {
 736        case 0:
 737            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
 738            break;
 739        case 1:
 740            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
 741            break;
 742        case 2:
 743            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
 744            break;
 745        case 3:
 746            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
 747            break;
 748        }
 749        REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
 750}
 751
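/* Reset the VF's IGU state: pretending to be the VF, clear its masks and PBA
 * and enable the function in IGU_REG_VF_CONFIGURATION; then clear the
 * producer memory and state machine of each of its status blocks, leaving
 * their interrupts disabled.
 */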
 752static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
 753{
 754        int i;
 755        uint32_t val;
 756
 757        /* Set VF masks and configuration - pretend */
 758        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 759
 760        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
 761        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
 762        REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
 763        REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
 764        REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
 765        REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
 766
 767        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
 768        val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
 769        if (vf->cfg_flags & VF_CFG_INT_SIMD)
 770                val |= IGU_VF_CONF_SINGLE_ISR_EN;
 771        val &= ~IGU_VF_CONF_PARENT_MASK;
 772        val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
 773        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
 774
 775        DP(BNX2X_MSG_IOV,
 776           "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
 777           vf->abs_vfid, val);
 778
 779        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 780
 781        /* iterate over all queues, clear sb consumer */
 782        for (i = 0; i < vf_sb_count(vf); i++) {
 783                uint8_t igu_sb_id = vf_igu_sb(vf, i);
 784
 785                /* zero prod memory */
 786                REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
 787
 788                /* clear sb state machine */
 789                bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
 790                                       false /* VF */);
 791
 792                /* disable + update */
 793                bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
 794                                    IGU_INT_DISABLE, 1);
 795        }
 796}
 797
 798void bnx2x_vf_enable_access(struct bnx2x *bp, uint8_t abs_vfid)
 799{
 800        /* set the VF-PF association in the FW */
 801        storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
 802        storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
 803
 804        /* clear vf errors*/
 805        bnx2x_vf_semi_clear_err(bp, abs_vfid);
 806        bnx2x_vf_pglue_clear_err(bp, abs_vfid);
 807
 808        /* internal vf-enable - pretend */
 809        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 810        DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
 811        bnx2x_vf_enable_internal(bp, true);
 812        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 813}
 814
 815static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
 816{
  817        /* Reset the vf in the IGU - interrupts are still disabled */
 818        bnx2x_vf_igu_reset(bp, vf);
 819
 820        /* pretend to enable the vf with the PBF */
 821        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 822        REG_WR(bp, PBF_REG_DISABLE_VF, 0);
 823        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 824}
 825
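/* Returns true if the VF's PCI function still has a pending PCIe
 * transaction.
 */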
 826static uint8_t bnx2x_vf_is_pcie_pending(struct bnx2x *bp, uint8_t abs_vfid)
 827{
 828        struct pci_device *dev;
 829        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 830
 831        if (!vf)
 832                return false;
 833
 834        dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
 835        if (dev)
 836                return bnx2x_is_pcie_pending(dev);
 837        return false;
 838}
 839
 840int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, uint8_t abs_vfid)
 841{
 842        /* Verify no pending pci transactions */
 843        if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
 844                BNX2X_ERR("PCIE Transactions still pending\n");
 845
 846        return 0;
 847}
 848
 849static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
 850                                          struct bnx2x_virtf *vf,
 851                                          int new)
 852{
 853        int num = vf_vlan_rules_cnt(vf);
 854        int diff = new - num;
 855        bool rc = true;
 856
 857        DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
 858           vf->abs_vfid, new, num);
 859
 860        if (diff > 0)
 861                rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
 862        else if (diff < 0)
 863                rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
 864
 865        if (rc)
 866                vf_vlan_rules_cnt(vf) = new;
 867        else
 868                DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
 869                   vf->abs_vfid);
 870}
 871
 872/* must be called after the number of PF queues and the number of VFs are
 873 * both known
 874 */
 875static void
 876bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 877{
 878        struct vf_pf_resc_request *resc = &vf->alloc_resc;
 879        uint16_t vlan_count = 0;
 880
 881        /* will be set only during VF-ACQUIRE */
 882        resc->num_rxqs = 0;
 883        resc->num_txqs = 0;
 884
 885        /* no credit calculations for macs (just yet) */
 886        resc->num_mac_filters = 1;
 887
 888        /* divvy up vlan rules */
 889        bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
 890        vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
 891        vlan_count = 1 << LOG2_UP(vlan_count);
 892        bnx2x_iov_re_set_vlan_filters(bp, vf,
 893                                      vlan_count / BNX2X_NR_VIRTFN(bp));
 894
 895        /* no real limitation */
 896        resc->num_mc_filters = 0;
 897
 898        /* num_sbs already set */
 899        resc->num_sbs = vf->sb_count;
 900}
 901
 902/* FLR routines: */
 903static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 904{
 905        /* reset the state variables */
 906        bnx2x_iov_static_resc(bp, vf);
 907        vf->state = VF_FREE;
 908}
 909
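/* HW side of the FLR flow: wait for the VF's DORQ usage counter to drain,
 * issue the FW final cleanup command, and verify the TX path is flushed.
 */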
 910static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
 911{
 912        uint32_t poll_cnt = bnx2x_flr_clnup_poll_count(bp);
 913
 914        /* DQ usage counter */
 915        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 916        bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
 917                                        "DQ VF usage counter timed out",
 918                                        poll_cnt);
 919        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 920
 921        /* FW cleanup command - poll for the results */
 922        if (bnx2x_send_final_clnup(bp, (uint8_t)FW_VF_HANDLE(vf->abs_vfid),
 923                                   poll_cnt))
 924                BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
 925
 926        /* verify TX hw is flushed */
 927        bnx2x_tx_hw_flushed(bp, poll_cnt);
 928}
 929
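/* Per-VF FLR cleanup: FLR each of the VF's queues, remove its multicasts,
 * run the HW cleanup, release the VF's resources and re-enable its mailbox.
 */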
 930static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 931{
 932        int rc, i;
 933
 934        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 935
 936        /* the cleanup operations are valid if and only if the VF
 937         * was first acquired.
 938         */
 939        for (i = 0; i < vf_rxq_count(vf); i++) {
 940                rc = bnx2x_vf_queue_flr(bp, vf, i);
 941                if (rc)
 942                        goto out;
 943        }
 944
 945        /* remove multicasts */
 946        bnx2x_vf_mcast(bp, vf, NULL, 0, true);
 947
 948        /* dispatch final cleanup and wait for HW queues to flush */
 949        bnx2x_vf_flr_clnup_hw(bp, vf);
 950
 951        /* release VF resources */
 952        bnx2x_vf_free_resc(bp, vf);
 953
 954        /* re-open the mailbox */
 955        bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
 956        return;
 957out:
 958        BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
 959                  vf->abs_vfid, i, rc);
 960}
 961
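/* Run the FLR flow for every VF marked for cleanup (VF_RESET +
 * flr_clnup_stage), each under its VF-PF channel lock, then acknowledge the
 * whole set to the MCP.
 */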
 962static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
 963{
 964        struct bnx2x_virtf *vf;
 965        int i;
 966
 967        for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
 968                /* VF should be RESET & in FLR cleanup states */
 969                if (bnx2x_vf(bp, i, state) != VF_RESET ||
 970                    !bnx2x_vf(bp, i, flr_clnup_stage))
 971                        continue;
 972
 973                DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
 974                   i, BNX2X_NR_VIRTFN(bp));
 975
 976                vf = BP_VF(bp, i);
 977
 978                /* lock the vf pf channel */
 979                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
 980
 981                /* invoke the VF FLR SM */
 982                bnx2x_vf_flr(bp, vf);
 983
 984                /* mark the VF to be ACKED and continue */
 985                vf->flr_clnup_stage = false;
 986                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
 987        }
 988
  989        /* Acknowledge the handled VFs.
  990         * We acknowledge all the vfs for which an flr was requested, even
  991         * those we never opened, since the mcp will interrupt us
  992         * immediately again if we only ack some of the bits, resulting in
  993         * an endless loop. This can happen, for example, in KVM where an
  994         * 'all ones' flr request is sometimes given by the hypervisor.
  995         */
 996        DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
 997           bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
 998        for (i = 0; i < FLRD_VFS_DWORDS; i++)
 999                SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
1000                          bp->vfdb->flrd_vfs[i]);
1001
1002        bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
1003
1004        /* clear the acked bits - better yet if the MCP implemented
1005         * write to clear semantics
1006         */
1007        for (i = 0; i < FLRD_VFS_DWORDS; i++)
1008                SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
1009}
1010
1011void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
1012{
1013        int i;
1014
1015        /* Read FLR'd VFs */
1016        for (i = 0; i < FLRD_VFS_DWORDS; i++)
1017                bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
1018
1019        DP(BNX2X_MSG_MCP,
1020           "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
1021           bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1022
1023        for_each_vf(bp, i) {
1024                struct bnx2x_virtf *vf = BP_VF(bp, i);
1025                uint32_t reset = 0;
1026
1027                if (vf->abs_vfid < 32)
1028                        reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
1029                else
1030                        reset = bp->vfdb->flrd_vfs[1] &
1031                                (1 << (vf->abs_vfid - 32));
1032
1033                if (reset) {
1034                        /* set as reset and ready for cleanup */
1035                        vf->state = VF_RESET;
1036                        vf->flr_clnup_stage = true;
1037
1038                        DP(BNX2X_MSG_IOV,
1039                           "Initiating Final cleanup for VF %d\n",
1040                           vf->abs_vfid);
1041                }
1042        }
1043
1044        /* do the FLR cleanup for all marked VFs */
1045        bnx2x_vf_flr_clnup(bp);
1046}
1047
1048/* IOV global initialization routines  */
1049void bnx2x_iov_init_dq(struct bnx2x *bp)
1050{
1051        if (!IS_SRIOV(bp))
1052                return;
1053
1054        /* Set the DQ such that the CID reflects the abs_vfid */
1055        REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1056        REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, LOG2_UP(BNX2X_MAX_NUM_OF_VFS));
1057
1058        /* Set the VFs' starting CID. If it is > 0, the preceding CIDs
1059         * belong to the PF L2 queues.
1060         */
1061        REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1062
1063        /* The VF window size is the log2 of the max number of CIDs per VF */
1064        REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1065
1066        /* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
1067         * the PF doorbell size, although the two are independent.
1068         */
1069        REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
1070
1071        /* No security checks for now -
1072         * configure single rule (out of 16) mask = 0x1, value = 0x0,
1073         * CID range 0 - 0x1ffff
1074         */
1075        REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1076        REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1077        REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1078        REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1079
1080        /* set the VF doorbell threshold. This threshold represents the
1081         * number of doorbells allowed in the main DORQ fifo for a specific VF.
1082         */
1083        REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
1084}
1085
1086void bnx2x_iov_init_dmae(struct bnx2x *bp)
1087{
1088        if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
1089                REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1090}
1091
1092static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1093{
1094        struct pci_device *dev = bp->pdev;
1095        struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1096
1097        return dev->bus->number + ((dev->devfn + iov->offset +
1098                                    iov->stride * vfid) >> 8);
1099}
1100
1101static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1102{
1103        struct pci_device *dev = bp->pdev;
1104        struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1105
1106        return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1107}
1108
1109static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1110{
1111        int i, n;
1112        struct pci_device *dev = bp->pdev;
1113        struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1114
1115        for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1116                uint64_t start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1117                uint32_t size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1118
1119                size /= iov->total;
1120                vf->bars[n].bar = start + size * vf->abs_vfid;
1121                vf->bars[n].size = size;
1122        }
1123}
1124
1125static int bnx2x_ari_enabled(struct pci_device *dev)
1126{
1127        return dev->bus->self && dev->bus->self->ari_enabled;
1128}
1129
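/* Scan the IGU CAM and record every valid entry owned by one of this PF's
 * VFs; returns the total number of status blocks found for the VF pool.
 */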
1130static int
1131bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1132{
1133        int sb_id;
1134        uint32_t val;
1135        uint8_t fid, current_pf = 0;
1136
1137        /* IGU in normal mode - read CAM */
1138        for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1139                val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1140                if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1141                        continue;
1142                fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1143                if (fid & IGU_FID_ENCODE_IS_PF)
1144                        current_pf = fid & IGU_FID_PF_NUM_MASK;
1145                else if (current_pf == BP_FUNC(bp))
1146                        bnx2x_vf_set_igu_info(bp, sb_id,
1147                                              (fid & IGU_FID_VF_NUM_MASK));
1148                DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1149                   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1150                   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1151                   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1152                   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1153        }
1154        DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
1155        return BP_VFDB(bp)->vf_sbs_pool;
1156}
1157
1158static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1159{
1160        if (bp->vfdb) {
1161                kfree(bp->vfdb->vfqs);
1162                kfree(bp->vfdb->vfs);
1163                kfree(bp->vfdb);
1164        }
1165        bp->vfdb = NULL;
1166}
1167
1168static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1169{
1170        int pos;
1171        struct pci_device *dev = bp->pdev;
1172
1173        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1174        if (!pos) {
1175                BNX2X_ERR("failed to find SRIOV capability in device\n");
1176                return -ENODEV;
1177        }
1178
1179        iov->pos = pos;
1180        DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1181        pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1182        pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1183        pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1184        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1185        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1186        pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1187        pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1188        pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1189
1190        return 0;
1191}
1192
1193static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1194{
1195        uint32_t val;
1196
1197        /* read the SRIOV capability structure
1198         * The fields can be read via configuration read or
1199         * directly from the device (starting at offset PCICFG_OFFSET)
1200         */
1201        if (bnx2x_sriov_pci_cfg_info(bp, iov))
1202                return -ENODEV;
1203
1204        /* get the number of SRIOV bars */
1205        iov->nres = 0;
1206
1207        /* read the first_vfid */
1208        val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1209        iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1210                               * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1211
1212        DP(BNX2X_MSG_IOV,
1213           "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1214           BP_FUNC(bp),
1215           iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1216           iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1217
1218        return 0;
1219}
1220
1221/* must be called after PF bars are mapped */
1222int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1223                       int num_vfs_param)
1224{
1225        int err, i;
1226        struct bnx2x_sriov *iov;
1227        struct pci_device *dev = bp->pdev;
1228
1229        bp->vfdb = NULL;
1230
1231        /* verify this is a PF */
1232        if (IS_VF(bp))
1233                return 0;
1234
1235        /* verify sriov capability is present in configuration space */
1236        if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1237                return 0;
1238
1239        /* verify chip revision */
1240        if (CHIP_IS_E1x(bp))
1241                return 0;
1242
1243        /* check if SRIOV support is turned off */
1244        if (!num_vfs_param)
1245                return 0;
1246
1247        /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1248        if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1249                BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1250                          BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1251                return 0;
1252        }
1253
1254        /* SRIOV can be enabled only with MSIX */
1255        if (int_mode_param == BNX2X_INT_MODE_MSI ||
1256            int_mode_param == BNX2X_INT_MODE_INTX) {
1257                BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1258                return 0;
1259        }
1260
1261        err = -EIO;
1262        /* verify ari is enabled */
1263        if (!bnx2x_ari_enabled(bp->pdev)) {
1264                BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
1265                return 0;
1266        }
1267
1268        /* verify igu is in normal mode */
1269        if (CHIP_INT_MODE_IS_BC(bp)) {
1270                BNX2X_ERR("IGU not in normal mode, SRIOV cannot be enabled\n");
1271                return 0;
1272        }
1273
1274        /* allocate the vfs database */
1275        bp->vfdb = kzmalloc(sizeof(*(bp->vfdb)), MEM_WAIT);
1276        if (!bp->vfdb) {
1277                BNX2X_ERR("failed to allocate vf database\n");
1278                err = -ENOMEM;
1279                goto failed;
1280        }
1281
1282        /* get the sriov info - Linux already collected all the pertinent
1283         * information; however, the sriov structure is for the private use
1284         * of the pci module. Also, we want this information regardless
1285         * of the hypervisor.
1286         */
1287        iov = &(bp->vfdb->sriov);
1288        err = bnx2x_sriov_info(bp, iov);
1289        if (err)
1290                goto failed;
1291
1292        /* SR-IOV capability was enabled but there are no VFs */
1293        if (iov->total == 0)
1294                goto failed;
1295
1296        iov->nr_virtfn = MIN_T(uint16_t, iov->total, num_vfs_param);
1297
1298        DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
1299           num_vfs_param, iov->nr_virtfn);
1300
1301        /* allocate the vf array */
1302        bp->vfdb->vfs = kzmalloc(sizeof(struct bnx2x_virtf) * BNX2X_NR_VIRTFN(bp),
1303                                 MEM_WAIT);
1304        if (!bp->vfdb->vfs) {
1305                BNX2X_ERR("failed to allocate vf array\n");
1306                err = -ENOMEM;
1307                goto failed;
1308        }
1309
1310        /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
1311        for_each_vf(bp, i) {
1312                bnx2x_vf(bp, i, index) = i;
1313                bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
1314                bnx2x_vf(bp, i, state) = VF_FREE;
1315                mutex_init(&bnx2x_vf(bp, i, op_mutex));
1316                bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
1317        }
1318
1319        /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1320        if (!bnx2x_get_vf_igu_cam_info(bp)) {
1321                BNX2X_ERR("No entries in IGU CAM for vfs\n");
1322                err = -EINVAL;
1323                goto failed;
1324        }
1325
1326        /* allocate the queue arrays for all VFs */
1327        bp->vfdb->vfqs = kzmalloc(BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
1328                                  MEM_WAIT);
1329
1330        if (!bp->vfdb->vfqs) {
1331                BNX2X_ERR("failed to allocate vf queue array\n");
1332                err = -ENOMEM;
1333                goto failed;
1334        }
1335
1336        /* Prepare the VFs event synchronization mechanism */
1337        mutex_init(&bp->vfdb->event_mutex);
1338
1339        mutex_init(&bp->vfdb->bulletin_mutex);
1340
1341        return 0;
1342failed:
1343        DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
1344        __bnx2x_iov_free_vfdb(bp);
1345        return err;
1346}
1347
1348void bnx2x_iov_remove_one(struct bnx2x *bp)
1349{
1350        int vf_idx;
1351
1352        /* if SRIOV is not enabled there's nothing to do */
1353        if (!IS_SRIOV(bp))
1354                return;
1355
1356        bnx2x_disable_sriov(bp);
1357
1358        /* disable access to all VFs */
1359        for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
1360                bnx2x_pretend_func(bp,
1361                                   HW_VF_HANDLE(bp,
1362                                                bp->vfdb->sriov.first_vf_in_pf +
1363                                                vf_idx));
1364                DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
1365                   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
1366                bnx2x_vf_enable_internal(bp, 0);
1367                bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1368        }
1369
1370        /* free vf database */
1371        __bnx2x_iov_free_vfdb(bp);
1372}
1373
1374void bnx2x_iov_free_mem(struct bnx2x *bp)
1375{
1376        int i;
1377
1378        if (!IS_SRIOV(bp))
1379                return;
1380
1381        /* free vfs hw contexts */
1382        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1383                struct hw_dma *cxt = &bp->vfdb->context[i];
1384                BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
1385        }
1386
1387        BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
1388                       BP_VFDB(bp)->sp_dma.mapping,
1389                       BP_VFDB(bp)->sp_dma.size);
1390
1391        BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
1392                       BP_VF_MBX_DMA(bp)->mapping,
1393                       BP_VF_MBX_DMA(bp)->size);
1394
1395        BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
1396                       BP_VF_BULLETIN_DMA(bp)->mapping,
1397                       BP_VF_BULLETIN_DMA(bp)->size);
1398}
1399
1400int bnx2x_iov_alloc_mem(struct bnx2x *bp)
1401{
1402        size_t tot_size;
1403        int i, rc = 0;
1404
1405        if (!IS_SRIOV(bp))
1406                return rc;
1407
1408        /* allocate vfs hw contexts */
1409        tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
1410                BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
1411
1412        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1413                struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
1414                cxt->size = MIN_T(size_t, tot_size, CDU_ILT_PAGE_SZ);
1415
1416                if (cxt->size) {
1417                        cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
1418                        if (!cxt->addr)
1419                                goto alloc_mem_err;
1420                } else {
1421                        cxt->addr = NULL;
1422                        cxt->mapping = 0;
1423                }
1424                tot_size -= cxt->size;
1425        }
1426
1427        /* allocate vfs ramrods dma memory - client_init and set_mac */
1428        tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
1429        BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
1430                                                   tot_size);
1431        if (!BP_VFDB(bp)->sp_dma.addr)
1432                goto alloc_mem_err;
1433        BP_VFDB(bp)->sp_dma.size = tot_size;
1434
1435        /* allocate mailboxes */
1436        tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
1437        BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
1438                                                  tot_size);
1439        if (!BP_VF_MBX_DMA(bp)->addr)
1440                goto alloc_mem_err;
1441
1442        BP_VF_MBX_DMA(bp)->size = tot_size;
1443
1444        /* allocate local bulletin boards */
1445        tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
1446        BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
1447                                                       tot_size);
1448        if (!BP_VF_BULLETIN_DMA(bp)->addr)
1449                goto alloc_mem_err;
1450
1451        BP_VF_BULLETIN_DMA(bp)->size = tot_size;
1452
1453        return 0;
1454
1455alloc_mem_err:
1456        return -ENOMEM;
1457}
1458
1459static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
1460                           struct bnx2x_vf_queue *q)
1461{
1462        uint8_t cl_id = vfq_cl_id(vf, q);
1463        uint8_t func_id = FW_VF_HANDLE(vf->abs_vfid);
1464        unsigned long q_type = 0;
1465
1466        set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1467        set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1468
1469        /* Queue State object */
1470        bnx2x_init_queue_obj(bp, &q->sp_obj,
1471                             cl_id, &q->cid, 1, func_id,
1472                             bnx2x_vf_sp(bp, vf, q_data),
1473                             bnx2x_vf_sp_map(bp, vf, q_data),
1474                             q_type);
1475
1476        /* sp indication is set only when vlan/mac/etc. are initialized */
1477        q->sp_initialized = false;
1478
1479        DP(BNX2X_MSG_IOV,
1480           "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
1481           vf->abs_vfid, q->sp_obj.func_id, q->cid);
1482}
1483
1484static int bnx2x_max_speed_cap(struct bnx2x *bp)
1485{
1486        uint32_t supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];
1487
1488        if (supported &
1489            (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
1490                return 20000;
1491
1492        return 10000; /* assume lowest supported speed is 10G */
1493}
1494
1495int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
1496{
1497        struct bnx2x_link_report_data *state = &bp->last_reported_link;
1498        struct pf_vf_bulletin_content *bulletin;
1499        struct bnx2x_virtf *vf;
1500        bool update = true;
1501        int rc = 0;
1502
1503        /* sanity and init */
1504        rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
1505        if (rc)
1506                return rc;
1507
1508        qlock(&bp->vfdb->bulletin_mutex);
1509
1510        if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
1511                bulletin->valid_bitmap |= 1 << LINK_VALID;
1512
1513                bulletin->link_speed = state->line_speed;
1514                bulletin->link_flags = 0;
1515                if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1516                             &state->link_report_flags))
1517                        bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1518                if (test_bit(BNX2X_LINK_REPORT_FD,
1519                             &state->link_report_flags))
1520                        bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
1521                if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1522                             &state->link_report_flags))
1523                        bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
1524                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1525                             &state->link_report_flags))
1526                        bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
1527        } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
1528                   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1529                bulletin->valid_bitmap |= 1 << LINK_VALID;
1530                bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1531        } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
1532                   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1533                bulletin->valid_bitmap |= 1 << LINK_VALID;
1534                bulletin->link_speed = bnx2x_max_speed_cap(bp);
1535                bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
1536        } else {
1537                update = false;
1538        }
1539
1540        if (update) {
1541                DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
1542                   "vf %d mode %u speed %d flags %x\n", idx,
1543                   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);
1544
1545                /* Post update on VF's bulletin board */
1546                rc = bnx2x_post_vf_bulletin(bp, idx);
1547                if (rc) {
1548                        BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
1549                        goto out;
1550                }
1551        }
1552
1553out:
1554        qunlock(&bp->vfdb->bulletin_mutex);
1555        return rc;
1556}
1557
1558int bnx2x_set_vf_link_state(struct ether *dev, int idx, int link_state)
1559{
1560        struct bnx2x *bp = netdev_priv(dev);
1561        struct bnx2x_virtf *vf = BP_VF(bp, idx);
1562
1563        if (!vf)
1564                return -EINVAL;
1565
1566        if (vf->link_cfg == link_state)
1567                return 0; /* nothing to do */
1568
1569        vf->link_cfg = link_state;
1570
1571        return bnx2x_iov_link_update_vf(bp, idx);
1572}
1573
1574void bnx2x_iov_link_update(struct bnx2x *bp)
1575{
1576        int vfid;
1577
1578        if (!IS_SRIOV(bp))
1579                return;
1580
1581        for_each_vf(bp, vfid)
1582                bnx2x_iov_link_update_vf(bp, vfid);
1583}
1584
1585/* called by bnx2x_nic_load */
1586int bnx2x_iov_nic_init(struct bnx2x *bp)
1587{
1588        int vfid;
1589
1590        if (!IS_SRIOV(bp)) {
1591                DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1592                return 0;
1593        }
1594
1595        DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
1596
1597        /* let FLR complete ... */
1598        kthread_usleep(1000 * 100);
1599
1600        /* initialize vf database */
1601        for_each_vf(bp, vfid) {
1602                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1603
1604                int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1605                        BNX2X_CIDS_PER_VF;
1606
1607                union cdu_context *base_cxt = (union cdu_context *)
1608                        BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1609                        (base_vf_cid & (ILT_PAGE_CIDS-1));
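                    /* base_cxt points at this VF's first context entry: pick
                     * the ILT page that holds base_vf_cid, then index by the
                     * CID's offset within that page.
                     */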
1610
1611                DP(BNX2X_MSG_IOV,
1612                   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1613                   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1614                   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1615
1616                /* init statically provisioned resources */
1617                bnx2x_iov_static_resc(bp, vf);
1618
1619                /* queues are initialized during VF-ACQUIRE */
1620                vf->filter_state = 0;
1621                vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1622
1623                /*  init mcast object - This object will be re-initialized
1624                 *  during VF-ACQUIRE with the proper cl_id and cid.
1625                 *  It needs to be initialized here so that it can be safely
1626                 *  handled by a subsequent FLR flow.
1627                 */
1628                vf->mcast_list_len = 0;
1629                bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1630                                     0xFF, 0xFF, 0xFF,
1631                                     bnx2x_vf_sp(bp, vf, mcast_rdata),
1632                                     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1633                                     BNX2X_FILTER_MCAST_PENDING,
1634                                     &vf->filter_state,
1635                                     BNX2X_OBJ_TYPE_RX_TX);
1636
1637                /* set the mailbox message addresses */
1638                BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1639                        (((uint8_t *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1640                        MBX_MSG_ALIGNED_SIZE);
1641
1642                BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1643                        vfid * MBX_MSG_ALIGNED_SIZE;
1644
1645                /* Enable vf mailbox */
1646                bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1647        }
1648
1649        /* Final VF init */
1650        for_each_vf(bp, vfid) {
1651                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1652
1653                /* fill in the BDF and bars */
1654                vf->bus = bnx2x_vf_bus(bp, vfid);
1655                vf->devfn = bnx2x_vf_devfn(bp, vfid);
1656                bnx2x_vf_set_bars(bp, vf);
1657
1658                DP(BNX2X_MSG_IOV,
1659                   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1660                   vf->abs_vfid, vf->bus, vf->devfn,
1661                   (unsigned)vf->bars[0].bar, vf->bars[0].size,
1662                   (unsigned)vf->bars[1].bar, vf->bars[1].size,
1663                   (unsigned)vf->bars[2].bar, vf->bars[2].size);
1664        }
1665
1666        return 0;
1667}
1668
1669/* called by bnx2x_chip_cleanup */
1670int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
1671{
1672        int i;
1673
1674        if (!IS_SRIOV(bp))
1675                return 0;
1676
1677        /* release all the VFs */
1678        for_each_vf(bp, i)
1679                bnx2x_vf_release(bp, BP_VF(bp, i));
1680
1681        return 0;
1682}
1683
1684/* called by bnx2x_init_hw_func, returns the next ilt line */
1685int bnx2x_iov_init_ilt(struct bnx2x *bp, uint16_t line)
1686{
1687        int i;
1688        struct bnx2x_ilt *ilt = BP_ILT(bp);
1689
1690        if (!IS_SRIOV(bp))
1691                return line;
1692
1693        /* set vfs ilt lines */
1694        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1695                struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1696
1697                ilt->lines[line+i].page = hw_cxt->addr;
1698                ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1699                ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1700        }
1701        return line + i;
1702}
1703
1704static uint8_t bnx2x_iov_is_vf_cid(struct bnx2x *bp, uint16_t cid)
1705{
1706        return ((cid >= BNX2X_FIRST_VF_CID) &&
1707                ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1708}
1709
1710static
1711void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1712                                        struct bnx2x_vf_queue *vfq,
1713                                        union event_ring_elem *elem)
1714{
1715        unsigned long ramrod_flags = 0;
1716        int rc = 0;
1717
1718        /* Always push next commands out, don't wait here */
1719        set_bit(RAMROD_CONT, &ramrod_flags);
1720
1721        switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
1722        case BNX2X_FILTER_MAC_PENDING:
1723                rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1724                                           &ramrod_flags);
1725                break;
1726        case BNX2X_FILTER_VLAN_PENDING:
1727                rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1728                                            &ramrod_flags);
1729                break;
1730        default:
1731                BNX2X_ERR("Unsupported classification command: %d\n",
1732                          elem->message.data.eth_event.echo);
1733                return;
1734        }
1735        if (rc < 0)
1736                BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
1737        else if (rc > 0)
1738                DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
1739}
1740
1741static
1742void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
1743                               struct bnx2x_virtf *vf)
1744{
1745        struct bnx2x_mcast_ramrod_params rparam = {NULL};
1746        int rc;
1747
1748        rparam.mcast_obj = &vf->mcast_obj;
1749        vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
1750
1751        /* If there are pending mcast commands - send them */
1752        if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
1753                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1754                if (rc < 0)
1755                        BNX2X_ERR("Failed to send pending mcast commands: %d\n",
1756                                  rc);
1757        }
1758}
1759
1760static
1761void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
1762                                 struct bnx2x_virtf *vf)
1763{
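            /* cmb() is Akaros's compiler memory barrier, standing in for the
             * smp_mb__*() barriers used around this clear_bit() in the upstream
             * Linux driver; it fences the clear of the rx-mode pending bit
             * against compiler reordering.
             */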
1764        cmb();
1765        clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1766        cmb();
1767}
1768
1769static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
1770                                           struct bnx2x_virtf *vf)
1771{
1772        vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
1773}
1774
1775int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1776{
1777        struct bnx2x_virtf *vf;
1778        int qidx = 0, abs_vfid;
1779        uint8_t opcode;
1780        uint16_t cid = 0xffff;
1781
1782        if (!IS_SRIOV(bp))
1783                return 1;
1784
1785        /* first get the cid - the events handled here are cfc-delete and
1786         * the classification/mcast/filters/rss completions, plus FLR and
1787         * malicious-VF notifications */
1788        opcode = elem->message.opcode;
1789
1790        switch (opcode) {
1791        case EVENT_RING_OPCODE_CFC_DEL:
1792                cid = SW_CID((__force __le32)
1793                             elem->message.data.cfc_del_event.cid);
1794                DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1795                break;
1796        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1797        case EVENT_RING_OPCODE_MULTICAST_RULES:
1798        case EVENT_RING_OPCODE_FILTERS_RULES:
1799        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1800                cid = (elem->message.data.eth_event.echo &
1801                       BNX2X_SWCID_MASK);
1802                DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1803                break;
1804        case EVENT_RING_OPCODE_VF_FLR:
1805                abs_vfid = elem->message.data.vf_flr_event.vf_id;
1806                DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
1807                   abs_vfid);
1808                goto get_vf;
1809        case EVENT_RING_OPCODE_MALICIOUS_VF:
1810                abs_vfid = elem->message.data.malicious_vf_event.vf_id;
1811                BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
1812                          abs_vfid,
1813                          elem->message.data.malicious_vf_event.err_id);
1814                goto get_vf;
1815        default:
1816                return 1;
1817        }
1818
1819        /* check if the cid is in the VF range */
1820        if (!bnx2x_iov_is_vf_cid(bp, cid)) {
1821                DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
1822                return 1;
1823        }
1824
1825        /* extract vf and rxq index from vf_cid - relies on the following:
1826         * 1. vfid on cid reflects the true abs_vfid
1827         * 2. The max number of VFs (per path) is 64
1828         */
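            /* Illustrative decode only (the real window width comes from
             * BNX2X_VF_CID_WND in bnx2x_sriov.h): with a 2-bit window, cid 0x0b
             * yields qidx = 0x0b & 0x3 = 3 and abs_vfid = (0x0b >> 2) & 0x3f = 2.
             */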
1829        qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
1830        abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1831get_vf:
1832        vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1833
1834        if (!vf) {
1835                BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
1836                          cid, abs_vfid);
1837                return 0;
1838        }
1839
1840        switch (opcode) {
1841        case EVENT_RING_OPCODE_CFC_DEL:
1842                DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
1843                   vf->abs_vfid, qidx);
1844                vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
1845                                                       &vfq_get(vf,
1846                                                                qidx)->sp_obj,
1847                                                       BNX2X_Q_CMD_CFC_DEL);
1848                break;
1849        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1850                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
1851                   vf->abs_vfid, qidx);
1852                bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
1853                break;
1854        case EVENT_RING_OPCODE_MULTICAST_RULES:
1855                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
1856                   vf->abs_vfid, qidx);
1857                bnx2x_vf_handle_mcast_eqe(bp, vf);
1858                break;
1859        case EVENT_RING_OPCODE_FILTERS_RULES:
1860                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
1861                   vf->abs_vfid, qidx);
1862                bnx2x_vf_handle_filters_eqe(bp, vf);
1863                break;
1864        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1865                DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
1866                   vf->abs_vfid, qidx);
1867                bnx2x_vf_handle_rss_update_eqe(bp, vf);
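                    /* falls through: the FLR / malicious-VF cases below only
                     * return 0, which is also the right result for this case
                     */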
1868        case EVENT_RING_OPCODE_VF_FLR:
1869        case EVENT_RING_OPCODE_MALICIOUS_VF:
1870                /* Do nothing for now */
1871                return 0;
1872        }
1873
1874        return 0;
1875}
1876
1877static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1878{
1879        /* extract the vf from vf_cid - relies on the following:
1880         * 1. vfid on cid reflects the true abs_vfid
1881         * 2. The max number of VFs (per path) is 64
1882         */
1883        int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1884        return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1885}
1886
1887void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1888                                struct bnx2x_queue_sp_obj **q_obj)
1889{
1890        struct bnx2x_virtf *vf;
1891
1892        if (!IS_SRIOV(bp))
1893                return;
1894
1895        vf = bnx2x_vf_by_cid(bp, vf_cid);
1896
1897        if (vf) {
1898                /* extract queue index from vf_cid - relies on the following:
1899                 * 1. vfid on cid reflects the true abs_vfid
1900                 * 2. The max number of VFs (per path) is 64
1901                 */
1902                int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1903                *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1904        } else {
1905                BNX2X_ERR("No vf matching cid %d\n", vf_cid);
1906        }
1907}
1908
1909void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1910{
1911        int i;
1912        int first_queue_query_index, num_queues_req;
1913        dma_addr_t cur_data_offset;
1914        struct stats_query_entry *cur_query_entry;
1915        uint8_t stats_count = 0;
1916        bool is_fcoe = false;
1917
1918        if (!IS_SRIOV(bp))
1919                return;
1920
1921        if (!NO_FCOE(bp))
1922                is_fcoe = true;
1923
1924        /* fcoe adds one global request and one queue request */
1925        num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
1926        first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
1927                (is_fcoe ? 0 : 1);
1928
1929        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1930               "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
1931               BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
1932               first_queue_query_index + num_queues_req);
1933
1934        cur_data_offset = bp->fw_stats_data_mapping +
1935                offsetof(struct bnx2x_fw_stats_data, queue_stats) +
1936                num_queues_req * sizeof(struct per_queue_stats);
1937
1938        cur_query_entry = &bp->fw_stats_req->
1939                query[first_queue_query_index + num_queues_req];
1940
1941        for_each_vf(bp, i) {
1942                int j;
1943                struct bnx2x_virtf *vf = BP_VF(bp, i);
1944
1945                if (vf->state != VF_ENABLED) {
1946                        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1947                               "vf %d not enabled so no stats for it\n",
1948                               vf->abs_vfid);
1949                        continue;
1950                }
1951
1952                DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
1953                for_each_vfq(vf, j) {
1954                        struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
1955
1956                        dma_addr_t q_stats_addr =
1957                                vf->fw_stat_map + j * vf->stats_stride;
1958
1959                        /* collect stats from active queues only */
1960                        if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
1961                            BNX2X_Q_LOGICAL_STATE_STOPPED)
1962                                continue;
1963
1964                        /* create stats query entry for this queue */
1965                        cur_query_entry->kind = STATS_TYPE_QUEUE;
1966                        cur_query_entry->index = vfq_stat_id(vf, rxq);
1967                        cur_query_entry->funcID =
1968                                cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
1969                        cur_query_entry->address.hi =
1970                                cpu_to_le32(U64_HI(q_stats_addr));
1971                        cur_query_entry->address.lo =
1972                                cpu_to_le32(U64_LO(q_stats_addr));
1973                        DP(BNX2X_MSG_IOV,
1974                           "added address %x %x for vf %d queue %d client %d\n",
1975                           cur_query_entry->address.hi,
1976                           cur_query_entry->address.lo, cur_query_entry->funcID,
1977                           j, cur_query_entry->index);
1978                        cur_query_entry++;
1979                        cur_data_offset += sizeof(struct per_queue_stats);
1980                        stats_count++;
1981
1982                        /* all stats are coalesced to the leading queue */
1983                        if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
1984                                break;
1985                }
1986        }
1987        bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1988}
1989
1990/* VF API helpers */
1991static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, uint8_t abs_vfid,
1992                                uint8_t qid,
1993                                uint8_t enable)
1994{
1995        uint32_t reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
1996        uint32_t val = enable ? (abs_vfid | (1 << 6)) : 0;
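            /* Each permission-table entry is one 32-bit word per queue zone:
             * the low bits carry the abs_vfid allowed to use the zone, and
             * bit 6 appears to serve as the entry's valid/enable flag
             * (writing 0 revokes access entirely).
             */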
1997
1998        REG_WR(bp, reg, val);
1999}
2000
2001static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2002{
2003        int i;
2004
2005        for_each_vfq(vf, i)
2006                bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2007                                    vfq_qzone_id(vf, vfq_get(vf, i)), false);
2008}
2009
2010static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2011{
2012        uint32_t val;
2013
2014        /* clear the VF configuration - pretend */
2015        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2016        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2017        val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2018                 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2019        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2020        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2021}
2022
2023uint8_t bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2024{
2025        return MIN_T(uint8_t,
2026                     MIN_T(uint8_t, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2027                     BNX2X_VF_MAX_QUEUES);
2028}
2029
2030static
2031int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2032                            struct vf_pf_resc_request *req_resc)
2033{
2034        uint8_t rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2035        uint8_t txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2036
2037        /* Save a vlan filter for the Hypervisor */
2038        return ((req_resc->num_rxqs <= rxq_cnt) &&
2039                (req_resc->num_txqs <= txq_cnt) &&
2040                (req_resc->num_sbs <= vf_sb_count(vf))   &&
2041                (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2042                (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
2043}
2044
2045/* CORE VF API */
2046int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2047                     struct vf_pf_resc_request *resc)
2048{
2049        int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2050                BNX2X_CIDS_PER_VF;
2051
2052        union cdu_context *base_cxt = (union cdu_context *)
2053                BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2054                (base_vf_cid & (ILT_PAGE_CIDS-1));
2055        int i;
2056
2057        /* if state is 'acquired' the VF was not released or FLR'd, in
2058         * this case the returned resources match the already acquired
2059         * resources. Verify that the requested numbers do
2060         * not exceed the already acquired numbers.
2061         */
2062        if (vf->state == VF_ACQUIRED) {
2063                DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2064                   vf->abs_vfid);
2065
2066                if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2067                        BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2068                                  vf->abs_vfid);
2069                        return -EINVAL;
2070                }
2071                return 0;
2072        }
2073
2074        /* Otherwise vf state must be 'free' or 'reset' */
2075        if (vf->state != VF_FREE && vf->state != VF_RESET) {
2076                BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2077                          vf->abs_vfid, vf->state);
2078                return -EINVAL;
2079        }
2080
2081        /* static allocation:
2082         * the global maximum numbers are fixed per VF. Fail the request if
2083         * the requested numbers exceed these globals
2084         */
2085        if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2086                DP(BNX2X_MSG_IOV,
2087                   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2088                /* set the max resource in the vf */
2089                return -ENOMEM;
2090        }
2091
2092        /* Set resource counters - a request of 0 means max available */
2093        vf_sb_count(vf) = resc->num_sbs;
2094        vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2095        vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2096        if (resc->num_mac_filters)
2097                vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2098        /* Add an additional vlan filter credit for the hypervisor */
2099        bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
2100
2101        DP(BNX2X_MSG_IOV,
2102           "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2103           vf_sb_count(vf), vf_rxq_count(vf),
2104           vf_txq_count(vf), vf_mac_rules_cnt(vf),
2105           vf_vlan_rules_visible_cnt(vf));
2106
2107        /* Initialize the queues */
2108        if (!vf->vfqs) {
2109                DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2110                return -EINVAL;
2111        }
2112
2113        for_each_vfq(vf, i) {
2114                struct bnx2x_vf_queue *q = vfq_get(vf, i);
2115
2116                if (!q) {
2117                        BNX2X_ERR("q number %d was not allocated\n", i);
2118                        return -EINVAL;
2119                }
2120
2121                q->index = i;
2122                q->cxt = &((base_cxt + i)->eth);
2123                q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2124
2125                DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2126                   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2127
2128                /* init SP objects */
2129                bnx2x_vfq_init(bp, vf, q);
2130        }
2131        vf->state = VF_ACQUIRED;
2132        return 0;
2133}
2134
2135int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2136{
2137        struct bnx2x_func_init_params func_init = {0};
2138        uint16_t flags = 0;
2139        int i;
2140
2141        /* the sb resources are initialized at this point, do the
2142         * FW/HW initializations
2143         */
2144        for_each_vf_sb(vf, i)
2145                bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2146                              vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2147
2148        /* Sanity checks */
2149        if (vf->state != VF_ACQUIRED) {
2150                DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2151                   vf->abs_vfid, vf->state);
2152                return -EINVAL;
2153        }
2154
2155        /* let FLR complete ... */
2156        kthread_usleep(1000 * 100);
2157
2158        /* FLR cleanup epilogue */
2159        if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2160                return -EBUSY;
2161
2162        /* reset IGU VF statistics: MSIX */
2163        REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2164
2165        /* vf init */
2166        if (vf->cfg_flags & VF_CFG_STATS)
2167                flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2168
2169        if (vf->cfg_flags & VF_CFG_TPA)
2170                flags |= FUNC_FLG_TPA;
2171
2172        if (is_vf_multi(vf))
2173                flags |= FUNC_FLG_RSS;
2174
2175        /* function setup */
2176        func_init.func_flgs = flags;
2177        func_init.pf_id = BP_FUNC(bp);
2178        func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2179        func_init.fw_stat_map = vf->fw_stat_map;
2180        func_init.spq_map = vf->spq_map;
2181        func_init.spq_prod = 0;
2182        bnx2x_func_init(bp, &func_init);
2183
2184        /* Enable the vf */
2185        bnx2x_vf_enable_access(bp, vf->abs_vfid);
2186        bnx2x_vf_enable_traffic(bp, vf);
2187
2188        /* queue protection table */
2189        for_each_vfq(vf, i)
2190                bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2191                                    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2192
2193        vf->state = VF_ENABLED;
2194
2195        /* update vf bulletin board */
2196        bnx2x_post_vf_bulletin(bp, vf->index);
2197
2198        return 0;
2199}
2200
2201struct set_vf_state_cookie {
2202        struct bnx2x_virtf *vf;
2203        uint8_t state;
2204};
2205
2206static void bnx2x_set_vf_state(void *cookie)
2207{
2208        struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2209
2210        p->vf->state = p->state;
2211}
2212
2213int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2214{
2215        int rc = 0, i;
2216
2217        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2218
2219        /* Close all queues */
2220        for (i = 0; i < vf_rxq_count(vf); i++) {
2221                rc = bnx2x_vf_queue_teardown(bp, vf, i);
2222                if (rc)
2223                        goto op_err;
2224        }
2225
2226        /* disable the interrupts */
2227        DP(BNX2X_MSG_IOV, "disabling igu\n");
2228        bnx2x_vf_igu_disable(bp, vf);
2229
2230        /* disable the VF */
2231        DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2232        bnx2x_vf_clr_qtbl(bp, vf);
2233
2234        /* need to make sure there are no outstanding stats ramrods which may
2235         * cause the device to access the VF's stats buffer which it will free
2236         * as soon as we return from the close flow.
2237         */
2238        {
2239                struct set_vf_state_cookie cookie;
2240
2241                cookie.vf = vf;
2242                cookie.state = VF_ACQUIRED;
2243                bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2244        }
2245
2246        DP(BNX2X_MSG_IOV, "set state to acquired\n");
2247
2248        return 0;
2249op_err:
2250        BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
2251        return rc;
2252}
2253
2254/* VF release can be called in either of two cases: 1. the VF was acquired
2255 * but not enabled; 2. the VF was enabled or in the process of being
2256 * enabled
2257 */
2258int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
2259{
2260        int rc;
2261
2262        DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2263           vf->state == VF_FREE ? "Free" :
2264           vf->state == VF_ACQUIRED ? "Acquired" :
2265           vf->state == VF_ENABLED ? "Enabled" :
2266           vf->state == VF_RESET ? "Reset" :
2267           "Unknown");
2268
2269        switch (vf->state) {
2270        case VF_ENABLED:
2271                rc = bnx2x_vf_close(bp, vf);
2272                if (rc)
2273                        goto op_err;
2274                /* Fallthrough to release resources */
2275        case VF_ACQUIRED:
2276                DP(BNX2X_MSG_IOV, "about to free resources\n");
2277                bnx2x_vf_free_resc(bp, vf);
2278                break;
2279
2280        case VF_FREE:
2281        case VF_RESET:
2282        default:
2283                break;
2284        }
2285        return 0;
2286op_err:
2287        BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
2288        return rc;
2289}
2290
2291int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2292                        struct bnx2x_config_rss_params *rss)
2293{
2294        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2295        set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
2296        return bnx2x_config_rss(bp, rss);
2297}
2298
2299int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2300                        struct vfpf_tpa_tlv *tlv,
2301                        struct bnx2x_queue_update_tpa_params *params)
2302{
2303        aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
2304        struct bnx2x_queue_state_params qstate;
2305        int qid, rc = 0;
2306
2307        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2308
2309        /* Set ramrod params */
2310        memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
2311        memcpy(&qstate.params.update_tpa, params,
2312               sizeof(struct bnx2x_queue_update_tpa_params));
2313        qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
2314        set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
2315
2316        for (qid = 0; qid < vf_rxq_count(vf); qid++) {
2317                qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
2318                qstate.params.update_tpa.sge_map = sge_addr[qid];
2319                DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
2320                   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
2321                   U64_LO(sge_addr[qid]));
2322                rc = bnx2x_queue_state_change(bp, &qstate);
2323                if (rc) {
2324                        BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
2325                                  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
2326                                  vf->abs_vfid, qid);
2327                        return rc;
2328                }
2329        }
2330
2331        return rc;
2332}
2333
2334/* VF release ~ VF close + VF release-resources
2335 * Release is the ultimate SW shutdown and is called whenever an
2336 * irrecoverable error is encountered.
2337 */
2338int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2339{
2340        int rc;
2341
2342        DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
2343        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2344
2345        rc = bnx2x_vf_free(bp, vf);
2346        if (rc)
2347                warn(rc,
2348                     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
2349                     vf->abs_vfid, rc);
2350        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2351        return rc;
2352}
2353
2354void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2355                              enum channel_tlvs tlv)
2356{
2357        /* we don't lock the channel for unsupported tlvs */
2358        if (!bnx2x_tlv_supported(tlv)) {
2359                BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
2360                return;
2361        }
2362
2363        /* lock the channel */
2364        qlock(&vf->op_mutex);
2365
2366        /* record the locking op */
2367        vf->op_current = tlv;
2368
2369        /* log the lock */
2370        DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2371           vf->abs_vfid, tlv);
2372}
2373
2374void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2375                                enum channel_tlvs expected_tlv)
2376{
2377        enum channel_tlvs current_tlv;
2378
2379        if (!vf) {
2380                BNX2X_ERR("VF was %p\n", vf);
2381                return;
2382        }
2383
2384        current_tlv = vf->op_current;
2385
2386        /* we don't unlock the channel for unsupported tlvs */
2387        if (!bnx2x_tlv_supported(expected_tlv))
2388                return;
2389
2390        warn(expected_tlv != vf->op_current,
2391             "lock mismatch: expected %d found %d", expected_tlv,
2392             vf->op_current);
2393
2394        /* clear the recorded locking op */
2395        vf->op_current = CHANNEL_TLV_NONE;
2396
2397        /* unlock the channel */
2398        qunlock(&vf->op_mutex);
2399
2400        /* log the unlock */
2401        DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2402           vf->abs_vfid, current_tlv);
2403}
2404
2405static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
2406{
2407        struct bnx2x_queue_state_params q_params;
2408        uint32_t prev_flags;
2409        int i, rc;
2410
2411        /* Verify changes are needed and record current Tx switching state */
2412        prev_flags = bp->flags;
2413        if (enable)
2414                bp->flags |= TX_SWITCHING;
2415        else
2416                bp->flags &= ~TX_SWITCHING;
2417        if (prev_flags == bp->flags)
2418                return 0;
2419
2420        /* Verify state enables the sending of queue ramrods */
2421        if ((bp->state != BNX2X_STATE_OPEN) ||
2422            (bnx2x_get_q_logical_state(bp,
2423                                      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
2424             BNX2X_Q_LOGICAL_STATE_ACTIVE))
2425                return 0;
2426
2427        /* send q. update ramrod to configure Tx switching */
2428        memset(&q_params, 0, sizeof(q_params));
2429        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2430        q_params.cmd = BNX2X_Q_CMD_UPDATE;
2431        __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
2432                  &q_params.params.update.update_flags);
2433        if (enable)
2434                __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2435                          &q_params.params.update.update_flags);
2436        else
2437                __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2438                            &q_params.params.update.update_flags);
2439
2440        /* send the ramrod on all the queues of the PF */
2441        for_each_eth_queue(bp, i) {
2442                struct bnx2x_fastpath *fp = &bp->fp[i];
2443
2444                /* Set the appropriate Queue object */
2445                q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
2446
2447                /* Update the Queue state */
2448                rc = bnx2x_queue_state_change(bp, &q_params);
2449                if (rc) {
2450                        BNX2X_ERR("Failed to configure Tx switching\n");
2451                        return rc;
2452                }
2453        }
2454
2455        DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
2456        return 0;
2457}
2458
2459int bnx2x_sriov_configure(struct pci_device *dev, int num_vfs_param)
2460{
2461        struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
2462
2463        if (!IS_SRIOV(bp)) {
2464                BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
2465                return -EINVAL;
2466        }
2467
2468        DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
2469           num_vfs_param, BNX2X_NR_VIRTFN(bp));
2470
2471        /* HW channel is only operational when PF is up */
2472        if (bp->state != BNX2X_STATE_OPEN) {
2473                BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
2474                return -EINVAL;
2475        }
2476
2477        /* we are always bound by the total_vfs in the configuration space */
2478        if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
2479                BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
2480                          num_vfs_param, BNX2X_NR_VIRTFN(bp));
2481                num_vfs_param = BNX2X_NR_VIRTFN(bp);
2482        }
2483
2484        bp->requested_nr_virtfn = num_vfs_param;
2485        if (num_vfs_param == 0) {
2486                bnx2x_set_pf_tx_switching(bp, false);
2487                bnx2x_disable_sriov(bp);
2488                return 0;
2489        } else {
2490                return bnx2x_enable_sriov(bp);
2491        }
2492}
2493
2494#define IGU_ENTRY_SIZE 4
2495
2496int bnx2x_enable_sriov(struct bnx2x *bp)
2497{
2498        int rc = 0, req_vfs = bp->requested_nr_virtfn;
2499        int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
2500        uint32_t igu_entry, address;
2501        uint16_t num_vf_queues;
2502
2503        if (req_vfs == 0)
2504                return 0;
2505
2506        first_vf = bp->vfdb->sriov.first_vf_in_pf;
2507
2508        /* statically distribute vf sb pool between VFs */
2509        num_vf_queues = MIN_T(uint16_t, BNX2X_VF_MAX_QUEUES,
2510                              BP_VFDB(bp)->vf_sbs_pool / req_vfs);
2511
2512        /* zero previous values learned from igu cam */
2513        for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
2514                struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2515
2516                vf->sb_count = 0;
2517                vf_sb_count(BP_VF(bp, vf_idx)) = 0;
2518        }
2519        bp->vfdb->vf_sbs_pool = 0;
2520
2521        /* prepare IGU cam */
2522        sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
2523        address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
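            /* Each IGU_ENTRY_SIZE-byte CAM entry binds one status block to a
             * (function id, vector) pair; the nested loops below write the
             * VF's fid, the per-VF vector index and the VALID bit for every
             * status block handed to that VF.
             */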
2524        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2525                for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
2526                        igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
2527                                vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
2528                                IGU_REG_MAPPING_MEMORY_VALID;
2529                        DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
2530                           sb_idx, vf_idx);
2531                        REG_WR(bp, address, igu_entry);
2532                        sb_idx++;
2533                        address += IGU_ENTRY_SIZE;
2534                }
2535        }
2536
2537        /* Reinitialize vf database according to igu cam */
2538        bnx2x_get_vf_igu_cam_info(bp);
2539
2540        DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
2541           BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
2542
2543        qcount = 0;
2544        for_each_vf(bp, vf_idx) {
2545                struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2546
2547                /* set local queue arrays */
2548                vf->vfqs = &bp->vfdb->vfqs[qcount];
2549                qcount += vf_sb_count(vf);
2550                bnx2x_iov_static_resc(bp, vf);
2551        }
2552
2553        /* prepare msix vectors in VF configuration space - the value in the
2554         * PCI configuration space should be the index of the last entry,
2555         * namely one less than the actual size of the table
2556         */
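            /* e.g. if num_vf_queues ended up as 4, each VF's config space gets
             * the value 3 written below (table size minus one).
             */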
2557        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2558                bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
2559                REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
2560                       num_vf_queues - 1);
2561                DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
2562                   vf_idx, num_vf_queues - 1);
2563        }
2564        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2565
2566        /* enable sriov. This will probe all the VFs, and consequently cause
2567         * the "acquire" messages to appear on the VF PF channel.
2568         */
2569        DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
2570        bnx2x_disable_sriov(bp);
2571
2572        rc = bnx2x_set_pf_tx_switching(bp, true);
2573        if (rc)
2574                return rc;
2575
2576        rc = pci_enable_sriov(bp->pdev, req_vfs);
2577        if (rc) {
2578                BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
2579                return rc;
2580        }
2581        DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
2582        return req_vfs;
2583}
2584
2585void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
2586{
2587        int vfidx;
2588        struct pf_vf_bulletin_content *bulletin;
2589
2590        DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
2591        for_each_vf(bp, vfidx) {
2592                bulletin = BP_VF_BULLETIN(bp, vfidx);
2593                if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
2594                        bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
2595        }
2596}
2597
2598void bnx2x_disable_sriov(struct bnx2x *bp)
2599{
2600        if (pci_vfs_assigned(bp->pdev)) {
2601                DP(BNX2X_MSG_IOV,
2602                   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2603                return;
2604        }
2605
2606        pci_disable_sriov(bp->pdev);
2607}
2608
2609static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
2610                            struct bnx2x_virtf **vf,
2611                            struct pf_vf_bulletin_content **bulletin,
2612                            bool test_queue)
2613{
2614        if (bp->state != BNX2X_STATE_OPEN) {
2615                BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
2616                return -EINVAL;
2617        }
2618
2619        if (!IS_SRIOV(bp)) {
2620                BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
2621                return -EINVAL;
2622        }
2623
2624        if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
2625                BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
2626                          vfidx, BNX2X_NR_VIRTFN(bp));
2627                return -EINVAL;
2628        }
2629
2630        /* init members */
2631        *vf = BP_VF(bp, vfidx);
2632        *bulletin = BP_VF_BULLETIN(bp, vfidx);
2633
2634        if (!*vf) {
2635                BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
2636                return -EINVAL;
2637        }
2638
2639        if (test_queue && !(*vf)->vfqs) {
2640                BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
2641                          vfidx);
2642                return -EINVAL;
2643        }
2644
2645        if (!*bulletin) {
2646                BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
2647                          vfidx);
2648                return -EINVAL;
2649        }
2650
2651        return 0;
2652}
2653
2654int bnx2x_get_vf_config(struct ether *dev, int vfidx,
2655                        struct ifla_vf_info *ivi)
2656{
2657        struct bnx2x *bp = netdev_priv(dev);
2658        struct bnx2x_virtf *vf = NULL;
2659        struct pf_vf_bulletin_content *bulletin = NULL;
2660        struct bnx2x_vlan_mac_obj *mac_obj;
2661        struct bnx2x_vlan_mac_obj *vlan_obj;
2662        int rc;
2663
2664        /* sanity and init */
2665        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2666        if (rc)
2667                return rc;
2668
2669        mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2670        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2671        if (!mac_obj || !vlan_obj) {
2672                BNX2X_ERR("VF partially initialized\n");
2673                return -EINVAL;
2674        }
2675
2676        ivi->vf = vfidx;
2677        ivi->qos = 0;
2678        ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2679        ivi->min_tx_rate = 0;
2680        ivi->spoofchk = 1; /* always enabled */
2681        if (vf->state == VF_ENABLED) {
2682                /* mac and vlan are in vlan_mac objects */
2683                if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
2684                        mac_obj->get_n_elements(bp, mac_obj, 1,
2685                                                (uint8_t *)&ivi->mac,
2686                                                0, Eaddrlen);
2687                        vlan_obj->get_n_elements(bp, vlan_obj, 1,
2688                                                 (uint8_t *)&ivi->vlan, 0,
2689                                                 VLAN_HLEN);
2690                }
2691        } else {
2692                qlock(&bp->vfdb->bulletin_mutex);
2693                /* mac */
2694                if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
2695                        /* mac configured by ndo so it's in the bulletin board */
2696                        memcpy(&ivi->mac, bulletin->mac, Eaddrlen);
2697                else
2698                        /* function has not been loaded yet. Show mac as 0s */
2699                        memset(&ivi->mac, 0, Eaddrlen);
2700
2701                /* vlan */
2702                if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2703                        /* vlan configured by ndo so it's in the bulletin board */
2704                        memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
2705                else
2706                        /* function has not been loaded yet. Show vlans as 0s */
2707                        memset(&ivi->vlan, 0, VLAN_HLEN);
2708
2709                qunlock(&bp->vfdb->bulletin_mutex);
2710        }
2711
2712        return 0;
2713}
2714
2715/* New mac for VF. Consider these cases:
2716 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
2717 *    supply at acquire.
2718 * 2. VF has already been acquired but has not yet initialized - store in local
2719 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
2720 *    will configure this mac when it is ready.
2721 * 3. VF has already initialized but has not yet set up a queue - post the new
2722 *    mac on VF's bulletin board right now. VF will configure this mac when it
2723 *    is ready.
2724 * 4. VF has already set a queue - delete any macs already configured for this
2725 *    queue and manually config the new mac.
2726 * In any event, once this function has been called refuse any attempts by the
2727 * VF to configure any mac for itself except for this mac. In case of a race
2728 * where the VF fails to see the new post on its bulletin board before sending a
2729 * mac configuration request, the PF will simply fail the request and the VF can try
2730 * again after consulting its bulletin board.
2731 */
2732int bnx2x_set_vf_mac(struct ether *dev, int vfidx, uint8_t *mac)
2733{
2734        struct bnx2x *bp = netdev_priv(dev);
2735        int rc, q_logical_state;
2736        struct bnx2x_virtf *vf = NULL;
2737        struct pf_vf_bulletin_content *bulletin = NULL;
2738
2739        if (!is_valid_ether_addr(mac)) {
2740                BNX2X_ERR("mac address invalid\n");
2741                return -EINVAL;
2742        }
2743
2744        /* sanity and init */
2745        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2746        if (rc)
2747                return rc;
2748
2749        qlock(&bp->vfdb->bulletin_mutex);
2750
2751        /* update PF's copy of the VF's bulletin. Will no longer accept mac
2752         * configuration requests from the vf unless they match this mac
2753         */
2754        bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
2755        memcpy(bulletin->mac, mac, Eaddrlen);
2756
2757        /* Post update on VF's bulletin board */
2758        rc = bnx2x_post_vf_bulletin(bp, vfidx);
2759
2760        /* release lock before checking return code */
2761        qunlock(&bp->vfdb->bulletin_mutex);
2762
2763        if (rc) {
2764                BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2765                return rc;
2766        }
2767
2768        q_logical_state =
2769                bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
2770        if (vf->state == VF_ENABLED &&
2771            q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
2772                /* configure the mac in device on this vf's queue */
2773                unsigned long ramrod_flags = 0;
2774                struct bnx2x_vlan_mac_obj *mac_obj;
2775
2776                /* User should be able to see failure reason in system logs */
2777                if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2778                        return -EINVAL;
2779
2780                /* must lock vfpf channel to protect against vf flows */
2781                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2782
2783                /* remove existing eth macs */
2784                mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2785                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
2786                if (rc) {
2787                        BNX2X_ERR("failed to delete eth macs\n");
2788                        rc = -EINVAL;
2789                        goto out;
2790                }
2791
2792                /* remove existing uc list macs */
2793                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
2794                if (rc) {
2795                        BNX2X_ERR("failed to delete uc_list macs\n");
2796                        rc = -EINVAL;
2797                        goto out;
2798                }
2799
2800                /* configure the new mac to device */
2801                __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2802                bnx2x_set_mac_one(bp, (uint8_t *)&bulletin->mac, mac_obj,
2803                                  true,
2804                                  BNX2X_ETH_MAC, &ramrod_flags);
2805
2806out:
2807                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2808        }
2809
2810        return rc;
2811}
2812
2813int bnx2x_set_vf_vlan(struct ether *dev, int vfidx, uint16_t vlan,
2814                      uint8_t qos)
2815{
2816        struct bnx2x_queue_state_params q_params = {NULL};
2817        struct bnx2x_vlan_mac_ramrod_params ramrod_param;
2818        struct bnx2x_queue_update_params *update_params;
2819        struct pf_vf_bulletin_content *bulletin = NULL;
2820        struct bnx2x_rx_mode_ramrod_params rx_ramrod;
2821        struct bnx2x *bp = netdev_priv(dev);
2822        struct bnx2x_vlan_mac_obj *vlan_obj;
2823        unsigned long vlan_mac_flags = 0;
2824        unsigned long ramrod_flags = 0;
2825        struct bnx2x_virtf *vf = NULL;
2826        unsigned long accept_flags;
2827        int rc;
2828
2829        if (vlan > 4095) {
2830                BNX2X_ERR("illegal vlan value %d\n", vlan);
2831                return -EINVAL;
2832        }
2833
2834        DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
2835           vfidx, vlan, 0);
2836
2837        /* sanity and init */
2838        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2839        if (rc)
2840                return rc;
2841
2842        /* update PF's copy of the VF's bulletin. There is no point in posting
2843         * the vlan to the VF, since it has nothing to do with it; but it is
2844         * useful to store it here in case the VF is not up yet, so that we can
2845         * configure the vlan later, when it comes up. Treat vlan id 0 as a
2846         * request to remove the host tag.
2847         */
2848        qlock(&bp->vfdb->bulletin_mutex);
2849
2850        if (vlan > 0)
2851                bulletin->valid_bitmap |= 1 << VLAN_VALID;
2852        else
2853                bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
2854        bulletin->vlan = vlan;
2855
2856        qunlock(&bp->vfdb->bulletin_mutex);
2857
2858        /* is vf initialized and queue set up? */
2859        if (vf->state != VF_ENABLED ||
2860            bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2861            BNX2X_Q_LOGICAL_STATE_ACTIVE)
2862                return rc;
2863
2864        /* User should be able to see error in system logs */
2865        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2866                return -EINVAL;
2867
2868        /* must lock vfpf channel to protect against vf flows */
2869        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2870
2871        /* remove existing vlans */
2872        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2873        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2874        rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
2875                                  &ramrod_flags);
2876        if (rc) {
2877                BNX2X_ERR("failed to delete vlans\n");
2878                rc = -EINVAL;
2879                goto out;
2880        }
2881
2882        /* need to remove/add the VF's accept_any_vlan bit */
2883        accept_flags = bnx2x_leading_vfq(vf, accept_flags);
2884        if (vlan)
2885                clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2886        else
2887                set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2888
2889        bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
2890                              accept_flags);
2891        bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
2892        bnx2x_config_rx_mode(bp, &rx_ramrod);
2893
2894        /* configure the new vlan to device */
2895        memset(&ramrod_param, 0, sizeof(ramrod_param));
2896        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2897        ramrod_param.vlan_mac_obj = vlan_obj;
2898        ramrod_param.ramrod_flags = ramrod_flags;
2899        set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
2900                &ramrod_param.user_req.vlan_mac_flags);
2901        ramrod_param.user_req.u.vlan.vlan = vlan;
2902        ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
2903        rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
2904        if (rc) {
2905                BNX2X_ERR("failed to configure vlan\n");
2906                rc = -EINVAL;
2907                goto out;
2908        }
2909
2910        /* send queue update ramrod to configure default vlan and silent
2911         * vlan removal
2912         */
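            /* The "default vlan" is the host-imposed tag: the device applies it
             * to the VF's traffic, and silent removal strips it from matching Rx
             * frames (silent_removal_mask/value select the 12-bit vlan id), so
             * the VF itself never sees the host tag.
             */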
2913        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2914        q_params.cmd = BNX2X_Q_CMD_UPDATE;
2915        q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
2916        update_params = &q_params.params.update;
2917        __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
2918                  &update_params->update_flags);
2919        __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
2920                  &update_params->update_flags);
2921        if (vlan == 0) {
2922                /* if vlan is 0 then we want to leave the VF traffic
2923                 * untagged, and leave the incoming traffic untouched
2924                 * (i.e. do not remove any vlan tags).
2925                 */
2926                __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2927                            &update_params->update_flags);
2928                __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2929                            &update_params->update_flags);
2930        } else {
2931                /* configure default vlan to vf queue and set silent
2932                 * vlan removal (the vf remains unaware of this vlan).
2933                 */
2934                __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2935                          &update_params->update_flags);
2936                __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2937                          &update_params->update_flags);
2938                update_params->def_vlan = vlan;
2939                update_params->silent_removal_value =
2940                        vlan & VLAN_VID_MASK;
2941                update_params->silent_removal_mask = VLAN_VID_MASK;
2942        }
2943
2944        /* Update the Queue state */
2945        rc = bnx2x_queue_state_change(bp, &q_params);
2946        if (rc) {
2947                BNX2X_ERR("Failed to configure default VLAN\n");
2948                goto out;
2949        }
2950
2951
2952        /* clear the flag indicating that this VF needs its vlan
2953         * (it is only set if the HV configured the vlan before the VF was
2954         * up and we were called because the VF came up later).
2955         */
2956out:
2957        vf->cfg_flags &= ~VF_CFG_VLAN;
2958        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2959
2960        return rc;
2961}
2962
2963/* crc is the first field in the bulletin board. Compute the crc over the
2964 * entire bulletin board excluding the crc field itself. Use the length field,
2965 * since the bulletin board may have been posted by a PF whose version differs
2966 * from that of the VF sampling it. Therefore, the length is computed by the
2967 * PF and then used blindly by the VF.
2968 */
2969uint32_t bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
2970{
2971        return crc32(BULLETIN_CRC_SEED,
2972                 ((uint8_t *)bulletin) + sizeof(bulletin->crc),
2973                 bulletin->length - sizeof(bulletin->crc));
2974}
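
    /* A poster is expected to fill in the version, length and payload first and
     * write the crc last; roughly (illustrative sketch only):
     *
     *      bulletin->length = sizeof(*bulletin);  (or the driver's content-size
     *                                              constant)
     *      bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
     *
     * which is essentially what bnx2x_post_vf_bulletin() does on the PF side
     * before copying the board into the VF-visible area.
     */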
2975
2976/* Check for new posts on the bulletin board */
2977enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
2978{
2979        struct pf_vf_bulletin_content *bulletin;
2980        int attempts;
2981
2982        /* sampling the structure mid-post may result in corrupted data;
2983         * validate the crc to ensure coherency.
2984         */
2985        for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
2986                uint32_t crc;
2987
2988                /* sample the bulletin board */
2989                memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
2990                       sizeof(union pf_vf_bulletin));
2991
2992                crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
2993
2994                if (bp->shadow_bulletin.content.crc == crc)
2995                        break;
2996
2997                BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
2998                          bp->shadow_bulletin.content.crc, crc);
2999        }
3000
3001        if (attempts >= BULLETIN_ATTEMPTS) {
3002                BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3003                          attempts);
3004                return PFVF_BULLETIN_CRC_ERR;
3005        }
3006        bulletin = &bp->shadow_bulletin.content;
3007
3008        /* bulletin board hasn't changed since last sample */
3009        if (bp->old_bulletin.version == bulletin->version)
3010                return PFVF_BULLETIN_UNCHANGED;
3011
3012        /* the mac address in the bulletin board is valid and is new */
3013        if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
3014            !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
3015                /* update new mac to net device */
3016                memcpy(bp->dev->dev_addr, bulletin->mac, Eaddrlen);
3017        }
3018
3019        if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
3020                DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
3021                   bulletin->link_speed, bulletin->link_flags);
3022
3023                bp->vf_link_vars.line_speed = bulletin->link_speed;
3024                bp->vf_link_vars.link_report_flags = 0;
3025                /* Link is down */
3026                if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
3027                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
3028                                  &bp->vf_link_vars.link_report_flags);
3029                /* Full DUPLEX */
3030                if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
3031                        __set_bit(BNX2X_LINK_REPORT_FD,
3032                                  &bp->vf_link_vars.link_report_flags);
3033                /* Rx Flow Control is ON */
3034                if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
3035                        __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
3036                                  &bp->vf_link_vars.link_report_flags);
3037                /* Tx Flow Control is ON */
3038                if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
3039                        __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
3040                                  &bp->vf_link_vars.link_report_flags);
3041                __bnx2x_link_report(bp);
3042        }
3043
3044        /* copy new bulletin board to bp */
3045        memcpy(&bp->old_bulletin, bulletin,
3046               sizeof(struct pf_vf_bulletin_content));
3047
3048        return PFVF_BULLETIN_UPDATED;
3049}
3050
3051void bnx2x_timer_sriov(struct bnx2x *bp)
3052{
3053        bnx2x_sample_bulletin(bp);
3054
3055        /* if channel is down we need to self destruct */
3056        if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
3057                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3058                                       BNX2X_MSG_IOV);
3059}
3060
3061void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3062{
3063        /* vf doorbells are embedded within the regview */
3064        return bp->regview + PXP_VF_ADDR_DB_START;
3065}
3066
3067void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
3068{
3069        BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3070                       sizeof(struct bnx2x_vf_mbx_msg));
3071        BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3072                       sizeof(union pf_vf_bulletin));
3073}
3074
3075int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3076{
3077        mutex_init(&bp->vf2pf_mutex);
3078
3079        /* allocate vf2pf mailbox for vf to pf channel */
3080        bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3081                                         sizeof(struct bnx2x_vf_mbx_msg));
3082        if (!bp->vf2pf_mbox)
3083                goto alloc_mem_err;
3084
3085        /* allocate pf 2 vf bulletin board */
3086        bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3087                                             sizeof(union pf_vf_bulletin));
3088        if (!bp->pf2vf_bulletin)
3089                goto alloc_mem_err;
3090
3091        bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
3092
3093        return 0;
3094
3095alloc_mem_err:
3096        bnx2x_vf_pci_dealloc(bp);
3097        return -ENOMEM;
3098}
3099
3100void bnx2x_iov_channel_down(struct bnx2x *bp)
3101{
3102        int vf_idx;
3103        struct pf_vf_bulletin_content *bulletin;
3104
3105        if (!IS_SRIOV(bp))
3106                return;
3107
3108        for_each_vf(bp, vf_idx) {
3109                /* locate this VF's bulletin board and update the channel down
3110                 * bit
3111                 */
3112                bulletin = BP_VF_BULLETIN(bp, vf_idx);
3113                bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3114
3115                /* update vf bulletin board */
3116                bnx2x_post_vf_bulletin(bp, vf_idx);
3117        }
3118}
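
/* The VF side reacts to CHANNEL_DOWN the next time it samples its bulletin:
 * bnx2x_timer_sriov() above sees the bit and schedules
 * BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN so the VF can tear itself down once the PF
 * stops serving the channel.
 */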
3119
3120void bnx2x_iov_task(struct work_struct *work)
3121{
3122        struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
3123
3124        if (!netif_running(bp->dev))
3125                return;
3126
3127        if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
3128                               &bp->iov_task_state))
3129                bnx2x_vf_handle_flr_event(bp);
3130
3131        if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
3132                               &bp->iov_task_state))
3133                bnx2x_vf_mbx(bp);
3134}
3135
3136void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
3137{
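            /* cmb() here is a compiler barrier, apparently standing in for the
             * smp_mb__before_atomic()/smp_mb__after_atomic() pair in the Linux
             * original: make sure the flag is visibly set in iov_task_state
             * before the work item is queued, so bnx2x_iov_task() cannot run
             * and miss it.
             */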
3138        cmb();
3139        set_bit(flag, &bp->iov_task_state);
3140        cmb();
3141        DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3142        queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
3143}
3144