/* akaros/kern/drivers/net/bnx2x/bnx2x_sp.c */
   1/* bnx2x_sp.c: Broadcom Everest network driver.
   2 *
   3 * Copyright (c) 2011-2013 Broadcom Corporation
   4 *
   5 * Unless you and Broadcom execute a separate written software license
   6 * agreement governing use of this software, this software is licensed to you
   7 * under the terms of the GNU General Public License version 2, available
   8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
   9 *
  10 * Notwithstanding the above, under no circumstances may you combine this
  11 * software in any way with any other Broadcom software provided under a
  12 * license other than the GPL, without Broadcom's express prior written
  13 * consent.
  14 *
  15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  16 * Written by: Vladislav Zolotarov
  17 *
  18 */
  19
  20#include <linux_compat.h>
  21
  22#include "bnx2x.h"
  23#include "bnx2x_cmn.h"
  24#include "bnx2x_sp.h"
  25
  26#define BNX2X_MAX_EMUL_MULTI            16
  27
  28/**** Exe Queue interfaces ****/
  29
  30/**
  31 * bnx2x_exe_queue_init - init the Exe Queue object
  32 *
  33 * @o:          pointer to the object
  34 * @exe_len:    length
  35 * @owner:      pointer to the owner
  36 * @validate:   validate function pointer
  37 * @optimize:   optimize function pointer
  38 * @exec:       execute function pointer
  39 * @get:        get function pointer
  40 */
  41static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
  42                                        struct bnx2x_exe_queue_obj *o,
  43                                        int exe_len,
  44                                        union bnx2x_qable_obj *owner,
  45                                        exe_q_validate validate,
  46                                        exe_q_remove remove,
  47                                        exe_q_optimize optimize,
  48                                        exe_q_execute exec,
  49                                        exe_q_get get)
  50{
  51        memset(o, 0, sizeof(*o));
  52
  53        INIT_LIST_HEAD(&o->exe_queue);
  54        INIT_LIST_HEAD(&o->pending_comp);
  55
  56        spinlock_init_irqsave(&o->lock);
  57
  58        o->exe_chunk_len = exe_len;
  59        o->owner         = owner;
  60
  61        /* Owner specific callbacks */
  62        o->validate      = validate;
  63        o->remove        = remove;
  64        o->optimize      = optimize;
  65        o->execute       = exec;
  66        o->get           = get;
  67
  68        DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
  69           exe_len);
  70}
  71
  72static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
  73                                             struct bnx2x_exeq_elem *elem)
  74{
  75        DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
  76        kfree(elem);
  77}
  78
  79static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
  80{
  81        struct bnx2x_exeq_elem *elem;
  82        int cnt = 0;
  83
  84        spin_lock(&o->lock);
  85
  86        list_for_each_entry(elem, &o->exe_queue, link)
  87                cnt++;
  88
  89        spin_unlock(&o->lock);
  90
  91        return cnt;
  92}
  93
/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:         driver handle
 * @o:          queue
 * @elem:       new command to add
 * @restore:    true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 * Returns 0 on success or the optimize/validate error code.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
                                      struct bnx2x_exe_queue_obj *o,
                                      struct bnx2x_exeq_elem *elem,
                                      bool restore)
{
	int rc;

	spin_lock(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock(&o->lock);

	return 0;

free_and_exit:
	/* Ownership of a rejected element stays here - free it before
	 * returning the error to the caller.
	 */
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock(&o->lock);

	return rc;
}
 141
 142static inline void __bnx2x_exe_queue_reset_pending(
 143        struct bnx2x *bp,
 144        struct bnx2x_exe_queue_obj *o)
 145{
 146        struct bnx2x_exeq_elem *elem;
 147
 148        while (!list_empty(&o->pending_comp)) {
 149                elem = list_first_entry(&o->pending_comp,
 150                                        struct bnx2x_exeq_elem, link);
 151
 152                list_del(&elem->link);
 153                bnx2x_exe_queue_free_elem(bp, elem);
 154        }
 155}
 156
/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       flags
 *
 * Returns 1 if a previous chunk is still pending completion, a negative
 * error code if the execute callback failed (elements are requeued), and
 * the callback's return otherwise (0 means no outstanding completions).
 *
 * (Should be called while holding the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                                       struct bnx2x_exe_queue_obj *o,
                                       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			return 1;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		warn_on(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check: nothing fit into this chunk - we're done */
	if (!cur_len)
		return 0;

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	return rc;
}
 230
 231static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
 232{
 233        bool empty = list_empty(&o->exe_queue);
 234
 235        /* Don't reorder!!! */
 236        mb();
 237
 238        return empty && list_empty(&o->pending_comp);
 239}
 240
 241static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
 242        struct bnx2x *bp)
 243{
 244        DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
 245        return kzmalloc(sizeof(struct bnx2x_exeq_elem), 0);
 246}
 247
 248/************************ raw_obj functions ***********************************/
 249static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
 250{
 251        return !!test_bit(o->state, o->pstate);
 252}
 253
 254static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
 255{
 256        cmb();
 257        clear_bit(o->state, o->pstate);
 258        cmb();
 259}
 260
 261static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
 262{
 263        cmb();
 264        set_bit(o->state, o->pstate);
 265        cmb();
 266}
 267
/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:         device handle
 * @state:      state which is to be cleared
 * @pstate:     state buffer holding the bit
 *
 * Polls roughly once per millisecond.  Returns 0 once the bit clears,
 * -EIO if the chip paniced meanwhile, -EBUSY on timeout.
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
                                   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	/* Emulation is far slower than silicon - give it more iterations */
	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			/* NOTE(review): "5000 - cnt" understates iterations
			 * on emulation, where cnt started at 100000.
			 */
			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

/* HANGING HERE FOR SOME STATES */
		kthread_usleep(1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
 311
 312static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
 313{
 314        return bnx2x_state_wait(bp, raw->state, raw->pstate);
 315}
 316
 317/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
 318/* credit handling callbacks */
 319static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
 320{
 321        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
 322
 323        warn_on(!mp);
 324
 325        return mp->get_entry(mp, offset);
 326}
 327
 328static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
 329{
 330        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
 331
 332        warn_on(!mp);
 333
 334        return mp->get(mp, 1);
 335}
 336
 337static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
 338{
 339        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
 340
 341        warn_on(!vp);
 342
 343        return vp->get_entry(vp, offset);
 344}
 345
 346static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
 347{
 348        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
 349
 350        warn_on(!vp);
 351
 352        return vp->get(vp, 1);
 353}
 354static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
 355{
 356        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
 357
 358        return mp->put_entry(mp, offset);
 359}
 360
 361static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
 362{
 363        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
 364
 365        return mp->put(mp, 1);
 366}
 367
 368static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
 369{
 370        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
 371
 372        return vp->put_entry(vp, offset);
 373}
 374
 375static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
 376{
 377        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
 378
 379        return vp->put(vp, 1);
 380}
 381
 382/**
 383 * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
 384 *
 385 * @bp:         device handle
 386 * @o:          vlan_mac object
 387 *
 388 * @details: Non-blocking implementation; should be called under execution
 389 *           queue lock.
 390 */
 391static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
 392                                            struct bnx2x_vlan_mac_obj *o)
 393{
 394        if (o->head_reader) {
 395                DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
 396                return -EBUSY;
 397        }
 398
 399        DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
 400        return 0;
 401}
 402
/**
 * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
 *
 * @bp:         device handle
 * @o:          vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
                                            struct bnx2x_vlan_mac_obj *o)
{
	int rc;
	unsigned long ramrod_flags = o->saved_ramrod_flags;

	DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
	   ramrod_flags);
	/* Consume the pended request before running it, so a new request
	 * arriving during execution is not lost.
	 */
	o->head_exe_request = false;
	o->saved_ramrod_flags = 0;
	rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
	if (rc != 0) {
		BNX2X_ERR("execution of pending commands failed with rc %d\n",
			  rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}
 431
 432/**
 433 * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
 434 *
 435 * @bp:                 device handle
 436 * @o:                  vlan_mac object
 437 * @ramrod_flags:       ramrod flags of missed execution
 438 *
 439 * @details Should be called under execution queue lock.
 440 */
 441static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
 442                                    struct bnx2x_vlan_mac_obj *o,
 443                                    unsigned long ramrod_flags)
 444{
 445        o->head_exe_request = true;
 446        o->saved_ramrod_flags = ramrod_flags;
 447        DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
 448           ramrod_flags);
 449}
 450
 451/**
 452 * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 453 *
 454 * @bp:                 device handle
 455 * @o:                  vlan_mac object
 456 *
 457 * @details Should be called under execution queue lock. Notice if a pending
 458 *          execution exists, it would perform it - possibly releasing and
 459 *          reclaiming the execution queue lock.
 460 */
 461static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
 462                                            struct bnx2x_vlan_mac_obj *o)
 463{
 464        /* It's possible a new pending execution was added since this writer
 465         * executed. If so, execute again. [Ad infinitum]
 466         */
 467        while (o->head_exe_request) {
 468                DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
 469                __bnx2x_vlan_mac_h_exec_pending(bp, o);
 470        }
 471}
 472
 473
 474/**
 475 * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 476 *
 477 * @bp:                 device handle
 478 * @o:                  vlan_mac object
 479 *
 480 * @details Should be called under the execution queue lock. May sleep. May
 481 *          release and reclaim execution queue lock during its run.
 482 */
 483static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
 484                                        struct bnx2x_vlan_mac_obj *o)
 485{
 486        /* If we got here, we're holding lock --> no WRITER exists */
 487        o->head_reader++;
 488        DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
 489           o->head_reader);
 490
 491        return 0;
 492}
 493
 494/**
 495 * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 496 *
 497 * @bp:                 device handle
 498 * @o:                  vlan_mac object
 499 *
 500 * @details May sleep. Claims and releases execution queue lock during its run.
 501 */
 502int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
 503                               struct bnx2x_vlan_mac_obj *o)
 504{
 505        int rc;
 506
 507        spin_lock(&o->exe_queue.lock);
 508        rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
 509        spin_unlock(&o->exe_queue.lock);
 510
 511        return rc;
 512}
 513
/**
 * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader. possibly releasing and reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
                                          struct bnx2x_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		/* Unbalanced unlock - complain (and panic on debug builds) */
		BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	} else {
		o->head_reader--;
		DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
		   o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");

		/* Writer release will do the trick */
		__bnx2x_vlan_mac_h_write_unlock(bp, o);
	}
}
 548
 549/**
 550 * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 551 *
 552 * @bp:                 device handle
 553 * @o:                  vlan_mac object
 554 *
 555 * @details Notice if a pending execution exists, it would be performed if this
 556 *          was the last reader. Claims and releases the execution queue lock
 557 *          during its run.
 558 */
 559void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
 560                                  struct bnx2x_vlan_mac_obj *o)
 561{
 562        spin_lock(&o->exe_queue.lock);
 563        __bnx2x_vlan_mac_h_read_unlock(bp, o);
 564        spin_unlock(&o->exe_queue.lock);
 565}
 566
/* Copy up to @n registry entries (each @size bytes, separated by @stride
 * bytes of gap) from the object's head list into @base.  Takes the
 * vlan_mac reader lock for the traversal when possible.
 */
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
                                int n, uint8_t *base, uint8_t stride,
                                uint8_t size)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	uint8_t *next = base;
	int counter = 0;
	int read_lock;

	DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
	read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
	if (read_lock != 0)
		BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			memcpy(next, &pos->u, size);
			counter++;
			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
			   counter, next);
			next += stride + size;
		}
	}

	/* Only release if the lock was actually acquired above */
	if (read_lock == 0) {
		DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
		bnx2x_vlan_mac_h_read_unlock(bp, o);
	}

	/* NOTE(review): the return value assumes MAC-sized (Eaddrlen) entries
	 * even though 'size' is a parameter - confirm callers only use this
	 * for MAC lists.
	 */
	return counter * Eaddrlen;
}
 599
 600/* check_add() callbacks */
 601static int bnx2x_check_mac_add(struct bnx2x *bp,
 602                               struct bnx2x_vlan_mac_obj *o,
 603                               union bnx2x_classification_ramrod_data *data)
 604{
 605        struct bnx2x_vlan_mac_registry_elem *pos;
 606
 607        DP(BNX2X_MSG_SP, "Checking MAC %E for ADD command\n", data->mac.mac);
 608
 609        if (!is_valid_ether_addr(data->mac.mac))
 610                return -EINVAL;
 611
 612        /* Check if a requested MAC already exists */
 613        list_for_each_entry(pos, &o->head, link)
 614                if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
 615                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
 616                        return -EEXIST;
 617
 618        return 0;
 619}
 620
 621static int bnx2x_check_vlan_add(struct bnx2x *bp,
 622                                struct bnx2x_vlan_mac_obj *o,
 623                                union bnx2x_classification_ramrod_data *data)
 624{
 625        struct bnx2x_vlan_mac_registry_elem *pos;
 626
 627        DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
 628
 629        list_for_each_entry(pos, &o->head, link)
 630                if (data->vlan.vlan == pos->u.vlan.vlan)
 631                        return -EEXIST;
 632
 633        return 0;
 634}
 635
/* check_del() callbacks */
/* AKAROS_PORT: this path was not ported - any caller hits a panic.
 * The original Linux registry lookup is preserved below under #if 0.
 */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x *bp,
			    struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
panic("Not implemented");
#if 0 // AKAROS_PORT
	struct bnx2x_vlan_mac_registry_elem *pos;

	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);

	list_for_each_entry(pos, &o->head, link)
		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return pos;

	return NULL;
#endif
}
 656
 657static struct bnx2x_vlan_mac_registry_elem *
 658        bnx2x_check_vlan_del(struct bnx2x *bp,
 659                             struct bnx2x_vlan_mac_obj *o,
 660                             union bnx2x_classification_ramrod_data *data)
 661{
 662        struct bnx2x_vlan_mac_registry_elem *pos;
 663
 664        DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
 665
 666        list_for_each_entry(pos, &o->head, link)
 667                if (data->vlan.vlan == pos->u.vlan.vlan)
 668                        return pos;
 669
 670        return NULL;
 671}
 672
 673/* check_move() callback */
 674static bool bnx2x_check_move(struct bnx2x *bp,
 675                             struct bnx2x_vlan_mac_obj *src_o,
 676                             struct bnx2x_vlan_mac_obj *dst_o,
 677                             union bnx2x_classification_ramrod_data *data)
 678{
 679        struct bnx2x_vlan_mac_registry_elem *pos;
 680        int rc;
 681
 682        /* Check if we can delete the requested configuration from the first
 683         * object.
 684         */
 685        pos = src_o->check_del(bp, src_o, data);
 686
 687        /*  check if configuration can be added */
 688        rc = dst_o->check_add(bp, dst_o, data);
 689
 690        /* If this classification can not be added (is already set)
 691         * or can't be deleted - return an error.
 692         */
 693        if (rc || !pos)
 694                return false;
 695
 696        return true;
 697}
 698
/* check_move() stub for objects that never permit a MOVE command. */
static bool bnx2x_check_move_always_err(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
 707
 708static inline uint8_t bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
 709{
 710        struct bnx2x_raw_obj *raw = &o->raw;
 711        uint8_t rx_tx_flag = 0;
 712
 713        if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
 714            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
 715                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
 716
 717        if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
 718            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
 719                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
 720
 721        return rx_tx_flag;
 722}
 723
/* Program (or clear) an LLH CAM MAC entry in the NIG for this port.
 * No-op unless the function runs in switch-independent or AFEX
 * multi-function mode, or if @index is beyond the per-PF CAM range.
 */
static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
                                 bool add, unsigned char *dev_addr, int index)
{
	uint32_t wb_data[2];
	/* Per-port LLH function memory base */
	uint32_t reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
		return;

	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
			 (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		/* Pack the 6-byte MAC into the two 32-bit halves of the
		 * wide-bus register (bytes 2-5 low, bytes 0-1 high).
		 */
		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] <<  8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	/* Enable (add) or disable (delete) the entry */
	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
 754
 755/**
 756 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 757 *
 758 * @bp:         device handle
 759 * @o:          queue for which we want to configure this rule
 760 * @add:        if true the command is an ADD command, DEL otherwise
 761 * @opcode:     CLASSIFY_RULE_OPCODE_XXX
 762 * @hdr:        pointer to a header to setup
 763 *
 764 */
 765static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
 766        struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
 767        struct eth_classify_cmd_header *hdr)
 768{
 769        struct bnx2x_raw_obj *raw = &o->raw;
 770
 771        hdr->client_id = raw->cl_id;
 772        hdr->func_id = raw->func_id;
 773
 774        /* Rx or/and Tx (internal switching) configuration ? */
 775        hdr->cmd_general_data |=
 776                bnx2x_vlan_mac_get_rx_tx_flag(o);
 777
 778        if (add)
 779                hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
 780
 781        hdr->cmd_general_data |=
 782                (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
 783}
 784
 785/**
 786 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 787 *
 788 * @cid:        connection id
 789 * @type:       BNX2X_FILTER_XXX_PENDING
 790 * @hdr:        pointer to header to setup
 791 * @rule_cnt:
 792 *
 793 * currently we always configure one rule and echo field to contain a CID and an
 794 * opcode type.
 795 */
 796static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
 797                                struct eth_classify_header *hdr, int rule_cnt)
 798{
 799        hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
 800                                (type << BNX2X_SWCID_SHIFT));
 801        hdr->rule_cnt = (uint8_t)rule_cnt;
 802}
 803
/* hw_config() callbacks */
/* Build the E2 classify-rules ramrod entry (or entries, for MOVE) that
 * adds/deletes one MAC, and update the NIG LLH CAM where relevant.
 */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
                                 struct bnx2x_vlan_mac_obj *o,
                                 struct bnx2x_exeq_elem *elem, int rule_idx,
                                 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     BNX2X_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   (add ? "add" : "delete"), mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac =
		cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		/* A MOVE consumes a second consecutive rule slot */
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
			cpu_to_le16(elem->cmd_data.vlan_mac.
						u.mac.is_inner_mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
 889
 890/**
 891 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 892 *
 893 * @bp:         device handle
 894 * @o:          queue
 895 * @type:
 896 * @cam_offset: offset in cam memory
 897 * @hdr:        pointer to a header to setup
 898 *
 899 * E1/E1H
 900 */
 901static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
 902        struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
 903        struct mac_configuration_hdr *hdr)
 904{
 905        struct bnx2x_raw_obj *r = &o->raw;
 906
 907        hdr->length = 1;
 908        hdr->offset = (uint8_t)cam_offset;
 909        hdr->client_id = cpu_to_le16(0xff);
 910        hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
 911                                (type << BNX2X_SWCID_SHIFT));
 912}
 913
 914static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
 915        struct bnx2x_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac,
 916        uint16_t vlan_id, struct mac_configuration_entry *cfg_entry)
 917{
 918        struct bnx2x_raw_obj *r = &o->raw;
 919        uint32_t cl_bit_vec = (1 << r->cl_id);
 920
 921        cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
 922        cfg_entry->pf_id = r->func_id;
 923        cfg_entry->vlan_id = cpu_to_le16(vlan_id);
 924
 925        if (add) {
 926                SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 927                         T_ETH_MAC_COMMAND_SET);
 928                SET_FLAG(cfg_entry->flags,
 929                         MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
 930
 931                /* Set a MAC in a ramrod data */
 932                bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
 933                                      &cfg_entry->middle_mac_addr,
 934                                      &cfg_entry->lsb_mac_addr, mac);
 935        } else
 936                SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 937                         T_ETH_MAC_COMMAND_INVALIDATE);
 938}
 939
 940static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
 941        struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
 942        uint8_t *mac, uint16_t vlan_id, int opcode,
 943                                                struct mac_configuration_cmd *config)
 944{
 945        struct mac_configuration_entry *cfg_entry = &config->config_table[0];
 946        struct bnx2x_raw_obj *raw = &o->raw;
 947
 948        bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
 949                                         &config->hdr);
 950        bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
 951                                         cfg_entry);
 952
 953        DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
 954                         (add ? "setting" : "clearing"),
 955                         mac, raw->cl_id, cam_offset);
 956}
 957
 958/**
 959 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 960 *
 961 * @bp:         device handle
 962 * @o:          bnx2x_vlan_mac_obj
 963 * @elem:       bnx2x_exeq_elem
 964 * @rule_idx:   rule_idx
 965 * @cam_offset: cam_offset
 966 */
 967static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
 968                                  struct bnx2x_vlan_mac_obj *o,
 969                                  struct bnx2x_exeq_elem *elem, int rule_idx,
 970                                  int cam_offset)
 971{
 972        struct bnx2x_raw_obj *raw = &o->raw;
 973        struct mac_configuration_cmd *config =
 974                (struct mac_configuration_cmd *)(raw->rdata);
 975        /* 57710 and 57711 do not support MOVE command,
 976         * so it's either ADD or DEL
 977         */
 978        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
 979                true : false;
 980
 981        /* Reset the ramrod data buffer */
 982        memset(config, 0, sizeof(*config));
 983
 984        bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
 985                                     cam_offset, add,
 986                                     elem->cmd_data.vlan_mac.u.mac.mac, 0,
 987                                     ETH_VLAN_FILTER_ANY_VLAN, config);
 988}
 989
/**
 * bnx2x_set_one_vlan_e2 - fill a single VLAN classification rule (E2 and newer)
 *
 * @bp:         device handle
 * @o:          bnx2x_vlan_mac_obj
 * @elem:       command element: carries the VLAN id and the command (ADD/DEL/MOVE)
 * @rule_idx:   index of this rule within the ramrod data buffer
 * @cam_offset: unused in the E2 flow
 */
static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
			 vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: add a second rule that will add this VLAN on the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data: the second rule is an ADD on the target */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_VLAN,
					      &rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
1039
1040/**
1041 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1042 *
1043 * @bp:         device handle
1044 * @p:          command parameters
1045 * @ppos:       pointer to the cookie
1046 *
1047 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1048 * previously configured elements list.
1049 *
1050 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
1051 * into an account
1052 *
1053 * pointer to the cookie  - that should be given back in the next call to make
1054 * function handle the next element. If *ppos is set to NULL it will restart the
1055 * iterator. If returned *ppos == NULL this means that the last element has been
1056 * handled.
1057 *
1058 */
1059static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1060                           struct bnx2x_vlan_mac_ramrod_params *p,
1061                           struct bnx2x_vlan_mac_registry_elem **ppos)
1062{
1063        struct bnx2x_vlan_mac_registry_elem *pos;
1064        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1065
1066        /* If list is empty - there is nothing to do here */
1067        if (list_empty(&o->head)) {
1068                *ppos = NULL;
1069                return 0;
1070        }
1071
1072        /* make a step... */
1073        if (*ppos == NULL)
1074                *ppos = list_first_entry(&o->head,
1075                                         struct bnx2x_vlan_mac_registry_elem,
1076                                         link);
1077        else
1078                *ppos = list_next_entry(*ppos, link);
1079
1080        pos = *ppos;
1081
1082        /* If it's the last step - return NULL */
1083        if (list_is_last(&pos->link, &o->head))
1084                *ppos = NULL;
1085
1086        /* Prepare a 'user_req' */
1087        memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1088
1089        /* Set the command */
1090        p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1091
1092        /* Set vlan_mac_flags */
1093        p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1094
1095        /* Set a restore bit */
1096        __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1097
1098        return bnx2x_config_vlan_mac(bp, p);
1099}
1100
1101/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1102 * pointer to an element with a specific criteria and NULL if such an element
1103 * hasn't been found.
1104 */
1105static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1106        struct bnx2x_exe_queue_obj *o,
1107        struct bnx2x_exeq_elem *elem)
1108{
1109        struct bnx2x_exeq_elem *pos;
1110        struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1111
1112        /* Check pending for execution commands */
1113        list_for_each_entry(pos, &o->exe_queue, link)
1114                if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1115                              sizeof(*data)) &&
1116                    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1117                        return pos;
1118
1119        return NULL;
1120}
1121
1122static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1123        struct bnx2x_exe_queue_obj *o,
1124        struct bnx2x_exeq_elem *elem)
1125{
1126        struct bnx2x_exeq_elem *pos;
1127        struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1128
1129        /* Check pending for execution commands */
1130        list_for_each_entry(pos, &o->exe_queue, link)
1131                if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1132                              sizeof(*data)) &&
1133                    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1134                        return pos;
1135
1136        return NULL;
1137}
1138
1139/**
1140 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1141 *
1142 * @bp:         device handle
1143 * @qo:         bnx2x_qable_obj
1144 * @elem:       bnx2x_exeq_elem
1145 *
1146 * Checks that the requested configuration can be added. If yes and if
1147 * requested, consume CAM credit.
1148 *
1149 * The 'validate' is run after the 'optimize'.
1150 *
1151 */
1152static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1153                                              union bnx2x_qable_obj *qo,
1154                                              struct bnx2x_exeq_elem *elem)
1155{
1156        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1157        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1158        int rc;
1159
1160        /* Check the registry */
1161        rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1162        if (rc) {
1163                DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1164                return rc;
1165        }
1166
1167        /* Check if there is a pending ADD command for this
1168         * MAC/VLAN/VLAN-MAC. Return an error if there is.
1169         */
1170        if (exeq->get(exeq, elem)) {
1171                DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1172                return -EEXIST;
1173        }
1174
1175        /* TODO: Check the pending MOVE from other objects where this
1176         * object is a destination object.
1177         */
1178
1179        /* Consume the credit if not requested not to */
1180        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1181                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1182            o->get_credit(o)))
1183                return -EINVAL;
1184
1185        return 0;
1186}
1187
1188/**
1189 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1190 *
1191 * @bp:         device handle
1192 * @qo:         quable object to check
1193 * @elem:       element that needs to be deleted
1194 *
1195 * Checks that the requested configuration can be deleted. If yes and if
1196 * requested, returns a CAM credit.
1197 *
1198 * The 'validate' is run after the 'optimize'.
1199 */
1200static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1201                                              union bnx2x_qable_obj *qo,
1202                                              struct bnx2x_exeq_elem *elem)
1203{
1204        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1205        struct bnx2x_vlan_mac_registry_elem *pos;
1206        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1207        struct bnx2x_exeq_elem query_elem;
1208
1209        /* If this classification can not be deleted (doesn't exist)
1210         * - return a BNX2X_EXIST.
1211         */
1212        pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1213        if (!pos) {
1214                DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1215                return -EEXIST;
1216        }
1217
1218        /* Check if there are pending DEL or MOVE commands for this
1219         * MAC/VLAN/VLAN-MAC. Return an error if so.
1220         */
1221        memcpy(&query_elem, elem, sizeof(query_elem));
1222
1223        /* Check for MOVE commands */
1224        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1225        if (exeq->get(exeq, &query_elem)) {
1226                BNX2X_ERR("There is a pending MOVE command already\n");
1227                return -EINVAL;
1228        }
1229
1230        /* Check for DEL commands */
1231        if (exeq->get(exeq, elem)) {
1232                DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1233                return -EEXIST;
1234        }
1235
1236        /* Return the credit to the credit pool if not requested not to */
1237        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1238                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1239            o->put_credit(o))) {
1240                BNX2X_ERR("Failed to return a credit\n");
1241                return -EINVAL;
1242        }
1243
1244        return 0;
1245}
1246
1247/**
1248 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1249 *
1250 * @bp:         device handle
1251 * @qo:         quable object to check (source)
1252 * @elem:       element that needs to be moved
1253 *
1254 * Checks that the requested configuration can be moved. If yes and if
1255 * requested, returns a CAM credit.
1256 *
1257 * The 'validate' is run after the 'optimize'.
1258 */
1259static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1260                                               union bnx2x_qable_obj *qo,
1261                                               struct bnx2x_exeq_elem *elem)
1262{
1263        struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1264        struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1265        struct bnx2x_exeq_elem query_elem;
1266        struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1267        struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1268
1269        /* Check if we can perform this operation based on the current registry
1270         * state.
1271         */
1272        if (!src_o->check_move(bp, src_o, dest_o,
1273                               &elem->cmd_data.vlan_mac.u)) {
1274                DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1275                return -EINVAL;
1276        }
1277
1278        /* Check if there is an already pending DEL or MOVE command for the
1279         * source object or ADD command for a destination object. Return an
1280         * error if so.
1281         */
1282        memcpy(&query_elem, elem, sizeof(query_elem));
1283
1284        /* Check DEL on source */
1285        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1286        if (src_exeq->get(src_exeq, &query_elem)) {
1287                BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1288                return -EINVAL;
1289        }
1290
1291        /* Check MOVE on source */
1292        if (src_exeq->get(src_exeq, elem)) {
1293                DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1294                return -EEXIST;
1295        }
1296
1297        /* Check ADD on destination */
1298        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1299        if (dest_exeq->get(dest_exeq, &query_elem)) {
1300                BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1301                return -EINVAL;
1302        }
1303
1304        /* Consume the credit if not requested not to */
1305        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1306                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1307            dest_o->get_credit(dest_o)))
1308                return -EINVAL;
1309
1310        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1311                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1312            src_o->put_credit(src_o))) {
1313                /* return the credit taken from dest... */
1314                dest_o->put_credit(dest_o);
1315                return -EINVAL;
1316        }
1317
1318        return 0;
1319}
1320
1321static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1322                                   union bnx2x_qable_obj *qo,
1323                                   struct bnx2x_exeq_elem *elem)
1324{
1325        switch (elem->cmd_data.vlan_mac.cmd) {
1326        case BNX2X_VLAN_MAC_ADD:
1327                return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1328        case BNX2X_VLAN_MAC_DEL:
1329                return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1330        case BNX2X_VLAN_MAC_MOVE:
1331                return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1332        default:
1333                return -EINVAL;
1334        }
1335}
1336
1337static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1338                                  union bnx2x_qable_obj *qo,
1339                                  struct bnx2x_exeq_elem *elem)
1340{
1341        int rc = 0;
1342
1343        /* If consumption wasn't required, nothing to do */
1344        if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1345                     &elem->cmd_data.vlan_mac.vlan_mac_flags))
1346                return 0;
1347
1348        switch (elem->cmd_data.vlan_mac.cmd) {
1349        case BNX2X_VLAN_MAC_ADD:
1350        case BNX2X_VLAN_MAC_MOVE:
1351                rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1352                break;
1353        case BNX2X_VLAN_MAC_DEL:
1354                rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1355                break;
1356        default:
1357                return -EINVAL;
1358        }
1359
1360        if (rc != true)
1361                return -EINVAL;
1362
1363        return 0;
1364}
1365
1366/**
1367 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1368 *
1369 * @bp:         device handle
1370 * @o:          bnx2x_vlan_mac_obj
1371 *
1372 */
1373static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1374                               struct bnx2x_vlan_mac_obj *o)
1375{
1376        int cnt = 5000, rc;
1377        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1378        struct bnx2x_raw_obj *raw = &o->raw;
1379
1380        while (cnt--) {
1381                /* Wait for the current command to complete */
1382                rc = raw->wait_comp(bp, raw);
1383                if (rc)
1384                        return rc;
1385
1386                /* Wait until there are no pending commands */
1387                if (!bnx2x_exe_queue_empty(exeq))
1388                        kthread_usleep(1000);
1389                else
1390                        return 0;
1391        }
1392
1393        return -EBUSY;
1394}
1395
/**
 * __bnx2x_vlan_mac_execute_step - try to run the next chunk of pending commands
 *
 * @bp:           device handle
 * @o:            vlan_mac object whose execution queue should be stepped
 * @ramrod_flags: flags to execute (or pend) with
 *
 * Tries to take the vlan_mac writer lock first; if it is busy, the request
 * is recorded to be executed later (when the lock is released) and 1 is
 * returned, as if a ramrod were already pending. Otherwise the return
 * value of bnx2x_exe_queue_step() is propagated.
 */
static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
					 struct bnx2x_vlan_mac_obj *o,
					 unsigned long *ramrod_flags)
{
	int rc = 0;

	/* Serialize with other users of the execution queue */
	spin_lock(&o->exe_queue.lock);

	DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
	rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);

	if (rc != 0) {
		/* Writer lock busy - queue this request for later */
		__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);

		/* Calling function should not differentiate between this case
		 * and the case in which there is already a pending ramrod
		 */
		rc = 1;
	} else {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
	}
	spin_unlock(&o->exe_queue.lock);

	return rc;
}
1421
1422/**
1423 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1424 *
1425 * @bp:         device handle
1426 * @o:          bnx2x_vlan_mac_obj
1427 * @cqe:
1428 * @cont:       if true schedule next execution chunk
1429 *
1430 */
1431static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1432                                   struct bnx2x_vlan_mac_obj *o,
1433                                   union event_ring_elem *cqe,
1434                                   unsigned long *ramrod_flags)
1435{
1436        struct bnx2x_raw_obj *r = &o->raw;
1437        int rc;
1438
1439        /* Clearing the pending list & raw state should be made
1440         * atomically (as execution flow assumes they represent the same).
1441         */
1442        spin_lock(&o->exe_queue.lock);
1443
1444        /* Reset pending list */
1445        __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1446
1447        /* Clear pending */
1448        r->clear_pending(r);
1449
1450        spin_unlock(&o->exe_queue.lock);
1451
1452        /* If ramrod failed this is most likely a SW bug */
1453        if (cqe->message.error)
1454                return -EINVAL;
1455
1456        /* Run the next bulk of pending commands if requested */
1457        if (test_bit(RAMROD_CONT, ramrod_flags)) {
1458                rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
1459
1460                if (rc < 0)
1461                        return rc;
1462        }
1463
1464        /* If there is more work to do return PENDING */
1465        if (!bnx2x_exe_queue_empty(&o->exe_queue))
1466                return 1;
1467
1468        return 0;
1469}
1470
1471/**
1472 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1473 *
1474 * @bp:         device handle
1475 * @o:          bnx2x_qable_obj
1476 * @elem:       bnx2x_exeq_elem
1477 */
1478static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1479                                   union bnx2x_qable_obj *qo,
1480                                   struct bnx2x_exeq_elem *elem)
1481{
1482        struct bnx2x_exeq_elem query, *pos;
1483        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1484        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1485
1486        memcpy(&query, elem, sizeof(query));
1487
1488        switch (elem->cmd_data.vlan_mac.cmd) {
1489        case BNX2X_VLAN_MAC_ADD:
1490                query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1491                break;
1492        case BNX2X_VLAN_MAC_DEL:
1493                query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1494                break;
1495        default:
1496                /* Don't handle anything other than ADD or DEL */
1497                return 0;
1498        }
1499
1500        /* If we found the appropriate element - delete it */
1501        pos = exeq->get(exeq, &query);
1502        if (pos) {
1503
1504                /* Return the credit of the optimized command */
1505                if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1506                              &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1507                        if ((query.cmd_data.vlan_mac.cmd ==
1508                             BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1509                                BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1510                                return -EINVAL;
1511                        } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1512                                BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1513                                return -EINVAL;
1514                        }
1515                }
1516
1517                DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1518                           (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1519                           "ADD" : "DEL");
1520
1521                list_del(&pos->link);
1522                bnx2x_exe_queue_free_elem(bp, pos);
1523                return 1;
1524        }
1525
1526        return 0;
1527}
1528
1529/**
1530 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1531 *
1532 * @bp:   device handle
1533 * @o:
1534 * @elem:
1535 * @restore:
1536 * @re:
1537 *
1538 * prepare a registry element according to the current command request.
1539 */
1540static inline int bnx2x_vlan_mac_get_registry_elem(
1541        struct bnx2x *bp,
1542        struct bnx2x_vlan_mac_obj *o,
1543        struct bnx2x_exeq_elem *elem,
1544        bool restore,
1545        struct bnx2x_vlan_mac_registry_elem **re)
1546{
1547        enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1548        struct bnx2x_vlan_mac_registry_elem *reg_elem;
1549
1550        /* Allocate a new registry element if needed. */
1551        if (!restore &&
1552            ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1553                reg_elem = kzmalloc(sizeof(*reg_elem), 0);
1554                if (!reg_elem)
1555                        return -ENOMEM;
1556
1557                /* Get a new CAM offset */
1558                if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1559                        /* This shall never happen, because we have checked the
1560                         * CAM availability in the 'validate'.
1561                         */
1562                        warn_on(1);
1563                        kfree(reg_elem);
1564                        return -EINVAL;
1565                }
1566
1567                DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1568
1569                /* Set a VLAN-MAC data */
1570                memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1571                          sizeof(reg_elem->u));
1572
1573                /* Copy the flags (needed for DEL and RESTORE flows) */
1574                reg_elem->vlan_mac_flags =
1575                        elem->cmd_data.vlan_mac.vlan_mac_flags;
1576        } else /* DEL, RESTORE */
1577                reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1578
1579        *re = reg_elem;
1580        return 0;
1581}
1582
1583/**
1584 * bnx2x_execute_vlan_mac - execute vlan mac command
1585 *
1586 * @bp:                 device handle
1587 * @qo:
1588 * @exe_chunk:
1589 * @ramrod_flags:
1590 *
1591 * go and send a ramrod!
1592 */
1593static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1594                                  union bnx2x_qable_obj *qo,
1595                                  struct list_head *exe_chunk,
1596                                  unsigned long *ramrod_flags)
1597{
1598        struct bnx2x_exeq_elem *elem;
1599        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1600        struct bnx2x_raw_obj *r = &o->raw;
1601        int rc, idx = 0;
1602        bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1603        bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1604        struct bnx2x_vlan_mac_registry_elem *reg_elem;
1605        enum bnx2x_vlan_mac_cmd cmd;
1606
1607        /* If DRIVER_ONLY execution is requested, cleanup a registry
1608         * and exit. Otherwise send a ramrod to FW.
1609         */
1610        if (!drv_only) {
1611                warn_on(r->check_pending(r));
1612
1613                /* Set pending */
1614                r->set_pending(r);
1615
1616                /* Fill the ramrod data */
1617                list_for_each_entry(elem, exe_chunk, link) {
1618                        cmd = elem->cmd_data.vlan_mac.cmd;
1619                        /* We will add to the target object in MOVE command, so
1620                         * change the object for a CAM search.
1621                         */
1622                        if (cmd == BNX2X_VLAN_MAC_MOVE)
1623                                cam_obj = elem->cmd_data.vlan_mac.target_obj;
1624                        else
1625                                cam_obj = o;
1626
1627                        rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1628                                                              elem, restore,
1629                                                              &reg_elem);
1630                        if (rc)
1631                                goto error_exit;
1632
1633                        warn_on(!reg_elem);
1634
1635                        /* Push a new entry into the registry */
1636                        if (!restore &&
1637                            ((cmd == BNX2X_VLAN_MAC_ADD) ||
1638                            (cmd == BNX2X_VLAN_MAC_MOVE)))
1639                                list_add(&reg_elem->link, &cam_obj->head);
1640
1641                        /* Configure a single command in a ramrod data buffer */
1642                        o->set_one_rule(bp, o, elem, idx,
1643                                        reg_elem->cam_offset);
1644
1645                        /* MOVE command consumes 2 entries in the ramrod data */
1646                        if (cmd == BNX2X_VLAN_MAC_MOVE)
1647                                idx += 2;
1648                        else
1649                                idx++;
1650                }
1651
1652                /* No need for an explicit memory barrier here as long we would
1653                 * need to ensure the ordering of writing to the SPQ element
1654                 * and updating of the SPQ producer which involves a memory
1655                 * read and we will have to put a full memory barrier there
1656                 * (inside bnx2x_sp_post()).
1657                 */
1658
1659                rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1660                                   U64_HI(r->rdata_mapping),
1661                                   U64_LO(r->rdata_mapping),
1662                                   ETH_CONNECTION_TYPE);
1663                if (rc)
1664                        goto error_exit;
1665        }
1666
1667        /* Now, when we are done with the ramrod - clean up the registry */
1668        list_for_each_entry(elem, exe_chunk, link) {
1669                cmd = elem->cmd_data.vlan_mac.cmd;
1670                if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1671                    (cmd == BNX2X_VLAN_MAC_MOVE)) {
1672                        reg_elem = o->check_del(bp, o,
1673                                                &elem->cmd_data.vlan_mac.u);
1674
1675                        warn_on(!reg_elem);
1676
1677                        o->put_cam_offset(o, reg_elem->cam_offset);
1678                        list_del(&reg_elem->link);
1679                        kfree(reg_elem);
1680                }
1681        }
1682
1683        if (!drv_only)
1684                return 1;
1685        else
1686                return 0;
1687
1688error_exit:
1689        r->clear_pending(r);
1690
1691        /* Cleanup a registry in case of a failure */
1692        list_for_each_entry(elem, exe_chunk, link) {
1693                cmd = elem->cmd_data.vlan_mac.cmd;
1694
1695                if (cmd == BNX2X_VLAN_MAC_MOVE)
1696                        cam_obj = elem->cmd_data.vlan_mac.target_obj;
1697                else
1698                        cam_obj = o;
1699
1700                /* Delete all newly added above entries */
1701                if (!restore &&
1702                    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1703                    (cmd == BNX2X_VLAN_MAC_MOVE))) {
1704                        reg_elem = o->check_del(bp, cam_obj,
1705                                                &elem->cmd_data.vlan_mac.u);
1706                        if (reg_elem) {
1707                                list_del(&reg_elem->link);
1708                                kfree(reg_elem);
1709                        }
1710                }
1711        }
1712
1713        return rc;
1714}
1715
1716static inline int bnx2x_vlan_mac_push_new_cmd(
1717        struct bnx2x *bp,
1718        struct bnx2x_vlan_mac_ramrod_params *p)
1719{
1720        struct bnx2x_exeq_elem *elem;
1721        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1722        bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1723
1724        /* Allocate the execution queue element */
1725        elem = bnx2x_exe_queue_alloc_elem(bp);
1726        if (!elem)
1727                return -ENOMEM;
1728
1729        /* Set the command 'length' */
1730        switch (p->user_req.cmd) {
1731        case BNX2X_VLAN_MAC_MOVE:
1732                elem->cmd_len = 2;
1733                break;
1734        default:
1735                elem->cmd_len = 1;
1736        }
1737
1738        /* Fill the object specific info */
1739        memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1740
1741        /* Try to add a new command to the pending list */
1742        return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1743}
1744
1745/**
1746 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1747 *
1748 * @bp:   device handle
1749 * @p:
1750 *
1751 */
1752int bnx2x_config_vlan_mac(struct bnx2x *bp,
1753                           struct bnx2x_vlan_mac_ramrod_params *p)
1754{
1755        int rc = 0;
1756        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1757        unsigned long *ramrod_flags = &p->ramrod_flags;
1758        bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1759        struct bnx2x_raw_obj *raw = &o->raw;
1760
1761        /*
1762         * Add new elements to the execution list for commands that require it.
1763         */
1764        if (!cont) {
1765                rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1766                if (rc)
1767                        return rc;
1768        }
1769
1770        /* If nothing will be executed further in this iteration we want to
1771         * return PENDING if there are pending commands
1772         */
1773        if (!bnx2x_exe_queue_empty(&o->exe_queue))
1774                rc = 1;
1775
1776        if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
1777                DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1778                raw->clear_pending(raw);
1779        }
1780
1781        /* Execute commands if required */
1782        if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1783            test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1784                rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
1785                                                   &p->ramrod_flags);
1786                if (rc < 0)
1787                        return rc;
1788        }
1789
1790        /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1791         * then user want to wait until the last command is done.
1792         */
1793        if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1794                /* Wait maximum for the current exe_queue length iterations plus
1795                 * one (for the current pending command).
1796                 */
1797                int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1798
1799                while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1800                       max_iterations--) {
1801
1802                        /* Wait for the current command to complete */
1803                        rc = raw->wait_comp(bp, raw);
1804                        if (rc)
1805                                return rc;
1806
1807                        /* Make a next step */
1808                        rc = __bnx2x_vlan_mac_execute_step(bp,
1809                                                           p->vlan_mac_obj,
1810                                                           &p->ramrod_flags);
1811                        if (rc < 0)
1812                                return rc;
1813                }
1814
1815                return 0;
1816        }
1817
1818        return rc;
1819}
1820
1821/**
1822 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1823 *
1824 * @bp:                 device handle
1825 * @o:
1826 * @vlan_mac_flags:
1827 * @ramrod_flags:       execution flags to be used for this deletion
1828 *
1829 * if the last operation has completed successfully and there are no
1830 * more elements left, positive value if the last operation has completed
1831 * successfully and there are more previously configured elements, negative
1832 * value is current operation has failed.
1833 */
1834static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1835                                  struct bnx2x_vlan_mac_obj *o,
1836                                  unsigned long *vlan_mac_flags,
1837                                  unsigned long *ramrod_flags)
1838{
1839        struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1840        struct bnx2x_vlan_mac_ramrod_params p;
1841        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1842        struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1843        unsigned long flags;
1844        int read_lock;
1845        int rc = 0;
1846
1847        /* Clear pending commands first */
1848
1849        spin_lock(&exeq->lock);
1850
1851        list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1852                flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
1853                if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
1854                    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
1855                        rc = exeq->remove(bp, exeq->owner, exeq_pos);
1856                        if (rc) {
1857                                BNX2X_ERR("Failed to remove command\n");
1858                                spin_unlock(&exeq->lock);
1859                                return rc;
1860                        }
1861                        list_del(&exeq_pos->link);
1862                        bnx2x_exe_queue_free_elem(bp, exeq_pos);
1863                }
1864        }
1865
1866        spin_unlock(&exeq->lock);
1867
1868        /* Prepare a command request */
1869        memset(&p, 0, sizeof(p));
1870        p.vlan_mac_obj = o;
1871        p.ramrod_flags = *ramrod_flags;
1872        p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1873
1874        /* Add all but the last VLAN-MAC to the execution queue without actually
1875         * execution anything.
1876         */
1877        __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1878        __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1879        __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1880
1881        DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
1882        read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
1883        if (read_lock != 0)
1884                return read_lock;
1885
1886        list_for_each_entry(pos, &o->head, link) {
1887                flags = pos->vlan_mac_flags;
1888                if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
1889                    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
1890                        p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1891                        memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1892                        rc = bnx2x_config_vlan_mac(bp, &p);
1893                        if (rc < 0) {
1894                                BNX2X_ERR("Failed to add a new DEL command\n");
1895                                bnx2x_vlan_mac_h_read_unlock(bp, o);
1896                                return rc;
1897                        }
1898                }
1899        }
1900
1901        DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
1902        bnx2x_vlan_mac_h_read_unlock(bp, o);
1903
1904        p.ramrod_flags = *ramrod_flags;
1905        __set_bit(RAMROD_CONT, &p.ramrod_flags);
1906
1907        return bnx2x_config_vlan_mac(bp, &p);
1908}
1909
1910static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw,
1911                                      uint8_t cl_id,
1912        uint32_t cid, uint8_t func_id, void *rdata,
1913                                      dma_addr_t rdata_mapping,
1914                                      int state,
1915        unsigned long *pstate, bnx2x_obj_type type)
1916{
1917        raw->func_id = func_id;
1918        raw->cid = cid;
1919        raw->cl_id = cl_id;
1920        raw->rdata = rdata;
1921        raw->rdata_mapping = rdata_mapping;
1922        raw->state = state;
1923        raw->pstate = pstate;
1924        raw->obj_type = type;
1925        raw->check_pending = bnx2x_raw_check_pending;
1926        raw->clear_pending = bnx2x_raw_clear_pending;
1927        raw->set_pending = bnx2x_raw_set_pending;
1928        raw->wait_comp = bnx2x_raw_wait;
1929}
1930
1931static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1932        uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
1933                                              dma_addr_t rdata_mapping,
1934        int state, unsigned long *pstate, bnx2x_obj_type type,
1935        struct bnx2x_credit_pool_obj *macs_pool,
1936        struct bnx2x_credit_pool_obj *vlans_pool)
1937{
1938        INIT_LIST_HEAD(&o->head);
1939        o->head_reader = 0;
1940        o->head_exe_request = false;
1941        o->saved_ramrod_flags = 0;
1942
1943        o->macs_pool = macs_pool;
1944        o->vlans_pool = vlans_pool;
1945
1946        o->delete_all = bnx2x_vlan_mac_del_all;
1947        o->restore = bnx2x_vlan_mac_restore;
1948        o->complete = bnx2x_complete_vlan_mac;
1949        o->wait = bnx2x_wait_vlan_mac;
1950
1951        bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1952                           state, pstate, type);
1953}
1954
1955void bnx2x_init_mac_obj(struct bnx2x *bp,
1956                        struct bnx2x_vlan_mac_obj *mac_obj,
1957                        uint8_t cl_id, uint32_t cid, uint8_t func_id,
1958                        void *rdata,
1959                        dma_addr_t rdata_mapping, int state,
1960                        unsigned long *pstate, bnx2x_obj_type type,
1961                        struct bnx2x_credit_pool_obj *macs_pool)
1962{
1963        union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1964
1965        bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1966                                   rdata_mapping, state, pstate, type,
1967                                   macs_pool, NULL);
1968
1969        /* CAM credit pool handling */
1970        mac_obj->get_credit = bnx2x_get_credit_mac;
1971        mac_obj->put_credit = bnx2x_put_credit_mac;
1972        mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1973        mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1974
1975        if (CHIP_IS_E1x(bp)) {
1976                mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1977                mac_obj->check_del         = bnx2x_check_mac_del;
1978                mac_obj->check_add         = bnx2x_check_mac_add;
1979                mac_obj->check_move        = bnx2x_check_move_always_err;
1980                mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1981
1982                /* Exe Queue */
1983                bnx2x_exe_queue_init(bp,
1984                                     &mac_obj->exe_queue, 1, qable_obj,
1985                                     bnx2x_validate_vlan_mac,
1986                                     bnx2x_remove_vlan_mac,
1987                                     bnx2x_optimize_vlan_mac,
1988                                     bnx2x_execute_vlan_mac,
1989                                     bnx2x_exeq_get_mac);
1990        } else {
1991                mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1992                mac_obj->check_del         = bnx2x_check_mac_del;
1993                mac_obj->check_add         = bnx2x_check_mac_add;
1994                mac_obj->check_move        = bnx2x_check_move;
1995                mac_obj->ramrod_cmd        =
1996                        RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1997                mac_obj->get_n_elements    = bnx2x_get_n_elements;
1998
1999                /* Exe Queue */
2000                bnx2x_exe_queue_init(bp,
2001                                     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2002                                     qable_obj, bnx2x_validate_vlan_mac,
2003                                     bnx2x_remove_vlan_mac,
2004                                     bnx2x_optimize_vlan_mac,
2005                                     bnx2x_execute_vlan_mac,
2006                                     bnx2x_exeq_get_mac);
2007        }
2008}
2009
2010void bnx2x_init_vlan_obj(struct bnx2x *bp,
2011                         struct bnx2x_vlan_mac_obj *vlan_obj,
2012                         uint8_t cl_id, uint32_t cid, uint8_t func_id,
2013                         void *rdata,
2014                         dma_addr_t rdata_mapping, int state,
2015                         unsigned long *pstate, bnx2x_obj_type type,
2016                         struct bnx2x_credit_pool_obj *vlans_pool)
2017{
2018        union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2019
2020        bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2021                                   rdata_mapping, state, pstate, type, NULL,
2022                                   vlans_pool);
2023
2024        vlan_obj->get_credit = bnx2x_get_credit_vlan;
2025        vlan_obj->put_credit = bnx2x_put_credit_vlan;
2026        vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2027        vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2028
2029        if (CHIP_IS_E1x(bp)) {
2030                BNX2X_ERR("Do not support chips others than E2 and newer\n");
2031                panic("BUG");
2032        } else {
2033                vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
2034                vlan_obj->check_del         = bnx2x_check_vlan_del;
2035                vlan_obj->check_add         = bnx2x_check_vlan_add;
2036                vlan_obj->check_move        = bnx2x_check_move;
2037                vlan_obj->ramrod_cmd        =
2038                        RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2039                vlan_obj->get_n_elements    = bnx2x_get_n_elements;
2040
2041                /* Exe Queue */
2042                bnx2x_exe_queue_init(bp,
2043                                     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2044                                     qable_obj, bnx2x_validate_vlan_mac,
2045                                     bnx2x_remove_vlan_mac,
2046                                     bnx2x_optimize_vlan_mac,
2047                                     bnx2x_execute_vlan_mac,
2048                                     bnx2x_exeq_get_vlan);
2049        }
2050}
2051
2052/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2053static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2054                        struct tstorm_eth_mac_filter_config *mac_filters,
2055                        uint16_t pf_id)
2056{
2057        size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2058
2059        uint32_t addr = BAR_TSTRORM_INTMEM +
2060                        TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2061
2062        __storm_memset_struct(bp, addr, size, (uint32_t *)mac_filters);
2063}
2064
/* Configure the rx_mode on an E1x chip by directly rewriting this
 * client's bits in the per-PF TSTORM MAC filter config. Completes
 * synchronously (no ramrod), so the pending bit is cleared here.
 * Returns 0.
 */
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* update the bp MAC filter structure */
	uint32_t mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
	uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	uint8_t unmatched_unicast = 0;

	/* In e1x we only take into account the rx accept flags since tx
	 * switching isn't enabled.
	 */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	/* Set or clear only this client's bit (mask) in each filter word */
	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure*/
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation is completed */
	clear_bit(p->state, p->pstate);
	cmb();

	return 0;
}
2145
2146/* Setup ramrod data */
2147static inline void bnx2x_rx_mode_set_rdata_hdr_e2(uint32_t cid,
2148                                struct eth_classify_header *hdr,
2149                                uint8_t rule_cnt)
2150{
2151        hdr->echo = cpu_to_le32(cid);
2152        hdr->rule_cnt = rule_cnt;
2153}
2154
2155static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2156                                unsigned long *accept_flags,
2157                                struct eth_filter_rules_cmd *cmd,
2158                                bool clear_accept_all)
2159{
2160        uint16_t state;
2161
2162        /* start with 'drop-all' */
2163        state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2164                ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2165
2166        if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2167                state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2168
2169        if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2170                state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2171
2172        if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2173                state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2174                state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2175        }
2176
2177        if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2178                state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2179                state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2180        }
2181
2182        if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2183                state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2184
2185        if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2186                state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2187                state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2188        }
2189
2190        if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2191                state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2192
2193        /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2194        if (clear_accept_all) {
2195                state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2196                state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2197                state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2198                state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2199        }
2200
2201        cmd->state = cpu_to_le16(state);
2202}
2203
/* Configure the rx_mode on E2+ chips by building a filter-rules ramrod:
 * up to one Tx and one Rx rule for the ethernet client, plus the same
 * pair for the FCoE client when requested. Returns 1 (completion
 * pending) on success or a negative error from bnx2x_sp_post().
 */
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	uint8_t rule_idx = 0;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
	}

	/* Rx */
	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
					       &(data->rules[rule_idx++]),
					       false);
	}

	/* If FCoE Queue configuration has been requested configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/*  Tx (internal switching) */
		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_TX_CMD;

			/* clear_accept_all=true: strip ACCEPT_ALL bits */
			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}

		/* Rx */
		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_RX_CMD;

			/* clear_accept_all=true: strip ACCEPT_ALL bits */
			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}
	}

	/* Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
			 data->header.rule_cnt, p->rx_accept_flags,
			 p->tx_accept_flags);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}
2305
/* E2+ rx_mode completion: block until the pending state bit clears. */
static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
				      struct bnx2x_rx_mode_ramrod_params *p)
{
	return bnx2x_state_wait(bp, p->state, p->pstate);
}
2311
/* E1x rx_mode completion: configuration is synchronous, so there is
 * nothing to wait for.
 */
static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
				    struct bnx2x_rx_mode_ramrod_params *p)
{
	/* Do nothing */
	return 0;
}
2318
2319int bnx2x_config_rx_mode(struct bnx2x *bp,
2320                         struct bnx2x_rx_mode_ramrod_params *p)
2321{
2322        int rc;
2323
2324        /* Configure the new classification in the chip */
2325        rc = p->rx_mode_obj->config_rx_mode(bp, p);
2326        if (rc < 0)
2327                return rc;
2328
2329        /* Wait for a ramrod completion if was requested */
2330        if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2331                rc = p->rx_mode_obj->wait_comp(bp, p);
2332                if (rc)
2333                        return rc;
2334        }
2335
2336        return rc;
2337}
2338
2339void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2340                            struct bnx2x_rx_mode_obj *o)
2341{
2342        if (CHIP_IS_E1x(bp)) {
2343                o->wait_comp      = bnx2x_empty_rx_mode_wait;
2344                o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2345        } else {
2346                o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2347                o->config_rx_mode = bnx2x_set_rx_mode_e2;
2348        }
2349}
2350
/********************* Multicast verbs: SET, CLEAR ****************************/

/* Map a MAC address to a multicast approximate-match bin.
 * AKAROS_PORT: crc32c_le is unavailable, so this always panics; the
 * bin-based multicast flow must not be reached on this port.
 */
static inline uint8_t bnx2x_mcast_bin_from_mac(uint8_t *mac)
{
panic("Not implemented");
#if 0 // AKAROS_PORT
	return (crc32c_le(0, mac, Eaddrlen) >> 24) & 0xff;
#endif
}
2359
/* One MAC on a pending ADD command's list; allocated in bulk right
 * after the owning bnx2x_pending_mcast_cmd (see bnx2x_mcast_enqueue_cmd).
 */
struct bnx2x_mcast_mac_elem {
	struct list_head link;
	uint8_t mac[Eaddrlen];
	uint8_t pad[2]; /* For a natural alignment of the following buffer */
};

/* A multicast command queued on an mcast object's pending list. */
struct bnx2x_pending_mcast_cmd {
	struct list_head link;
	int type; /* BNX2X_MCAST_CMD_X */
	/* Per-command payload; which member is valid depends on @type */
	union {
		struct list_head macs_head;	/* ADD: MACs to configure */
		uint32_t macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with aprox match */
	} data;

	bool done; /* set to true, when the command has been handled,
		    * practically used in 57712 handling only, where one pending
		    * command may be handled in a few operations. As long as for
		    * other chips every operation handling is completed in a
		    * single ramrod, there is no need to utilize this field.
		    */
};
2382
/* Wait until the mcast object is unscheduled and its raw object has no
 * pending completion. Returns 0 on success or -EBUSY on timeout.
 */
static int bnx2x_mcast_wait(struct bnx2x *bp,
			    struct bnx2x_mcast_obj *o)
{
	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
			o->raw.wait_comp(bp, &o->raw))
		return -EBUSY;

	return 0;
}
2392
/* Queue a multicast command (ADD/DEL/RESTORE) on the object's pending
 * list and mark the object scheduled. For ADD, the command and its MAC
 * elements are carved out of a single allocation: the elements live in
 * the buffer immediately after the command struct.
 *
 * Returns 1 when a command was enqueued, 0 when the request is empty
 * ("handle pending commands only"), or a negative error.
 */
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
				   struct bnx2x_mcast_obj *o,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	int total_sz;
	struct bnx2x_pending_mcast_cmd *new_cmd;
	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
	struct bnx2x_mcast_list_elem *pos;
	/* Only ADD carries MACs; DEL/RESTORE need just the command struct */
	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
			     p->mcast_list_len : 0);

	/* If the command is empty ("handle pending commands only"), break */
	if (!p->mcast_list_len)
		return 0;

	/* Single buffer: command struct followed by macs_list_len elements */
	total_sz = sizeof(*new_cmd) +
		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);

	/* Called under a spinlock in the original (GFP_ATOMIC) code;
	 * AKAROS_PORT passes flags 0 to kzmalloc here - NOTE(review):
	 * confirm this cannot block while the lock is held.
	 */
	new_cmd = kzmalloc(total_sz, 0);

	if (!new_cmd)
		return -ENOMEM;

	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
	   cmd, macs_list_len);

	INIT_LIST_HEAD(&new_cmd->data.macs_head);

	new_cmd->type = cmd;
	new_cmd->done = false;

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		/* First MAC element sits right after the command struct */
		cur_mac = (struct bnx2x_mcast_mac_elem *)
			  ((uint8_t *)new_cmd + sizeof(*new_cmd));

		/* Push the MACs of the current command into the pending command
		 * MACs list: FIFO
		 */
		list_for_each_entry(pos, &p->mcast_list, link) {
			memcpy(cur_mac->mac, pos->mac, Eaddrlen);
			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

	case BNX2X_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
		kfree(new_cmd);
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Push the new pending command to the tail of the pending list: FIFO */
	list_add_tail(&new_cmd->link, &o->pending_cmds_head);

	/* Let the state machine know there is work queued */
	o->set_sched(o);

	return 1;
}
2463
2464/**
2465 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2466 *
2467 * @o:
2468 * @last:       index to start looking from (including)
2469 *
2470 * returns the next found (set) bin or a negative value if none is found.
2471 */
2472static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2473{
2474        int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2475
2476        for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2477                if (o->registry.aprox_match.vec[i])
2478                        for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2479                                int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2480                                if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2481                                                       vec, cur_bit)) {
2482                                        return cur_bit;
2483                                }
2484                        }
2485                inner_start = 0;
2486        }
2487
2488        /* None found */
2489        return -1;
2490}
2491
2492/**
2493 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2494 *
2495 * @o:
2496 *
2497 * returns the index of the found bin or -1 if none is found
2498 */
2499static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2500{
2501        int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2502
2503        if (cur_bit >= 0)
2504                BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2505
2506        return cur_bit;
2507}
2508
2509static inline uint8_t bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2510{
2511        struct bnx2x_raw_obj *raw = &o->raw;
2512        uint8_t rx_tx_flag = 0;
2513
2514        if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2515            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2516                rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2517
2518        if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2519            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2520                rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2521
2522        return rx_tx_flag;
2523}
2524
/**
 * bnx2x_mcast_set_one_rule_e2 - fill one rule in the E2 ramrod data buffer
 *
 * @bp:       device handle
 * @o:        multicast object
 * @idx:      rule index inside the ramrod data buffer
 * @cfg_data: MAC (for ADD) or bin (for RESTORE); unused (NULL) for DEL
 * @cmd:      BNX2X_MCAST_CMD_X
 *
 * Also keeps the approximate-match registry in sync: ADD sets the bin
 * derived from the MAC, DEL clears the first still-set bin.
 */
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
	uint8_t func_id = r->func_id;
	uint8_t rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
	int bin;

	/* Both ADD and RESTORE install a bin; only DEL removes one */
	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

	data->rules[idx].cmd_general_data |= rx_tx_add_flag;

	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin.
		 * See bnx2x_mcast_validate_e2() for explanation when it may
		 * happen.
		 */
		bin = bnx2x_mcast_clear_first_bin(o);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return;
	}

	DP(BNX2X_MSG_SP, "%s bin %d\n",
			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
			 "Setting"  : "Clearing"), bin);

	/* A negative bin (DEL with empty registry) truncates to 0xff here -
	 * intentional, see the DEL case comment above.
	 */
	data->rules[idx].bin_id    = (uint8_t)bin;
	data->rules[idx].func_id   = func_id;
	data->rules[idx].engine_id = o->engine_id;
}
2576
/**
 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
 *
 * @bp:         device handle
 * @o:          multicast object whose registry is walked
 * @start_bin:  index in the registry to start from (including)
 * @rdata_idx:  index in the ramrod data to start from; updated to the
 *              first unused line on return
 *
 * Emits one RESTORE rule per set bin until either the registry is
 * exhausted or the ramrod data buffer is full.
 *
 * returns last handled bin index or -1 if all bins have been handled
 */
static inline int bnx2x_mcast_handle_restore_cmd_e2(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
	int *rdata_idx)
{
	int cur_bin, cnt = *rdata_idx;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	/* go through the registry and configure the bins from it */
	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {

		cfg_data.bin = (uint8_t)cur_bin;
		o->set_one_rule(bp, o, cnt, &cfg_data,
				BNX2X_MCAST_CMD_RESTORE);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*rdata_idx = cnt;

	return cur_bin;
}
2617
/* Consume a pending ADD command: emit one rule per queued MAC and unlink
 * each configured MAC from the command's list. Stops early once the
 * ramrod data buffer is full; the command is marked done only when its
 * MAC list has been fully drained. *line_idx is advanced past the lines
 * used.
 */
static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
	int cnt = *line_idx;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	/* _safe variant: entries are unlinked while iterating */
	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
				 link) {

		cfg_data.mac = &pmac_pos->mac[0];
		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   pmac_pos->mac);

		/* Element memory belongs to the command's single allocation,
		 * so it is only unlinked here, not freed.
		 */
		list_del(&pmac_pos->link);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* if no more MACs to configure - we are done */
	if (list_empty(&cmd_pos->data.macs_head))
		cmd_pos->done = true;
}
2652
2653static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2654        struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2655        int *line_idx)
2656{
2657        int cnt = *line_idx;
2658
2659        while (cmd_pos->data.macs_num) {
2660                o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2661
2662                cnt++;
2663
2664                cmd_pos->data.macs_num--;
2665
2666                  DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2667                                   cmd_pos->data.macs_num, cnt);
2668
2669                /* Break if we reached the maximum
2670                 * number of rules.
2671                 */
2672                if (cnt >= o->max_cmd_len)
2673                        break;
2674        }
2675
2676        *line_idx = cnt;
2677
2678        /* If we cleared all bins - we are done */
2679        if (!cmd_pos->data.macs_num)
2680                cmd_pos->done = true;
2681}
2682
2683static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2684        struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2685        int *line_idx)
2686{
2687        cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2688                                                line_idx);
2689
2690        if (cmd_pos->data.next_bin < 0)
2691                /* If o->set_restore returned -1 we are done */
2692                cmd_pos->done = true;
2693        else
2694                /* Start from the next bin next time */
2695                cmd_pos->data.next_bin++;
2696}
2697
/* Drain as many pending commands as fit into the ramrod data buffer.
 * Fully handled commands are unlinked and freed; a partially handled
 * command stays at the head of the list and resumes on the next call.
 *
 * Returns the number of ramrod data lines filled, or -EINVAL on an
 * unknown command type.
 */
static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	/* _safe variant: completed commands are deleted while iterating */
	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
				 link) {
		switch (cmd_pos->type) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_DEL:
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
			return -EINVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory
		 */
		if (cmd_pos->done) {
			list_del(&cmd_pos->link);
			kfree(cmd_pos);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
2741
2742static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2743        struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2744        int *line_idx)
2745{
2746        struct bnx2x_mcast_list_elem *mlist_pos;
2747        union bnx2x_mcast_config_data cfg_data = {NULL};
2748        int cnt = *line_idx;
2749
2750        list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2751                cfg_data.mac = mlist_pos->mac;
2752                o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2753
2754                cnt++;
2755
2756                DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2757                   mlist_pos->mac);
2758        }
2759
2760        *line_idx = cnt;
2761}
2762
2763static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2764        struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2765        int *line_idx)
2766{
2767        int cnt = *line_idx, i;
2768
2769        for (i = 0; i < p->mcast_list_len; i++) {
2770                o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2771
2772                cnt++;
2773
2774                DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2775                                 p->mcast_list_len - i - 1);
2776        }
2777
2778        *line_idx = cnt;
2779}
2780
/**
 * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
 *
 * @bp:         device handle
 * @p:          ramrod parameters (MAC list, list length)
 * @cmd:        command to handle (BNX2X_MCAST_CMD_X)
 * @start_cnt:  first line in the ramrod data that may be used
 *
 * This function is called iff there is enough place for the current command in
 * the ramrod data.
 * Returns number of lines filled in the ramrod data in total, or
 * -EINVAL on an unknown command.
 */
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
			struct bnx2x_mcast_ramrod_params *p,
			enum bnx2x_mcast_cmd cmd,
			int start_cnt)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int cnt = start_cnt;

	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_DEL:
		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		/* RESTORE always walks the registry from bin 0 */
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* The current command has been handled */
	p->mcast_list_len = 0;

	return cnt;
}
2826
/* Validate an E2 (57712) multicast command and account for the work it
 * will generate: adjusts the registry size estimate and the total
 * number of MACs pending configuration. bnx2x_mcast_revert_e2() undoes
 * this accounting on failure.
 *
 * Returns 0 on success, -EINVAL on an unknown command.
 */
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break - intentional fallthrough: DEL then shares
		 * the list-length setup below (reg_sz captured above).
		 */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be only less as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin and the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * bnx2x_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return 0;
}
2871
2872static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2873                                      struct bnx2x_mcast_ramrod_params *p,
2874                                      int old_num_bins)
2875{
2876        struct bnx2x_mcast_obj *o = p->mcast_obj;
2877
2878        o->set_registry_size(o, old_num_bins);
2879        o->total_pending_num -= p->mcast_list_len;
2880}
2881
/**
 * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
 *
 * @bp:         device handle
 * @p:          ramrod parameters
 * @len:        number of rules to handle
 *
 * The echo field encodes the SW CID together with the MCAST_PENDING
 * filter state so the completion handler can match this ramrod.
 */
static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					uint8_t len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);

	data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
					(BNX2X_FILTER_MCAST_PENDING <<
					 BNX2X_SWCID_SHIFT));
	data->header.rule_cnt = len;
}
2902
2903/**
2904 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2905 *
2906 * @bp:         device handle
2907 * @o:
2908 *
2909 * Recalculate the actual number of set bins in the registry using Brian
2910 * Kernighan's algorithm: it's execution complexity is as a number of set bins.
2911 *
2912 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2913 */
2914static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2915                                                  struct bnx2x_mcast_obj *o)
2916{
2917        int i, cnt = 0;
2918        uint64_t elem;
2919
2920        for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2921                elem = o->registry.aprox_match.vec[i];
2922                for (; elem; cnt++)
2923                        elem &= elem - 1;
2924        }
2925
2926        o->set_registry_size(o, cnt);
2927
2928        return 0;
2929}
2930
/* Build and post an E2 multicast rules ramrod: pending commands are
 * drained into the ramrod data first and, if room remains, the current
 * command is handled directly.
 *
 * Returns 1 when a ramrod was posted and its completion is pending,
 * 0 when CLEAR_ONLY was requested, negative error otherwise.
 */
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* NOTE(review): a -EINVAL return from the call below is not checked
	 * and would flow into the accounting as a negative cnt - confirm
	 * whether pending command types are guaranteed valid here.
	 */
	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	warn_on(o->total_pending_num < 0);
	warn_on(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (uint8_t)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read. If the memory read is removed we will have to put a
		 * full memory barrier there (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
3014
3015static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3016                                    struct bnx2x_mcast_ramrod_params *p,
3017                                    enum bnx2x_mcast_cmd cmd)
3018{
3019        /* Mark, that there is a work to do */
3020        if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3021                p->mcast_list_len = 1;
3022
3023        return 0;
3024}
3025
/* bnx2x_mcast_validate_e1h() changes no persistent state, so a failed
 * 57711 mcast command has nothing to roll back.
 */
static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
				       struct bnx2x_mcast_ramrod_params *p,
				       int old_num_bins)
{
	/* Do nothing */
}
3032
/* Set bit @bit in the 57711 multicast hash @filter (array of 32-bit
 * words; word index = bit / 32, bit index = bit % 32).
 */
#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
3037
3038static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3039                                           struct bnx2x_mcast_obj *o,
3040                                           struct bnx2x_mcast_ramrod_params *p,
3041                                           uint32_t *mc_filter)
3042{
3043        struct bnx2x_mcast_list_elem *mlist_pos;
3044        int bit;
3045
3046        list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3047                bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3048                BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3049
3050                DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3051                   mlist_pos->mac, bit);
3052
3053                /* bookkeeping... */
3054                BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3055                                  bit);
3056        }
3057}
3058
3059static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3060        struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3061        uint32_t *mc_filter)
3062{
3063        int bit;
3064
3065        for (bit = bnx2x_mcast_get_next_bin(o, 0);
3066             bit >= 0;
3067             bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3068                BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3069                DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3070        }
3071}
3072
/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM. So we don't
 * really need to handle any tricks to make it work.
 *
 * Returns 0 on success, -EINVAL on an unknown command.
 */
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
				 struct bnx2x_mcast_ramrod_params *p,
				 enum bnx2x_mcast_cmd cmd)
{
	int i;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* If CLEAR_ONLY has been requested - clear the registry
	 * and clear a pending bit.
	 */
	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		uint32_t mc_filter[MC_HASH_SIZE] = {0};

		/* Set the multicast filter bits before writing it into
		 * the internal memory.
		 */
		switch (cmd) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
			break;

		case BNX2X_MCAST_CMD_DEL:
			DP(BNX2X_MSG_SP,
			   "Invalidating multicast MACs configuration\n");

			/* clear the registry; mc_filter stays all-zero, so
			 * the write-out below disables every bin
			 */
			memset(o->registry.aprox_match.vec, 0,
			       sizeof(o->registry.aprox_match.vec));
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd);
			return -EINVAL;
		}

		/* Set the mcast filter in the internal memory */
		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
	} else
		/* clear the registry */
		memset(o->registry.aprox_match.vec, 0,
		       sizeof(o->registry.aprox_match.vec));

	/* We are done */
	r->clear_pending(r);

	return 0;
}
3130
/* Validate a 57710 multicast command and account for the work it will
 * generate. On 57710 multicast MACs occupy CAM entries and commands are
 * executed one at a time, so every non-empty command reserves a full
 * o->max_cmd_len budget (undone by bnx2x_mcast_revert_e1()).
 *
 * Returns 0 on success, -EINVAL on an unknown command or a too-long
 * MAC list.
 */
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break - intentional fallthrough: DEL shares the
		 * list-length setup below (reg_sz captured above).
		 */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		p->mcast_list_len = reg_sz;
		  DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
				   cmd, p->mcast_list_len);
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Multicast MACs on 57710 are configured as unicast MACs and
		 * there is only a limited number of CAM entries for that
		 * matter.
		 */
		if (p->mcast_list_len > o->max_cmd_len) {
			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
				  o->max_cmd_len);
			return -EINVAL;
		}
		/* Every configured MAC should be cleared if DEL command is
		 * called. Only the last ADD command is relevant as long as
		 * every ADD commands overrides the previous configuration.
		 */
		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
		if (p->mcast_list_len > 0)
			o->set_registry_size(o, p->mcast_list_len);

		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* We want to ensure that commands are executed one by one for 57710.
	 * Therefore each none-empty command will consume o->max_cmd_len.
	 */
	if (p->mcast_list_len)
		o->total_pending_num += o->max_cmd_len;

	return 0;
}
3185
3186static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3187                                      struct bnx2x_mcast_ramrod_params *p,
3188                                      int old_num_macs)
3189{
3190        struct bnx2x_mcast_obj *o = p->mcast_obj;
3191
3192        o->set_registry_size(o, old_num_macs);
3193
3194        /* If current command hasn't been handled yet and we are
3195         * here means that it's meant to be dropped and we have to
3196         * update the number of outstanding MACs accordingly.
3197         */
3198        if (p->mcast_list_len)
3199                o->total_pending_num -= o->max_cmd_len;
3200}
3201
/* Fill one CAM entry of the 57710 mac_configuration_cmd ramrod data.
 * Only ADD and RESTORE populate the entry (SET action); for other
 * commands the entry is left untouched - the caller pre-clears the
 * buffer, see the setup path.
 */
static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	/* copy mac */
	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
		/* Split the MAC into the FW's msb/middle/lsb 16-bit fields */
		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
				      &data->config_table[idx].middle_mac_addr,
				      &data->config_table[idx].lsb_mac_addr,
				      cfg_data->mac);

		data->config_table[idx].vlan_id = 0;
		data->config_table[idx].pf_id = r->func_id;
		data->config_table[idx].clients_bit_vector =
			cpu_to_le32(1 << r->cl_id);

		SET_FLAG(data->config_table[idx].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	}
}
3228
/**
 * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
 *
 * @bp:         device handle
 * @p:          ramrod parameters
 * @len:        number of rules to handle
 *
 * The echo field encodes the SW CID plus the MCAST_PENDING filter state
 * so the completion handler can match this ramrod.
 */
static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					uint8_t len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	/* Per-function CAM offset; emulation gets a smaller per-function
	 * region (BNX2X_MAX_EMUL_MULTI entries vs BNX2X_MAX_MULTICAST)
	 */
	uint8_t offset = (CHIP_REV_IS_SLOW(bp) ?
		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
		     BNX2X_MAX_MULTICAST*(1 + r->func_id));

	data->hdr.offset = offset;
	data->hdr.client_id = cpu_to_le16(0xff);
	data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				     (BNX2X_FILTER_MCAST_PENDING <<
				      BNX2X_SWCID_SHIFT));
	data->hdr.length = len;
}
3255
3256/**
3257 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3258 *
3259 * @bp:         device handle
3260 * @o:
3261 * @start_idx:  index in the registry to start from
3262 * @rdata_idx:  index in the ramrod data to start from
3263 *
3264 * restore command for 57710 is like all other commands - always a stand alone
3265 * command - start_idx and rdata_idx will always be 0. This function will always
3266 * succeed.
3267 * returns -1 to comply with 57712 variant.
3268 */
3269static inline int bnx2x_mcast_handle_restore_cmd_e1(
3270        struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3271        int *rdata_idx)
3272{
3273        struct bnx2x_mcast_mac_elem *elem;
3274        int i = 0;
3275        union bnx2x_mcast_config_data cfg_data = {NULL};
3276
3277        /* go through the registry and configure the MACs from it. */
3278        list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3279                cfg_data.mac = &elem->mac[0];
3280                o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3281
3282                i++;
3283
3284                  DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3285                     cfg_data.mac);
3286        }
3287
3288        *rdata_idx = i;
3289
3290        return -1;
3291}
3292
/**
 * bnx2x_mcast_handle_pending_cmds_e1 - execute the oldest pending command
 *
 * @bp: device handle
 * @p:  ramrod parameters (provides the mcast object)
 *
 * Writes the rules of the first queued command into the ramrod data buffer
 * via o->set_one_rule() and frees the command. 57710 handles exactly one
 * command per ramrod.
 *
 * Returns the number of rules written, 0 if nothing was pending, or
 * -EINVAL on an unknown command type (command left queued in that case).
 */
static inline int bnx2x_mcast_handle_pending_cmds_e1(
        struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
        struct bnx2x_pending_mcast_cmd *cmd_pos;
        struct bnx2x_mcast_mac_elem *pmac_pos;
        struct bnx2x_mcast_obj *o = p->mcast_obj;
        union bnx2x_mcast_config_data cfg_data = {NULL};
        int cnt = 0;

        /* If nothing to be done - return */
        if (list_empty(&o->pending_cmds_head))
                return 0;

        /* Handle the first command */
        cmd_pos = list_first_entry(&o->pending_cmds_head,
                                   struct bnx2x_pending_mcast_cmd, link);

        switch (cmd_pos->type) {
        case BNX2X_MCAST_CMD_ADD:
                /* One config-table rule per queued MAC */
                list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
                        cfg_data.mac = &pmac_pos->mac[0];
                        o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

                        cnt++;

                        DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
                           pmac_pos->mac);
                }
                break;

        case BNX2X_MCAST_CMD_DEL:
                /* No rules to write: the caller pre-invalidated the whole
                 * table; only report how many MACs are being dropped.
                 */
                cnt = cmd_pos->data.macs_num;
                DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
                break;

        case BNX2X_MCAST_CMD_RESTORE:
                /* Replays the registry; hdl_restore() fills in cnt */
                o->hdl_restore(bp, o, 0, &cnt);
                break;

        default:
                BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
                return -EINVAL;
        }

        /* The command has been handled - unlink it and free its memory */
        list_del(&cmd_pos->link);
        kfree(cmd_pos);

        return cnt;
}
3342
3343/**
3344 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3345 *
3346 * @fw_hi:
3347 * @fw_mid:
3348 * @fw_lo:
3349 * @mac:
3350 */
3351static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3352                                         __le16 *fw_lo, uint8_t *mac)
3353{
3354        mac[1] = ((uint8_t *)fw_hi)[0];
3355        mac[0] = ((uint8_t *)fw_hi)[1];
3356        mac[3] = ((uint8_t *)fw_mid)[0];
3357        mac[2] = ((uint8_t *)fw_mid)[1];
3358        mac[5] = ((uint8_t *)fw_lo)[0];
3359        mac[4] = ((uint8_t *)fw_lo)[1];
3360}
3361
3362/**
3363 * bnx2x_mcast_refresh_registry_e1 -
3364 *
3365 * @bp:         device handle
3366 * @cnt:
3367 *
3368 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3369 * and update the registry correspondingly: if ADD - allocate a memory and add
3370 * the entries to the registry (list), if DELETE - clear the registry and free
3371 * the memory.
3372 */
3373static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3374                                                  struct bnx2x_mcast_obj *o)
3375{
3376        struct bnx2x_raw_obj *raw = &o->raw;
3377        struct bnx2x_mcast_mac_elem *elem;
3378        struct mac_configuration_cmd *data =
3379                        (struct mac_configuration_cmd *)(raw->rdata);
3380
3381        /* If first entry contains a SET bit - the command was ADD,
3382         * otherwise - DEL_ALL
3383         */
3384        if (GET_FLAG(data->config_table[0].flags,
3385                        MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3386                int i, len = data->hdr.length;
3387
3388                /* Break if it was a RESTORE command */
3389                if (!list_empty(&o->registry.exact_match.macs))
3390                        return 0;
3391
3392                elem = kzmalloc((len) * (sizeof(*elem)), 0);
3393                if (!elem) {
3394                        BNX2X_ERR("Failed to allocate registry memory\n");
3395                        return -ENOMEM;
3396                }
3397
3398                for (i = 0; i < len; i++, elem++) {
3399                        bnx2x_get_fw_mac_addr(
3400                                &data->config_table[i].msb_mac_addr,
3401                                &data->config_table[i].middle_mac_addr,
3402                                &data->config_table[i].lsb_mac_addr,
3403                                elem->mac);
3404                        DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3405                           elem->mac);
3406                        list_add_tail(&elem->link,
3407                                      &o->registry.exact_match.macs);
3408                }
3409        } else {
3410                elem = list_first_entry(&o->registry.exact_match.macs,
3411                                        struct bnx2x_mcast_mac_elem, link);
3412                DP(BNX2X_MSG_SP, "Deleting a registry\n");
3413                kfree(elem);
3414                INIT_LIST_HEAD(&o->registry.exact_match.macs);
3415        }
3416
3417        return 0;
3418}
3419
/**
 * bnx2x_mcast_setup_e1 - build and post a multicast ramrod on 57710
 *
 * @bp:  device handle
 * @p:   ramrod parameters
 * @cmd: command to execute if no pending command was handled
 *
 * 57710 executes exactly one command per ramrod; the whole config table is
 * invalidated first and then the rules of one command are written into it.
 *
 * Returns 0 if done without sending a ramrod (CLEAR_ONLY), 1 if a ramrod
 * completion is pending, or a negative error code.
 */
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
                                struct bnx2x_mcast_ramrod_params *p,
                                enum bnx2x_mcast_cmd cmd)
{
        struct bnx2x_mcast_obj *o = p->mcast_obj;
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *data =
                (struct mac_configuration_cmd *)(raw->rdata);
        int cnt = 0, i, rc;

        /* Reset the ramrod data buffer */
        memset(data, 0, sizeof(*data));

        /* First set all entries as invalid */
        for (i = 0; i < o->max_cmd_len ; i++)
                SET_FLAG(data->config_table[i].flags,
                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_INVALIDATE);

        /* Handle pending commands first */
        cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

        /* If there are no more pending commands - clear SCHEDULED state */
        if (list_empty(&o->pending_cmds_head))
                o->clear_sched(o);

        /* The below may be true iff there were no pending commands */
        if (!cnt)
                cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

        /* For 57710 every command has o->max_cmd_len length to ensure that
         * commands are done one at a time.
         */
        o->total_pending_num -= o->max_cmd_len;

        /* send a ramrod */

        warn_on(cnt > o->max_cmd_len);

        /* Set ramrod header (in particular, a number of entries to update) */
        bnx2x_mcast_set_rdata_hdr_e1(bp, p, (uint8_t)cnt);

        /* update a registry: we need the registry contents to be always up
         * to date in order to be able to execute a RESTORE opcode. Here
         * we use the fact that for 57710 we sent one command at a time
         * hence we may take the registry update out of the command handling
         * and do it in a simpler way here.
         */
        rc = bnx2x_mcast_refresh_registry_e1(bp, o);
        if (rc)
                return rc;

        /* If CLEAR_ONLY was requested - don't send a ramrod and clear
         * RAMROD_PENDING status immediately.
         */
        if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
                raw->clear_pending(raw);
                return 0;
        } else {
                /* No need for an explicit memory barrier here as long as we
                 * ensure the ordering of writing to the SPQ element
                 * and updating of the SPQ producer which involves a memory
                 * read. If the memory read is removed we will have to put a
                 * full memory barrier there (inside bnx2x_sp_post()).
                 */

                /* Send a ramrod */
                rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
                                   U64_HI(raw->rdata_mapping),
                                   U64_LO(raw->rdata_mapping),
                                   ETH_CONNECTION_TYPE);
                if (rc)
                        return rc;

                /* Ramrod completion is pending */
                return 1;
        }
}
3498
3499static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3500{
3501        return o->registry.exact_match.num_macs_set;
3502}
3503
3504static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3505{
3506        return o->registry.aprox_match.num_bins_set;
3507}
3508
/* Record the new number of configured exact-match multicast MACs */
static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
                                                int n)
{
        o->registry.exact_match.num_macs_set = n;
}
3514
/* Record the new number of configured approximate-match bins */
static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
                                                int n)
{
        o->registry.aprox_match.num_bins_set = n;
}
3520
/**
 * bnx2x_config_mcast - top-level multicast configuration entry point
 *
 * @bp:  device handle
 * @p:   ramrod parameters (mcast object, MAC list, flags)
 * @cmd: ADD/DEL/RESTORE command to execute
 *
 * Validates the command, queues it when the object is busy or the command
 * is too long for a single ramrod, then sends a ramrod if none is pending.
 *
 * Returns a negative value on failure (registry size rolled back), 0 when
 * nothing had to be done or the command completed, or a positive value
 * when a ramrod completion is still pending.
 */
int bnx2x_config_mcast(struct bnx2x *bp,
                       struct bnx2x_mcast_ramrod_params *p,
                       enum bnx2x_mcast_cmd cmd)
{
        struct bnx2x_mcast_obj *o = p->mcast_obj;
        struct bnx2x_raw_obj *r = &o->raw;
        int rc = 0, old_reg_size;

        /* This is needed to recover number of currently configured mcast macs
         * in case of failure.
         */
        old_reg_size = o->get_registry_size(o);

        /* Do some calculations and checks */
        rc = o->validate(bp, p, cmd);
        if (rc)
                return rc;

        /* Return if there is no work to do */
        if ((!p->mcast_list_len) && (!o->check_sched(o)))
                return 0;

        DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
           o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

        /* Enqueue the current command to the pending list if we can't complete
         * it in the current iteration
         */
        if (r->check_pending(r) ||
            ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
                rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
                if (rc < 0)
                        goto error_exit1;

                /* As long as the current command is in a command list we
                 * don't need to handle it separately.
                 */
                p->mcast_list_len = 0;
        }

        if (!r->check_pending(r)) {

                /* Set 'pending' state */
                r->set_pending(r);

                /* Configure the new classification in the chip */
                rc = o->config_mcast(bp, p, cmd);
                if (rc < 0)
                        goto error_exit2;

                /* Wait for a ramrod completion if was requested */
                if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
                        rc = o->wait_comp(bp, o);
        }

        return rc;

error_exit2:
        /* The ramrod was not posted - drop the 'pending' state again */
        r->clear_pending(r);

error_exit1:
        /* Roll the registry size back to its pre-validate() value */
        o->revert(bp, p, old_reg_size);

        return rc;
}
3586
/* Clear the SCHEDULED bit; cmb() compiler barriers keep the bit update
 * from being reordered with neighbouring accesses by the compiler.
 */
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
        cmb();
        clear_bit(o->sched_state, o->raw.pstate);
        cmb();
}
3593
/* Set the SCHEDULED bit; cmb() compiler barriers keep the bit update
 * from being reordered with neighbouring accesses by the compiler.
 */
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
        cmb();
        set_bit(o->sched_state, o->raw.pstate);
        cmb();
}
3600
3601static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3602{
3603        return !!test_bit(o->sched_state, o->raw.pstate);
3604}
3605
3606static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3607{
3608        return o->raw.check_pending(&o->raw) || o->check_sched(o);
3609}
3610
/**
 * bnx2x_init_mcast_obj - initialize a multicast object
 *
 * @bp:            device handle
 * @mcast_obj:     object to initialize (zeroed first)
 * @mcast_cl_id:   client id used for multicast rules
 * @mcast_cid:     connection id for the ramrods
 * @func_id:       PCI function id
 * @engine_id:     engine id (used by the 57712+ rules path)
 * @rdata:         ramrod data buffer
 * @rdata_mapping: DMA address of @rdata
 * @state:         pending-state bit for the embedded raw object
 * @pstate:        pointer to the word holding @state
 * @type:          object type
 *
 * Wires in the chip-specific callbacks: exact-match CAM commands for 57710
 * (E1), a no-ramrod path for 57711 (E1H), and rule-based commands for
 * 57712 and newer.
 */
void bnx2x_init_mcast_obj(struct bnx2x *bp,
                          struct bnx2x_mcast_obj *mcast_obj,
                          uint8_t mcast_cl_id, uint32_t mcast_cid,
                          uint8_t func_id,
                          uint8_t engine_id, void *rdata,
                          dma_addr_t rdata_mapping,
                          int state, unsigned long *pstate, bnx2x_obj_type type)
{
        memset(mcast_obj, 0, sizeof(*mcast_obj));

        bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
                           rdata, rdata_mapping, state, pstate, type);

        mcast_obj->engine_id = engine_id;

        INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

        /* Scheduling state handling is common to all chip variants */
        mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
        mcast_obj->check_sched = bnx2x_mcast_check_sched;
        mcast_obj->set_sched = bnx2x_mcast_set_sched;
        mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

        if (CHIP_IS_E1(bp)) {
                /* 57710: one exact-match CAM command per ramrod */
                mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
                mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
                mcast_obj->hdl_restore       =
                        bnx2x_mcast_handle_restore_cmd_e1;
                mcast_obj->check_pending     = bnx2x_mcast_check_pending;

                if (CHIP_REV_IS_SLOW(bp))
                        mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
                else
                        mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

                mcast_obj->wait_comp         = bnx2x_mcast_wait;
                mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
                mcast_obj->validate          = bnx2x_mcast_validate_e1;
                mcast_obj->revert            = bnx2x_mcast_revert_e1;
                mcast_obj->get_registry_size =
                        bnx2x_mcast_get_registry_size_exact;
                mcast_obj->set_registry_size =
                        bnx2x_mcast_set_registry_size_exact;

                /* 57710 is the only chip that uses the exact match for mcast
                 * at the moment.
                 */
                INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

        } else if (CHIP_IS_E1H(bp)) {
                /* 57711: no per-command queueing or restore handling */
                mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
                mcast_obj->enqueue_cmd   = NULL;
                mcast_obj->hdl_restore   = NULL;
                mcast_obj->check_pending = bnx2x_mcast_check_pending;

                /* 57711 doesn't send a ramrod, so it has unlimited credit
                 * for one command.
                 */
                mcast_obj->max_cmd_len       = -1;
                mcast_obj->wait_comp         = bnx2x_mcast_wait;
                mcast_obj->set_one_rule      = NULL;
                mcast_obj->validate          = bnx2x_mcast_validate_e1h;
                mcast_obj->revert            = bnx2x_mcast_revert_e1h;
                mcast_obj->get_registry_size =
                        bnx2x_mcast_get_registry_size_aprox;
                mcast_obj->set_registry_size =
                        bnx2x_mcast_set_registry_size_aprox;
        } else {
                /* 57712 and newer: rule-based multicast ramrods */
                mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
                mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
                mcast_obj->hdl_restore       =
                        bnx2x_mcast_handle_restore_cmd_e2;
                mcast_obj->check_pending     = bnx2x_mcast_check_pending;
                /* TODO: There should be a proper HSI define for this number!!!
                 */
                mcast_obj->max_cmd_len       = 16;
                mcast_obj->wait_comp         = bnx2x_mcast_wait;
                mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
                mcast_obj->validate          = bnx2x_mcast_validate_e2;
                mcast_obj->revert            = bnx2x_mcast_revert_e2;
                mcast_obj->get_registry_size =
                        bnx2x_mcast_get_registry_size_aprox;
                mcast_obj->set_registry_size =
                        bnx2x_mcast_set_registry_size_aprox;
        }
}
3696
3697/*************************** Credit handling **********************************/
3698
3699/**
3700 * atomic_add_ifless - add if the result is less than a given value.
3701 *
3702 * @v:  pointer of type atomic_t
3703 * @a:  the amount to add to v...
3704 * @u:  ...if (v + a) is less than u.
3705 *
3706 * returns true if (v + a) was less than u, and false otherwise.
3707 *
3708 */
3709static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3710{
3711        int c, old;
3712
3713        c = atomic_read(v);
3714        for (;;) {
3715                if (unlikely(c + a >= u))
3716                        return false;
3717
3718                old = atomic_cmpxchg((v), c, c + a);
3719                if (likely(old == c))
3720                        break;
3721                c = old;
3722        }
3723
3724        return true;
3725}
3726
3727/**
3728 * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
3729 *
3730 * @v:  pointer of type atomic_t
3731 * @a:  the amount to dec from v...
3732 * @u:  ...if (v - a) is more or equal than u.
3733 *
3734 * returns true if (v - a) was more or equal than u, and false
3735 * otherwise.
3736 */
3737static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3738{
3739        int c, old;
3740
3741        c = atomic_read(v);
3742        for (;;) {
3743                if (unlikely(c - a < u))
3744                        return false;
3745
3746                old = atomic_cmpxchg((v), c, c - a);
3747                if (likely(old == c))
3748                        break;
3749                c = old;
3750        }
3751
3752        return true;
3753}
3754
3755static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3756{
3757        bool rc;
3758
3759        mb();
3760        rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3761        mb();
3762
3763        return rc;
3764}
3765
3766static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3767{
3768        bool rc;
3769
3770        mb();
3771
3772        /* Don't let to refill if credit + cnt > pool_sz */
3773        rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3774
3775        mb();
3776
3777        return rc;
3778}
3779
3780static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3781{
3782        int cur_credit;
3783
3784        mb();
3785        cur_credit = atomic_read(&o->credit);
3786
3787        return cur_credit;
3788}
3789
/* Stub get/put for an unlimited pool: every request succeeds */
static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
                                          int cnt)
{
        return true;
}
3795
3796static bool bnx2x_credit_pool_get_entry(
3797        struct bnx2x_credit_pool_obj *o,
3798        int *offset)
3799{
3800        int idx, vec, i;
3801
3802        *offset = -1;
3803
3804        /* Find "internal cam-offset" then add to base for this object... */
3805        for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3806
3807                /* Skip the current vector if there are no free entries in it */
3808                if (!o->pool_mirror[vec])
3809                        continue;
3810
3811                /* If we've got here we are going to find a free entry */
3812                for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3813                      i < BIT_VEC64_ELEM_SZ; idx++, i++)
3814
3815                        if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3816                                /* Got one!! */
3817                                BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3818                                *offset = o->base_pool_offset + idx;
3819                                return true;
3820                        }
3821        }
3822
3823        return false;
3824}
3825
3826static bool bnx2x_credit_pool_put_entry(
3827        struct bnx2x_credit_pool_obj *o,
3828        int offset)
3829{
3830        if (offset < o->base_pool_offset)
3831                return false;
3832
3833        offset -= o->base_pool_offset;
3834
3835        if (offset >= o->pool_sz)
3836                return false;
3837
3838        /* Return the entry to the pool */
3839        BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3840
3841        return true;
3842}
3843
/* Stub put_entry when CAM-entry handling is disabled: always succeeds */
static bool bnx2x_credit_pool_put_entry_always_true(
        struct bnx2x_credit_pool_obj *o,
        int offset)
{
        return true;
}
3850
/* Stub get_entry when CAM-entry handling is disabled: succeeds without
 * providing a real offset (-1).
 */
static bool bnx2x_credit_pool_get_entry_always_true(
        struct bnx2x_credit_pool_obj *o,
        int *offset)
{
        *offset = -1;
        return true;
}
3858/**
3859 * bnx2x_init_credit_pool - initialize credit pool internals.
3860 *
3861 * @p:
3862 * @base:       Base entry in the CAM to use.
3863 * @credit:     pool size.
3864 *
3865 * If base is negative no CAM entries handling will be performed.
3866 * If credit is negative pool operations will always succeed (unlimited pool).
3867 *
3868 */
3869static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3870                                          int base, int credit)
3871{
3872        /* Zero the object first */
3873        memset(p, 0, sizeof(*p));
3874
3875        /* Set the table to all 1s */
3876        memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3877
3878        /* Init a pool as full */
3879        atomic_set(&p->credit, credit);
3880
3881        /* The total poll size */
3882        p->pool_sz = credit;
3883
3884        p->base_pool_offset = base;
3885
3886        /* Commit the change */
3887        mb();
3888
3889        p->check = bnx2x_credit_pool_check;
3890
3891        /* if pool credit is negative - disable the checks */
3892        if (credit >= 0) {
3893                p->put      = bnx2x_credit_pool_put;
3894                p->get      = bnx2x_credit_pool_get;
3895                p->put_entry = bnx2x_credit_pool_put_entry;
3896                p->get_entry = bnx2x_credit_pool_get_entry;
3897        } else {
3898                p->put      = bnx2x_credit_pool_always_true;
3899                p->get      = bnx2x_credit_pool_always_true;
3900                p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3901                p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3902        }
3903
3904        /* If base is negative - disable entries handling */
3905        if (base < 0) {
3906                p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3907                p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3908        }
3909}
3910
3911void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3912                                struct bnx2x_credit_pool_obj *p,
3913                                uint8_t func_id,
3914                                uint8_t func_num)
3915{
3916/* TODO: this will be defined in consts as well... */
3917#define BNX2X_CAM_SIZE_EMUL 5
3918
3919        int cam_sz;
3920
3921        if (CHIP_IS_E1(bp)) {
3922                /* In E1, Multicast is saved in cam... */
3923                if (!CHIP_REV_IS_SLOW(bp))
3924                        cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3925                else
3926                        cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3927
3928                bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3929
3930        } else if (CHIP_IS_E1H(bp)) {
3931                /* CAM credit is equaly divided between all active functions
3932                 * on the PORT!.
3933                 */
3934                if ((func_num > 0)) {
3935                        if (!CHIP_REV_IS_SLOW(bp))
3936                                cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3937                        else
3938                                cam_sz = BNX2X_CAM_SIZE_EMUL;
3939                        bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3940                } else {
3941                        /* this should never happen! Block MAC operations. */
3942                        bnx2x_init_credit_pool(p, 0, 0);
3943                }
3944
3945        } else {
3946
3947                /* CAM credit is equaly divided between all active functions
3948                 * on the PATH.
3949                 */
3950                if ((func_num > 0)) {
3951                        if (!CHIP_REV_IS_SLOW(bp))
3952                                cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3953                        else
3954                                cam_sz = BNX2X_CAM_SIZE_EMUL;
3955
3956                        /* No need for CAM entries handling for 57712 and
3957                         * newer.
3958                         */
3959                        bnx2x_init_credit_pool(p, -1, cam_sz);
3960                } else {
3961                        /* this should never happen! Block MAC operations. */
3962                        bnx2x_init_credit_pool(p, 0, 0);
3963                }
3964        }
3965}
3966
3967void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3968                                 struct bnx2x_credit_pool_obj *p,
3969                                 uint8_t func_id,
3970                                 uint8_t func_num)
3971{
3972        if (CHIP_IS_E1x(bp)) {
3973                /* There is no VLAN credit in HW on 57710 and 57711 only
3974                 * MAC / MAC-VLAN can be set
3975                 */
3976                bnx2x_init_credit_pool(p, 0, -1);
3977        } else {
3978                /* CAM credit is equally divided between all active functions
3979                 * on the PATH.
3980                 */
3981                if (func_num > 0) {
3982                        int credit = MAX_VLAN_CREDIT_E2 / func_num;
3983                        bnx2x_init_credit_pool(p, func_id * credit, credit);
3984                } else
3985                        /* this should never happen! Block VLAN operations. */
3986                        bnx2x_init_credit_pool(p, 0, 0);
3987        }
3988}
3989
3990/****************** RSS Configuration ******************/
3991/**
3992 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3993 *
3994 * @bp:         driver handle
3995 * @p:          pointer to rss configuration
3996 *
3997 * Prints it when NETIF_MSG_IFUP debug level is configured.
3998 */
3999static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4000                                        struct bnx2x_config_rss_params *p)
4001{
4002        int i;
4003
4004        DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4005        DP(BNX2X_MSG_SP, "0x0000: ");
4006        for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4007                DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4008
4009                /* Print 4 bytes in a line */
4010                if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4011                    (((i + 1) & 0x3) == 0)) {
4012                        DP_CONT(BNX2X_MSG_SP, "\n");
4013                        DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4014                }
4015        }
4016
4017        DP_CONT(BNX2X_MSG_SP, "\n");
4018}
4019
4020/**
4021 * bnx2x_setup_rss - configure RSS
4022 *
4023 * @bp:         device handle
4024 * @p:          rss configuration
4025 *
4026 * sends on UPDATE ramrod for that matter.
4027 */
4028static int bnx2x_setup_rss(struct bnx2x *bp,
4029                           struct bnx2x_config_rss_params *p)
4030{
4031        struct bnx2x_rss_config_obj *o = p->rss_obj;
4032        struct bnx2x_raw_obj *r = &o->raw;
4033        struct eth_rss_update_ramrod_data *data =
4034                (struct eth_rss_update_ramrod_data *)(r->rdata);
4035        uint16_t caps = 0;
4036        uint8_t rss_mode = 0;
4037        int rc;
4038
4039        memset(data, 0, sizeof(*data));
4040
4041        DP(BNX2X_MSG_SP, "Configuring RSS\n");
4042
4043        /* Set an echo field */
4044        data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
4045                                 (r->state << BNX2X_SWCID_SHIFT));
4046
4047        /* RSS mode */
4048        if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4049                rss_mode = ETH_RSS_MODE_DISABLED;
4050        else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4051                rss_mode = ETH_RSS_MODE_REGULAR;
4052
4053        data->rss_mode = rss_mode;
4054
4055        DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4056
4057        /* RSS capabilities */
4058        if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4059                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4060
4061        if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4062                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4063
4064        if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4065                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4066
4067        if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4068                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4069
4070        if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4071                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4072
4073        if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4074                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4075
4076        if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
4077                caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
4078
4079        /* RSS keys */
4080        if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4081                memcpy(&data->rss_key[0], &p->rss_key[0],
4082                       sizeof(data->rss_key));
4083                caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4084        }
4085
4086        data->capabilities = cpu_to_le16(caps);
4087
4088        /* Hashing mask */
4089        data->rss_result_mask = p->rss_result_mask;
4090
4091        /* RSS engine ID */
4092        data->rss_engine_id = o->engine_id;
4093
4094        DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4095
4096        /* Indirection table */
4097        memcpy(data->indirection_table, p->ind_table,
4098                  T_ETH_INDIRECTION_TABLE_SIZE);
4099
4100        /* Remember the last configuration */
4101        memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4102
4103        /* Print the indirection table */
4104        #if 0 // AKAROS_PORT
4105        if (netif_msg_ifup(bp))
4106        #endif
4107                bnx2x_debug_print_ind_table(bp, p);
4108
4109        /* No need for an explicit memory barrier here as long as we
4110         * ensure the ordering of writing to the SPQ element
4111         * and updating of the SPQ producer which involves a memory
4112         * read. If the memory read is removed we will have to put a
4113         * full memory barrier there (inside bnx2x_sp_post()).
4114         */
4115
4116        /* Send a ramrod */
4117        rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4118                           U64_HI(r->rdata_mapping),
4119                           U64_LO(r->rdata_mapping),
4120                           ETH_CONNECTION_TYPE);
4121
4122        if (rc < 0)
4123                return rc;
4124
4125        return 1;
4126}
4127
/* Copy the most recently programmed RSS indirection table (the snapshot
 * saved by bnx2x_setup_rss()) into the caller-provided buffer.
 */
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
                             uint8_t *ind_table)
{
        memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}
4133
4134int bnx2x_config_rss(struct bnx2x *bp,
4135                     struct bnx2x_config_rss_params *p)
4136{
4137        int rc;
4138        struct bnx2x_rss_config_obj *o = p->rss_obj;
4139        struct bnx2x_raw_obj *r = &o->raw;
4140
4141        /* Do nothing if only driver cleanup was requested */
4142        if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4143                DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
4144                   p->ramrod_flags);
4145                return 0;
4146        }
4147
4148        r->set_pending(r);
4149
4150        rc = o->config_rss(bp, p);
4151        if (rc < 0) {
4152                r->clear_pending(r);
4153                return rc;
4154        }
4155
4156        if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4157                rc = r->wait_comp(bp, r);
4158
4159        return rc;
4160}
4161
4162void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4163                               struct bnx2x_rss_config_obj *rss_obj,
4164                               uint8_t cl_id, uint32_t cid, uint8_t func_id,
4165                               uint8_t engine_id,
4166                               void *rdata, dma_addr_t rdata_mapping,
4167                               int state, unsigned long *pstate,
4168                               bnx2x_obj_type type)
4169{
4170        bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4171                           rdata_mapping, state, pstate, type);
4172
4173        rss_obj->engine_id  = engine_id;
4174        rss_obj->config_rss = bnx2x_setup_rss;
4175}
4176
4177/********************** Queue state object ***********************************/
4178
4179/**
4180 * bnx2x_queue_state_change - perform Queue state change transition
4181 *
4182 * @bp:         device handle
4183 * @params:     parameters to perform the transition
4184 *
4185 * returns 0 in case of successfully completed transition, negative error
4186 * code in case of failure, positive (EBUSY) value if there is a completion
4187 * to that is still pending (possible only if RAMROD_COMP_WAIT is
4188 * not set in params->ramrod_flags for asynchronous commands).
4189 *
4190 */
int bnx2x_queue_state_change(struct bnx2x *bp,
			     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	int rc, pending_bit;
	unsigned long *pending = &o->pending;

	/* Check that the requested transition is legal */
	rc = o->check_transition(bp, o, params);
	if (rc) {
		BNX2X_ERR("check transition returned an error. rc %d\n", rc);
		return -EINVAL;
	}

	/* Set "pending" bit */
	DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
	pending_bit = o->set_pending(o, params);
	DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
		o->complete_cmd(bp, o, pending_bit);
	else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);
		if (rc) {
			/* Posting failed: roll back the pending state so a
			 * later command isn't blocked waiting for a
			 * completion that will never arrive.
			 */
			o->next_state = BNX2X_Q_STATE_MAX;
			clear_bit(pending_bit, pending);
			cmb();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, pending_bit);
			if (rc)
				return rc;

			return 0;
		}
	}

	/* Asynchronous path: a positive (EBUSY-style) value means the
	 * completion is still outstanding, 0 means it already arrived.
	 */
	return !!test_bit(pending_bit, pending);
}
4234
4235static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4236                                   struct bnx2x_queue_state_params *params)
4237{
4238        enum bnx2x_queue_cmd cmd = params->cmd, bit;
4239
4240        /* ACTIVATE and DEACTIVATE commands are implemented on top of
4241         * UPDATE command.
4242         */
4243        if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4244            (cmd == BNX2X_Q_CMD_DEACTIVATE))
4245                bit = BNX2X_Q_CMD_UPDATE;
4246        else
4247                bit = cmd;
4248
4249        set_bit(bit, &obj->pending);
4250        return bit;
4251}
4252
/* Block until the completion of the given queue command, i.e. until the
 * matching bit in o->pending is cleared by the completion handler.
 */
static int bnx2x_queue_wait_comp(struct bnx2x *bp,
                                 struct bnx2x_queue_sp_obj *o,
                                 enum bnx2x_queue_cmd cmd)
{
        return bnx2x_state_wait(bp, cmd, &o->pending);
}
4259
4260/**
4261 * bnx2x_queue_comp_cmd - complete the state change command.
4262 *
4263 * @bp:         device handle
4264 * @o:
4265 * @cmd:
4266 *
4267 * Checks that the arrived completion is expected.
4268 */
static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				enum bnx2x_queue_cmd cmd)
{
	/* Work on a local snapshot: the real pending bit is only cleared at
	 * the end, after the object's state has been updated.
	 */
	unsigned long cur_pending = o->pending;

	/* A completion for a command we never issued is a FW/driver bug */
	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because tx only must always be smaller than cos since the
		 * primary connection supports COS 0
		 */
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
			   o->next_tx_only, o->max_cos);

	DP(BNX2X_MSG_SP,
	   "Completing command %d for queue %d, setting state to %d\n",
	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)  /* print num tx-only if any exist */
		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);

	/* Commit the transition prepared when the command was sent */
	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = BNX2X_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	cmb();

	return 0;
}
4311
4312static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4313                                struct bnx2x_queue_state_params *cmd_params,
4314                                struct client_init_ramrod_data *data)
4315{
4316        struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4317
4318        /* Rx data */
4319
4320        /* IPv6 TPA supported for E2 and above only */
4321        data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4322                                CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4323}
4324
4325static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4326                                struct bnx2x_queue_sp_obj *o,
4327                                struct bnx2x_general_setup_params *params,
4328                                struct client_init_general_data *gen_data,
4329                                unsigned long *flags)
4330{
4331        gen_data->client_id = o->cl_id;
4332
4333        if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4334                gen_data->statistics_counter_id =
4335                                        params->stat_id;
4336                gen_data->statistics_en_flg = 1;
4337                gen_data->statistics_zero_flg =
4338                        test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4339        } else
4340                gen_data->statistics_counter_id =
4341                                        DISABLE_STATISTIC_COUNTER_ID_VALUE;
4342
4343        gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4344        gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4345        gen_data->sp_client_id = params->spcl_id;
4346        gen_data->mtu = cpu_to_le16(params->mtu);
4347        gen_data->func_id = o->func_id;
4348
4349        gen_data->cos = params->cos;
4350
4351        gen_data->traffic_type =
4352                test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4353                LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4354
4355        gen_data->fp_hsi_ver = params->fp_hsi;
4356
4357        DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4358           gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4359}
4360
4361static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4362                                struct bnx2x_txq_setup_params *params,
4363                                struct client_init_tx_data *tx_data,
4364                                unsigned long *flags)
4365{
4366        tx_data->enforce_security_flg =
4367                test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4368        tx_data->default_vlan =
4369                cpu_to_le16(params->default_vlan);
4370        tx_data->default_vlan_flg =
4371                test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4372        tx_data->tx_switching_flg =
4373                test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4374        tx_data->anti_spoofing_flg =
4375                test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4376        tx_data->force_default_pri_flg =
4377                test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4378        tx_data->refuse_outband_vlan_flg =
4379                test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
4380        tx_data->tunnel_lso_inc_ip_id =
4381                test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4382        tx_data->tunnel_non_lso_pcsum_location =
4383                test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
4384                                                            CSUM_ON_BD;
4385
4386        tx_data->tx_status_block_id = params->fw_sb_id;
4387        tx_data->tx_sb_index_number = params->sb_cq_index;
4388        tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4389
4390        tx_data->tx_bd_page_base.lo =
4391                cpu_to_le32(U64_LO(params->dscr_map));
4392        tx_data->tx_bd_page_base.hi =
4393                cpu_to_le32(U64_HI(params->dscr_map));
4394
4395        /* Don't configure any Tx switching mode during queue SETUP */
4396        tx_data->state = 0;
4397}
4398
4399static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4400                                struct rxq_pause_params *params,
4401                                struct client_init_rx_data *rx_data)
4402{
4403        /* flow control data */
4404        rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4405        rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4406        rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4407        rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4408        rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4409        rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4410        rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4411}
4412
/* Fill the Rx section of a queue SETUP ramrod: TPA/aggregation settings,
 * VLAN handling, ring base addresses, RSS/multicast binding.
 */
static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	/* TPA: IPv4 aggregation enable, plus the GRO-mode bit */
	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
				CLIENT_INIT_RX_DATA_TPA_MODE;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		test_bit(BNX2X_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode */
	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;
	/* Inner/outer VLAN stripping */
	rx_data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
	/* Physical base addresses of the BD, SGE and CQE rings */
	rx_data->bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		cpu_to_le32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		cpu_to_le32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);

	/* Approximate-multicast engine binding, only if MCAST is requested */
	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		cpu_to_le16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		cpu_to_le16(params->silent_removal_mask);
}
4479
4480/* initialize the general, tx and rx parts of a queue object */
4481static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4482                                struct bnx2x_queue_state_params *cmd_params,
4483                                struct client_init_ramrod_data *data)
4484{
4485        bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4486                                       &cmd_params->params.setup.gen_params,
4487                                       &data->general,
4488                                       &cmd_params->params.setup.flags);
4489
4490        bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4491                                  &cmd_params->params.setup.txq_params,
4492                                  &data->tx,
4493                                  &cmd_params->params.setup.flags);
4494
4495        bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4496                                  &cmd_params->params.setup.rxq_params,
4497                                  &data->rx,
4498                                  &cmd_params->params.setup.flags);
4499
4500        bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4501                                     &cmd_params->params.setup.pause_params,
4502                                     &data->rx);
4503}
4504
4505/* initialize the general and tx parts of a tx-only queue object */
4506static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4507                                struct bnx2x_queue_state_params *cmd_params,
4508                                struct tx_queue_init_ramrod_data *data)
4509{
4510        bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4511                                       &cmd_params->params.tx_only.gen_params,
4512                                       &data->general,
4513                                       &cmd_params->params.tx_only.flags);
4514
4515        bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4516                                  &cmd_params->params.tx_only.txq_params,
4517                                  &data->tx,
4518                                  &cmd_params->params.tx_only.flags);
4519
4520        DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4521                         cmd_params->q_obj->cids[0],
4522                         data->tx.tx_bd_page_base.lo,
4523                         data->tx.tx_bd_page_base.hi);
4524}
4525
4526/**
4527 * bnx2x_q_init - init HW/FW queue
4528 *
4529 * @bp:         device handle
4530 * @params:
4531 *
4532 * HW/FW initial Queue configuration:
4533 *      - HC: Rx and Tx
4534 *      - CDU context validation
4535 *
4536 */
static inline int bnx2x_q_init(struct bnx2x *bp,
			       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct bnx2x_queue_init_params *init = &params->params.init;
	uint16_t hc_usec;
	uint8_t cos;

	/* Tx HC (host coalescing) configuration: convert the interrupt rate
	 * (per second) into a microsecond period; 0 rate disables coalescing.
	 */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
			init->tx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
			hc_usec);
	}

	/* Rx HC configuration, same conversion as for Tx */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
			init->rx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
			hc_usec);
	}

	/* Set CDU context validation values for every CoS of this queue */
	for (cos = 0; cos < o->max_cos; cos++) {
		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
				 o->cids[cos], cos);
		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
	}

	/* As no ramrod is sent, complete the command immediately  */
	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);

	/* Make sure the context writes and the completion are visible
	 * before anyone acts on the INIT-ed queue.
	 */
	bus_wmb();
	mb();

	return 0;
}
4583
4584static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4585                                        struct bnx2x_queue_state_params *params)
4586{
4587        struct bnx2x_queue_sp_obj *o = params->q_obj;
4588        struct client_init_ramrod_data *rdata =
4589                (struct client_init_ramrod_data *)o->rdata;
4590        dma_addr_t data_mapping = o->rdata_mapping;
4591        int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4592
4593        /* Clear the ramrod data */
4594        memset(rdata, 0, sizeof(*rdata));
4595
4596        /* Fill the ramrod data */
4597        bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4598
4599        /* No need for an explicit memory barrier here as long as we
4600         * ensure the ordering of writing to the SPQ element
4601         * and updating of the SPQ producer which involves a memory
4602         * read. If the memory read is removed we will have to put a
4603         * full memory barrier there (inside bnx2x_sp_post()).
4604         */
4605        return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4606                             U64_HI(data_mapping),
4607                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4608}
4609
4610static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4611                                        struct bnx2x_queue_state_params *params)
4612{
4613        struct bnx2x_queue_sp_obj *o = params->q_obj;
4614        struct client_init_ramrod_data *rdata =
4615                (struct client_init_ramrod_data *)o->rdata;
4616        dma_addr_t data_mapping = o->rdata_mapping;
4617        int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4618
4619        /* Clear the ramrod data */
4620        memset(rdata, 0, sizeof(*rdata));
4621
4622        /* Fill the ramrod data */
4623        bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4624        bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4625
4626        /* No need for an explicit memory barrier here as long as we
4627         * ensure the ordering of writing to the SPQ element
4628         * and updating of the SPQ producer which involves a memory
4629         * read. If the memory read is removed we will have to put a
4630         * full memory barrier there (inside bnx2x_sp_post()).
4631         */
4632        return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4633                             U64_HI(data_mapping),
4634                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4635}
4636
/* Post a TX_QUEUE_SETUP ramrod for an additional tx-only connection of
 * this queue, identified by the cid_index in the tx_only parameters.
 */
static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
				  struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct tx_queue_init_ramrod_data *rdata =
		(struct tx_queue_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
		&params->params.tx_only;
	uint8_t cid_index = tx_only_params->cid_index;

	/* The CID index must refer to one of this queue's connections */
	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
			 tx_only_params->gen_params.cos,
			 tx_only_params->gen_params.spcl_id);

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_tx_only(bp, params, rdata);

	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
			 o->cids[cid_index], rdata->general.client_id,
			 rdata->general.sp_client_id, rdata->general.cos);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside bnx2x_sp_post()).
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
4679
/* Fill a CLIENT_UPDATE ramrod from the update parameters. Each feature
 * comes as a value/"change" flag pair: the value is only applied by the FW
 * when the matching *_change_flg is set, so untouched features keep their
 * current configuration.
 */
static void bnx2x_q_fill_update_data(struct bnx2x *bp,
				     struct bnx2x_queue_sp_obj *obj,
				     struct bnx2x_queue_update_params *params,
				     struct client_update_ramrod_data *data)
{
	/* Client ID of the client to update */
	data->client_id = obj->cl_id;

	/* Function ID of the client to update */
	data->func_id = obj->func_id;

	/* Default VLAN value */
	data->default_vlan = cpu_to_le16(params->def_vlan);

	/* Inner VLAN stripping */
	data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
	data->inner_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Outer VLAN stripping */
	data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
	data->outer_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Drop packets that have source MAC that doesn't belong to this
	 * Queue.
	 */
	data->anti_spoofing_enable_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
	data->anti_spoofing_change_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);

	/* Activate/Deactivate */
	data->activate_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
	data->activate_change_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);

	/* Enable default VLAN */
	data->default_vlan_enable_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
	data->default_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			 &params->update_flags);

	/* silent vlan removal */
	data->silent_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			 &params->update_flags);
	data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);

	/* tx switching */
	data->tx_switching_flg =
		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
	data->tx_switching_change_flg =
		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
			 &params->update_flags);

	/* PTP */
	data->handle_ptp_pkts_flg =
		test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags);
	data->handle_ptp_pkts_change_flg =
		test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags);
}
4751
4752static inline int bnx2x_q_send_update(struct bnx2x *bp,
4753                                      struct bnx2x_queue_state_params *params)
4754{
4755        struct bnx2x_queue_sp_obj *o = params->q_obj;
4756        struct client_update_ramrod_data *rdata =
4757                (struct client_update_ramrod_data *)o->rdata;
4758        dma_addr_t data_mapping = o->rdata_mapping;
4759        struct bnx2x_queue_update_params *update_params =
4760                &params->params.update;
4761        uint8_t cid_index = update_params->cid_index;
4762
4763        if (cid_index >= o->max_cos) {
4764                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4765                          o->cl_id, cid_index);
4766                return -EINVAL;
4767        }
4768
4769        /* Clear the ramrod data */
4770        memset(rdata, 0, sizeof(*rdata));
4771
4772        /* Fill the ramrod data */
4773        bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4774
4775        /* No need for an explicit memory barrier here as long as we
4776         * ensure the ordering of writing to the SPQ element
4777         * and updating of the SPQ producer which involves a memory
4778         * read. If the memory read is removed we will have to put a
4779         * full memory barrier there (inside bnx2x_sp_post()).
4780         */
4781        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4782                             o->cids[cid_index], U64_HI(data_mapping),
4783                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4784}
4785
4786/**
4787 * bnx2x_q_send_deactivate - send DEACTIVATE command
4788 *
4789 * @bp:         device handle
4790 * @params:
4791 *
4792 * implemented using the UPDATE command.
4793 */
4794static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4795                                        struct bnx2x_queue_state_params *params)
4796{
4797        struct bnx2x_queue_update_params *update = &params->params.update;
4798
4799        memset(update, 0, sizeof(*update));
4800
4801        __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4802
4803        return bnx2x_q_send_update(bp, params);
4804}
4805
4806/**
4807 * bnx2x_q_send_activate - send ACTIVATE command
4808 *
4809 * @bp:         device handle
4810 * @params:
4811 *
4812 * implemented using the UPDATE command.
4813 */
4814static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4815                                        struct bnx2x_queue_state_params *params)
4816{
4817        struct bnx2x_queue_update_params *update = &params->params.update;
4818
4819        memset(update, 0, sizeof(*update));
4820
4821        __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4822        __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4823
4824        return bnx2x_q_send_update(bp, params);
4825}
4826
4827static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
4828                                struct bnx2x_queue_sp_obj *obj,
4829                                struct bnx2x_queue_update_tpa_params *params,
4830                                struct tpa_update_ramrod_data *data)
4831{
4832        data->client_id = obj->cl_id;
4833        data->complete_on_both_clients = params->complete_on_both_clients;
4834        data->dont_verify_rings_pause_thr_flg =
4835                params->dont_verify_thr;
4836        data->max_agg_size = cpu_to_le16(params->max_agg_sz);
4837        data->max_sges_for_packet = params->max_sges_pkt;
4838        data->max_tpa_queues = params->max_tpa_queues;
4839        data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
4840        data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
4841        data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
4842        data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
4843        data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
4844        data->tpa_mode = params->tpa_mode;
4845        data->update_ipv4 = params->update_ipv4;
4846        data->update_ipv6 = params->update_ipv6;
4847}
4848
4849static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4850                                        struct bnx2x_queue_state_params *params)
4851{
4852        struct bnx2x_queue_sp_obj *o = params->q_obj;
4853        struct tpa_update_ramrod_data *rdata =
4854                (struct tpa_update_ramrod_data *)o->rdata;
4855        dma_addr_t data_mapping = o->rdata_mapping;
4856        struct bnx2x_queue_update_tpa_params *update_tpa_params =
4857                &params->params.update_tpa;
4858        uint16_t type;
4859
4860        /* Clear the ramrod data */
4861        memset(rdata, 0, sizeof(*rdata));
4862
4863        /* Fill the ramrod data */
4864        bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
4865
4866        /* Add the function id inside the type, so that sp post function
4867         * doesn't automatically add the PF func-id, this is required
4868         * for operations done by PFs on behalf of their VFs
4869         */
4870        type = ETH_CONNECTION_TYPE |
4871                ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
4872
4873        /* No need for an explicit memory barrier here as long as we
4874         * ensure the ordering of writing to the SPQ element
4875         * and updating of the SPQ producer which involves a memory
4876         * read. If the memory read is removed we will have to put a
4877         * full memory barrier there (inside bnx2x_sp_post()).
4878         */
4879        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
4880                             o->cids[BNX2X_PRIMARY_CID_INDEX],
4881                             U64_HI(data_mapping),
4882                             U64_LO(data_mapping), type);
4883}
4884
4885static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4886                                    struct bnx2x_queue_state_params *params)
4887{
4888        struct bnx2x_queue_sp_obj *o = params->q_obj;
4889
4890        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4891                             o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4892                             ETH_CONNECTION_TYPE);
4893}
4894
4895static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4896                                       struct bnx2x_queue_state_params *params)
4897{
4898        struct bnx2x_queue_sp_obj *o = params->q_obj;
4899        uint8_t cid_idx = params->params.cfc_del.cid_index;
4900
4901        if (cid_idx >= o->max_cos) {
4902                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4903                          o->cl_id, cid_idx);
4904                return -EINVAL;
4905        }
4906
4907        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4908                             o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4909}
4910
4911static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4912                                        struct bnx2x_queue_state_params *params)
4913{
4914        struct bnx2x_queue_sp_obj *o = params->q_obj;
4915        uint8_t cid_index = params->params.terminate.cid_index;
4916
4917        if (cid_index >= o->max_cos) {
4918                BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4919                          o->cl_id, cid_index);
4920                return -EINVAL;
4921        }
4922
4923        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4924                             o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4925}
4926
4927static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4928                                     struct bnx2x_queue_state_params *params)
4929{
4930        struct bnx2x_queue_sp_obj *o = params->q_obj;
4931
4932        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4933                             o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4934                             ETH_CONNECTION_TYPE);
4935}
4936
4937static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4938                                        struct bnx2x_queue_state_params *params)
4939{
4940        switch (params->cmd) {
4941        case BNX2X_Q_CMD_INIT:
4942                return bnx2x_q_init(bp, params);
4943        case BNX2X_Q_CMD_SETUP_TX_ONLY:
4944                return bnx2x_q_send_setup_tx_only(bp, params);
4945        case BNX2X_Q_CMD_DEACTIVATE:
4946                return bnx2x_q_send_deactivate(bp, params);
4947        case BNX2X_Q_CMD_ACTIVATE:
4948                return bnx2x_q_send_activate(bp, params);
4949        case BNX2X_Q_CMD_UPDATE:
4950                return bnx2x_q_send_update(bp, params);
4951        case BNX2X_Q_CMD_UPDATE_TPA:
4952                return bnx2x_q_send_update_tpa(bp, params);
4953        case BNX2X_Q_CMD_HALT:
4954                return bnx2x_q_send_halt(bp, params);
4955        case BNX2X_Q_CMD_CFC_DEL:
4956                return bnx2x_q_send_cfc_del(bp, params);
4957        case BNX2X_Q_CMD_TERMINATE:
4958                return bnx2x_q_send_terminate(bp, params);
4959        case BNX2X_Q_CMD_EMPTY:
4960                return bnx2x_q_send_empty(bp, params);
4961        default:
4962                BNX2X_ERR("Unknown command: %d\n", params->cmd);
4963                return -EINVAL;
4964        }
4965}
4966
4967static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4968                                    struct bnx2x_queue_state_params *params)
4969{
4970        switch (params->cmd) {
4971        case BNX2X_Q_CMD_SETUP:
4972                return bnx2x_q_send_setup_e1x(bp, params);
4973        case BNX2X_Q_CMD_INIT:
4974        case BNX2X_Q_CMD_SETUP_TX_ONLY:
4975        case BNX2X_Q_CMD_DEACTIVATE:
4976        case BNX2X_Q_CMD_ACTIVATE:
4977        case BNX2X_Q_CMD_UPDATE:
4978        case BNX2X_Q_CMD_UPDATE_TPA:
4979        case BNX2X_Q_CMD_HALT:
4980        case BNX2X_Q_CMD_CFC_DEL:
4981        case BNX2X_Q_CMD_TERMINATE:
4982        case BNX2X_Q_CMD_EMPTY:
4983                return bnx2x_queue_send_cmd_cmn(bp, params);
4984        default:
4985                BNX2X_ERR("Unknown command: %d\n", params->cmd);
4986                return -EINVAL;
4987        }
4988}
4989
4990static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4991                                   struct bnx2x_queue_state_params *params)
4992{
4993        switch (params->cmd) {
4994        case BNX2X_Q_CMD_SETUP:
4995                return bnx2x_q_send_setup_e2(bp, params);
4996        case BNX2X_Q_CMD_INIT:
4997        case BNX2X_Q_CMD_SETUP_TX_ONLY:
4998        case BNX2X_Q_CMD_DEACTIVATE:
4999        case BNX2X_Q_CMD_ACTIVATE:
5000        case BNX2X_Q_CMD_UPDATE:
5001        case BNX2X_Q_CMD_UPDATE_TPA:
5002        case BNX2X_Q_CMD_HALT:
5003        case BNX2X_Q_CMD_CFC_DEL:
5004        case BNX2X_Q_CMD_TERMINATE:
5005        case BNX2X_Q_CMD_EMPTY:
5006                return bnx2x_queue_send_cmd_cmn(bp, params);
5007        default:
5008                BNX2X_ERR("Unknown command: %d\n", params->cmd);
5009                return -EINVAL;
5010        }
5011}
5012
5013/**
5014 * bnx2x_queue_chk_transition - check state machine of a regular Queue
5015 *
5016 * @bp:         device handle
5017 * @o:
5018 * @params:
5019 *
5020 * (not Forwarding)
5021 * It both checks if the requested command is legal in a current
5022 * state and, if it's legal, sets a `next_state' in the object
5023 * that will be used in the completion flow to set the `state'
5024 * of the object.
5025 *
5026 * returns 0 if a requested command is a legal transition,
5027 *         -EINVAL otherwise.
5028 */
5029static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5030                                      struct bnx2x_queue_sp_obj *o,
5031                                      struct bnx2x_queue_state_params *params)
5032{
5033        enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5034        enum bnx2x_queue_cmd cmd = params->cmd;
5035        struct bnx2x_queue_update_params *update_params =
5036                 &params->params.update;
5037        uint8_t next_tx_only = o->num_tx_only;
5038
5039        /* Forget all pending for completion commands if a driver only state
5040         * transition has been requested.
5041         */
5042        if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5043                o->pending = 0;
5044                o->next_state = BNX2X_Q_STATE_MAX;
5045        }
5046
5047        /* Don't allow a next state transition if we are in the middle of
5048         * the previous one.
5049         */
5050        if (o->pending) {
5051                BNX2X_ERR("Blocking transition since pending was %lx\n",
5052                          o->pending);
5053                return -EBUSY;
5054        }
5055
5056        switch (state) {
5057        case BNX2X_Q_STATE_RESET:
5058                if (cmd == BNX2X_Q_CMD_INIT)
5059                        next_state = BNX2X_Q_STATE_INITIALIZED;
5060
5061                break;
5062        case BNX2X_Q_STATE_INITIALIZED:
5063                if (cmd == BNX2X_Q_CMD_SETUP) {
5064                        if (test_bit(BNX2X_Q_FLG_ACTIVE,
5065                                     &params->params.setup.flags))
5066                                next_state = BNX2X_Q_STATE_ACTIVE;
5067                        else
5068                                next_state = BNX2X_Q_STATE_INACTIVE;
5069                }
5070
5071                break;
5072        case BNX2X_Q_STATE_ACTIVE:
5073                if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5074                        next_state = BNX2X_Q_STATE_INACTIVE;
5075
5076                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5077                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5078                        next_state = BNX2X_Q_STATE_ACTIVE;
5079
5080                else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5081                        next_state = BNX2X_Q_STATE_MULTI_COS;
5082                        next_tx_only = 1;
5083                }
5084
5085                else if (cmd == BNX2X_Q_CMD_HALT)
5086                        next_state = BNX2X_Q_STATE_STOPPED;
5087
5088                else if (cmd == BNX2X_Q_CMD_UPDATE) {
5089                        /* If "active" state change is requested, update the
5090                         *  state accordingly.
5091                         */
5092                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5093                                     &update_params->update_flags) &&
5094                            !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5095                                      &update_params->update_flags))
5096                                next_state = BNX2X_Q_STATE_INACTIVE;
5097                        else
5098                                next_state = BNX2X_Q_STATE_ACTIVE;
5099                }
5100
5101                break;
5102        case BNX2X_Q_STATE_MULTI_COS:
5103                if (cmd == BNX2X_Q_CMD_TERMINATE)
5104                        next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5105
5106                else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5107                        next_state = BNX2X_Q_STATE_MULTI_COS;
5108                        next_tx_only = o->num_tx_only + 1;
5109                }
5110
5111                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5112                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5113                        next_state = BNX2X_Q_STATE_MULTI_COS;
5114
5115                else if (cmd == BNX2X_Q_CMD_UPDATE) {
5116                        /* If "active" state change is requested, update the
5117                         *  state accordingly.
5118                         */
5119                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5120                                     &update_params->update_flags) &&
5121                            !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5122                                      &update_params->update_flags))
5123                                next_state = BNX2X_Q_STATE_INACTIVE;
5124                        else
5125                                next_state = BNX2X_Q_STATE_MULTI_COS;
5126                }
5127
5128                break;
5129        case BNX2X_Q_STATE_MCOS_TERMINATED:
5130                if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5131                        next_tx_only = o->num_tx_only - 1;
5132                        if (next_tx_only == 0)
5133                                next_state = BNX2X_Q_STATE_ACTIVE;
5134                        else
5135                                next_state = BNX2X_Q_STATE_MULTI_COS;
5136                }
5137
5138                break;
5139        case BNX2X_Q_STATE_INACTIVE:
5140                if (cmd == BNX2X_Q_CMD_ACTIVATE)
5141                        next_state = BNX2X_Q_STATE_ACTIVE;
5142
5143                else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5144                         (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5145                        next_state = BNX2X_Q_STATE_INACTIVE;
5146
5147                else if (cmd == BNX2X_Q_CMD_HALT)
5148                        next_state = BNX2X_Q_STATE_STOPPED;
5149
5150                else if (cmd == BNX2X_Q_CMD_UPDATE) {
5151                        /* If "active" state change is requested, update the
5152                         * state accordingly.
5153                         */
5154                        if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5155                                     &update_params->update_flags) &&
5156                            test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5157                                     &update_params->update_flags)){
5158                                if (o->num_tx_only == 0)
5159                                        next_state = BNX2X_Q_STATE_ACTIVE;
5160                                else /* tx only queues exist for this queue */
5161                                        next_state = BNX2X_Q_STATE_MULTI_COS;
5162                        } else
5163                                next_state = BNX2X_Q_STATE_INACTIVE;
5164                }
5165
5166                break;
5167        case BNX2X_Q_STATE_STOPPED:
5168                if (cmd == BNX2X_Q_CMD_TERMINATE)
5169                        next_state = BNX2X_Q_STATE_TERMINATED;
5170
5171                break;
5172        case BNX2X_Q_STATE_TERMINATED:
5173                if (cmd == BNX2X_Q_CMD_CFC_DEL)
5174                        next_state = BNX2X_Q_STATE_RESET;
5175
5176                break;
5177        default:
5178                BNX2X_ERR("Illegal state: %d\n", state);
5179        }
5180
5181        /* Transition is assured */
5182        if (next_state != BNX2X_Q_STATE_MAX) {
5183                DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5184                                 state, cmd, next_state);
5185                o->next_state = next_state;
5186                o->next_tx_only = next_tx_only;
5187                return 0;
5188        }
5189
5190        DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5191
5192        return -EINVAL;
5193}
5194
5195void